repo_name (stringlengths 5-92) | path (stringlengths 4-221) | copies (stringclasses, 19 values) | size (stringlengths 4-6) | content (stringlengths 766-896k) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
rehandalal/buchner | buchner/cmdline.py | 1 | 4058 |
import os
import os.path
import string
from optparse import OptionParser
from buchner import __version__
USAGE = '%prog [options] [command] [command-options]'
VERSION = '%prog ' + __version__
def build_parser(usage):
parser = OptionParser(usage=usage, version=VERSION)
return parser
DIGIT_TO_WORD = {
'0': 'zero',
'1': 'one',
'2': 'two',
'3': 'three',
'4': 'four',
'5': 'five',
'6': 'six',
'7': 'seven',
'8': 'eight',
'9': 'nine'
}
def clean_project_module(s):
s = s.lower()
s = ''.join([char for char in s
if char in string.ascii_letters + string.digits])
if s[0] in string.digits:
s = DIGIT_TO_WORD[s[0]] + s[1:]
return s
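# Illustrative example (not part of the original file): given the rules above,
# clean_project_module('3D-Viewer') lowercases the name, strips the hyphen and
# spells out the leading digit, returning 'threedviewer'.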
def perror(s):
print s
def create(command, argv):
parser = build_parser('%prog create <PROJECTNAME>')
parser.add_option(
'--noinput',
action='store_true',
default=False,
help='runs buchner without requiring input')
(options, args) = parser.parse_args(argv)
if not args:
perror('ERROR: You must provide a project name.')
return 1
project_name = args[0]
project_module = clean_project_module(project_name.lower())
if not options.noinput:
# Ask them for project module name and then double-check it's
# valid.
new_project_module = raw_input(
'Python module name for your project: [{0}] '.format(project_module))
new_project_module = new_project_module.strip()
else:
new_project_module = project_module
if not new_project_module:
new_project_module = project_module
if new_project_module != clean_project_module(new_project_module):
perror(
'ERROR: "{0}" is not a valid Python module name.'.format(
new_project_module))
return 1
project_module = new_project_module
project_dir = os.path.abspath(project_module)
if os.path.exists(project_dir):
perror(
'ERROR: Cannot create "{0}"--something is in the way.'.format(
project_dir))
return 1
# Walk the project-template and create all files and directories
# replacing:
#
# * PROJECTMODULE -> project_module
project_template_dir = os.path.join(os.path.dirname(__file__),
'project-template')
for root, dirs, files in os.walk(project_template_dir):
rel_root = root[len(project_template_dir)+1:]
for f in files:
source = os.path.join(root, f)
dest = os.path.join(project_dir, rel_root, f)
dest = dest.replace('PROJECTMODULE', project_module)
if not os.path.exists(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest))
fp = open(source, 'rb')
data = fp.read()
fp.close()
data = data.replace('PROJECTMODULE', project_module)
fp = open(dest, 'wb')
fp.write(data)
fp.close()
print 'create file: {0}'.format(dest)
print 'Done!'
return 0
HANDLERS = (
('create', create, 'Creates a new buchner project.'),)
def cmdline_handler(scriptname, argv):
print '%s version %s' % (scriptname, __version__)
# TODO: Rewrite using subparsers.
handlers = HANDLERS
if not argv or '-h' in argv or '--help' in argv:
parser = build_parser("%prog [command]")
parser.print_help()
print ''
print 'Commands:'
for command_str, _, command_help in handlers:
print ' %-14s %s' % (command_str, command_help)
return 0
if '--version' in argv:
# We've already printed the version, so we can just exit.
return 0
command = argv.pop(0)
for (cmd, fun, hlp) in handlers:
if cmd == command:
return fun(command, argv)
perror('Command "{0}" does not exist.'.format(command))
for cmd, fun, hlp in handlers:
perror(' %-14s %s' % (cmd, hlp))
return 1
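# Hypothetical entry point (an assumption, not present in the original module):
# a console script generated by setuptools would typically forward sys.argv to
# cmdline_handler like this:
#
#     import sys
#     sys.exit(cmdline_handler(sys.argv[0], sys.argv[1:]))
#
# so that argv[0] passed in here becomes the command name popped off above.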
| bsd-3-clause | 4,306,350,212,375,323,000 | 24.3625 | 81 | 0.569246 | false | 3.716117 | false | false | false |
thomasw/convertly | converter/views.py | 1 | 3286 |
from converter import *
from django.template import RequestContext, Context, loader
from django.shortcuts import render_to_response
from django.http import HttpResponse, Http404
from converter.forms import UploadDoc
from zipfile import ZipFile
def index(request):
""" Interface for uploading, converting, and downloading documents """
form = UploadDoc()
return render_to_response('index.phtml', {'form':form}, context_instance=RequestContext(request))
def download(request, job_id):
""" Given a job id will provide the user's browser with a converted archive """
clean()
tmpdir = tempfile.gettempdir()
jobdir = os.path.join(tmpdir, tmp_prefix + job_id)
#check if job exists
if not os.path.isdir(jobdir): raise Http404
#find files to zip
files = get_jobfiles(jobdir)
#create zip archive
archive = ZipFile(tempfile.mkstemp()[1], 'w')
for f in files:
name, arcname = str(f), str(f[len(jobdir) + 1:])
archive.write(name, arcname)
archive.close()
#return archive
f = file(archive.filename)
contents = f.read()
f.close()
rm(archive.filename)
filename = os.path.basename(job_id) + '.zip'
mimetype = 'application/zip'
response = HttpResponse(contents, mimetype=mimetype)
response['Content-Disposition'] = 'attachment; filename=%s' % (filename,)
return response
def upload(request):
""" Accepts docx files to be converted to html """
if request.method == 'POST':
clean()
form = UploadDoc(request.POST, request.FILES)
if form.is_valid():
#Move uploaded file to job directory
upload = request.FILES['file']
source = upload.temporary_file_path()
jobdir = tempfile.mkdtemp(prefix=tmp_prefix)
dest = os.path.join(jobdir, upload.name)
os.rename(source, dest)
#Process an individual docx file
if has_extension(upload.name, 'docx'):
files = [dest,]
#Process a zip archive, only pulls docx in root dir of archive
if has_extension(upload.name, 'zip'):
#read archive
archive = ZipFile(dest, 'r')
members = archive.namelist()
members = filter(lambda f: has_extension(f, 'docx'), members)
members = filter(lambda f: len(f.split(os.sep)) == 1, members)
if not members: return error('No docx files found in root directory of archive.')
#extract each item
for m in members:
try:
f = file(os.path.join(jobdir,m), 'w')
f.write(archive.read(m))
f.close()
except:
return error('An error occurred trying to extract files from archive.')
#add docx files to file list
files = os.listdir(jobdir)
files = filter(lambda f: f.split('.')[-1] == 'docx', files)
files = map(lambda f: os.path.join(jobdir, f), files)
#Convert files in job
for f in files:
input = f
output = os.path.join(jobdir, remove_ext(f) + '.html')
im = 'cdwsiodocx'
om = 'cdwsiodocx'
job = Zombie(input, output)
if job.convert():
job.finalize()
else:
e = sys.stderr.last_error()
return error('There was an error converting the document provided "%s"' % e)
context = {'id': os.path.basename(jobdir)[len(tmp_prefix):]}
return render_to_response('result.phtml', context)
return render_to_response('errors.phtml', {'errors':form.errors})
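# Hypothetical URL wiring (an assumption, not taken from this project): these
# three views would normally be exposed through the project urlconf of a
# Django release of this vintage, along the lines of
#
#     from django.conf.urls.defaults import patterns, url
#     urlpatterns = patterns('converter.views',
#         url(r'^$', 'index'),
#         url(r'^upload/$', 'upload'),
#         url(r'^download/(?P<job_id>[^/]+)/$', 'download'),
#     )
#
# where the job_id capture group feeds the download(request, job_id) signature.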
| mit | 6,260,761,604,533,570,000 | 31.215686 | 98 | 0.664029 | false | 3.282717 | false | false | false |
propdata/scrum-pm | scrum_pm/artifacts/migrations/0001_initial.py | 1 | 5055 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Sprint'
db.create_table('artifacts_sprint', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('added', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='sprint_created', to=orm['auth.User'])),
('starts', self.gf('django.db.models.fields.DateTimeField')()),
('ends', self.gf('django.db.models.fields.DateTimeField')()),
('retrospective', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal('artifacts', ['Sprint'])
def backwards(self, orm):
# Deleting model 'Sprint'
db.delete_table('artifacts_sprint')
models = {
'artifacts.sprint': {
'Meta': {'object_name': 'Sprint'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sprint_created'", 'to': "orm['auth.User']"}),
'ends': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'retrospective': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'starts': ('django.db.models.fields.DateTimeField', [], {}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['artifacts']
| mit | 6,145,486,394,306,137,000 | 65.526316 | 182 | 0.563403 | false | 3.730627 | false | false | false |
davidpaulrosser/Forms | forms/util/mesh.py | 1 | 3205 |
"""
This module provides various mesh utilities for cleaning and extruding.
"""
import pymel.core as pm
import maya.mel as mel
"""
Combine multiple polygon meshes with the option of removing duplicate internal faces.
Parameters:
instanceGroup -- A group of meshes to combine ( pymel.core.general.group )
meshName -- A name for the mesh output ( default "mesh" )
duplicateFaces -- Optionally remove lamina and the faces they share ( default False )
Return:
mesh -- ( pymel.core.nodetypes.Transform(u'') )
"""
def combineClean( instanceGroup, meshName, duplicateFaces = False ):
print( "Combining mesh" )
mesh = pm.polyUnite( instanceGroup, name = meshName, constructionHistory = False )
#print( "Merging %i" % len( mesh[ 0 ].vtx ) + " verticies" )
pm.polyMergeVertex( mesh[ 0 ].vtx, distance = 0.1 )
#print( "Reduced to %i" % mesh[ 0 ].numVertices() + " verticies" )
if duplicateFaces:
print( "Cleaning up faces" )
pm.select( mesh[ 0 ] )
pm.selectType( polymeshFace = True )
pm.polySelectConstraint( mode = 3, type = 0x0008, topology = 2 )
# Don't ask me how I did this
mel.eval('polyCleanupArgList 3 { "0","2","0","0","0","0","0","0","0","1e-005","0","1e-005","1","0.3","0","-1","1" };')
pm.delete()
pm.polySelectConstraint( mode = 0, topology = 0 )
pm.selectType( polymeshFace = False )
pm.selectMode( object = True )
print( "Faces reduced" )
if pm.PyNode( instanceGroup ).exists():
pm.delete( instanceGroup )
pm.delete( constructionHistory = True )
pm.select( clear = True )
print( "Cleaning up complete" )
return mesh
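# Hypothetical usage sketch (the node and mesh names are assumptions, not from
# the original source): given a group of overlapping meshes already present in
# the scene,
#
#     group = pm.PyNode("instanceGroup1")
#     mesh = combineClean(group, meshName="combinedMesh", duplicateFaces=True)
#
# returns the combined transform with merged vertices and, optionally, the
# duplicate internal faces removed.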
"""
Create a wireframe style mesh
Ported from jh_polyWire.mel http://www.creativecrash.com/maya/downloads/scripts-plugins/modeling/poly-tools/c/convert-to-polywire
Parameters:
mesh -- The mesh to convert ( pm.core.nodetypes.Mesh )
gridSize -- The thickness of the borders ( default 0.9 )
depth -- The depth of the extrusion. The value is relative to the scale of the model ( default 0.5 )
extrudeMode -- The extrusion mode. 0 to scale the faces in world space, 1 to translate the faces in local space ( default 1 )
"""
def polyWire( mesh, gridSize = 0.9, depth = 0.5, extrudeMode = 0 ):
# Select the faces
pm.select( mesh[ 0 ].f )
# Extrude and scale the faces
extrude = pm.polyExtrudeFacet( constructionHistory = True, keepFacesTogether = False, divisions = 1, twist = 0, taper = 1, off = 0 )
pm.PyNode( extrude[ 0 ] ).localScale.set( [ gridSize, gridSize, gridSize ] )
# Delete inner faces
pm.delete()
pm.select( mesh[ 0 ].f )
# Extrude the faces
extrude = pm.polyExtrudeFacet( constructionHistory = True, keepFacesTogether = True, divisions = 1, twist = 0, taper = 1, off = 0 )
if extrudeMode == 0:
pm.PyNode( extrude[ 0 ] ).scale.set( [ depth, depth, depth ] )
elif extrudeMode == 1:
pm.PyNode( extrude[ 0 ] ).localTranslate.set( [ 0, 0, depth ] )
pm.select( clear = True )
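# Hypothetical follow-up (illustrative values only): the result of
# combineClean() above can be passed straight to polyWire(), since polyWire
# indexes mesh[0] just like the clean-up step does:
#
#     polyWire(mesh, gridSize=0.85, depth=0.4, extrudeMode=1)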
| mit | -1,726,906,930,116,694,500 | 29.235849 | 136 | 0.616849 | false | 3.409574 | false | false | false |
jerjorg/BZI | BZI/convergence.py | 1 | 6793 |
import numpy as np
import matplotlib.pyplot as plt
import time
from BZI.symmetry import make_ptvecs
from BZI.sampling import make_grid
from BZI.pseudopots import Al_PP
from BZI.integration import monte_carlo
from BZI.plots import PlotMesh
class Convergence(object):
""" Compare integrations of pseudo-potentials by creating convergence plots.
Args:
pseudo_potential (function): a pseudo-potential function taken from
BZI.pseudopots
cutoff (float): the energy cutoff of the pseudo-potential
cell_type (str): the geometry of the integration cell
cell_constant (float): the size of the integration cell
offset (list): a vector that offsets the grid from the origin and is
given in grid coordinates.
grid_types (list): a list of grid types
grid_constants (list): a list of grid constants
integration_methods (list): a list of integration methods
Attributes:
pseudo_potential (function): a pseudo-potential function taken from
BZI.pseudopots
cell_type (str): the geometry of the integration cell.
cell_constant (float): the size of the integration cell.
cell_vectors (np.ndarray): an array vectors as columns of a 3x3 numpy
array that is used to create the cell
grid_types (list): a list of grid types
grid_constants (list): a list of grid constants
integration_methods (list): a list of integration methods
answer (float): the expected result of integration
errors (list): a list of errors for each grid type
nspts (list): a list of the number of sampling points for each grid type
integrals (list): a list of integral value for each grid type and constant
times (list): a list of the amount of time taken computing the grid
generation and integration.
"""
def __init__(self, pseudo_potential=None, cutoff=None, cell_centering=None,
cell_constants=None, cell_angles=None, offset=None,
grid_types=None, grid_constants=None,
integration_methods=None, origin=None, random = None):
self.pseudo_potential = pseudo_potential or Al_PP
self.cutoff = cutoff or 4.
self.cell_centering = cell_centering or "prim"
self.cell_constants = cell_constants or [1.]*3
self.cell_angles = cell_angles or [np.pi/2]*3
self.cell_vectors = make_ptvecs(self.cell_centering, self.cell_constants,
self.cell_angles)
self.grid_centerings = grid_centerings or ["prim", "base", "body", "face"]
self.grid_constants = grid_constants or [1/n for n in range(2,11)]
self.offset = offset or [0.,0.,0.]
# self.integration_methods = integration_methods or [rectangle_method]
self.origin = origin or [0.,0.,0.]
self.random = random or False
def compare_grids(self, answer, plot=False, save=False):
self.answer = answer
if self.random:
nm = len(self.grid_types)
self.nspts = [[] for _ in range(nm + 1)]
self.errors = [[] for _ in range(nm + 1)]
self.integrals = [[] for _ in range(nm + 1)]
self.times = [[] for _ in range(nm + 1)]
npts_list = [2**n for n in range(8,14)]
for npts in npts_list:
time1 = time.time()
integral = monte_carlo(self.pseudo_potential,
self.cell_vectors,
npts,
self.cutoff)
self.nspts[nm].append(npts)
self.integrals[nm].append(integral)
self.times[nm].append((time.time() - time1))
self.errors[nm].append(np.abs(self.integrals[nm][-1] - answer))
else:
self.nspts = [[] for _ in range(len(self.grid_types))]
self.errors = [[] for _ in range(len(self.grid_types))]
self.integrals = [[] for _ in range(len(self.grid_types))]
self.times = [[] for _ in range(len(self.grid_types))]
integration_method = self.integration_methods[0]
for (i,grid_centering) in enumerate(self.grid_centering_list):
for grid_consts in self.grid_constants_list:
for grid_angles in grid_angles_list:
grid_vecs = make_ptvecs(grid_centering, grid_consts, grid_angles)
time1 = time.time()
npts, integral = integration_method(self.pseudo_potential,
self.cell_vectors,
grid_vecs,
self.offset,
self.origin,
self.cutoff)
self.nspts[i].append(npts)
self.integrals[i].append(integral)
self.times[i].append((time.time() - time1))
self.errors[i].append(np.abs(self.integrals[i][-1] - answer))
if save:
np.save("%s_times" %self.pseudo_potential, self.times)
np.save("%s_integrals" %self.pseudo_potential, self.integrals)
np.save("%s_errors" %self.pseudo_potential, self.errors)
if plot:
if self.random:
plt.loglog(self.nspts[nm], self.errors[nm], label="random", color="orange")
for i in range(len(self.grid_types)):
plt.loglog(self.nspts[i], self.errors[i], label=self.grid_types[i])
plt.xlabel("Number of samping points")
plt.ylabel("Error")
test = [1./n**(2./3) for n in self.nspts[0]]
plt.loglog(self.nspts[0], test, label="1/n**(2/3)")
lgd = plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.grid()
plt.show()
plt.close()
for i in range(len(self.grid_types)):
plt.loglog(self.nspts[i], self.times[i], label=self.grid_types[i])
plt.xlabel("Number of samping points")
plt.ylabel("Time (s)")
lgd = plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.grid()
plt.show()
plt.close()
def plot_grid(self,i,j):
"""Plot one of the grids in the convergence plot.
"""
grid_vecs = make_ptvecs(self.grid_types[i], self.grid_constants[j])
grid_pts = make_grid(self.rcell_vectors, gr_vecs, self.offset)
PlotMesh(grid_pts, self.rcell_vectors, self.offset)
| gpl-3.0 | -3,526,169,047,747,902,000 | 48.583942 | 91 | 0.554541 | false | 4.048272 | false | false | false |
stxnext-kindergarten/presence-analyzer-pzarebski | src/presence_analyzer/utils.py | 1 | 3612 |
# -*- coding: utf-8 -*-
"""
Helper functions used in views.
"""
import csv
from json import dumps
from functools import wraps
from datetime import datetime
from flask import Response
from presence_analyzer.main import app
import logging
log = logging.getLogger(__name__) # pylint: disable=invalid-name
def jsonify(function):
"""
Creates a response with the JSON representation of wrapped function result.
"""
@wraps(function)
def inner(*args, **kwargs):
"""
This docstring will be overridden by @wraps decorator.
"""
return Response(
dumps(function(*args, **kwargs)),
mimetype='application/json'
)
return inner
def get_data():
"""
Extracts presence data from CSV file and groups it by user_id.
It creates structure like this:
data = {
'user_id': {
datetime.date(2013, 10, 1): {
'start': datetime.time(9, 0, 0),
'end': datetime.time(17, 30, 0),
},
datetime.date(2013, 10, 2): {
'start': datetime.time(8, 30, 0),
'end': datetime.time(16, 45, 0),
},
}
}
"""
data = {}
with open(app.config['DATA_CSV'], 'r') as csvfile:
presence_reader = csv.reader(csvfile, delimiter=',')
for i, row in enumerate(presence_reader):
if len(row) != 4:
# ignore header and footer lines
continue
try:
user_id = int(row[0])
date = datetime.strptime(row[1], '%Y-%m-%d').date()
start = datetime.strptime(row[2], '%H:%M:%S').time()
end = datetime.strptime(row[3], '%H:%M:%S').time()
except (ValueError, TypeError):
log.debug('Problem with line %d: ', i, exc_info=True)
continue
data.setdefault(user_id, {})[date] = {'start': start, 'end': end}
return data
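# Illustrative consumption of the structure documented above (not part of the
# original module); interval() and mean() are the helpers defined further below:
#
#     data = get_data()
#     for user_id, days in data.items():
#         worked = [interval(v['start'], v['end']) for v in days.values()]
#         log.debug('user %s: mean presence %s s', user_id, mean(worked))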
def group_by_weekday(items):
"""
Groups presence entries by weekday.
"""
result = [[] for i in range(7)] # one list for every day in week
for date in items:
start = items[date]['start']
end = items[date]['end']
result[date.weekday()].append(interval(start, end))
return result
def seconds_since_midnight(time):
"""
Calculates amount of seconds since midnight.
"""
return time.hour * 3600 + time.minute * 60 + time.second
def interval(start, end):
"""
Calculates inverval in seconds between two datetime.time objects.
"""
return seconds_since_midnight(end) - seconds_since_midnight(start)
def mean(items):
"""
Calculates arithmetic mean. Returns zero for empty lists.
"""
return float(sum(items)) / len(items) if len(items) > 0 else 0
def group_by_weekday_start_end(items):
"""
Groups start time and end time by weekday.
It creates structure like this:
result = [
{
'start': [39973, 35827, 31253, 32084, 40358],
'end': [70900, 61024, 61184, 55828, 70840],
},
{
'start': [33058, 39177, 31018],
'end': [61740, 71032, 70742],
}
]
"""
result = [{} for i in range(7)] # one dict for every day in week
for date in items:
start = items[date]['start']
end = items[date]['end']
result[date.weekday()].setdefault('start', []).append(
seconds_since_midnight(start)
)
result[date.weekday()].setdefault('end', []).append(
seconds_since_midnight(end)
)
return result
| mit | 8,764,287,398,231,358,000 | 26.157895 | 79 | 0.551495 | false | 3.875536 | false | false | false |
robot-tools/iconograph | server/modules/persistent.py | 1 | 1150 |
#!/usr/bin/python3
import argparse
import os
import icon_lib
parser = argparse.ArgumentParser(description='iconograph persistent')
parser.add_argument(
'--chroot-path',
dest='chroot_path',
action='store',
required=True)
FLAGS = parser.parse_args()
def main():
module = icon_lib.IconModule(FLAGS.chroot_path)
os.mkdir(os.path.join(FLAGS.chroot_path, 'persistent'))
tool_path = os.path.join(FLAGS.chroot_path, 'icon', 'persistent')
os.makedirs(tool_path, exist_ok=True)
script = os.path.join(tool_path, 'startup.sh')
with open(script, 'w') as fh:
os.chmod(fh.fileno(), 0o755)
fh.write("""\
#!/bin/bash
set -ex
e2fsck -y LABEL=PERSISTENT
mount -o noatime LABEL=PERSISTENT /persistent
""")
with module.ServiceFile('persistent.service') as fh:
fh.write("""
[Unit]
Description=Mount /persistent
DefaultDependencies=no
Conflicts=shutdown.target
After=systemd-remount-fs.service
Before=sysinit.target
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStart=/icon/persistent/startup.sh
[Install]
WantedBy=sysinit.target
""")
module.EnableService('persistent.service')
if __name__ == '__main__':
main()
| apache-2.0 | 3,711,239,642,971,571,700 | 19.175439 | 69 | 0.713913 | false | 3.125 | false | false | false |
ratoaq2/deluge | packaging/win32/deluge-bbfreeze.py | 1 | 7783 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2015 Calum Lind <[email protected]>
# Copyright (C) 2010 Damien Churchill <[email protected]>
# Copyright (C) 2009-2010 Andrew Resch <[email protected]>
# Copyright (C) 2009 Jesper Lund <[email protected]>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
from __future__ import print_function
import glob
import os
import re
import shutil
import sys
import bbfreeze
import gtk
from win32verstamp import stamp
import deluge.common
class VersionInfo(object):
def __init__(self, version, internalname=None, originalfilename=None,
comments=None, company=None, description=None,
_copyright=None, trademarks=None, product=None, dll=False,
debug=False, verbose=True):
parts = version.split('.')
while len(parts) < 4:
parts.append('0')
self.version = '.'.join(parts)
self.internal_name = internalname
self.original_filename = originalfilename
self.comments = comments
self.company = company
self.description = description
self.copyright = _copyright
self.trademarks = trademarks
self.product = product
self.dll = dll
self.debug = debug
self.verbose = verbose
DEBUG = False
if len(sys.argv) == 2 and sys.argv[1].lower() == 'debug':
DEBUG = True
# Get build_version from installed deluge.
build_version = deluge.common.get_version()
python_path = os.path.dirname(sys.executable)
if python_path.endswith('Scripts'):
python_path = python_path[:-8]
gtk_root = os.path.join(gtk.__path__[0], '..', 'runtime')
build_dir = os.path.join('build-win32', 'deluge-bbfreeze-' + build_version)
if DEBUG:
print('Python Path: %s' % python_path)
print('Gtk Path: %s' % gtk_root)
print('bbfreeze Output Path: %s' % build_dir)
print('Freezing Deluge %s...' % build_version)
# Disable printing to console for bbfreezing.
if not DEBUG:
sys.stdout = open(os.devnull, 'w')
# Include python modules not picked up automatically by bbfreeze.
includes = ('libtorrent', 'cairo', 'pangocairo', 'atk', 'pango', 'twisted.internet.utils',
'gio', 'gzip', 'email.mime.multipart', 'email.mime.text', '_cffi_backend')
excludes = ('numpy', 'OpenGL', 'psyco', 'win32ui', 'unittest')
def recipe_gtk_override(mf):
# Override bbfreeze function so that it includes all gtk libraries
# in the installer so users don't require a separate GTK+ installation.
return True
bbfreeze.recipes.recipe_gtk_and_friends = recipe_gtk_override
# Workaround for "ImportError: The 'packaging' package is required" with setuptools > 18.8.
# (https://github.com/pypa/setuptools/issues/517)
bbfreeze.recipes.recipe_pkg_resources = bbfreeze.recipes.include_whole_package('pkg_resources')
fzr = bbfreeze.Freezer(build_dir, includes=includes, excludes=excludes)
fzr.include_py = False
fzr.setIcon(os.path.join(os.path.dirname(deluge.common.__file__), 'ui', 'data', 'pixmaps', 'deluge.ico'))
# TODO: Can/should we grab the script list from setup.py entry_points somehow.
# Hide cmd console popup for these console entries force gui_script True.
force_gui = ['deluge-web', 'deluged']
for force_script in force_gui:
script_path = os.path.join(python_path, 'Scripts', force_script + '-script.py')
shutil.copy(script_path, script_path.replace('script', 'debug-script'))
script_list = []
for script in glob.glob(os.path.join(python_path, 'Scripts\\deluge*-script.py*')):
# Copy the scripts to remove the '-script' suffix before adding to freezer.
new_script = script.replace('-script', '')
shutil.copy(script, new_script)
gui_script = False
script_splitext = os.path.splitext(os.path.basename(new_script))
if script_splitext[1] == '.pyw' or script_splitext[0] in force_gui:
gui_script = True
try:
fzr.addScript(new_script, gui_only=gui_script)
script_list.append(new_script)
except Exception:
os.remove(script)
# Start the freezing process.
fzr()
# Clean up the duplicated scripts.
for script in script_list:
os.remove(script)
# Exclude files which are already included in GTK or Windows. Also exclude unneeded pygame dlls.
excludeDlls = ('MSIMG32.dll', 'MSVCR90.dll', 'MSVCP90.dll', 'MSVCR120.dll',
'POWRPROF.dll', 'DNSAPI.dll', 'USP10.dll', 'MPR.dll',
'jpeg.dll', 'libfreetype-6.dll', 'libpng12-0.dll', 'libtiff.dll',
'SDL_image.dll', 'SDL_ttf.dll')
for exclude_dll in excludeDlls:
try:
os.remove(os.path.join(build_dir, exclude_dll))
except OSError:
pass
# Re-enable printing.
if not DEBUG:
sys.stdout = sys.__stdout__
# Copy gtk locale files.
gtk_locale = os.path.join(gtk_root, 'share/locale')
locale_include_list = ['gtk20.mo', 'locale.alias']
def ignored_files(adir, ignore_filenames):
return [
ignore_file for ignore_file in ignore_filenames
if not os.path.isdir(os.path.join(adir, ignore_file)) and
ignore_file not in locale_include_list
]
shutil.copytree(gtk_locale, os.path.join(build_dir, 'share/locale'), ignore=ignored_files)
# Copy gtk theme files.
theme_include_list = [
[gtk_root, 'share/icons/hicolor/index.theme'],
[gtk_root, 'lib/gtk-2.0/2.10.0/engines'],
[gtk_root, 'share/themes/MS-Windows'],
['DelugeStart Theme', 'lib/gtk-2.0/2.10.0/engines/libmurrine.dll'],
['DelugeStart Theme', 'share/themes/DelugeStart'],
['DelugeStart Theme', 'etc/gtk-2.0/gtkrc']
]
for path_root, path in theme_include_list:
full_path = os.path.join(path_root, path)
if os.path.isdir(full_path):
shutil.copytree(full_path, os.path.join(build_dir, path))
else:
dst_dir = os.path.join(build_dir, os.path.dirname(path))
try:
os.makedirs(dst_dir)
except OSError:
pass
shutil.copy(full_path, dst_dir)
# Add version information to exe files.
for script in script_list:
script_exe = os.path.splitext(os.path.basename(script))[0] + '.exe'
# Don't add to dev build versions.
if not re.search('[a-zA-Z_-]', build_version):
versionInfo = VersionInfo(build_version,
description='Deluge Bittorrent Client',
company='Deluge Team',
product='Deluge',
_copyright='Deluge Team')
stamp(os.path.join(build_dir, script_exe), versionInfo)
# Copy version info to file for nsis script.
with open('VERSION.tmp', 'w') as ver_file:
ver_file.write('build_version = "%s"' % build_version)
# Create the install and uninstall file list for NSIS.
filedir_list = []
for root, dirnames, filenames in os.walk(build_dir):
dirnames.sort()
filenames.sort()
filedir_list.append((root[len(build_dir):], filenames))
with open('install_files.nsh', 'w') as f:
f.write('; Files to install\n')
for dirname, files in filedir_list:
if not dirname:
dirname = os.sep
f.write('\nSetOutPath "$INSTDIR%s"\n' % dirname)
for filename in files:
f.write('File "${BBFREEZE_DIR}%s"\n' % os.path.join(dirname, filename))
with open('uninstall_files.nsh', 'w') as f:
f.write('; Files to uninstall\n')
for dirname, files in reversed(filedir_list):
f.write('\n')
if not dirname:
dirname = os.sep
for filename in files:
f.write('Delete "$INSTDIR%s"\n' % os.path.join(dirname, filename))
f.write('RMDir "$INSTDIR%s"\n' % dirname)
| gpl-3.0 | 7,283,705,416,891,160,000 | 34.701835 | 105 | 0.654118 | false | 3.341778 | false | false | false |
zplab/rpc-scope | scope/gui/microscope_widget.py | 1 | 23339 |
# This code is licensed under the MIT License (see LICENSE file for details)
from PyQt5 import Qt
import pkg_resources
from . import device_widget
from . import status_widget
from ..simple_rpc import rpc_client
from .. import util
class MicroscopeWidget(device_widget.DeviceWidget):
PROPERTY_ROOT = 'scope.'
PROPERTIES = [
# tuple contains: property, type, and zero or more args that are passed to
# the 'make_'+type+'widget() function.
# ('stand.active_microscopy_method', 'enum', 'stand.available_microscopy_methods'),
('nosepiece.position', 'objective'),
# ('nosepiece.safe_mode', 'bool'),
# ('nosepiece.immersion_mode', 'bool'),
('il.shutter_open', 'bool'),
('tl.shutter_open', 'bool'),
('il.field_wheel', 'enum', 'il.field_wheel_positions'),
('il.filter_cube', 'enum', 'il.filter_cube_values'),
# The final element of the 'tl.aperature_diaphragm' tuple, 'scope.nosepiece.position', indicates
# that 'tl.aperture_diaphragm_range' may change with 'scope.nosepiece.position'. So,
# 'tl.aperture_diaphragm_range' should be refreshed upon 'scope.nosepiece.position' change.
('tl.aperture_diaphragm', 'int', 'tl.aperture_diaphragm_range', 'nosepiece.position'),
('tl.field_diaphragm', 'int', 'tl.field_diaphragm_range', 'nosepiece.position'),
('tl.condenser_retracted', 'bool'),
('stage.xy_fine_control', 'bool'),
('stage.z_fine_control', 'bool'),
# TODO: use hard max values read from scope, if possible
# TODO: If not possible, verify that hard max values are identical across all scopes
# otherwise make this a config parameter.
('stage.x', 'stage_axis_pos', 225),
('stage.y', 'stage_axis_pos', 76),
('stage.z', 'stage_axis_pos', 26)
]
@classmethod
def can_run(cls, scope):
# We're useful if at least one of our properties can be read. Properties that can not be read
# when the widget is created are not shown in the GUI.
for property, *rest in cls.PROPERTIES:
attr = scope
try:
for name in property.split('.'):
attr = getattr(attr, name)
except:
continue
return True
return False
def __init__(self, scope, parent=None):
super().__init__(scope, parent)
self.limit_pixmaps_and_tooltips = LimitPixmapsAndToolTips()
self.setWindowTitle('Stand')
form = Qt.QFormLayout(self)
form.setContentsMargins(0, 0, 0, 0)
form.setVerticalSpacing(4)
form.setLabelAlignment(Qt.Qt.AlignRight | Qt.Qt.AlignVCenter)
form.setFieldGrowthPolicy(Qt.QFormLayout.ExpandingFieldsGrow)
for property, widget_type, *widget_args in self.PROPERTIES:
self.make_widgets_for_property(self.PROPERTY_ROOT + property, widget_type, widget_args)
if hasattr(scope, 'job_runner'):
form.addRow(status_widget.StatusWidget(scope))
def get_scope_attr(self, property):
"""look up an attribute on the scope object by property name, which is
expected to start with 'scope.' -- e.g. 'scope.stage.z_high_soft_limit'
"""
attr = self.scope
for name in property.split('.')[1:]:
attr = getattr(attr, name)
return attr
def make_widgets_for_property(self, property, widget_type, widget_args):
try:
self.get_scope_attr(property)
except AttributeError:
# The property isn't available for this scope object, so don't
# make a widget for it.
return
layout = self.layout()
label = Qt.QLabel(property[len(self.PROPERTY_ROOT):] + ':') # strip the 'scope.' off
label.setSizePolicy(Qt.QSizePolicy.Expanding, Qt.QSizePolicy.Expanding)
widget = getattr(self, 'make_{}_widget'.format(widget_type))(property, *widget_args)
widget.setSizePolicy(Qt.QSizePolicy.Expanding, Qt.QSizePolicy.Expanding)
layout.addRow(label, widget)
def make_bool_widget(self, property):
widget = Qt.QCheckBox()
update = self.subscribe(property, callback=widget.setChecked)
if update is None:
widget.setEnabled(False)
else:
def gui_changed(value):
try:
update(value)
except rpc_client.RPCError as e:
error = 'Could not set {} ({}).'.format(property, e.args[0])
Qt.QMessageBox.warning(self, 'Invalid Value', error)
widget.toggled.connect(gui_changed)
return widget
def make_int_widget(self, property, range_property, range_depends_on_property):
widget = Qt.QWidget()
layout = Qt.QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
widget.setLayout(layout)
slider = Qt.QSlider(Qt.Qt.Horizontal)
slider.setTickInterval(1)
layout.addWidget(slider)
spinbox = Qt.QSpinBox()
layout.addWidget(spinbox)
handling_change = util.Condition() # acts as false, except when in a with-block, where it acts as true
def range_changed(_):
if handling_change:
return
with handling_change:
range = self.get_scope_attr(self.PROPERTY_ROOT + range_property)
slider.setRange(*range)
spinbox.setRange(*range)
self.subscribe(self.PROPERTY_ROOT + range_depends_on_property, callback=range_changed, readonly=True)
def prop_changed(value):
if handling_change:
return
with handling_change:
slider.setValue(value)
spinbox.setValue(value)
update = self.subscribe(property, callback=prop_changed)
if update is None:
spinbox.setEnabled(False)
slider.setEnabled(False)
else:
def gui_changed(value):
if handling_change:
return
with handling_change:
update(value)
# TODO: verify the below doesn't blow up without indexing the
# overloaded valueChanged signal as [int]
slider.valueChanged.connect(gui_changed)
spinbox.valueChanged.connect(gui_changed)
return widget
def make_enum_widget(self, property, choices_property):
widget = Qt.QComboBox()
widget.setEditable(False)
widget.addItems(sorted(self.get_scope_attr(self.PROPERTY_ROOT + choices_property)))
update = self.subscribe(property, callback=widget.setCurrentText)
if update is None:
widget.setEnabled(False)
else:
def gui_changed(value):
try:
update(value)
except rpc_client.RPCError as e:
error = 'Could not set {} ({}).'.format(property, e.args[0])
Qt.QMessageBox.warning(self, 'RPC Exception', error)
widget.currentTextChanged.connect(gui_changed)
return widget
def make_objective_widget(self, property):
widget = Qt.QComboBox()
widget.setEditable(False)
mags = self.get_scope_attr(self.PROPERTY_ROOT + 'nosepiece.all_objectives')
model = _ObjectivesModel(mags, widget.font(), self)
widget.setModel(model)
def prop_changed(value):
widget.setCurrentIndex(value)
update = self.subscribe(property, callback=prop_changed)
if update is None:
widget.setEnabled(False)
else:
def gui_changed(value):
try:
update(value)
except rpc_client.RPCError as e:
error = 'Could not set {} ({}).'.format(property, e.args[0])
Qt.QMessageBox.warning(self, 'RPC Exception', error)
# TODO: verify the below doesn't blow up without indexing the
# overloaded currentIndexChanged signal as [int]
widget.currentIndexChanged.connect(gui_changed)
return widget
def make_stage_axis_pos_widget(self, property, axis_max_val):
widget = Qt.QWidget()
vlayout = Qt.QVBoxLayout()
vlayout.setSpacing(0)
vlayout.setContentsMargins(0, 0, 0, 0)
widget.setLayout(vlayout)
axis_name = property.split('.')[-1]
props = self.scope.properties.properties # dict of tracked properties, updated by property client
# [low limits status indicator] [-------<slider>-------] [high limits status indicator]
slider_layout = Qt.QHBoxLayout()
l, t, r, b = slider_layout.getContentsMargins()
slider_layout.setContentsMargins(l, 0, r, 0)
slider_layout.setSpacing(5)
low_limit_status_label = Qt.QLabel()
# NB: *_limit_status_label pixmaps are set here so that layout does not jump when limit status RPC property updates
# are first received
low_limit_status_label.setPixmap(self.limit_pixmaps_and_tooltips.low_no_limit_pm)
slider_layout.addWidget(low_limit_status_label)
pos_slider_factor = 1e3
pos_slider = Qt.QSlider(Qt.Qt.Horizontal)
pos_slider.setEnabled(False)
pos_slider.setRange(0, pos_slider_factor * axis_max_val)
pos_slider.setValue(0)
slider_layout.addWidget(pos_slider)
high_limit_status_label = Qt.QLabel()
high_limit_status_label.setPixmap(self.limit_pixmaps_and_tooltips.high_no_limit_pm)
slider_layout.addWidget(high_limit_status_label)
vlayout.addLayout(slider_layout)
at_ls_property = self.PROPERTY_ROOT + 'stage.at_{}_low_soft_limit'.format(axis_name)
at_lh_property = self.PROPERTY_ROOT + 'stage.at_{}_low_hard_limit'.format(axis_name)
at_hs_property = self.PROPERTY_ROOT + 'stage.at_{}_high_soft_limit'.format(axis_name)
at_hh_property = self.PROPERTY_ROOT + 'stage.at_{}_high_hard_limit'.format(axis_name)
def at_low_limit_prop_changed(_):
try:
at_s = props[at_ls_property]
at_h = props[at_lh_property]
except KeyError:
return
if at_s and at_h:
pm = self.limit_pixmaps_and_tooltips.low_hard_and_soft_limits_pm
tt = self.limit_pixmaps_and_tooltips.low_hard_and_soft_limits_tt
elif at_s:
pm = self.limit_pixmaps_and_tooltips.low_soft_limit_pm
tt = self.limit_pixmaps_and_tooltips.low_soft_limit_tt
elif at_h:
pm = self.limit_pixmaps_and_tooltips.low_hard_limit_pm
tt = self.limit_pixmaps_and_tooltips.low_hard_limit_tt
else:
pm = self.limit_pixmaps_and_tooltips.low_no_limit_pm
tt = self.limit_pixmaps_and_tooltips.low_no_limit_tt
low_limit_status_label.setPixmap(pm)
low_limit_status_label.setToolTip(tt)
self.subscribe(at_ls_property, at_low_limit_prop_changed, readonly=True)
self.subscribe(at_lh_property, at_low_limit_prop_changed, readonly=True)
def at_high_limit_prop_changed(_):
try:
at_s = props[at_hs_property]
at_h = props[at_hh_property]
except KeyError:
return
if at_s and at_h:
pm = self.limit_pixmaps_and_tooltips.high_hard_and_soft_limits_pm
tt = self.limit_pixmaps_and_tooltips.high_hard_and_soft_limits_tt
elif at_s:
pm = self.limit_pixmaps_and_tooltips.high_soft_limit_pm
tt = self.limit_pixmaps_and_tooltips.high_soft_limit_tt
elif at_h:
pm = self.limit_pixmaps_and_tooltips.high_hard_limit_pm
tt = self.limit_pixmaps_and_tooltips.high_hard_limit_tt
else:
pm = self.limit_pixmaps_and_tooltips.high_no_limit_pm
tt = self.limit_pixmaps_and_tooltips.high_no_limit_tt
high_limit_status_label.setPixmap(pm)
high_limit_status_label.setToolTip(tt)
self.subscribe(at_hs_property, at_high_limit_prop_changed, readonly=True)
self.subscribe(at_hh_property, at_high_limit_prop_changed, readonly=True)
# [stop] [low soft limit text edit] [position text edit] [high soft limit text edit] [reset high soft limit button]
buttons_layout = Qt.QHBoxLayout()
l, t, r, b = buttons_layout.getContentsMargins()
buttons_layout.setSpacing(5)
buttons_layout.setContentsMargins(l, 0, r, 0)
stop_button = Qt.QPushButton(widget.style().standardIcon(Qt.QStyle.SP_BrowserStop), '')
stop_button.setToolTip('Stop movement along {} axis.'.format(axis_name))
stop_button.setEnabled(False)
buttons_layout.addWidget(stop_button)
low_limit_text_widget = FocusLossSignalingLineEdit()
low_limit_text_widget.setMaxLength(8)
low_limit_text_validator = Qt.QDoubleValidator()
low_limit_text_validator.setBottom(0)
low_limit_text_widget.setValidator(low_limit_text_validator)
buttons_layout.addWidget(low_limit_text_widget)
pos_text_widget = FocusLossSignalingLineEdit()
pos_text_widget.setMaxLength(8)
pos_text_validator = Qt.QDoubleValidator()
pos_text_widget.setValidator(pos_text_validator)
buttons_layout.addWidget(pos_text_widget)
high_limit_text_widget = FocusLossSignalingLineEdit()
high_limit_text_widget.setMaxLength(8)
high_limit_text_validator = Qt.QDoubleValidator()
high_limit_text_validator.setTop(axis_max_val)
high_limit_text_widget.setValidator(high_limit_text_validator)
buttons_layout.addWidget(high_limit_text_widget)
reset_limits_button = Qt.QPushButton('Reset limits')
reset_limits_button.setToolTip(
'Reset {} soft min and max to the smallest \n and largest acceptable values, respectively.'.format(axis_name)
)
buttons_layout.addWidget(reset_limits_button)
vlayout.addLayout(buttons_layout)
def moving_along_axis_changed(value):
stop_button.setEnabled(value)
self.subscribe('{}stage.moving_along_{}'.format(self.PROPERTY_ROOT, axis_name), moving_along_axis_changed, readonly=True)
def stop_moving_along_axis():
try:
self.get_scope_attr(self.PROPERTY_ROOT+'stage.stop_{}'.format(axis_name))()
except rpc_client.RPCError as e:
error = 'Could not stop movement along {} axis ({}).'.format(axis_name, e.args[0])
Qt.QMessageBox.warning(self, 'RPC Exception', error)
# TODO: verify the below doesn't blow up without indexing the
# overloaded clicked signal as [bool]
stop_button.clicked.connect(stop_moving_along_axis)
# low limit sub-widget
low_limit_property = self.PROPERTY_ROOT + 'stage.{}_low_soft_limit'.format(axis_name)
handling_low_soft_limit_change = util.Condition() # start out false, except when used as with-block context manager
def low_limit_prop_changed(value):
if handling_low_soft_limit_change:
return
with handling_low_soft_limit_change:
low_limit_text_widget.setText(str(value))
pos_text_validator.setBottom(value)
high_limit_text_validator.setBottom(value)
update_low_limit = self.subscribe(low_limit_property, low_limit_prop_changed)
if update_low_limit is None:
low_limit_text_widget.setEnabled(False)
else:
def submit_low_limit_text():
if handling_low_soft_limit_change:
return
with handling_low_soft_limit_change:
try:
new_low_limit = float(low_limit_text_widget.text())
except ValueError:
return
try:
update_low_limit(new_low_limit)
except rpc_client.RPCError as e:
error = 'Could not set {} axis to {} ({}).'.format(axis_name, new_low_limit, e.args[0])
Qt.QMessageBox.warning(self, 'RPC Exception', error)
low_limit_text_widget.returnPressed.connect(submit_low_limit_text)
def low_limit_text_focus_lost():
low_limit_text_widget.setText(str(props.get(low_limit_property, '')))
low_limit_text_widget.focus_lost.connect(low_limit_text_focus_lost)
# position sub-widget
handling_pos_change = util.Condition()
def position_changed(value):
if handling_pos_change:
return
with handling_pos_change:
pos_text_widget.setText(str(value))
pos_slider.setValue(int(value * pos_slider_factor))
self.subscribe(property, position_changed, readonly=True)
get_pos = getattr(self.scope.stage, '_get_{}'.format(axis_name))
set_pos = getattr(self.scope.stage, '_set_{}'.format(axis_name))
def submit_pos_text():
if handling_pos_change:
return
with handling_pos_change:
try:
new_pos = float(pos_text_widget.text())
except ValueError:
return
if new_pos != get_pos():
try:
set_pos(new_pos, async_='fire_and_forget')
except rpc_client.RPCError as e:
error = 'Could not set {} axis to {} ({}).'.format(axis_name, new_pos, e.args[0])
Qt.QMessageBox.warning(self, 'RPC Exception', error)
pos_text_widget.returnPressed.connect(submit_pos_text)
def pos_text_focus_lost():
pos_text_widget.setText(str(props.get(property, '')))
pos_text_widget.focus_lost.connect(pos_text_focus_lost)
# high limit sub-widget
high_limit_property = self.PROPERTY_ROOT + 'stage.{}_high_soft_limit'.format(axis_name)
handling_high_soft_limit_change = util.Condition()
def high_limit_prop_changed(value):
if handling_high_soft_limit_change:
return
with handling_high_soft_limit_change:
high_limit_text_widget.setText(str(value))
pos_text_validator.setTop(value)
low_limit_text_validator.setTop(value)
update_high_limit = self.subscribe(high_limit_property, high_limit_prop_changed)
if update_high_limit is None:
high_limit_text_widget.setEnabled(False)
else:
def submit_high_limit_text():
if handling_high_soft_limit_change:
return
with handling_high_soft_limit_change:
try:
new_high_limit = float(high_limit_text_widget.text())
except ValueError:
return
try:
update_high_limit(new_high_limit)
except rpc_client.RPCError as e:
error = 'Could not set {} axis to {} ({}).'.format(axis_name, new_high_limit, e.args[0])
Qt.QMessageBox.warning(self, 'RPC Exception', error)
high_limit_text_widget.returnPressed.connect(submit_high_limit_text)
def high_limit_text_focus_lost():
high_limit_text_widget.setText(str(props.get(high_limit_property, '')))
high_limit_text_widget.focus_lost.connect(high_limit_text_focus_lost)
def reset_limits_button_clicked(_):
update_low_limit(0.0)
self.get_scope_attr(self.PROPERTY_ROOT + 'stage.reset_{}_high_soft_limit'.format(axis_name))()
# TODO: verify the below doesn't blow up without indexing the
# overloaded clicked signal as [bool]
reset_limits_button.clicked.connect(reset_limits_button_clicked)
# We do not receive events for z high soft limit changes initiated by means other than assigning
# to scope.stage.z_high_soft_limit or calling scope.stage.reset_z_high_soft_limit(). However,
# the scope's physical interface does not offer any way to modify z high soft limit, with one
# possible exception: it would make sense for the limit to change with objective in order to prevent
# head crashing. In case that happens, we refresh z high soft limit upon objective change.
# TODO: verify that this is never needed and get rid of it if so
if axis_name == 'z':
def objective_changed(_):
if handling_high_soft_limit_change:
return
with handling_high_soft_limit_change:
high_limit_text_widget.setText(str(self.get_scope_attr(self.PROPERTY_ROOT + 'stage.z_high_soft_limit')))
self.subscribe(self.PROPERTY_ROOT + 'nosepiece.position', objective_changed, readonly=True)
return widget
class _ObjectivesModel(Qt.QAbstractListModel):
def __init__(self, mags, font, parent=None):
super().__init__(parent)
self.mags = mags
self.empty_pos_font = Qt.QFont(font)
self.empty_pos_font.setItalic(True)
def rowCount(self, _=None):
return len(self.mags)
def flags(self, midx):
f = Qt.Qt.ItemNeverHasChildren
if midx.isValid():
row = midx.row()
if row > 0:
f |= Qt.Qt.ItemIsEnabled | Qt.Qt.ItemIsSelectable
return f
def data(self, midx, role=Qt.Qt.DisplayRole):
if midx.isValid():
row = midx.row()
mag = self.mags[row]
if role == Qt.Qt.DisplayRole:
r = '{}: {}{}'.format(
row,
'BETWEEN POSITIONS' if row == 0 else mag,
'' if mag is None else '×')
return Qt.QVariant(r)
if role == Qt.Qt.FontRole and mag is None:
return Qt.QVariant(self.empty_pos_font)
return Qt.QVariant()
class LimitPixmapsAndToolTips:
def __init__(self, height=25):
flip = Qt.QTransform()
flip.rotate(180)
for icon in ('no_limit', 'soft_limit', 'hard_limit', 'hard_and_soft_limits'):
fname = pkg_resources.resource_filename(__name__, f'limit_icons/{icon}.svg')
im = Qt.QImage(fname).scaledToHeight(height)
setattr(self, 'low_'+icon+'_pm', Qt.QPixmap.fromImage(im))
setattr(self, 'high_'+icon+'_pm', Qt.QPixmap.fromImage(im.transformed(flip)))
setattr(self, 'low_'+icon+'_tt', icon[0].capitalize() + icon[1:].replace('_', ' ') + ' reached.')
setattr(self, 'high_'+icon+'_tt', icon[0].capitalize() + icon[1:].replace('_', ' ') + ' reached.')
class FocusLossSignalingLineEdit(Qt.QLineEdit):
focus_lost = Qt.pyqtSignal()
def focusOutEvent(self, event):
super().focusOutEvent(event)
self.focus_lost.emit()
def sizeHint(self):
hint = super().sizeHint()
hint.setWidth(self.fontMetrics().width('44.57749') * 1.3)
return hint
| mit | -6,965,134,118,459,684,000 | 45.769539 | 129 | 0.597395 | false | 3.87417 | false | false | false |
Antreasgr/Random-Graphs | Python/SHET.py | 1 | 8516 |
import os
# import networkx as nx
import numpy
from numpy.random import RandomState
from clique_tree import *
from nx_converters import *
from randomizer import *
from subtrees import *
from datetime import datetime
from Runners import *
from report_generator import *
from enum import Enum
import yaml
from yaml import Loader, Dumper
# from joblib import Parallel, delayed
# import plotter
"""
Create a random chordal graph
"""
def tree_generation(n_vert, rand):
"""
Creates a random tree on n nodes
and create the adjacency lists for each node
"""
tree = [TreeNode(0)]
for uid in range(0, n_vert - 1):
parent, _ = rand.next_element(tree)
newnode = TreeNode(uid + 1)
# update the adjacency lists
newnode.Ax.append(parent)
parent.Ax.append(newnode)
parent.Dx[newnode] = len(parent.Ax) - 1
newnode.Dx[parent] = len(newnode.Ax) - 1
# update helper, children list, parent pointer
parent.children.append(newnode)
newnode.parent = parent
# append to tree
tree.append(newnode)
return tree
def chordal_generation(run, rand):
"""
Generate a random chordal graph with n vertices, k is the algorithm parameter
"""
k = run["Parameters"]["k"]
n = run["Parameters"]["n"]
version = run["Parameters"]["version"]
if 2 * k - 1 > n:
raise Exception("chordal gen parameter k must be lower than n/2")
print("Begin Run ".center(70, "-"))
print("Parameters: ")
formatstr = ''
listvalues = [str(v) for v in run["Parameters"].values()]
listkeys = list(run["Parameters"].keys())
for ii, par in enumerate(listvalues):
formatstr += '{' + str(ii) + ':>' + str(max(len(par), len(listkeys[ii])) + 1) + '} |'
print(formatstr.format(*listkeys))
print(formatstr.format(*listvalues))
print("Times: ".center(70, "-"))
with Timer("t_real_tree", run["Times"]):
tree = tree_generation(n, rand)
with Timer("t_subtrees_2", run["Times"]):
if version == SHETVersion.ConnectedNodes:
connected_nodes(tree, n, rand)
elif version == SHETVersion.PrunedTree:
fraction = run["Parameters"]["edge_fraction"]
barier = run["Parameters"]["barier"]
for sub_tree_index in range(n):
pruned_tree(tree, n, sub_tree_index, fraction, barier, rand)
else:
for node in tree:
node.s = 0
for subtree_index in range(0, n):
sub_tree_gen(tree, k, subtree_index, rand, version)
# convert to networkx, our main algorithm
with Timer("t_ctree", run["Times"]):
nx_chordal, final_cforest = convert_clique_tree_networkx2(tree, n, True)
run["Graphs"]["tree"] = tree
run["Graphs"]["nx_chordal"] = nx_chordal
run["Graphs"]["final_cforest"] = final_cforest
print("End Run".center(70, "-"))
def post_process(run):
out = run["Output"]
graphs = run["Graphs"]
stats = run["Stats"]
times = run["Times"]
# get number of conected components
# stats["ncc"] = nx.number_connected_components(graphs["nx_chordal"])
# calculate time, and ratios
stats["total"] = times["t_real_tree"] + times["t_subtrees_2"] + times["t_ctree"]
# stats["ratio[total/chordal]"] = stats["total"] / float(times["t_chordal"])
# stats["ratio[total/forest]"] = stats["total"] / float(times["t_forestverify"])
# stats["ratio[total/[chordal+forest]]"] = stats["total"] / float(times["t_forestverify"] + times["t_chordal"])
# get output parameters
out["nodes"] = run["Parameters"]["n"] # len(graphs["nx_chordal"].nodes())
out["edges"] = graphs["nx_chordal"].size() # len(graphs["nx_chordal"].edges())
stats["edge_density"] = float(out["edges"]) / (float(out["nodes"] * (out["nodes"] - 1)) / 2)
temp_forest = cForest(1)
temp_forest.ctree.append(graphs["tree"])
# calculate tree output parameters
out["clique_trees"] = [dfs_forest(graphs["final_cforest"], run["Parameters"]["n"])]
ct_stats = out["clique_trees"][0]
ct_stats.max_clique_edge_distribution = (ct_stats.max_size * (ct_stats.max_size - 1) / 2) / out["edges"]
stats["ncc"] = len(graphs["final_cforest"].ctree)
# convert clique forest to nx for export to json
nx_ctrees = None # [convert_tree_networkx(tree) for tree in graphs["final_cforest"].ctree]
# nx_ctrees.insert(0, convert_tree_networkx(graphs["tree"]))
return nx_ctrees
def run_SHET_PRUNED(list_vertices, list_f_s, num_runs):
shet_data = []
for j, num in enumerate(list_vertices):
for f, s in list_f_s[j]:
Runners = []
for i in range(num_runs):
randomizer = Randomizer(2 * num)
Runners.append(runner_factory(num, NAME, None, k=0, edge_fraction=f, barier=s, version=SHETVersion.PrunedTree))
chordal_generation(Runners[-1], randomizer)
trees1 = post_process(Runners[-1])
Runners[-1]["Stats"]["randoms"] = randomizer.total_count
# cleanup some memory
del Runners[-1]["Graphs"]
print(".....Done")
shet_data.append(merge_runners(Runners))
run_reports_data(NAME, shet_data)
def run_normal_SHET(list_vertices, list_k, num_runs):
shet_data = []
for j, num in enumerate(list_vertices):
for factor in list_k[j]:
Runners = []
par_k = int(num * factor)
par_k = max(1, par_k)
par_k = min(num // 2, par_k)
for i in range(num_runs):
randomizer = Randomizer(2 * num)
Runners.append(runner_factory(num, NAME, None, k=par_k, version=SHETVersion.Dict))
chordal_generation(Runners[-1], randomizer)
trees1 = post_process(Runners[-1])
Runners[-1]["Parameters"]["k/n"] = str(par_k / num)
Runners[-1]["Stats"]["randoms"] = randomizer.total_count
# cleanup some memory
del Runners[-1]["Graphs"]
print(".....Done")
# # RUNNER contains all data and statistics
# filename = "Results/SHET/Run_{}_{}_{}.yml".format(num, par_k, datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
# if not os.path.isdir(os.path.dirname(filename)):
# os.makedirs(os.path.dirname(filename))
# with io.open(filename, 'w') as file:
# print_statistics(Runners, file)
shet_data.append(merge_runners(Runners))
run_reports_data(NAME, shet_data)
def run_SHET_Connected_Nodes(list_vertices, list_lamda, num_runs):
shet_data = []
for j, num in enumerate(list_vertices):
for l in list_lamda[j]:
Runners = []
for i in range(num_runs):
randomizer = Randomizer(2 * num)
Runners.append(runner_factory(num, NAME, None, k=0, lamda=l, version=SHETVersion.ConnectedNodes))
chordal_generation(Runners[-1], randomizer)
trees1 = post_process(Runners[-1])
Runners[-1]["Stats"]["randoms"] = randomizer.total_count
# cleanup some memory
del Runners[-1]["Graphs"]
print(".....Done")
shet_data.append(merge_runners(Runners))
run_reports_data(NAME, shet_data)
NAME = "SHET_CNODES"
if __name__ == '__main__':
NUM_VERTICES = [50, 100, 500, 1000, 2500, 5000, 10000]
PAR_K_FACTOR = [
[0.03, 0.1, 0.2, 0.32, 0.49], # 50
[0.04, 0.1, 0.22, 0.33, 0.49], # 100
[0.02, 0.05, 0.08, 0.2, 0.40], # 500
[0.02, 0.05, 0.08, 0.18, 0.33], # 1000
[0.01, 0.04, 0.07, 0.13, 0.36], # 2500
[0.01, 0.04, 0.07, 0.1, 0.36], # 5000
[0.009, 0.03, 0.06, 0.09, 0.33] # 10000
]
PAR_F_S_PRUNED = [
[(0.7, 0.6), (0.14, 0.85), (0.1, 0.93)], # 50
[(0.7, 0.6), (0.14, 0.85), (0.1, 0.93)], # 100
[(0.7, 0.6), (0.14, 0.85), (0.1, 0.93)], # 500
[(0.7, 0.6), (0.14, 0.85), (0.1, 0.93)], # 1000
[(0.7, 0.7), (0.12, 0.9), (0.077, 0.95)], # 2500
[(0.700, 0.75), (0.080, 0.91), (0.045, 0.96)], # 5000
[(0.70, 0.81), (0.060, 0.93), (0.031, 0.96)] # 10000
]
PAR_L = [[0], [0], [0], [0], [0], [0], [0]]
# run_SHET_PRUNED(NUM_VERTICES, PAR_F_S_PRUNED, 3)
    # run_normal_SHET(NUM_VERTICES, PAR_K_FACTOR, 10)
run_SHET_Connected_Nodes(NUM_VERTICES, PAR_L, 5)
|
mit
| 1,985,836,268,281,842,400 | 34.781513 | 127 | 0.564937 | false | 3.184742 | false | false | false |
lmiphay/gentoo-oam
|
oam/eventparser/scanner.py
|
1
|
1799
|
#!/usr/bin/python
from __future__ import print_function
import sys
import os
import subprocess
import logging
import glob
import collections
import unittest
import re
class Scanner:
def __init__(self, report, checker):
self.report = report
self.checker = checker
self.logger = logging.getLogger("oam.eventparser.scanner")
def parse(self):
for chk in self.checker:
self.in_block = False
for line, i in self.report.scan():
match = re.search(chk.RECORD, line)
self.logger.log(logging.INFO, "line: %d, %s", i, line)
if match:
self.logger.log(logging.INFO, "parse-match: %s", str(match.groups()))
self.groups = match.groups()
if self.groups[0]:
self.in_block = True
elif self.groups[-1]:
self.in_block = False
elif self.in_block:
ev = chk.process(line, match)
if ev: yield ev
if self.in_block:
self.report.consume(i, chk.TAG)
            if chk.ev is not None: yield chk.ev
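# Note on the checker interface assumed by Scanner.parse(): each checker is
# expected to expose a RECORD regex, a TAG label passed to report.consume(),
# a process(line, match) method returning events to yield, and an ev
# attribute that is yielded after the scan when it is not None.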
class ScannerTestCase(unittest.TestCase):
def setUp(self):
self.logger = logging.getLogger("oam.eventparser.scanner.test")
def test_scanner(self):
pass
if __name__ == '__main__':
if len(sys.argv)==1:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(message)s')
unittest.main()
else:
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
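        # Note: EventParser is not defined or imported in this file; this
        # entry point assumes it is provided elsewhere in the oam.eventparser
        # package.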
sys.exit(EventParser(sys.argv[1]).run())
|
gpl-2.0
| 8,609,256,467,794,436,000 | 30.017241 | 89 | 0.526959 | false | 4.223005 | true | false | false |
cdeil/gammalib
|
inst/cta/test/test_irf_offset.py
|
1
|
5601
|
#! /usr/bin/env python
# ===========================================================================================#
# This script tests the offset angle dependence of the instrumental response function.
#
# ===========================================================================================#
from gammalib import *
# ====================== #
# Set point source model #
# ====================== #
def ptsrc_model(ra=0.0, dec=0.0):
"""
    Set point source model.
"""
# Set shell centre
pos = GSkyDir()
pos.radec_deg(ra, dec)
# Set spatial model
spatial = GModelSpatialPtsrc(pos)
# Set spectral model
spectral = GModelSpectralPlaw(1.0, -2.0)
# Set sky model
model = GModelPointSource(spatial, spectral)
# Return model
return model
# =============== #
# Set shell model #
# =============== #
def shell_model(ra=0.3, dec=0.3, radius=0.3, width=0.1):
"""
Set shell model.
"""
# Set shell centre
center = GSkyDir()
center.radec_deg(ra, dec)
# Set radial model
radial = GModelRadialShell(center, radius, width, False)
# Set spectral model
spectral = GModelSpectralPlaw(1.0, -2.0)
# Set sky model
model = GModelExtendedSource(radial, spectral)
# Return model
return model
# =============== #
# Set disk model #
# =============== #
def disk_model(ra=359.6, dec=-0.2, radius=0.4):
"""
Set disk model.
"""
# Set disk centre
center = GSkyDir()
center.radec_deg(ra, dec)
# Set radial model
radial = GModelRadialDisk(center, radius)
# Set spectral model
spectral = GModelSpectralPlaw(1.0, -2.0)
# Set sky model
model = GModelExtendedSource(radial, spectral)
# Return model
return model
# ================== #
# Set Gaussian model #
# ================== #
def gauss_model(ra=359.6, dec=+0.1, sigma=0.2):
"""
Set Gaussian model.
"""
# Set Gaussian centre
center = GSkyDir()
center.radec_deg(ra, dec)
# Set radial model
radial = GModelRadialGauss(center, sigma)
# Set spectral model
spectral = GModelSpectralPlaw(1.0, -2.0)
# Set sky model
model = GModelExtendedSource(radial, spectral)
# Return model
return model
# ========================== #
# Set binned CTA observation #
# ========================== #
def observation(ra=0.0, dec=0.0, binsz=0.05, npix=200, ebins=10):
"""
Set binned CTA observation.
"""
# Allocate observation
obs = GCTAObservation()
# Set response
obs.response("kb_E_50h_v3", "../caldb")
# Set pointing
dir = GSkyDir()
pnt = GCTAPointing()
dir.radec_deg(ra, dec)
pnt.dir(dir)
obs.pointing(pnt)
    # Set energy boundaries and good time intervals
ebounds = GEbounds()
emin = GEnergy()
emax = GEnergy()
emin.TeV(0.1)
emax.TeV(100.0)
ebounds.setlog(emin, emax, ebins)
gti = GGti()
tmin = GTime()
tmax = GTime()
tmin.met(0.0)
tmax.met(1800.0)
gti.append(tmin, tmax)
map = GSkymap("CAR", "CEL", ra, dec, -binsz, binsz, npix, npix, ebins)
cube = GCTAEventCube(map, ebounds, gti)
obs.events(cube)
# Optionally show observation
# print obs
# Return observation
return obs
# ================ #
# Create model map #
# ================ #
def modmap(obs, models, phi=0, theta=0, filename="modmap.fits"):
"""
Create model map.
"""
# Loop over all bins
for bin in obs.events():
# Cast to CTA bin
bin = cast_GCTAEventBin(bin)
# Set bin energy and time as source energy and time (no dispersion)
srcDir = bin.dir()
srcEng = bin.energy()
srcTime = bin.time()
# Compute IRF
irf = 0.0
for model in models:
irf += obs.response().irf(bin, model, srcEng, srcTime, obs) * bin.size()
# Set bin
bin.counts(irf)
# Save observation
obs.save(filename, True)
# Return
return
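# The resulting map holds, per spatial/energy bin, the sum over models of the
# IRF value times the bin size, i.e. the expected counts shape evaluated
# without energy dispersion (bin energy and time are reused as source values).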
#==========================#
# Main routine entry point #
#==========================#
if __name__ == '__main__':
"""
Test offset angle dependence of IRF.
"""
# Dump header
print
print "***************************************"
print "* Test offset angle dependence of IRF *"
print "***************************************"
    # Select which model set to use (1 = point sources, 2 = disks)
set = 2
# Set CTA observation
obs = observation()
print obs
# Set offset angle range
# offsets = [0.0, 1.0, 2.0, 3.0]
offsets = [0.0]
# Loop over offset angles
for offset in offsets:
# Set models
if set == 1:
model1 = ptsrc_model(ra=0.0, dec=offset)
model2 = ptsrc_model(ra=1.0, dec=0.0)
model3 = ptsrc_model(ra=2.0, dec=0.0)
model4 = ptsrc_model(ra=3.0, dec=0.0)
model5 = ptsrc_model(ra=4.0, dec=0.0)
models = [model1, model2, model3, model4, model5]
elif set == 2:
model1 = disk_model(ra=0.0, dec=offset)
model2 = disk_model(ra=1.0, dec=0.0)
model3 = disk_model(ra=2.0, dec=0.0)
model4 = disk_model(ra=3.0, dec=0.0)
model5 = disk_model(ra=4.0, dec=0.0)
models = [model1, model2, model3, model4, model5]
# model = shell_model(ra=0.0, dec=offset)
# model = disk_model(ra=0.0, dec=offset)
# model = gauss_model(ra=0.0, dec=offset)
# Print model
# print model
# Set filename
filename = "modmap_theta%2.2d.fits" % (int(offset * 10.0))
# Create model map
modmap(obs, models, phi=0.0, theta=0.0, filename=filename)
|
gpl-3.0
| 4,921,560,824,327,337,000 | 22.834043 | 94 | 0.518657 | false | 3.296645 | false | false | false |
Ehnonymoose/delver
|
server/delver.py
|
1
|
2586
|
from flask import Flask, request, jsonify, send_from_directory, make_response
from functools import wraps, update_wrapper
from datetime import datetime
from sqlalchemy import and_, or_
from sqlalchemy.sql import select
app = Flask(__name__)
from database import db_session
import query
import models
import json
NUM_RESULTS_PER_QUERY = 15
@app.teardown_appcontext
def shutdown_session(exception=None):
db_session.remove()
from models import Card, CardPrinting, Set
def serializeCard(card, printing, cardSet):
data = {
'name': card.name,
'layout': card.layout,
'related': [x.name for x in card.related],
'mana': card.manaCost,
'cmc': card.cmc,
'types': card.types,
'rules': card.rules
}
if card.power is not None:
data['power'] = card.power
if card.toughness is not None:
data['toughness'] = card.toughness
if card.loyalty is not None:
data['loyalty'] = card.loyalty
if printing.flavor is not None:
data['flavor'] = printing.flavor
if printing.rarity is not None:
data['rarity'] = printing.rarity
return data
@app.route("/query")
def handleQuery():
tokens = query.parse(request.args.get('q', ''))
print(tokens)
start = request.args.get('start', 0)
try:
start = int(start)
except:
start = 0
print (start)
clauses = query.generateClauses(tokens)
statement = and_(*clauses)
sql = db_session.query(models.Card, models.CardPrinting, models.Set)\
.join(models.CardPrinting).join(models.Set)\
.filter(statement)\
.group_by(models.Card.id).order_by(models.Card.name)
# Get a count of all results
count = sql.count()
# Now get a selection of results
if start > 0:
sql = sql.offset(start)
sql = sql.limit(NUM_RESULTS_PER_QUERY)
print(sql)
results = sql.all()
serializedResults = [ serializeCard(*result) for result in results ]
results = {
'count': count,
'start': start,
'cards': serializedResults
}
return json.dumps(results)
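# Illustrative shape of the /query response (field values are examples only):
#     {"count": 1, "start": 0,
#      "cards": [{"name": "...", "layout": "normal", "related": [],
#                 "mana": "...", "cmc": 1, "types": "...", "rules": "..."}]}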
def nocache(view):
@wraps(view)
def no_cache(*args, **kwargs):
response = make_response(view(*args, **kwargs))
response.headers['Last-Modified'] = datetime.now()
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
response.headers['Pragma'] = 'no-cache'
response.headers['Expires'] = '-1'
return response
return update_wrapper(no_cache, view)
@app.route('/', defaults={'path':'index.html'})
@app.route('/<path:path>')
@nocache
def main(path):
return send_from_directory('public', path)
if __name__ == "__main__":
app.run()
|
mit
| 8,030,225,006,713,413,000 | 20.371901 | 119 | 0.678268 | false | 3.093301 | false | false | false |
hlt-bme-hu/eval-embed
|
translate.py
|
1
|
5706
|
import sys
import numpy
from collections import defaultdict
from itertools import chain
import argparse
def renormalize(M):
M /= numpy.linalg.norm(M, axis=1)[:, None]
return
def renormalize_vector(v):
return v / numpy.linalg.norm(v)
def outer(l1, l2):
return list(chain(*[[(x,y) for x in l1] for y in l2]))
def read_embed(file, word_list):
n, dim = map(int, file.readline().strip().split())
W = []
V = defaultdict(list)
i2w = {}
i = 0
multi = False
for line in file:
parts = line.strip().split()
if len(word_list) == 0 or parts[0] in word_list:
W.append(map(float, parts[1:]))
V[parts[0]].append(i)
i2w[i] = parts[0]
if not multi and len(V[parts[0]]) > 1:
multi = True
i += 1
return numpy.array(W), dict(V), i2w, multi
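# read_embed expects the common word2vec text format: a header line with
# "<vocab_size> <dim>" followed by one "<token> <dim floats>" line per vector.
# V maps each kept token to the row indices of all its vectors, so multi is
# True when some token has more than one prototype.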
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("type", type=str, choices=["train", "test"])
parser.add_argument("embed1")
parser.add_argument("embed2")
parser.add_argument("seed_dict")
parser.add_argument("--train-mode", dest="train_mode", default="single",
choices=["single", "first", "all"])
parser.add_argument("-n", dest="n", default=5, type=int,
help="number of examples shown")
parser.add_argument("--verbose", help="writes translation examples to stderr",
action="store_true")
parser.add_argument("--fit", dest="fit", type=str, default="lin",
help="seeks for linear or orthogonal transformation",
choices=['lin', 'ortho'])
parser.add_argument("--normalize", default=False, action="store_true",
help="normalizes embedding before fitting the translation matrix")
args = parser.parse_args()
seed_list = [tuple(line.strip().split()) for line in open(args.seed_dict, "r")]
if args.type == "train":
lang1_words = [pair[0] for pair in seed_list]
lang2_words = [pair[1] for pair in seed_list]
else:
if args.verbose:
lang1_words = []
lang2_words = []
else:
lang1_words = [pair[0] for pair in seed_list]
lang2_words = []
W1, V1, i2w1, multi1 = read_embed(open(args.embed1), lang1_words)
W2, V2, i2w2, multi2 = read_embed(open(args.embed2), lang2_words)
if args.type == "train":
M1 = numpy.zeros((0, W1.shape[1]))
M2 = numpy.zeros((0, W2.shape[1]))
if args.train_mode == "single":
if multi1 or multi2:
print >>sys.stderr, "Not a single prototype embedding!"
exit(1)
train_pairs = [(V1[s], V2[t]) for s, t in seed_list if s in V1 and t in V2]
if args.train_mode == "first":
train_pairs = [(p1[0], p2[0]) for p1, p2 in train_pairs]
else:
train_pairs = list(chain(*[outer(p1, p2) for p1, p2 in train_pairs]))
lang1_indices, lang2_indices = zip(*train_pairs)
M1 = W1[lang1_indices, :]
M2 = W2[lang2_indices, :]
if args.normalize:
renormalize(M1)
renormalize(M2)
if args.fit == "lin":
T = numpy.linalg.lstsq(M1, M2)[0]
else:
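            # Orthogonal fit: with M1^T M2 = U S Vt, the orthogonal matrix
            # minimising ||M1 T - M2||_F is T = U Vt (numpy's svd returns Vt).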
M=M1.transpose().dot(M2)
U, s, V = numpy.linalg.svd(M, full_matrices=True)
T=U.dot(V)
numpy.savetxt(sys.stdout, T)
else:
T = numpy.loadtxt(sys.stdin)
renormalize(W2)
seed_dict = defaultdict(set)
for source, target in seed_list:
seed_dict[source].add(target)
seed_dict = dict(seed_dict)
for source, targets in seed_dict.iteritems():
weak_hit = W2.shape[0]
weak_answers = list(chain(*[V2[t] for t in targets if t in V2]))
strong_hits = [W2.shape[0]] * len(targets)
strong_answers = [V2[t] if t in V2 else [] for t in targets]
if source in V1:
for s in V1[source]:
translated = renormalize_vector(W1[s].dot(T))
scores = W2.dot(translated)
indices = numpy.argsort(scores)[::-1]
if args.verbose:
closest = (numpy.argsort(W1.dot(W1[s]))[::-1])[:args.n]
for c in closest:
print >>sys.stderr, i2w1[c],
print >>sys.stderr, "->",
for t in indices[:args.n]:
print >>sys.stderr, i2w2[t],
print >>sys.stderr, "|",
for a in targets:
print >>sys.stderr, a,
print >>sys.stderr
if len(weak_answers) > 0:
this_weak_hit = min(list(indices).index(t) for t in weak_answers)
if this_weak_hit < weak_hit:
weak_hit = this_weak_hit
for j in range(len(targets)):
if len(strong_answers[j]) > 0:
this_strong_hit = min(list(indices).index(t) for t in strong_answers[j])
if this_strong_hit < strong_hits[j]:
strong_hits[j] = this_strong_hit
for strong_hit, target in zip(*[strong_hits, targets]):
print weak_hit + 1, strong_hit + 1, source, target
|
lgpl-3.0
| 7,749,965,516,054,434,000 | 38.625 | 100 | 0.502454 | false | 3.739187 | false | false | false |
wilg64/MarkovTweet
|
markovtweet.py
|
1
|
5870
|
import tweepy
import json
import re
import time
import random
def create_api(config_filename):
"""
Creates an authorized tweepy API object given a config file containing
appropriate twitter application keys
:param config_filename: string containing the config filename
:return: the tweepy API object associated with the authorized twitter
application
"""
with open(config_filename) as api_keys:
keys = json.load(api_keys)['twitter']
api_key = keys['API Key']
secret_key = keys['API Secret']
access_tok = keys['Access Token']
access_tok_sec = keys['Access Token Secret']
auth = tweepy.OAuthHandler(api_key,secret_key)
auth.set_access_token(access_tok, access_tok_sec)
api = tweepy.API(auth)
return api
def limit_handled(cursor):
"""
Function to handle api call limits. When limit is reached, the function
will wait 15 minutes before iterating. From Tweepy website
:param cursor:
:return:
"""
while True:
try:
yield cursor.next()
except tweepy.RateLimitError:
time.sleep(15 * 60)
def tokenize(tweet):
"""
Uses regular expressions to tokenize tweets
:param tweet: the text of a given tweet
:return: the tokenization of that tweet as a list
"""
emoticons_str = r"""
(?:
[:=;] #
[oO\-]?
[D\)\]\(\]/\\OpP]
)"""
regex_str = [
emoticons_str,
r'<[^>]+>', # HTML tags
r'(?:@[\w_]+)', # @-mentions
r"(?:\#+[\w_]+[\w\'_\-]*[\w_]+)", # hash-tags
r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+', # URLs
r'(?:(?:\d+,?)+(?:\.?\d+)?)', # numbers
r"(?:[a-z][a-z'\-_]+[a-z])", # words with - and '
r'(?:[\w_]+)', # other words
r'(?:\S)' # anything else
]
tokens_re = re.compile(r'('+'|'.join(regex_str)+')', re.VERBOSE | re.IGNORECASE)
return tokens_re.findall(tweet)
class Markov_Chain:
def __init__(self):
self.mc = {}
class Probability_Distribution:
def __init__(self):
self.dist = {}
self.total = 0
def pick(self):
"""
Randomly returns a random token given the current distribution
:return: a random token from the distribution
"""
randnum = random.randrange(self.total)
currDex = 0
for token in self.dist:
currCnt = self.dist[token]
if randnum < currCnt + currDex:
return token
currDex += currCnt
def update(self, token):
"""
Increment the probability of encountering a certain token
:param token: a string containing the token
"""
if token in self.dist:
self.dist[token] += 1
else:
self.dist[token] = 1
self.total += 1
def update_markov_chain(self, tokens):
"""
Updates the markov structure with a new tokenized tweet
:param tokens: list of strings from tokenized tweet
"""
for i in range(1,len(tokens)):
if tokens[i-1] in self.mc:
self.mc[tokens[i-1]].update(tokens[i])
else:
self.mc[tokens[i-1]] = self.Probability_Distribution()
self.mc[tokens[i-1]].update(tokens[i])
#need to account for final token
if i == len(tokens) - 1:
if tokens[i] in self.mc:
self.mc[tokens[i]].update('END_OF_TWEET')
else:
self.mc[tokens[i]] = self.Probability_Distribution()
self.mc[tokens[i]].update('END_OF_TWEET')
def train_on_tweets(self, api, ids, limit = -1):
"""
Trains the given markov chain on the given twitter handles
:param api: the authorized tweepy api object
:param ids: list of ids you'd like to train on
:param limit: limits the number of tweets, default no limit
:return:
"""
for user in ids:
if (limit > 0):
for tweet in limit_handled(tweepy.Cursor(api.user_timeline, id = user).items(limit)):
self.update_markov_chain(tokenize(tweet.text))
else:
for tweet in limit_handled(tweepy.Cursor(api.user_timeline, id = user).items()):
self.update_markov_chain(tokenize(tweet.text))
    def save_markov_chain(self, filename):
        """
        Serializes a markov chain into a JSON file
        :param filename: string containing path
        """
        # Probability_Distribution objects are not JSON serializable as-is,
        # so convert them to plain dicts before writing them out.
        serializable = {token: {'dist': prob.dist, 'total': prob.total}
                        for token, prob in self.mc.items()}
        with open(filename, 'w') as outfile:
            json.dump(serializable, outfile)
    def load_markov_chain(self, filename):
        """
        Loads a previously trained markov chain from a json file
        :param filename: string containing path
        """
        with open(filename) as infile:
            raw = json.load(infile)
        # Rebuild Probability_Distribution objects so generate_next_token()
        # can call .pick() on the loaded data.
        self.mc = {}
        for token, data in raw.items():
            prob = self.Probability_Distribution()
            prob.dist = data['dist']
            prob.total = data['total']
            self.mc[token] = prob
def generate_next_token(self, token):
"""
Given a token, produces a likely next token
:param token:
:return:
"""
return self.mc[token].pick()
def generate_tweet(self, seed):
"""
Takes an intial word then generates a tweet string
:param seed: the initial word
:return: string containing generated tweet
"""
tweet = seed
while len(tweet) < 140:
try:
next = self.generate_next_token(seed)
if next == "END_OF_TWEET":
break
tweet += " " + next
seed = next
except KeyError:
print "Seed not present in the Markov Chain"
return ""
return tweet
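# Illustrative usage (the config filename and handle below are placeholders):
#     api = create_api('config.json')
#     mc = Markov_Chain()
#     mc.train_on_tweets(api, ['some_user'], limit=200)
#     print mc.generate_tweet('the')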
|
mit
| 7,790,373,430,743,105,000 | 27.5 | 101 | 0.529642 | false | 4.031593 | false | false | false |
henryiii/rootpy
|
rootpy/plotting/utils.py
|
1
|
14257
|
# Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
from __future__ import absolute_import
from math import log
import operator
import ROOT
from .canvas import _PadBase
from .hist import _Hist, Hist, HistStack
from .graph import _Graph1DBase, Graph
from ..context import preserve_current_canvas, do_nothing
from ..extern.six.moves import range
__all__ = [
'draw',
'get_limits',
'get_band',
'canvases_with',
'find_all_primitives',
'tick_length_pixels',
]
def draw(plottables, pad=None, same=False,
xaxis=None, yaxis=None,
xtitle=None, ytitle=None,
xlimits=None, ylimits=None,
xdivisions=None, ydivisions=None,
logx=False, logy=False,
**kwargs):
"""
Draw a list of histograms, stacks, and/or graphs.
Parameters
----------
plottables : Hist, Graph, HistStack, or list of such objects
List of objects to draw.
pad : Pad or Canvas, optional (default=None)
The pad to draw onto. If None then use the current global pad.
same : bool, optional (default=False)
If True then use 'SAME' draw option for all objects instead of
all but the first. Use this option if you are drawing onto a pad
that already holds drawn objects.
xaxis : TAxis, optional (default=None)
Use this x-axis or use the x-axis of the first plottable if None.
yaxis : TAxis, optional (default=None)
Use this y-axis or use the y-axis of the first plottable if None.
xtitle : str, optional (default=None)
Set the x-axis title.
ytitle : str, optional (default=None)
Set the y-axis title.
xlimits : tuple, optional (default=None)
Set the x-axis limits with a 2-tuple of (min, max)
ylimits : tuple, optional (default=None)
Set the y-axis limits with a 2-tuple of (min, max)
xdivisions : int, optional (default=None)
Set the number of divisions for the x-axis
ydivisions : int, optional (default=None)
Set the number of divisions for the y-axis
logx : bool, optional (default=False)
If True, then set the x-axis to log scale.
logy : bool, optional (default=False)
If True, then set the y-axis to log scale.
kwargs : dict
All extra arguments are passed to get_limits when determining the axis
limits.
Returns
-------
(xaxis, yaxis), (xmin, xmax, ymin, ymax) : tuple
The axes and axes bounds.
See Also
--------
get_limits
"""
context = preserve_current_canvas if pad else do_nothing
if not isinstance(plottables, (tuple, list)):
plottables = [plottables]
elif not plottables:
raise ValueError("plottables is empty")
with context():
if pad is not None:
pad.cd()
# get the axes limits
xmin, xmax, ymin, ymax = get_limits(plottables,
logx=logx, logy=logy,
**kwargs)
if xlimits is not None:
xmin, xmax = xlimits
if ylimits is not None:
ymin, ymax = ylimits
if not same:
obj = plottables.pop(0)
if isinstance(obj, ROOT.THStack):
obj.SetMinimum(ymin)
obj.SetMaximum(ymax)
obj.Draw()
xaxis = obj.xaxis
yaxis = obj.yaxis
# draw the plottables
for i, obj in enumerate(plottables):
if i == 0 and isinstance(obj, ROOT.THStack):
# use SetMin/Max for y-axis
obj.SetMinimum(ymin)
obj.SetMaximum(ymax)
# ROOT: please fix this...
obj.Draw('SAME')
# set the axes limits and titles
if xaxis is not None:
xaxis.SetLimits(xmin, xmax)
xaxis.SetRangeUser(xmin, xmax)
if xtitle is not None:
xaxis.SetTitle(xtitle)
if xdivisions is not None:
xaxis.SetNdivisions(xdivisions)
if yaxis is not None:
yaxis.SetLimits(ymin, ymax)
yaxis.SetRangeUser(ymin, ymax)
if ytitle is not None:
yaxis.SetTitle(ytitle)
if ydivisions is not None:
yaxis.SetNdivisions(ydivisions)
if pad is None:
pad = ROOT.gPad.func()
pad.SetLogx(bool(logx))
pad.SetLogy(bool(logy))
# redraw axes on top
# axes ticks sometimes get hidden by filled histograms
pad.RedrawAxis()
return (xaxis, yaxis), (xmin, xmax, ymin, ymax)
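# Minimal usage sketch (h1 and h2 stand for Hist objects built elsewhere):
#     axes, bounds = draw([h1, h2], xtitle='x', ytitle='entries', logy=True)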
multiadd = lambda a, b: map(operator.add, a, b)
multisub = lambda a, b: map(operator.sub, a, b)
def _limits_helper(x1, x2, a, b, snap=False):
"""
Given x1, x2, a, b, where:
x1 - x0 x3 - x2
a = ------- , b = -------
x3 - x0 x3 - x0
determine the points x0 and x3:
x0 x1 x2 x3
|----------|-----------------|--------|
"""
if x2 < x1:
raise ValueError("x2 < x1")
if a + b >= 1:
raise ValueError("a + b >= 1")
if a < 0:
raise ValueError("a < 0")
if b < 0:
raise ValueError("b < 0")
if snap:
if x1 >= 0:
x1 = 0
a = 0
elif x2 <= 0:
x2 = 0
b = 0
if x1 == x2 == 0:
# garbage in garbage out
return 0., 1.
elif x1 == x2:
# garbage in garbage out
return x1 - 1., x1 + 1.
if a == 0 and b == 0:
return x1, x2
elif a == 0:
return x1, (x2 - b * x1) / (1 - b)
elif b == 0:
return (x1 - a * x2) / (1 - a), x2
x0 = ((b / a) * x1 + x2 - (x2 - x1) / (1 - a - b)) / (1 + b / a)
x3 = (x2 - x1) / (1 - a - b) + x0
return x0, x3
def get_limits(plottables,
xpadding=0,
ypadding=0.1,
xerror_in_padding=True,
yerror_in_padding=True,
snap=True,
logx=False,
logy=False,
logx_crop_value=1E-5,
logy_crop_value=1E-5,
logx_base=10,
logy_base=10):
"""
Get the axes limits that should be used for a 1D histogram, graph, or stack
of histograms.
Parameters
----------
plottables : Hist, Graph, HistStack, or list of such objects
The object(s) for which visually pleasing plot boundaries are
requested.
xpadding : float or 2-tuple, optional (default=0)
The horizontal padding as a fraction of the final plot width.
ypadding : float or 2-tuple, optional (default=0.1)
The vertical padding as a fraction of the final plot height.
xerror_in_padding : bool, optional (default=True)
If False then exclude the x error bars from the calculation of the plot
width.
yerror_in_padding : bool, optional (default=True)
If False then exclude the y error bars from the calculation of the plot
height.
snap : bool, optional (default=True)
Make the minimum or maximum of the vertical range the x-axis depending
on if the plot maximum and minimum are above or below the x-axis. If
the plot maximum is above the x-axis while the minimum is below the
x-axis, then this option will have no effect.
logx : bool, optional (default=False)
If True, then the x-axis is log scale.
logy : bool, optional (default=False)
If True, then the y-axis is log scale.
logx_crop_value : float, optional (default=1E-5)
If an x-axis is using a logarithmic scale then crop all non-positive
values with this value.
logy_crop_value : float, optional (default=1E-5)
If the y-axis is using a logarithmic scale then crop all non-positive
values with this value.
logx_base : float, optional (default=10)
The base used for the logarithmic scale of the x-axis.
logy_base : float, optional (default=10)
The base used for the logarithmic scale of the y-axis.
Returns
-------
xmin, xmax, ymin, ymax : tuple of plot boundaries
The computed x and y-axis ranges.
"""
try:
import numpy as np
use_numpy = True
except ImportError:
use_numpy = False
if not isinstance(plottables, (list, tuple)):
plottables = [plottables]
xmin = float('+inf')
xmax = float('-inf')
ymin = float('+inf')
ymax = float('-inf')
for h in plottables:
if isinstance(h, HistStack):
h = h.sum
if not isinstance(h, (_Hist, _Graph1DBase)):
raise TypeError(
"unable to determine plot axes ranges "
"from object of type `{0}`".format(
type(h)))
if use_numpy:
y_array_min = y_array_max = np.array(list(h.y()))
if yerror_in_padding:
y_array_min = y_array_min - np.array(list(h.yerrl()))
y_array_max = y_array_max + np.array(list(h.yerrh()))
_ymin = y_array_min.min()
_ymax = y_array_max.max()
else:
y_array_min = y_array_max = list(h.y())
if yerror_in_padding:
y_array_min = multisub(y_array_min, list(h.yerrl()))
y_array_max = multiadd(y_array_max, list(h.yerrh()))
_ymin = min(y_array_min)
_ymax = max(y_array_max)
if isinstance(h, _Graph1DBase):
if use_numpy:
x_array_min = x_array_max = np.array(list(h.x()))
if xerror_in_padding:
x_array_min = x_array_min - np.array(list(h.xerrl()))
x_array_max = x_array_max + np.array(list(h.xerrh()))
_xmin = x_array_min.min()
_xmax = x_array_max.max()
else:
x_array_min = x_array_max = list(h.x())
if xerror_in_padding:
x_array_min = multisub(x_array_min, list(h.xerrl()))
x_array_max = multiadd(x_array_max, list(h.xerrh()))
_xmin = min(x_array_min)
_xmax = max(x_array_max)
else:
_xmin = h.xedgesl(1)
_xmax = h.xedgesh(h.nbins(0))
if logy:
_ymin = max(logy_crop_value, _ymin)
_ymax = max(logy_crop_value, _ymax)
if logx:
_xmin = max(logx_crop_value, _xmin)
_xmax = max(logx_crop_value, _xmax)
if _xmin < xmin:
xmin = _xmin
if _xmax > xmax:
xmax = _xmax
if _ymin < ymin:
ymin = _ymin
if _ymax > ymax:
ymax = _ymax
if isinstance(xpadding, (list, tuple)):
if len(xpadding) != 2:
raise ValueError("xpadding must be of length 2")
xpadding_left = xpadding[0]
xpadding_right = xpadding[1]
else:
xpadding_left = xpadding_right = xpadding
if isinstance(ypadding, (list, tuple)):
if len(ypadding) != 2:
raise ValueError("ypadding must be of length 2")
ypadding_top = ypadding[0]
ypadding_bottom = ypadding[1]
else:
ypadding_top = ypadding_bottom = ypadding
if logx:
x0, x3 = _limits_helper(
log(xmin, logx_base), log(xmax, logx_base),
xpadding_left, xpadding_right)
xmin = logx_base ** x0
xmax = logx_base ** x3
else:
xmin, xmax = _limits_helper(
xmin, xmax, xpadding_left, xpadding_right)
if logy:
y0, y3 = _limits_helper(
log(ymin, logy_base), log(ymax, logy_base),
ypadding_bottom, ypadding_top, snap=False)
ymin = logy_base ** y0
ymax = logy_base ** y3
else:
ymin, ymax = _limits_helper(
ymin, ymax, ypadding_bottom, ypadding_top, snap=snap)
return xmin, xmax, ymin, ymax
def get_band(low_hist, high_hist, middle_hist=None):
"""
Convert the low and high histograms into a TGraphAsymmErrors centered at
the middle histogram if not None otherwise the middle between the low and
high points, to be used to draw a (possibly asymmetric) error band.
"""
npoints = low_hist.nbins(0)
band = Graph(npoints)
for i in range(npoints):
center = low_hist.x(i + 1)
width = low_hist.xwidth(i + 1)
low, high = low_hist.y(i + 1), high_hist.y(i + 1)
if middle_hist is not None:
middle = middle_hist.y(i + 1)
else:
middle = (low + high) / 2.
yerrh = max(high - middle, low - middle, 0)
yerrl = abs(min(high - middle, low - middle, 0))
band.SetPoint(i, center, middle)
band.SetPointError(i, width / 2., width / 2.,
yerrl, yerrh)
return band
def canvases_with(drawable):
"""
Return a list of all canvases where `drawable` has been painted.
Note: This function is inefficient because it inspects all objects on all
canvases, recursively. Avoid calling it if you have a large number of
canvases and primitives.
"""
return [c for c in ROOT.gROOT.GetListOfCanvases()
if drawable in find_all_primitives(c)]
def find_all_primitives(pad):
"""
Recursively find all primities on a pad, even those hiding behind a
GetListOfFunctions() of a primitive
"""
result = []
for primitive in pad.GetListOfPrimitives():
result.append(primitive)
if hasattr(primitive, "GetListOfFunctions"):
result.extend(primitive.GetListOfFunctions())
if hasattr(primitive, "GetHistogram"):
p = primitive.GetHistogram()
if p:
result.append(p)
if isinstance(primitive, ROOT.TPad):
result.extend(find_all_primitives(primitive))
return result
def tick_length_pixels(pad, xaxis, yaxis, xlength, ylength=None):
"""
Set the axes tick lengths in pixels
"""
if ylength is None:
ylength = xlength
xaxis.SetTickLength(xlength / float(pad.height_pixels))
yaxis.SetTickLength(ylength / float(pad.width_pixels))
|
gpl-3.0
| 2,577,957,560,046,515,000 | 30.966368 | 79 | 0.556779 | false | 3.651895 | false | false | false |
q3k/kasownik
|
webapp/models.py
|
1
|
13156
|
#!/usr/bin/env python2
# - * - coding=utf-8 - * -
# Copyright (c) 2015, Sergiusz Bazanski <[email protected]>
# Copyright (c) 2015, Remigiusz Marcinkiewicz <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import datetime
import enum
import json
import re
from sqlalchemy.orm import subqueryload_all
from flask import g
from webapp import app, db, mc, cache_enabled
import directory
class APIKey(db.Model):
id = db.Column(db.Integer, primary_key=True)
secret = db.Column(db.String(64))
member = db.Column(db.Integer, db.ForeignKey("member.id"))
description = db.Column(db.Text)
class MemberTransfer(db.Model):
__tablename__ = "member_transfer"
id = db.Column(db.Integer, primary_key=True)
member = db.Column(db.Integer, db.ForeignKey("member.id"))
transfer_id = db.Column(db.Integer, db.ForeignKey("transfer.id"))
year = db.Column(db.Integer)
month = db.Column(db.Integer)
transfer = db.relationship("Transfer", backref="member_transfers")
def __init__(self, _id, year, month, transfer):
self.id = _id
self.year = year
self.month = month
self.transfer = transfer
class PaymentStatus(enum.Enum):
never_paid = 1 # never paid membership fees
unpaid = 2 # more than 3 fees unapid
okay = 3 # fees paid
class PaymentPolicy(enum.Enum):
normal = "Normal"
extended = "Extended Grace Period"
potato = "Potato"
disabled = "Disabled"
class MembershipType(enum.Enum):
fatty = "Fatty"
starving = "Starving"
class Member(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), unique=True)
type = db.Column(db.Enum("starving", "fatty", name="member_types"))
transfers = db.relationship("MemberTransfer",order_by=[db.asc(MemberTransfer.year), db.asc(MemberTransfer.month)])
# old field
active = db.Column(db.Boolean)
api_keys = db.relationship("APIKey")
join_year = db.Column(db.Integer)
join_month = db.Column(db.Integer)
ldap_username = db.Column(db.String(64), unique=True)
# Normal - standard 3 months grace period
# Extended Grace Period - do not shut off account after grace period
# Potato - do not ever shut off account, report falsified payment status
# Disabled - manual disable override, regardless of payment extra
payment_policy = db.Column(db.Enum(*[p.value for p in PaymentPolicy.__members__.values()],
name='payment_policy_types'))
preferred_email = db.Column(db.String(64))
def mt_covers(self, mt):
"""For transfer view - given an mt, should we rowspan?"""
if mt not in self.transfers:
return None
ix = self.transfers.index(mt)
if ix != 0:
# check if the previous mt was covered by the same transfer
if self.transfers[ix-1].transfer.uid == mt.transfer.uid:
return None
# check how many next mts use the same transfer
rowspan = 0
for ix2 in range(ix+1, len(self.transfers)):
if self.transfers[ix2].transfer.uid == mt.transfer.uid:
rowspan += 1
else:
break
if rowspan == 0:
return None
else:
return rowspan + 1
@classmethod
def get_members(kls, deep=False):
"""Gets all members as an SQLAlchemy query.
@param(deep) - whether to do a subqueryload_all and load all transfer data
"""
if deep:
return kls.query.options(subqueryload_all(kls.transfers,
MemberTransfer.transfer)).order_by(kls.username)
else:
return kls.query.order_by(kls.username)
def _yearmonth_increment(self, ym):
y, m = ym
y2, m2 = y, m+1
if m2 > 12:
y2 += 1
m2 = 1
return (y2, m2)
def _yearmonth_scalar(self, ym):
y, m = ym
return y * 12 + (m - 1)
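    # Example of the scalar encoding above: (2015, 3) -> 2015 * 12 + 2 = 24182,
    # so consecutive months always differ by exactly 1 and can be subtracted.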
def _get_status_uncached(self):
now_date = datetime.datetime.now()
now = now_date.year * 12 + (now_date.month - 1)
del now_date
status = {}
status['ldap_username'] = self.ldap_username
status['username'] = self.username
status['type'] = self.type
status['payment_policy'] = self.payment_policy
# First check - did we actually get any transfers?
if not self.transfers or self.transfers[0].transfer.uid == app.config['DUMMY_TRANSFER_UID']:
status['payment_status'] = PaymentStatus.never_paid.value
status['months_due'] = None
status['last_paid'] = (None, None)
if self.join_year is not None and self.join_month is not None:
status['joined'] = (self.join_year, self.join_month)
status['next_unpaid'] = self._yearmonth_increment(status['joined'])
else:
status['joined'] = (None, None)
status['next_unpaid'] = (None, None)
status['left'] = False
self._apply_judgement(status)
return status
# Use the join date from SQL, if available
if self.join_year is not None and self.join_month is not None:
joined = (self.join_year, self.join_month)
else:
joined = (self.transfers[0].year, self.transfers[0].month)
joined_scalar = self._yearmonth_scalar(joined)
status['joined'] = joined
most_recent_transfer = (0, 0)
unpaid_months = 0
# Iterate over all payments and figure out how much months are unpaid
previous_transfer = (0, 0)
previous_uid = None
active_payment = True
for mt in self.transfers:
this_transfer = (mt.year, mt.month)
this_scalar = self._yearmonth_scalar(this_transfer)
this_uid = mt.transfer.uid
previous_scalar = self._yearmonth_scalar(previous_transfer)
most_recent_scalar = self._yearmonth_scalar(most_recent_transfer)
# Is this transfer a „not a member anymore” transfer?
if this_uid == app.config['DUMMY_TRANSFER_UID']:
active_payment = False
continue
# Is this the first transfer? See if it was done on time
if previous_uid is None:
unpaid_months += (this_scalar - joined_scalar)
# Apply any missing payments
if active_payment and previous_uid is not None:
unpaid_months += (this_scalar - previous_scalar) - 1
# Is this the most recent payment?
if this_scalar > most_recent_scalar:
most_recent_scalar = this_scalar
most_recent_transfer = this_transfer
active_payment = True
previous_transfer = this_transfer
previous_uid = this_uid
# Apply missing payments from now
if active_payment:
previous_scalar = self._yearmonth_scalar(previous_transfer)
unpaid_months += (now - previous_scalar)
status['months_due'] = unpaid_months
status['payment_status'] = PaymentStatus.okay.value if unpaid_months < 4 else PaymentStatus.unpaid.value
status['last_paid'] = most_recent_transfer
status['left'] = not active_payment
if not active_payment:
status['next_unpaid'] = (None, None)
else:
status['next_unpaid'] = self._yearmonth_increment(status['last_paid'])
self._apply_judgement(status)
return status
def get_list_email(self):
if self.preferred_email:
return self.preferred_email
return '{}@hackerspace.pl'.format(self.ldap_username)
def get_contact_email(self):
if self.preferred_email:
return self.preferred_email
mra = directory.get_member_fields(g.ldap, self.ldap_username,
'mailRoutingAddress')
mra = mra['mailRoutingAddress']
if mra:
return mra
else:
return '{}@hackerspace.pl'.format(self.ldap_username)
def get_status(self):
"""It's better to call this after doing a full select of data."""
cache_key = 'kasownik-payment_status-{}'.format(self.username)
cache_data = mc.get(cache_key)
if cache_data and cache_enabled:
data = json.loads(cache_data)
return data
else:
cache_data = self._get_status_uncached()
mc.set(cache_key, json.dumps(cache_data))
return cache_data
def _apply_judgement(self, status):
"""Check your priviledge, you cisnormative shitlord!"""
if status['left']:
status['judgement'] = False
return
policy = status['payment_policy']
if policy == 'Normal':
if status['payment_status'] == PaymentStatus.okay.value and status['last_paid'][0] is not None:
status['judgement'] = True
else:
status['judgement'] = False
elif policy == 'Extended Grace Period':
status['judgement'] = True
elif policy == 'Potato':
status['judgement'] = True
status['months_due'] = 0
else:
status['judgement'] = False
def get_months_due(self):
status = self.get_status()
return status['months_due']
def get_last_paid(self):
status = self.get_status()
return status['last_paid']
def get_next_unpaid(self):
status = self.get_status()
return status['next_unpaid']
def __init__(self, _id, _username, _type, _active):
self.id = _id
self.username = _username
self.type = _type
self.active = _active
now_date = datetime.datetime.now()
self.join_year = now_date.year
self.join_month = now_date.month
self.ldap_username = _username
self.payment_policy = PaymentPolicy.normal.value
class Transfer(db.Model):
id = db.Column(db.Integer, primary_key=True)
uid = db.Column(db.String(128))
account_from = db.Column(db.String(32))
name_from = db.Column(db.String(256))
amount = db.Column(db.Integer)
title = db.Column(db.String(256))
date = db.Column(db.Date)
ignore = db.Column(db.Boolean)
def __init__(self, _id, _uid, _account_from, _name_from, _amount, _title, _date, _ignore):
self.id = _id
self.uid = _uid
self.account_from = _account_from
self.name_from = _name_from
self.amount = _amount
self.title = _title
self.date = _date
self.ignore = _ignore
def get_short_uid(self):
return self.uid[:16]
def parse_title(self):
m = re.match(ur"^([a-z0-9\-_\.]+) *\- *(fatty|starving|superfatty) *\- *([0-9a-z\-_ąężźćóżłśń \(\),/\.]+$)", self.title.strip().lower())
if not m:
return (None, None, None)
member, _type, title = m.group(1), m.group(2), m.group(3)
if title in [u"składka", u"opłata", u"opłata miesięczna", "skladka"]:
return (member, _type, None)
return member, _type, title
MATCH_OK, MATCH_WRONG_TYPE, MATCH_NO_USER, MATCH_UNPARSEABLE = range(4)
def get_matchability(self):
title = self.parse_title()
if not title[0]:
return self.MATCH_UNPARSEABLE, self.title
member_name = title[0]
member = Member.query.filter_by(username=member_name).first()
if not member:
return self.MATCH_NO_USER, member_name
if (title[1] == 'starving' and self.amount > 50) or (title[1] == 'fatty' and self.amount > 100):
return self.MATCH_WRONG_TYPE, member
if title[2]:
return self.MATCH_WRONG_TYPE, member
return self.MATCH_OK, member
|
bsd-2-clause
| -2,691,253,247,546,642,400 | 35.80112 | 145 | 0.607931 | false | 3.756935 | false | false | false |
perrette/pyglacier
|
pyglacier/plotting.py
|
1
|
1106
|
import matplotlib.pyplot as plt
#
# plotting
#
def plot_elevation(ds, ax=None):
if ax is None:
ax = plt.gca()
ds['hs'].plot(ax=ax,label="surface")
ds['hb'].plot(ax=ax,label="bottom")
# add horizontal line to indicate sea level
ax.hlines(0, ds.x[0], ds.x[-1], linestyle='dashed', color='black')
ds['zb'].plot(ax=ax, color='black', linewidth=2, label="bedrock") # add bedrock
ax.legend(frameon=False, loc="upper right")
def plot_velocity(ds, ax=None):
if ax is None:
ax = plt.gca()
ds = ds.copy()
u = 'u' if 'u' in ds else 'U'
ds[u] = ds[u]*3600*24
ds[u].plot(ax=ax)
ax.set_ylabel('velocity [m/d]')
def plot_glacier(ds):
fig,axes=plt.subplots(2,1,sharex=True)
ax=axes[0]
plot_elevation(ds, ax)
ax=axes[1]
plot_velocity(ds, ax)
ax.set_xlim([ds.x[0], ds.x[-1]])
return fig, axes
def plot_stress(ds):
_v = ["driving", "lat", "long", "basal", "residual"]
try:
ds = ds.take(_v)
except KeyError:
ds = ds.take([k + '_stress' for k in _v])
return ds.to_array(axis='stress').T.plot()
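# Illustrative usage (assumes `ds` is an xarray-like dataset carrying the
# variables referenced above, e.g. hs, hb, zb and u/U):
#     fig, axes = plot_glacier(ds)
#     plt.show()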
|
mit
| -7,186,009,462,917,936,000 | 25.97561 | 83 | 0.577758 | false | 2.737624 | false | false | false |
samzhang111/wikipedia-jargon
|
all-subjects/make_tf_differences.py
|
1
|
2815
|
from __future__ import print_function
import msgpack
import sys
import os
from collections import defaultdict
from helpers import text_dict_to_term_dict
from WikiExtractor import clean, compact
import pandas as pd
def remove_wikipedia_markup(text):
return compact(clean(text.decode('utf8')))
def print_help_and_exit(msg=''):
if msg:
print('Error: {}\n'.format(msg))
print('Usage: python make_tf_differences.py [n-grams] [path to directory]')
print('The directory should contain files output by grab_texts.py')
sys.exit(1)
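# Example invocation (directory and subject names are illustrative):
#     python make_tf_differences.py 2 data/texts Physics Chemistry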
if len(sys.argv) <= 2:
print_help_and_exit()
##############################################################
# Read in msgpack files, separating them from simple and en Wikipedia
##############################################################
ngrams = int(sys.argv[1])
text_dir = sys.argv[2]
only = sys.argv[3:]
print('Only calculating for: ', only)
try:
files = os.listdir(text_dir)
except OSError:
print_help_and_exit()
##############################################################
# Organize the text files by subject, then wiki (en or simple)
##############################################################
file_dict = defaultdict(dict)
for f in files:
try:
subject, wiki, _ = f.split('_')
if only and subject not in only:
continue
file_dict[subject][wiki] = f
except ValueError:
print_help_and_exit('Text directory does not contain valid filenames')
for subject in file_dict:
print('Importing ', subject)
with open(os.path.join(text_dir, file_dict[subject]['en'])) as f:
en_text = msgpack.load(f)
en_text = {k: remove_wikipedia_markup(v) for k,v in en_text.items()}
with open(os.path.join(text_dir, file_dict[subject]['simple'])) as f:
sm_text = msgpack.load(f)
sm_text = {k: remove_wikipedia_markup(v) for k,v in sm_text.items()}
print('Calculating term differences')
en_tf, en_counts = text_dict_to_term_dict(en_text, ngrams)
sm_tf, sm_counts = text_dict_to_term_dict(sm_text, ngrams)
sm_terms = set(sm_tf)
en_terms = set(en_tf)
term_differences = {}
for t in sm_terms.union(en_terms):
term_differences[t] = en_tf[t] - sm_tf[t]
sorted_term_difference = sorted(term_differences.items(),
key=lambda x: x[1])
print('Outputting term differences')
td_df = pd.DataFrame(sorted_term_difference, columns=['term',
'term_difference'])
td_df['en_tf'] = td_df.term.apply(lambda x: en_tf[x])
td_df['sm_tf'] = td_df.term.apply(lambda x: sm_tf[x])
try:
os.mkdir('data/term-diffs/ngrams-{}'.format(ngrams))
except OSError:
pass
td_df.to_csv('data/term-diffs/ngrams-{}/{}_td.csv'.format(ngrams, subject),
index=False, encoding='utf8')
|
gpl-3.0
| 5,142,175,881,754,362,000 | 30.988636 | 79 | 0.588988 | false | 3.462485 | false | false | false |
fkie-cad/iva
|
local_repositories/tasks/datetime_utils.py
|
1
|
1476
|
from datetime import datetime, timedelta
TIME_FORMAT = '%H:%M:%S'
def calculate_task_execution_timeout(task_time):
current_datetime = datetime.now()
current_time = get_time_from_datetime(current_datetime)
return calculate_delta_time(task_time, current_time)
def calculate_task_next_execution_datetime(task_time):
current_datetime = get_current_datetime()
current_time = get_time_from_datetime(current_datetime)
if get_time_object(current_time) >= get_time_object(task_time):
current_datetime = add_one_day(current_datetime)
return update_time_in_datetime(current_datetime, task_time)
def get_current_datetime():
return datetime.now()
def calculate_delta_time(time_a_str, time_b_str):
delta_time = (get_time_object(time_a_str) - get_time_object(time_b_str)).seconds
if delta_time > 0:
return delta_time
return 60
def get_time_object(time_a):
return datetime.strptime(time_a, TIME_FORMAT)
def get_time_from_datetime(datetime_):
return datetime_.strftime(TIME_FORMAT)
def verify_time_format(time_str):
try:
datetime.strptime(time_str, TIME_FORMAT)
return True
except ValueError:
return False
def update_time_in_datetime(datetime_, time_str):
time_object = get_time_object(time_str)
return datetime_.replace(hour=time_object.hour, minute=time_object.minute, second=time_object.second)
def add_one_day(datetime_):
return datetime_ + timedelta(days=1)
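# Example (times use the %H:%M:%S format checked by verify_time_format):
#     calculate_task_execution_timeout('23:30:00') returns the number of
#     seconds from now until the next occurrence of 23:30:00, with a floor
#     of 60 seconds when the two times coincide.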
|
lgpl-3.0
| 7,140,560,285,524,920,000 | 26.867925 | 105 | 0.708672 | false | 3.400922 | false | false | false |
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/effective_network_security_group_py3.py
|
1
|
2060
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EffectiveNetworkSecurityGroup(Model):
"""Effective network security group.
:param network_security_group: The ID of network security group that is
applied.
:type network_security_group:
~azure.mgmt.network.v2018_01_01.models.SubResource
:param association: Associated resources.
:type association:
~azure.mgmt.network.v2018_01_01.models.EffectiveNetworkSecurityGroupAssociation
:param effective_security_rules: A collection of effective security rules.
:type effective_security_rules:
list[~azure.mgmt.network.v2018_01_01.models.EffectiveNetworkSecurityRule]
:param tag_map: Mapping of tags to list of IP Addresses included within
the tag.
:type tag_map: dict[str, list[str]]
"""
_attribute_map = {
'network_security_group': {'key': 'networkSecurityGroup', 'type': 'SubResource'},
'association': {'key': 'association', 'type': 'EffectiveNetworkSecurityGroupAssociation'},
'effective_security_rules': {'key': 'effectiveSecurityRules', 'type': '[EffectiveNetworkSecurityRule]'},
'tag_map': {'key': 'tagMap', 'type': '{[str]}'},
}
def __init__(self, *, network_security_group=None, association=None, effective_security_rules=None, tag_map=None, **kwargs) -> None:
super(EffectiveNetworkSecurityGroup, self).__init__(**kwargs)
self.network_security_group = network_security_group
self.association = association
self.effective_security_rules = effective_security_rules
self.tag_map = tag_map
|
mit
| -5,176,465,326,651,729,000 | 44.777778 | 136 | 0.658252 | false | 4.35518 | false | false | false |
raymondanthony/youtube-dl
|
youtube_dl/extractor/pornotube.py
|
1
|
1760
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
compat_urllib_parse,
unified_strdate,
)
class PornotubeIE(InfoExtractor):
_VALID_URL = r'https?://(?:\w+\.)?pornotube\.com(/c/(?P<channel>[0-9]+))?(/m/(?P<videoid>[0-9]+))(/(?P<title>.+))$'
_TEST = {
'url': 'http://pornotube.com/c/173/m/1689755/Marilyn-Monroe-Bathing',
'md5': '374dd6dcedd24234453b295209aa69b6',
'info_dict': {
'id': '1689755',
'ext': 'flv',
'upload_date': '20090708',
'title': 'Marilyn-Monroe-Bathing',
'age_limit': 18
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('videoid')
video_title = mobj.group('title')
# Get webpage content
webpage = self._download_webpage(url, video_id)
# Get the video URL
VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9].pornotube.com/.+\.flv)",'
video_url = self._search_regex(VIDEO_URL_RE, webpage, 'video url')
video_url = compat_urllib_parse.unquote(video_url)
# Get the uploaded date
VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
upload_date = self._html_search_regex(VIDEO_UPLOADED_RE, webpage, 'upload date', fatal=False)
if upload_date:
upload_date = unified_strdate(upload_date)
age_limit = self._rta_search(webpage)
return {
'id': video_id,
'url': video_url,
'upload_date': upload_date,
'title': video_title,
'ext': 'flv',
'format': 'flv',
'age_limit': age_limit,
}
|
unlicense
| 507,547,985,690,037,800 | 30.428571 | 119 | 0.546591 | false | 3.18264 | false | false | false |
szepeviktor/courier-pythonfilter-custom
|
email-correct.py
|
1
|
9398
|
#!/usr/bin/python
# file: email-correct.py
# -*- coding: utf-8 -*-
import os
import sys
import email
import email.charset
import email.encoders
from email.header import Header
from email.utils import getaddresses
from email.utils import formataddr
from email.utils import parseaddr
from email.utils import make_msgid
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import courier.control
from courier.xfilter import XFilter
from courier.xfilter import XFilterError
from lxml import etree
import html2text
__VERSION__ = '1.10'
SELF = 'email-correct'
DEFAULT_CHARSET = 'ISO-8859-2'
debug = False
#TODO
# unknown encoding: scz-1171-1 - check all .encode() and .decode()
# Courier 0.60.0 /etc/courier/bofh
#opt MIME=none
#opt BOFHBADMIME=accept
# check etree version >= 3
#from lxml import etree; etree.__version__
def is_nonascii(string):
return isinstance(string, basestring) and any(ord(c) & 0x80 for c in string)
def check_preamble(msg, corrected):
if msg.preamble is not None and is_nonascii(msg.preamble):
corrected += ['PREAMBLE']
u_preamble = unicode(msg.preamble, DEFAULT_CHARSET)
msg.preamble = u_preamble.encode('ascii', 'replace')
def check_msgid(msg, corrected):
# rfc2822
if msg.get('message-id') is None:
if msg.get('from') is None:
domain = 'msgid.missing'
else:
name, email = parseaddr(msg.get('from'))
domain = email.split('@')[1]
corrected += ['MESSAGE_ID']
msg['Message-ID'] = make_msgid(domain)
def check_mimeversion(msg, corrected):
# rfc2045
if msg.get('mime-version') is None and (msg.is_multipart() or msg.get('content-transfer-encoding') is not None):
corrected += ['MIME_VERSION']
msg['MIME-Version'] = '1.0'
def check_encoding(part, corrected):
if (part['content-transfer-encoding'] is None or part['content-transfer-encoding'] != '8bit') and is_nonascii(part.get_payload()):
corrected += ['7BIT_TO_8BIT']
del part['content-transfer-encoding']
part['Content-Transfer-Encoding'] = '8bit'
def check_addresses(part, corrected, charset):
# https://tools.ietf.org/html/rfc5504#section-3.2
for header in ('From', 'Sender', 'To', 'Cc', 'Bcc', 'Reply-To', 'Resent-From', 'Resent-Sender', 'Resent-To', 'Resent-Cc', 'Resent-Bcc', 'Resent-Reply-To', 'Return-Path', 'Disposition-Notification-To'):
addresses = part.get_all(header)
if addresses is None:
continue
del part[header]
if len(addresses) > 1:
corrected += ['MULTI_' + header.upper()]
for addressline in addresses:
addrlist = getaddresses([addressline])
new_addrlist = []
for (name, addr) in addrlist:
if is_nonascii(name):
corrected += [header.upper()]
new_name = Header(name, charset, errors='replace').encode().encode('ascii', 'replace')
new_addrlist += [(new_name, addr)]
else:
new_addrlist += [(name, addr)]
part[header] = ', '.join(map(formataddr, new_addrlist))
def is_invalid_header(value):
if value and not isinstance(value, tuple) and is_nonascii(value):
return True
return False
def check_headers(part, corrected, charset):
subject = part['Subject']
if is_invalid_header(subject):
corrected += ['SUBJECT']
part.replace_header('subject', Header(subject, charset).encode().encode('ascii', 'replace'))
maildate = part['Date']
if is_invalid_header(maildate):
corrected += ['DATE']
part.replace_header('date', Header(maildate, charset).encode().encode('ascii', 'replace'))
# indamail.hu problem
mailgeoip = part['X-GeoIP']
if is_invalid_header(mailgeoip):
corrected += ['GEOIP']
part.replace_header('x-geoip', Header(mailgeoip, charset).encode().encode('ascii', 'replace'))
charset = part.get_content_charset() or charset
# attachments
value = part.get_param('name')
if is_invalid_header(value):
corrected += ['NAME']
value = Header(value, charset).encode().encode('ascii', 'replace')
part.set_param('name', value)
value = part.get_param('filename', header='content-disposition')
if is_invalid_header(value):
corrected += ['FILENAME']
value = Header(value, charset).encode().encode('ascii', 'replace')
part.set_param('filename', value, 'Content-Disposition')
def check_htmlonly(msg, corrected):
# Skip if multipart or Content-Type is not HTML
if msg.is_multipart() or msg.get('content-type') is None or msg.get('content-type').split(';')[0].strip().lower() != 'text/html':
return msg
###FIXME How to detect multipart messages without plain text part?
email.charset.add_charset('utf-8', email.charset.QP, email.charset.QP, 'utf-8')
###TODO Messages without <head> should get <base href="http://<FROM_DOMAIN>/"> for relative links.
charset = msg.get_content_charset() or DEFAULT_CHARSET
# New message with alternative multipart MIME-level
new_msg = MIMEMultipart('alternative')
# Loop through the original message's headers and copy those to the new one (except two headers)
for (key, value) in msg.items():
if key.lower() not in ['content-type', 'content-disposition']:
new_msg[key] = value
payload = msg.get_payload(decode=True)
###FIXME Encode (QP) every header line of all parts
### with non-decodable (by Content-Type: <CHARSET>) character
# https://docs.python.org/2/library/email.message.html#email.message.Message.defects
parser = etree.HTMLParser(encoding=str(charset), recover=True)
dom_tree = etree.fromstring(payload, parser)
if debug:
etree.dump(dom_tree, pretty_print=True)
output = etree.tostring(dom_tree, pretty_print=True, method='html')
# Revert to UNICODE
html_payload = output.decode('utf-8')
try:
text_payload = html2text.html2text(html_payload)
except Exception as error:
# English - Hungarian
text_payload = 'No text part - Nincs szoveges resz'
pid = str(os.getpid())
sys.stderr.write(SELF + '[' + pid + '] Exception in html2text: %s; %s; charset=%s\n' % (str(type(error)), str(error), str(charset)))
bf = open('/tmp/' + SELF + '_bodyFile.' + pid, 'w')
# Only the non-convertable (broken) HTML
#bf.write(msg.as_string())
# The whole original message
bf.write(output)
bf.close()
# Creating two MIME parts keeping the character set
part1 = MIMEText(text_payload.encode(str(charset), 'replace'), 'plain', charset)
part2 = MIMEText(html_payload.encode(str(charset), 'replace'), 'html', charset)
part1['Content-Disposition'] = 'inline'
part2['Content-Disposition'] = 'inline'
part1['Content-Description'] = 'Plaintext Version of Message'
part2['Content-Description'] = 'HTML Version of Message'
# Attaching the parts to the new message
new_msg.preamble = 'This is a MIME-formatted message. If you see this text it means that your\nE-mail software does not support MIME-formatted messages.\n'
new_msg.attach(part1)
new_msg.attach(part2)
corrected += ['HTMLONLY']
return new_msg
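# check_htmlonly rebuilds HTML-only mails as multipart/alternative: the HTML
# body is parsed leniently with lxml (recover=True), a plain-text part is
# generated via html2text, and both parts are attached keeping the original
# character set.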
def initFilter():
# No variables for this module yes
###TODO e.g. DEFAULT_CHARSET, path for exception body files
#courier.config.applyModuleConfig(SELF, globals())
sys.stderr.write('Initialized the "' + SELF + '" ' + __VERSION__ + ' python filter\n')
def doFilter(bodyFile, controlFileList):
corrected = []
try:
xf = XFilter(SELF, bodyFile, controlFileList)
except XFilterError:
sys.stderr.write(SELF + ': Loop + exit\n')
return ''
pid = str(os.getpid())
# Representing an email message:
# https://docs.python.org/2/library/email.message.html
msg = xf.getMessage()
if debug:
to = msg['to']
else:
tolist = courier.control.getRecipientsData(controlFileList)
if tolist is not None:
to = tolist[0][0]
check_preamble(msg, corrected)
check_msgid(msg, corrected)
check_mimeversion(msg, corrected)
for part in msg.walk():
charset = part.get_charset() or part.get_param('charset') or DEFAULT_CHARSET
check_encoding(part, corrected)
check_addresses(part, corrected, charset)
check_headers(part, corrected, charset)
msg = check_htmlonly(msg, corrected)
if corrected:
msg.set_param('corrected', ','.join(corrected), 'X-Email-Correct')
msg.set_param('version', __VERSION__, 'X-Email-Correct')
xf.setMessage(msg)
try:
xf.submit()
except Exception as error:
sys.stderr.write(SELF + '[' + pid + '] Exception in XFilter.submit: %s; %s\n' % (str(type(error)), str(error)))
bf = open('/tmp/' + SELF + '_bodyFile2.' + pid, 'w')
bf.write(msg.as_string())
bf.close()
sys.stderr.write(SELF + '[' + pid + '] To: ' + to + ' corrected=' + ','.join(corrected) + '\n')
elif debug:
sys.stderr.write(SELF + '[' + pid + '] To: ' + to + ' correct\n')
return ''
if __name__ == '__main__':
debug = True
initFilter()
doFilter(sys.argv[1], sys.argv[2:])
|
mit
| -9,201,266,120,644,209,000 | 36.146245 | 205 | 0.632581 | false | 3.607678 | false | false | false |
cloudbase/maas
|
src/provisioningserver/testing/fakeapi.py
|
1
|
6224
|
# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Fake Provisioning API.
:class:`FakeSynchronousProvisioningAPI` is intended to be useful in a Django
environment, or similar, where the Provisioning API is being used via
xmlrpclib.ServerProxy for example.
:class:`FakeAsynchronousProvisioningAPI` is intended to be used in a Twisted
environment, where all functions return :class:`defer.Deferred`s.
"""
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
str = None
__metaclass__ = type
__all__ = [
"FakeAsynchronousProvisioningAPI",
"FakeSynchronousProvisioningAPI",
]
from base64 import b64encode
from functools import wraps
from provisioningserver.interfaces import IProvisioningAPI
from provisioningserver.utils import filter_dict
from twisted.internet import defer
from zope.interface import implementer
from zope.interface.interface import Method
class FakeProvisioningDatabase(dict):
def __missing__(self, key):
self[key] = {"name": key}
return self[key]
def select(self, keys):
"""Select a subset of this mapping."""
return filter_dict(self, frozenset(keys))
def delete(self, keys):
"""Delete a subset of this mapping."""
for key in keys:
if key in self:
del self[key]
def dump(self):
"""Dump this mapping.
Keys are assumed to be immutable, and values are assumed to have a
`copy` method, like a `dict` for example.
"""
return {
key: value.copy()
for key, value in self.items()
}
@implementer(IProvisioningAPI)
class FakeProvisioningAPIBase:
# TODO: Referential integrity might be a nice thing.
def __init__(self):
super(FakeProvisioningAPIBase, self).__init__()
self.distros = FakeProvisioningDatabase()
self.profiles = FakeProvisioningDatabase()
self.nodes = FakeProvisioningDatabase()
# This records nodes that start/stop commands have been issued
# for. If a node has been started, its name maps to 'start'; if
# it has been stopped, its name maps to 'stop' (whichever
# happened most recently).
self.power_status = {}
def add_distro(self, name, initrd, kernel):
self.distros[name]["initrd"] = initrd
self.distros[name]["kernel"] = kernel
return name
def add_profile(self, name, distro):
self.profiles[name]["distro"] = distro
return name
def add_node(self, name, hostname, profile, power_type, preseed_data):
self.nodes[name]["hostname"] = hostname
self.nodes[name]["profile"] = profile
self.nodes[name]["mac_addresses"] = []
self.nodes[name]["ks_meta"] = {
"MAAS_PRESEED": b64encode(preseed_data),
}
self.nodes[name]["power_type"] = power_type
return name
def modify_distros(self, deltas):
for name, delta in deltas.items():
distro = self.distros[name]
distro.update(delta)
def modify_profiles(self, deltas):
for name, delta in deltas.items():
profile = self.profiles[name]
profile.update(delta)
def modify_nodes(self, deltas):
for name, delta in deltas.items():
node = self.nodes[name]
node.update(delta)
def get_distros_by_name(self, names):
return self.distros.select(names)
def get_profiles_by_name(self, names):
return self.profiles.select(names)
def get_nodes_by_name(self, names):
return self.nodes.select(names)
def delete_distros_by_name(self, names):
return self.distros.delete(names)
def delete_profiles_by_name(self, names):
return self.profiles.delete(names)
def delete_nodes_by_name(self, names):
return self.nodes.delete(names)
def get_distros(self):
return self.distros.dump()
def get_profiles(self):
return self.profiles.dump()
def get_nodes(self):
return self.nodes.dump()
def start_nodes(self, names):
for name in names:
self.power_status[name] = 'start'
def stop_nodes(self, names):
for name in names:
self.power_status[name] = 'stop'
PAPI_METHODS = {
name: getattr(FakeProvisioningAPIBase, name)
for name in IProvisioningAPI.names(all=True)
if isinstance(IProvisioningAPI[name], Method)
}
def sync_xmlrpc_func(func):
"""Decorate a function so that it acts similarly to a synchronously
accessed remote XML-RPC call.
All method calls return synchronously.
"""
@wraps(func)
def wrapper(*args, **kwargs):
assert len(kwargs) == 0, (
"The Provisioning API is meant to be used via XML-RPC, "
"for now, so its methods are prevented from use with "
"keyword arguments, which XML-RPC does not support.")
# TODO: Convert exceptions into Faults.
return func(*args)
return wrapper
# Generate a synchronous variant.
FakeSynchronousProvisioningAPI = type(
b"FakeSynchronousProvisioningAPI", (FakeProvisioningAPIBase,), {
name: sync_xmlrpc_func(func) for name, func in PAPI_METHODS.items()
})
def async_xmlrpc_func(func):
"""Decorate a function so that it acts similarly to an asynchronously
accessed remote XML-RPC call.
All method calls return asynchronously, via a :class:`defer.Deferred`.
"""
@wraps(func)
def wrapper(*args, **kwargs):
assert len(kwargs) == 0, (
"The Provisioning API is meant to be used via XML-RPC, "
"for now, so its methods are prevented from use with "
"keyword arguments, which XML-RPC does not support.")
# TODO: Convert exceptions into Faults.
return defer.execute(func, *args)
return wrapper
# Generate an asynchronous variant.
FakeAsynchronousProvisioningAPI = type(
b"FakeAsynchronousProvisioningAPI", (FakeProvisioningAPIBase,), {
name: async_xmlrpc_func(func) for name, func in PAPI_METHODS.items()
})
|
agpl-3.0
| 4,621,239,559,335,503,000 | 29.509804 | 76 | 0.64653 | false | 3.971921 | false | false | false |
sassoftware/catalog-service
|
catalogService/rest/drivers/openstack/openstackclient.py
|
1
|
23160
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import time
from catalogService import errors
from catalogService.rest import baseDriver
from catalogService.rest.models import images
from catalogService.rest.models import instances
try:
from keystoneclient.auth.identity import v2 as v2_auth
from keystoneclient.client import Client as KeystoneClient
from keystoneclient.session import Session as KeystoneSession
from novaclient.v1_1.client import Client as NovaClient
from glanceclient import Client as GlanceClient
import logging
logging.getLogger('iso8601.iso8601').setLevel(logging.ERROR)
except ImportError:
NovaClient = None #pyflakes=ignore
class OpenStack_Image(images.BaseImage):
"OpenStack Image"
NOVA_PORT = 5000
CATALOG_NEW_FLOATING_IP = "new floating ip-"
CATALOG_NEW_FLOATING_IP_DESC = "[New floating IP in {pool}]"
# This is provided by the nova api
#class OpenStack_InstanceTypes(instances.InstanceTypes):
# "OpenStack Instance Types"
#
# idMap = [
# ('xenent.small', "Small"),
# ('xenent.medium', "Medium"),
# ]
# Nova address
# Nova port
# Glance address (until apis are integrated)
# Glance port
_configurationDescriptorXmlData = """<?xml version='1.0' encoding='UTF-8'?>
<descriptor xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.rpath.org/permanent/descriptor-1.0.xsd descriptor-1.0.xsd">
<metadata>
<displayName>OpenStack Configuration</displayName>
<descriptions>
<desc>Configure OpenStack</desc>
</descriptions>
</metadata>
<dataFields>
<field>
<name>name</name>
<descriptions>
<desc>Nova Server</desc>
</descriptions>
<type>str</type>
<required>true</required>
<help href='configuration/novaServerName.html'/>
</field>
<field>
<name>nova_port</name>
<descriptions>
<desc>Nova Port</desc>
</descriptions>
<type>int</type>
<constraints>
<descriptions>
<desc>Valid ports are integers between 1 and 65535</desc>
</descriptions>
<range><min>1</min><max>65535</max></range>
</constraints>
<required>true</required>
<default>%(nova_port)s</default>
<help href='configuration/novaPortNumber.html'/>
</field>
<field>
<name>alias</name>
<descriptions>
<desc>Name</desc>
</descriptions>
<type>str</type>
<required>true</required>
<help href='configuration/alias.html'/>
</field>
<field>
<name>description</name>
<descriptions>
<desc>Full Description</desc>
</descriptions>
<type>str</type>
<required>true</required>
<help href='configuration/description.html'/>
</field>
<field>
<name>project_name</name>
<descriptions>
<desc>Project Name</desc>
</descriptions>
<type>str</type>
<required>true</required>
<help href='configuration/project_name.html'/>
</field>
</dataFields>
</descriptor>""" % dict(nova_port=NOVA_PORT, )
# User Name
# Auth Token
_credentialsDescriptorXmlData = """<?xml version='1.0' encoding='UTF-8'?>
<descriptor xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.rpath.org/permanent/descriptor-1.0.xsd descriptor-1.0.xsd">
<metadata>
<displayName>OpenStack User Credentials</displayName>
<descriptions>
<desc>User Credentials for OpenStack</desc>
</descriptions>
</metadata>
<dataFields>
<field>
<name>username</name>
<descriptions>
<desc>User Name</desc>
</descriptions>
<type>str</type>
<constraints>
<descriptions>
<desc>Field must contain between 1 and 32 characters</desc>
</descriptions>
<length>32</length>
</constraints>
<required>true</required>
</field>
<field>
<name>password</name>
<descriptions>
<desc>Password</desc>
</descriptions>
<type>str</type>
<constraints>
<descriptions>
<desc>Field must contain between 1 and 40 characters</desc>
</descriptions>
<length>40</length>
</constraints>
<required>true</required>
<password>true</password>
</field>
</dataFields>
</descriptor>
"""
# http://glance.openstack.org/client.html
# http://pypi.python.org/pypi/python-novaclient
class ConsolidatedClient(object):
def __init__(self, keystone_client, nova_client, glance_client):
self.keystone = keystone_client
self.nova = nova_client
self.glance = glance_client
class OpenStackClient(baseDriver.BaseDriver):
Image = OpenStack_Image
cloudType = 'openstack'
configurationDescriptorXmlData = _configurationDescriptorXmlData
credentialsDescriptorXmlData = _credentialsDescriptorXmlData
RBUILDER_BUILD_TYPE = 'RAW_HD_IMAGE'
NovaClientClass = NovaClient
KEYSTONE_API_VERSION = '2.0'
GLANCE_CLIENT_VERSION = '2'
@classmethod
def isDriverFunctional(cls):
return cls.NovaClientClass is not None
getImageIdFromMintImage = baseDriver.BaseDriver._getImageIdFromMintImage_local
def _authUrl(self, server, port, secure=True):
return "%s://%s:%s" % ('https' if secure else 'http', server, port)
def _secureToInsecureFallback(self, callback, *args, **kwargs):
kwSecure = kwargs.copy()
kwSecure['secure'] = True
try:
# try calling the callback with secure=True
return callback(self, *args, **kwSecure)
except Exception, eSecure:
eSecure_type, eSecure_val, eSecure_trace = sys.exc_info()
kwInsecure = kwargs.copy()
kwInsecure['secure'] = False
# try calling the callback with secure=False
try:
return callback(self, *args, **kwInsecure)
except Exception, eInsecure:
# if insecure version also fails, transparently raise the secure exception
raise eSecure_type, eSecure_val, eSecure_trace
def drvCreateCloudClient(self, credentials):
cloudConfig = self.getTargetConfiguration()
server = cloudConfig['name']
port = cloudConfig['nova_port']
projectName = cloudConfig['project_name']
try:
session = KeystoneSession()
def authenticate(self, **kwargs):
secure = kwargs.pop('secure')
authUrl = self._authUrl(server, port, secure=secure)
keystoneCli = KeystoneClient(self.KEYSTONE_API_VERSION,
tenant_name=projectName,
auth_url=authUrl,
username=credentials['username'],
password=credentials['password'],
session=session)
auth = v2_auth.Password(
keystoneCli.auth_url,
username=credentials['username'],
password=credentials['password'])
session.auth = auth
keystoneCli.authenticate()
auth.auth_ref = keystoneCli.auth_ref
return keystoneCli
keystoneCli = self._secureToInsecureFallback(authenticate)
novaCli = self.NovaClientClass(auth_token=keystoneCli.auth_token,
project_id=projectName,
auth_url=keystoneCli.auth_url,
session=session)
endpoint = session.get_endpoint(service_type="image")
glanceCli = GlanceClient(self.GLANCE_CLIENT_VERSION,
endpoint=endpoint,
project_id=projectName,
token=keystoneCli.auth_token,
session=session)
clients = ConsolidatedClient(keystoneCli, novaCli, glanceCli)
except Exception, e:
raise errors.PermissionDenied(message =
"Error initializing client: %s" % (e, ))
return clients
def drvVerifyCloudConfiguration(self, dataDict):
serverName = dataDict['name']
serverPort = dataDict['nova_port']
def verify(self, **kwargs):
secure = kwargs.pop('secure')
self._verifyServerUrl(self._authUrl(serverName, serverPort, secure=secure))
self._secureToInsecureFallback(verify)
def terminateInstances(self, instanceIds):
running_instances = self.getInstances(instanceIds)
for server in running_instances:
server.delete() # there is no terminate method in novaclient
insts = instances.BaseInstances()
insts.extend(running_instances)
# Set state
for inst in insts:
inst.setState("Terminating")
return insts
def terminateInstance(self, instanceId):
return self.terminateInstances([instanceId])
def _get_flavors(self):
objlist = self.client.nova.flavors.list()
objlist.sort(key=lambda x: (x.vcpus, x.ram, x.disk))
return objlist
def _get_availability_zones(self):
objlist = self.client.nova.availability_zones.list(detailed=False)
objlist = [ x for x in objlist if x.zoneState.get('available') ]
objlist.sort(key=lambda x: x.zoneName)
return objlist
def drvPopulateImageDeploymentDescriptor(self, descr, extraArgs=None):
descr.setDisplayName("OpenStack Launch Parameters")
descr.addDescription("OpenStack Launch Parameters")
self.drvImageDeploymentDescriptorCommonFields(descr)
self._imageDeploymentSpecifcDescriptorFields(descr, extraArgs=extraArgs)
return self._drvPopulateDescriptorFromTarget(descr)
def drvPopulateLaunchDescriptor(self, descr, extraArgs=None):
descr.setDisplayName("OpenStack Launch Parameters")
descr.addDescription("OpenStack Launch Parameters")
self.drvLaunchDescriptorCommonFields(descr)
self._launchSpecificDescriptorFields(descr, extraArgs=extraArgs)
return self._drvPopulateDescriptorFromTarget(descr)
def _drvPopulateDescriptorFromTarget(self, descr):
pass
def _retriveSSHKeyPairs(self, descr):
keyPairs = [ descr.ValueWithDescription(x[0], descriptions = x[1])
for x in self._cliGetKeyPairs() ]
if not keyPairs:
raise errors.ParameterError("No OpenStack SSH key pairs defined, please create one")
return keyPairs
def _launchSpecificDescriptorFields(self, descr, extraArgs=None):
avzones = self._get_availability_zones()
descr.addDataField("availabilityZone",
descriptions = [
("Availability Zone", None),
(u"Zone de disponibilit\u00e9", "fr_FR")],
help = [
("launch/availabilityZones.html", None)],
default = [ avzones[0].zoneName ],
required=True,
type = descr.EnumeratedType([
descr.ValueWithDescription(x.zoneName, descriptions = x.zoneName)
for x in avzones ]
))
targetFlavors = self._get_flavors()
if not targetFlavors:
raise errors.ParameterError("No instance flavors defined")
flavors = [ descr.ValueWithDescription(str(f.id),
descriptions = "%s (VCPUs: %d, RAM: %d MB)" % (f.name, f.vcpus, f.ram)) for f in targetFlavors ]
descr.addDataField('flavor',
descriptions = 'Flavor',
required = True,
help = [
('launch/flavor.html', None)
],
type = descr.EnumeratedType(flavors),
default=flavors[0].key,
)
networks = self._cliGetNetworks()
descr.addDataField('network',
descriptions = 'Network',
required = True,
help = [
('launch/network.html', None)
],
type = descr.EnumeratedType(
descr.ValueWithDescription(x.id, descriptions = x.label)
for x in networks),
default=[networks[0].id],
)
descr.addDataField("keyName",
descriptions = [ ("SSH Key Pair", None), ("Paire de clefs", "fr_FR") ],
help = [
("launch/keyPair.html", None)
],
type = descr.EnumeratedType(self._retriveSSHKeyPairs(descr))
)
fpList = self._cliGetFloatingIps()
descr.addDataField('floatingIp',
descriptions = 'Floating IP',
required = True,
help = [
('launch/floatingIp.html', None)
],
type = descr.EnumeratedType(
descr.ValueWithDescription(x['id'], descriptions = x['label'])
for x in fpList),
default=fpList[0]['id'],
)
return descr
def _cliGetFloatingIps(self):
cli = self.client.nova
pools = cli.floating_ip_pools.list()
objs = cli.floating_ips.list()
unassigned = [
dict(
id=CATALOG_NEW_FLOATING_IP + x.name,
label=CATALOG_NEW_FLOATING_IP_DESC.format(pool=x.name),
pool=x.name)
for x in pools ]
for obj in objs:
if obj.instance_id:
continue
unassigned.append(dict(id=obj.id,
label= "%s in pool %s" % (obj.ip, obj.pool),
pool=obj.pool,
ip=obj.ip))
unassigned.sort(key=lambda x: x.get('ip'))
return unassigned
def _cliGetKeyPairs(self):
try:
rs = self.client.nova.keypairs.list()
except:
raise
return [ (x.id, x.name) for x in rs ]
def _cliGetNetworks(self):
networks = self.client.nova.networks.list()
networks.sort(key=lambda x: x.label.lower())
if not networks:
raise errors.ParameterError("No networks defined, please create one")
return networks
def _imageDeploymentSpecifcDescriptorFields(self, descr, **kwargs):
pass
# TODO: remove when novaclient has caught up to v1.1.
# This pulls a resource id from from a resource ref url
def _get_id_from_ref(self, resource_ref):
return resource_ref.split('/')[-1]
@classmethod
def _idFromRef(cls, ref):
if ref is None:
return ref
if isinstance(ref, int):
return str(ref)
# Grab the last part of the URL and return it
return os.path.basename(ref)
def drvGetInstances(self, instanceIds, force=False):
client = self.client.nova
cloudAlias = self.getCloudAlias()
instanceList = instances.BaseInstances()
images = self.getAllImages()
# Hash images so we can quickly return a ref
imagesMap = dict((self._idFromRef(image.opaqueId), image)
for image in images if hasattr(image, 'opaqueId'))
servers = sorted(client.servers.list(), key=self.sortKey)
for server in servers:
instanceId = str(server.id)
imageId = None
imgobj = server.image
if imgobj:
imageRef = self._idFromRef(imgobj['id'])
image = imagesMap.get(imageRef)
if image:
imageId = image.id
publicDnsName = privateDnsName = None
if server.addresses.values():
addrList = server.addresses.values()[0]
floatingAddrs = [ x['addr'] for x in addrList if x['OS-EXT-IPS:type'] == 'floating' ]
fixedAddrs = [ x['addr'] for x in addrList if x['OS-EXT-IPS:type'] == 'fixed' ]
if floatingAddrs:
publicDnsName = floatingAddrs[0]
if fixedAddrs:
privateDnsName = fixedAddrs[0]
inst = self._nodeFactory.newInstance(id = instanceId,
imageId = imageId,
instanceId = instanceId,
instanceName = server.name,
instanceDescription = server.name,
dnsName = 'UNKNOWN',
publicDnsName = publicDnsName,
privateDnsName = privateDnsName,
state = server.status,
launchTime = server.created if hasattr(server, 'created') else None,
cloudName = self.cloudName,
cloudAlias = cloudAlias)
instanceList.append(inst)
instanceList.sort(key = lambda x: (x.getState(), x.getInstanceId()))
return self.filterInstances(instanceIds, instanceList)
@classmethod
def _getServerAddressByType(cls, server, addressType):
if not server.addresses:
return None
addrlist = server.addresses.get(addressType)
if not addrlist:
return None
return addrlist[0]['addr']
def getLaunchInstanceParameters(self, image, descriptorData):
params = baseDriver.BaseDriver.getLaunchInstanceParameters(self,
image, descriptorData)
getField = descriptorData.getField
srUuid = getField('storageRepository')
params['srUuid'] = srUuid
return params
def deployImageProcess(self, job, image, auth, **params):
# RCE-1751: always redeploy.
if 0 and image.getIsDeployed():
self._msg(job, "Image is already deployed")
return image.getImageId()
ppop = params.pop
imageName = ppop('imageName')
cloudConfig = self.getTargetConfiguration()
nameLabel = image.getLongName()
nameDescription = image.getBuildDescription()
self._deployImage(job, image, auth, imageName=imageName)
self._msg(job, 'Image deployed')
return image.getImageId()
def launchInstanceProcess(self, job, image, auth, **launchParams):
ppop = launchParams.pop
instanceName = ppop('instanceName')
instanceDescription = ppop('instanceDescription')
flavorRef = ppop('flavor')
networkRef = ppop('network')
zoneRef = ppop('availabilityZone')
floatingIp = ppop('floatingIp')
if floatingIp.startswith(CATALOG_NEW_FLOATING_IP):
poolName = floatingIp[len(CATALOG_NEW_FLOATING_IP):]
floatingIp = self.client.nova.floating_ips.create(pool=poolName)
else:
floatingIp = self.client.nova.floating_ips.get(floatingIp)
keyName = ppop('keyName', None)
cloudConfig = self.getTargetConfiguration()
nameLabel = image.getLongName()
nameDescription = image.getBuildDescription()
imageName = image.getBaseFileName()
if not image.getIsDeployed():
imageId = self._deployImage(job, image, auth, imageName=imageName)
else:
imageId = getattr(image, 'opaqueId')
job.addHistoryEntry('Launching')
instId = self._launchInstanceOnTarget(job, instanceName, imageId,
flavorRef, keyName, floatingIp, zoneRef, networkRef)
return [ instId ]
@classmethod
def sortKey(cls, x):
return x.id
def getImagesFromTarget(self, imageIdsFilter):
cloudAlias = self.getCloudAlias()
client = self.client.nova
ret = []
images = sorted(client.images.list(detailed=True), key=self.sortKey)
for image in images:
# image.id is numeric
imageId = str(image.id)
imageName = image.name
img = self._nodeFactory.newImage(
id = imageId,
imageId = imageId,
isDeployed = True,
is_rBuilderImage = False,
shortName = imageName,
productName = imageName,
longName = imageName,
cloudName = self.cloudName,
cloudAlias = cloudAlias)
img.opaqueId = self._getLinkRel(image, 'self')
ret.append(img)
return self.filterImages(imageIdsFilter, ret)
@classmethod
def _getLinkRelFromList(cls, list, rel):
for link in list:
if link['rel'] == rel:
return link['href']
return None
@classmethod
def _getLinkRel(cls, obj, rel):
return cls._getLinkRelFromList(obj.links, rel)
def _getImageDiskFormat(self):
return 'raw'
def _getImageContainerFormat(self):
return 'bare'
def _importImage(self, job, imageMetadata, fileObj):
job.addHistoryEntry('Creating image')
glanceImage = self.client.glance.images.create(**imageMetadata)
job.addHistoryEntry('Uploading image content')
self.client.glance.images.upload(glanceImage.id, fileObj)
return str(glanceImage.id)
def _deployImageFromFile(self, job, image, path, *args, **kwargs):
imageName = kwargs.get('imageName', image.getShortName())
try:
job.addHistoryEntry('Uncompressing image')
logger = lambda *x: self._msg(job, *x)
archive = baseDriver.Archive(path, logger)
archive.extract()
archiveMembers = list(archive)
assert len(archiveMembers) == 1
member = archiveMembers[0]
fobj = archive.extractfile(member)
job.addHistoryEntry('Importing image')
imageDiskFormat = self._getImageDiskFormat()
imageContainerFormat = self._getImageContainerFormat()
imageMetadata = {'name':imageName, 'disk_format':imageDiskFormat,
'container_format':imageContainerFormat}
imageId = self._importImage(job, imageMetadata, fobj)
finally:
pass
return imageId
def _launchInstanceOnTarget(self, job, name, imageRef, flavorRef, keyName, floatingIp, zoneRef, networkRef):
client = self.client.nova
server = client.servers.create(name, imageRef, flavorRef,
key_name=keyName, nics=[{'net-id' : networkRef}],
availability_zone=zoneRef)
for i in range(20):
if server.status == 'ACTIVE':
break
job.addHistoryEntry('Waiting for server to become active')
time.sleep(2*i + 1)
server = client.servers.get(server)
server.add_floating_ip(floatingIp)
return str(server.id)
|
apache-2.0
| 6,831,616,599,087,930,000 | 36.056 | 156 | 0.60095 | false | 4.227049 | true | false | false |
openjck/apod-scraper
|
scraper.py
|
1
|
6524
|
#!/usr/bin/env python
import bleach
from bs4 import BeautifulSoup
from collections import OrderedDict
from dateutil import parser
import regex
import requests
import scraperwiki
import urlparse
class Page:
def __init__(self, path, basename, encoding):
self.path = path
self.basename = basename
self.encoding = encoding
self.url = path + basename
class Archive(Page):
def __init__(self, path, basename, encoding):
Page.__init__(self, path, basename, encoding)
@property
def links(self):
link_re = 'ap[0-9]+\.html'
soup = make_soup(self.url, self.encoding, parser='html.parser')
return soup.find_all(href=regex.compile(link_re))
class Entry(Page):
def __init__(self, path, basename, encoding, link):
Page.__init__(self, path, basename, encoding)
self.link = link
@property
def entry_url(self):
return self.url
@property
def date(self):
date_raw = self.link.previous_sibling[:-3]
date = parser.parse(date_raw).strftime('%Y-%m-%d')
return unicode(date, 'UTF-8')
@property
def title(self):
return self.link.text
@property
def credit(self):
soup = self.get_soup()
html = str(soup)
# The credit information is always below the title. Sometimes the title
# on the picture page is slightly different from the title on the index
# page, however, so fuzzy matching is used here to account for any
# differences.
match = regex.search('<b>\s*?(?:{0}){{e<={1}}}\s*?<(?:\/b|br.*?)>(.*?)<p>'.format(regex.escape(self.link.text.encode('UTF-8')), int(float(len(self.link.text)) * 0.25)), html, regex.DOTALL | regex.IGNORECASE)
if not match:
# If the above fails for some reason, one last attempt will be made
# to locate the credit information by searching between the title
# and the explanation.
match = regex.search('<b>.*?<(?:\/b|br.*?)>(.*?)<p>.*?<(?:b|h3)>\s*?Explanation(?::)?\s*?<\/(?:b|h3)>(?::)?', html, regex.DOTALL | regex.IGNORECASE)
if match:
# Remove all tags except the anchor tags, and remove all excess
# whitespace characters.
credit = ' '.join(bleach.clean(match.group(1), tags=['a'], attributes={'a': ['href']}, strip=True).split())
else:
credit = '';
return credit
@property
def explanation(self):
soup = self.get_soup()
html = str(soup)
match = regex.search('<(?:b|h3)>\s*?Explanation(?::)?\s*?<\/(?:b|h3)>(?::)?(.*?)<p>', html, regex.DOTALL | regex.IGNORECASE)
if match:
explanation = ' '.join(bleach.clean(match.group(1), tags=['a'], attributes={'a': ['href']}, strip=True).split())
else:
explanation = ''
return explanation
@property
def picture_thumbnail_url(self):
soup = self.get_soup()
picture_thumbail_link = soup.find('img', src=regex.compile('image/'))
# Check if there is a smaller version of the picture on the page.
if picture_thumbail_link:
picture_thumbnail_url = self.path + picture_thumbail_link['src']
else:
picture_thumbnail_url = ''
return unicode(picture_thumbnail_url, 'UTF-8')
@property
def picture_url(self):
soup = self.get_soup()
picture_link = soup.find('a', href=regex.compile(self.path.replace('.', '\.') + 'image/'))
# Check if there is a higher-resolution link to the picture.
if picture_link:
picture_url = picture_link['href']
else:
picture_url = ''
return unicode(picture_url, 'UTF-8')
@property
def video_url(self):
soup = self.get_soup()
video_link = soup.find('iframe')
if video_link:
video_url = video_link['src']
else:
video_url = ''
return unicode(video_url, 'UTF-8')
# Cache the soup.
def get_soup(self):
if not hasattr(self, 'soup'):
self.soup = make_soup(self.url, self.encoding, True, self.path)
return self.soup
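# A small illustration (hypothetical, not used by the scraper) of the fuzzy
# matching applied in Entry.credit above: the regex module's {e<=N} suffix
# lets the quoted index title match even when the picture page spells the
# title slightly differently.
def _fuzzy_title_demo():
    index_title = 'A Total Lunar Eclipse'
    page_html = '<b> A Total Lunar  Eclipse </b> Image Credit: Example Person<p>'
    pattern = '<b>\s*?(?:{0}){{e<=2}}'.format(regex.escape(index_title))
    return bool(regex.search(pattern, page_html, regex.IGNORECASE))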
def make_soup(url, encoding, absolute=False, base='', parser='lxml'):
html = requests.get(url)
if parser:
soup = BeautifulSoup(html.content, parser, from_encoding=encoding)
else:
soup = BeautifulSoup(html.content, from_encoding=encoding)
# Make all links absolute.
# http://stackoverflow.com/a/4468467/715866
if absolute:
for a in soup.find_all('a', href=True):
a['href'] = urlparse.urljoin(base, a['href'])
return soup
def save(url, date, title, credit, explanation, picture_thumbnail_url, picture_url, video_url, data_version):
data = OrderedDict()
data['url'] = url;
data['date'] = date;
data['title'] = title;
data['credit'] = credit;
data['explanation'] = explanation;
data['picture_thumbnail_url'] = picture_thumbnail_url;
data['picture_url'] = picture_url;
data['video_url'] = video_url;
data_versions = OrderedDict()
data_versions['url'] = url;
data_versions['data_version'] = data_version;
scraperwiki.sql.save(['url'], data)
scraperwiki.sql.save(['url'], data_versions, table_name='data_versions')
def table_exists(table):
try:
scraperwiki.sql.select('* FROM %s' % table)
return True
except:
return False
def main():
# Change this number when the scraping algorithm changes. All pages will be
# re-scraped.
version = '1.1.1'
path = 'http://apod.nasa.gov/apod/'
site_encoding = 'windows-1252'
archive = Archive(path, 'archivepix.html', site_encoding)
versions = table_exists('data_versions')
for link in archive.links:
entry = Entry(path, link['href'], site_encoding, link)
if versions:
result = scraperwiki.sql.select('url, data_version FROM data_versions WHERE url = "%s" LIMIT 1' % entry.entry_url)
# Only scrape and save the page if it contains a picture or video and
# if it has not already been scraped at this version.
if (not versions or not result or result[0]['data_version'] != version) and (entry.picture_thumbnail_url or entry.video_url):
save(entry.entry_url, entry.date, entry.title, entry.credit, entry.explanation, entry.picture_thumbnail_url, entry.picture_url, entry.video_url, data_version=version)
if __name__ == '__main__':
main()
|
gpl-3.0
| -8,320,870,269,722,252,000 | 31.949495 | 215 | 0.601625 | false | 3.75374 | false | false | false |
kyxw007/MovieRename
|
kyxw007/FileTool.py
|
1
|
1084
|
# coding=utf-8
import os
import Utils, Rename
root_dir = "/Volumes/XiaoMi-usb0/下载"
tool = Rename.RenameTool()
def get_suffix(file_name):
index = file_name.rfind('.')
return file_name[index:len(file_name)]
def folder_rename(root_dir):
file_list = os.listdir(root_dir)
for file_name in filter(Utils.hasChinese, file_list):
print("老文件名:", file_name)
tool.find_fitness_movie(file_name)
new_file_name = tool.new_filename + get_suffix(file_name)
print("新文件名:", new_file_name)
os.rename(root_dir + "/" + file_name, root_dir + "/" + new_file_name)
def single_rename(path, file_name):
print("老文件名:", file_name)
tool.find_fitness_movie(file_name)
new_file_name = tool.new_filename + get_suffix(file_name)
print("新文件名:", new_file_name)
os.rename(path + "/" + file_name, path + "/" + new_file_name)
single_rename("/Volumes/XiaoMi-usb0/下载", "火星救援.The Martian.2015.评分[8.4].主演[马特·达蒙].导演[雷德利·斯科特].Mp4Ba")
# folder_rename(root_dir)
|
apache-2.0
| 1,555,840,494,825,192,000 | 27.628571 | 101 | 0.645709 | false | 2.486352 | false | false | false |
srmnitc/tis-tools
|
runscripts/make_q4q6_dist/make_q4q6_dist.py
|
1
|
13555
|
import os
import sys
import subprocess as sub
import numpy as np
import time
import logging
import tistools_helpers.tistools_helpers as tistools_helpers
#SRM:set up logger for the general error messages
logger = logging.getLogger(__name__)
handler = logging.FileHandler("analysis.log")
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
logger.propagate = False
#workdir
workdir = '/home/users/menonsqr/storage/HCP19/tis_run'
seedfileaddress = '/home/users/menonsqr/SeedHCP19/seed.dat'
binary = '/home/users/menonsqr/tis-tools.git/trunk/runscripts/make_q4q6_dist/orderparameter/main'
tstcluster = 413
maxconfs=10
#create helpers class
helpers = tistools_helpers.tistools_helpers()
#class for seed
class Seed(object):
def __init__(self,seedfileaddress):
#self.atoms=np.empty([natoms,5])
self.seedfile=seedfileaddress
self.exists=True
#read the positions of seed atoms
def ReadSeed(self,read=True,idlist=None):
self.seedids = []
if read==True:
if os.path.isfile(self.seedfile) and os.path.getsize(self.seedfile) > 0:
for line in open(self.seedfile):
self.seedids.append(int(line.strip()))
self.atoms=np.empty([len(self.seedids),7])
else:
self.exists=False
else:
self.seedids=idlist
if len(self.seedids)>0:
self.atoms=np.empty([len(self.seedids),7])
else:
self.exists=False
#populate the positions of seed atoms
def PopulateSeed(self,filename,read=True):
if read==True:
atoms = read_alles(filename)
else:
atoms = filename
#add the atoms and positions
k = 0
for i in range(len(atoms)):
if atoms[i][0] in self.seedids:
self.atoms[k][0]=atoms[i][0]
self.atoms[k][1]=atoms[i][1]
self.atoms[k][2]=atoms[i][2]
self.atoms[k][3]=atoms[i][3]
self.atoms[k][4]=atoms[i][4]
self.atoms[k][5]=atoms[i][5]
self.atoms[k][6]=atoms[i][6]
k+=1
def CalculateDistances(self,otheratoms):
loo = []
for atom in self.atoms:
dist = []
for oatom in otheratoms.atoms:
#print 'seed'
#print seedatom[0]
a = oatom[1]
b = oatom[2]
c = oatom[3]
distance = np.sqrt((a-atom[1])**2 + (b-atom[2])**2 + (c-atom[3])**2 )
dist.append(distance)
mindist=min(dist)
#print mindist
#print (mindist<1e-5)
if mindist<1e-5:
#print "oh"
mindist=0.00
atom[4]=mindist
loo.append(mindist)
return loo
#function to read dump files
def read_alles(filename,filetype="dump"):
if (filetype=="dump"):
        # the first nine header lines are not required;
        # after that, columns 0, 3, 4, 5 are read (id, x, y, z).
count = 0
data = []
print "reading atom file"
for line in open(filename,'r'):
data.append(line)
boxsizelist = []
natoms = int(data[3])
#atoms values are as follows
# 0 : id
# 1,2,3 : x,y,z
# 4 : whichever distance value
# 5,6 : avg q4 and q6 respectively
# #total of seven parameters
atoms = np.empty([natoms,7])
i = 0
for line in data:
if (count==5) or (count==6) or (count==7):
raw = line.split()
boxsizelist.append(float(raw[0]))
boxsizelist.append(float(raw[1]))
elif (count>8):
raw = line.split()
atoms[i][0] = int(raw[0])
atoms[i][1] = float(raw[3])
atoms[i][2] = float(raw[4])
atoms[i][3] = float(raw[5])
atoms[i][4] = 99999.00
atoms[i][5] = 99999.00
atoms[i][6] = 99999.00
#atoms[i][4] = False
i+=1
count+=1
#print atoms
#print boxsizelist
return atoms
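# Hypothetical smoke test (not part of the analysis run): writes a tiny dump
# file matching the layout read_alles expects -- nine header lines, then one
# "id type q x y z" record per atom -- and parses it back.
def _read_alles_demo():
    import tempfile
    lines = [
        "ITEM: TIMESTEP", "0",
        "ITEM: NUMBER OF ATOMS", "2",
        "ITEM: BOX BOUNDS pp pp pp",
        "0.0 40.0", "0.0 40.0", "0.0 40.0",
        "ITEM: ATOMS id type q x y z",
        "1 1 0.0 1.0 2.0 3.0",
        "2 1 0.0 4.0 5.0 6.0",
    ]
    tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.dump', delete=False)
    tmp.write("\n".join(lines) + "\n")
    tmp.close()
    return read_alles(tmp.name)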
#main function that is to be called
def MakeStructureHistogram(pathtype,manual=False,gzip=False):
"""
special function to make histograms
hardcoded. Remove at some point.
"""
tmpfile = 'my_tmp'
snapshots=1
#set up histograms
distance1 = []
distance2 = []
distance3 = []
distance4 = []
distance5 = []
distance6 = []
distance7 = []
alle = []
if manual==False:
interfacelist = helpers.generate_intflist()
else:
interfacelist = helpers.read_intflist()
for interface in interfacelist:
if snapshots>maxconfs:
break
interface = interface.strip()
intfpath = os.path.join(os.getcwd(),"tis","la",interface)
intfpath = intfpath.strip()
pathpath = os.path.join(intfpath,pathtype+".dat")
pathpath = pathpath.strip()
pathlist = []
filenamelist = []
        #we get the list of all paths that need to be analysed
for path in open(pathpath,'r'):
pathlist.append(path)
print "finalised paths"
#may the analysis start
for path in pathlist:
if snapshots>maxconfs:
break
path = path.strip()
pathpath= os.path.join(intfpath,path)
identifier = interface+path
#we are in the folder now
#we have to read the actual trajectory
actualtraj = os.path.join(workdir,'tis','la',interface,path)
data = helpers.combine_paths_return(actualtraj,gzip=gzip)
print "read paths"
#we have the data on standby
#time to read the output raw data histo file.
histofile = os.path.join(pathpath,(identifier+'.histo.list'))
histodataslices = []
histodata = []
count=0
#print "ll"
#print histofile
if os.path.exists(histofile):
#print histofile
#print "kkll"
for line in open(histofile,'r'):
histodata.append(line.strip())
count+=1
if count==12:
histodataslices.append(histodata)
histodata = []
count =0
else:
continue
print "read histolists"
            #looping over each slice in the trajectory
for i in range(len(histodataslices)):
#print snapshots
bccids = map(int,histodataslices[i][3].split())
fccids = map(int,histodataslices[i][5].split())
hcpids = map(int,histodataslices[i][7].split())
udfids = map(int,histodataslices[i][9].split())
surids = map(int,histodataslices[i][11].split())
nucsize = len(bccids)+len(fccids)+len(hcpids)+len(udfids)
#print fccids
#print nucsize
#check if the guy should be part of histo, and which histo
if (nucsize <= tstcluster+3) and (nucsize >= tstcluster-3):
if snapshots>maxconfs:
break
snapshots+=1
print "value found"
tmpfile = os.path.join(os.getcwd(),identifier+'.temp')
outfile = open(tmpfile,'w')
for j in range(len(data[i])):
outfile.write(data[i][j])
outfile.flush()
outfile.close()
print "applying op"
#apply order parameter and read histo stuff
cmd = [binary,tmpfile]
proc = sub.Popen(cmd, stdin=sub.PIPE,stdout=sub.PIPE,stderr=sub.PIPE)
out,err = proc.communicate()
proc.wait()
print "reading the atoms"
#read the slice
#modify read alles to read in the q4 q6 too.- done
atoms = read_alles(tmpfile)
os.system(('rm %s')% tmpfile)
print "setting up seed"
#set up the seed classes
seed = Seed(seedfileaddress)
seed.ReadSeed()
seed.PopulateSeed(atoms,read=False)
#delete the seed particles from the lists
bccids = [x for x in bccids if x not in seed.seedids]
fccids = [x for x in fccids if x not in seed.seedids]
hcpids = [x for x in hcpids if x not in seed.seedids]
udfids = [x for x in udfids if x not in seed.seedids]
#set up surface class
surface = Seed('dummy')
surface.ReadSeed(read=False,idlist=surids)
if surface.exists:
surface.PopulateSeed(atoms,read=False)
#find udf ids in surface
#udfsurids = [x for x in udfids if x in surids]
udfsurids = [x for x in hcpids if x in surids]
#udfcoreids = [x for x in udfids if x not in surids]
udfcoreids = [x for x in hcpids if x not in surids]
print "populating seeds"
#set up UDF class
udfsur = Seed('dummy')
udfsur.ReadSeed(read=False,idlist=udfsurids)
if udfsur.exists:
udfsur.PopulateSeed(atoms,read=False)
udfcore = Seed('dummy')
udfcore.ReadSeed(read=False,idlist=udfcoreids)
if udfcore.exists:
udfcore.PopulateSeed(atoms,read=False)
print "reading q4q6files"
qlist = []
for line in open('result1.dat','r'):
line = line.strip()
raw = line.split()
dummy = [int(raw[0]),float(raw[1]),float(raw[2])]
qlist.append(dummy)
print "trimming q4q6files"
qlistcore = [ pos for pos in qlist if pos[0] in udfcoreids ]
print "assingning pos values to atoms"
for atomito in udfcore.atoms:
for pos in qlistcore:
if atomito[0]==pos[0]:
atomito[5]=pos[1]
atomito[6]=pos[2]
break
print "calculating distances"
#seeds are populated. Now find distance of each atom to the surface.
udfcore.CalculateDistances(surface)
print "making distance lists"
#now add the points to the arrays.
for atomcito in udfcore.atoms:
if atomcito[4]<=1.0:
distance1.append([atomcito[5],atomcito[6]])
elif atomcito[4]<=2.0:
distance2.append([atomcito[5],atomcito[6]])
elif atomcito[4]<=3.0:
distance3.append([atomcito[5],atomcito[6]])
elif atomcito[4]<=4.0:
distance4.append([atomcito[5],atomcito[6]])
elif atomcito[4]<=5.0:
distance5.append([atomcito[5],atomcito[6]])
elif atomcito[4]<=6.0:
distance6.append([atomcito[5],atomcito[6]])
elif atomcito[4]<=7.0:
distance7.append([atomcito[5],atomcito[6]])
else:
print "jsjsjsj"
alle.append([atomcito[5],atomcito[6]])
print "finished slice"
print snapshots
#write out the files
print "writing distance lists"
fout = open('distance1.dat','w')
for i in range(len(distance1)):
fout.write(("%f %f\n")%(distance1[i][0],distance1[i][1]))
fout.close()
fout = open('distance2.dat','w')
for i in range(len(distance2)):
fout.write(("%f %f\n")%(distance2[i][0],distance2[i][1]))
fout.close()
fout = open('distance3.dat','w')
for i in range(len(distance3)):
fout.write(("%f %f\n")%(distance3[i][0],distance3[i][1]))
fout.close()
fout = open('distance4.dat','w')
for i in range(len(distance4)):
fout.write(("%f %f\n")%(distance4[i][0],distance4[i][1]))
fout.close()
fout = open('distance5.dat','w')
for i in range(len(distance5)):
fout.write(("%f %f\n")%(distance5[i][0],distance5[i][1]))
fout.close()
fout = open('distance6.dat','w')
for i in range(len(distance6)):
fout.write(("%f %f\n")%(distance6[i][0],distance6[i][1]))
fout.close()
fout = open('distance7.dat','w')
for i in range(len(distance7)):
fout.write(("%f %f\n")%(distance7[i][0],distance7[i][1]))
fout.close()
fout = open('alle.dat','w')
for i in range(len(alle)):
fout.write(("%f %f\n")%(alle[i][0],alle[i][1]))
fout.close()
print "finishing up"
if __name__=='__main__':
MakeStructureHistogram('AB',manual=False,gzip=True)
|
gpl-3.0
| 3,505,470,412,904,452,600 | 33.579082 | 115 | 0.507562 | false | 3.752769 | false | false | false |
galbiati/video-representations
|
models/ATWModel.py
|
1
|
3142
|
import numpy as np
import tensorflow as tf
# Note: the original import list was incomplete; numpy and an RNN-cell module
# are needed for np.arange and tfrnn.LSTMStateTuple used below (which module
# the author aliased as tfrnn is an assumption).
import tensorflow.contrib.rnn as tfrnn
from tensorflow.python.framework import ops
from model import Model
class ATWModel(Model):
"""
ATWModel implements a variant on the E-T-D model from model.Model()
Instead of doing next frame prediction, ATW attempts to encode the entire
sequence, then reproduce the video from only the final latent vectors.
__init__ args:
:encoder is a function that returns a batch of image encodings (rank 2 tensor)
:cell is a recurrent neural network cell that can be passed to tf.nn.rnn_cell.dynamic_rnn
:decoder is a function that returns a batch of decoded images (rank 4 tensor)
:latent_size is the size of the latent space
:activation is the activation function for the LSTM cells
:batchsize is the size of batches (necessary for proper reshaping)
:seqlen is the length of sequences (necessary for proper reshaping)
"""
def __init__(self, encoder, cell, decoder,
latent_size, activation,
batchsize, seqlen):
self.latent_size = latent_size
self.encoder = lambda inputs: encoder(inputs, latent_size=latent_size)
self.cell_fw = cell(num_units=latent_size, activation=activation)
self.cell_bw = cell(num_units=latent_size, activation=activation)
self.decoder = decoder
self.batchsize = batchsize
self.seqlen = seqlen
self.stacked_shape = (batchsize*seqlen, 60, 80, 3)
self.sequence_shape = (batchsize, seqlen, 60, 80, 3)
def build(self, inputs, reuse=False):
with tf.variable_scope('encoder', reuse=reuse):
inputs = self.stack(inputs)
encoded = self.encoder(inputs)
encoded = self.unstack(encoded)
with tf.variable_scope('lstm', reuse=reuse):
# initialize hidden state with ones instead of zeros to ensure pass-through at start
initial_state = tfrnn.LSTMStateTuple(
tf.ones((self.batchsize, self.latent_size)),
tf.zeros((self.batchsize, self.latent_size))
)
# encoder pass
_, seeds = tf.nn.dynamic_rnn(
self.cell_fw, encoded,
initial_state=initial_state,
sequence_length=[self.seqlen]*self.batchsize,
dtype=tf.float32, swap_memory=True,
)
# decoder pass
def rnn_step(next_tuple, next_elem):
input, state = next_tuple
output, next_state = self.cell_fw(input, state)
return (output, next_state)
state = seeds
next_input = state[1]
elems = np.arange(self.seqlen)
outputs, states = tf.scan(
rnn_step, elems, (next_input, state),
swap_memory=True
)
transitioned = tf.transpose(outputs, (1, 0, 2))
transitioned_ = self.stack(transitioned)
with tf.variable_scope('encoder', reuse=True):
decoded = self.decoder(transitioned_)
decoded = self.unstack(decoded)
return encoded, transitioned, decoded
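# A minimal construction sketch (an assumption, not part of the original file):
# the toy encoder/decoder below stand in for the project's real conv nets and
# assume TF 1.x graph mode. model.build(video_batch) then returns the
# (encoded, transitioned, decoded) tensors defined above.
def _toy_encoder(inputs, latent_size):
    # Flatten each 60x80x3 frame and project it into the latent space.
    flat = tf.reshape(inputs, (-1, 60 * 80 * 3))
    return tf.layers.dense(flat, latent_size, name='toy_enc')

def _toy_decoder(latents):
    # Project latent vectors back to flattened frames and reshape.
    flat = tf.layers.dense(latents, 60 * 80 * 3, name='toy_dec')
    return tf.reshape(flat, (-1, 60, 80, 3))

def _make_toy_model():
    return ATWModel(encoder=_toy_encoder, cell=tf.nn.rnn_cell.LSTMCell,
                    decoder=_toy_decoder, latent_size=128, activation=tf.tanh,
                    batchsize=4, seqlen=16)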
|
mit
| -998,173,920,038,844,300 | 37.790123 | 96 | 0.610121 | false | 4.156085 | false | false | false |
jhermann/rituals
|
src/rituals/util/antglob.py
|
1
|
6871
|
# -*- coding: utf-8 -*-
# pylint: disable=too-few-public-methods
""" Recursive globbing with ant-style syntax.
"""
#
# The MIT License (MIT)
#
# Original source (2014-02-17) from https://github.com/zacherates/fileset.py
# Copyright (c) 2012 Aaron Maenpaa
#
# Modifications at https://github.com/jhermann/rituals
# Copyright ⓒ 2015 Jürgen Hermann
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, unicode_literals, print_function
import os
import re
from ._compat import string_types
# TODO: allow '?'
# TODO: matching for Windows? (need to canonize to forward slashes in 'root')
__all__ = ['FileSet', 'includes', 'excludes']
def glob2re(part):
"""Convert a path part to regex syntax."""
return "[^/]*".join(
re.escape(bit).replace(r'\[\^', '[^').replace(r'\[', '[').replace(r'\]', ']')
for bit in part.split("*")
)
def parse_glob(pattern):
"""Generate parts of regex transformed from glob pattern."""
if not pattern:
return
bits = pattern.split("/")
dirs, filename = bits[:-1], bits[-1]
for dirname in dirs:
if dirname == "**":
yield "(|.+/)"
else:
yield glob2re(dirname) + "/"
yield glob2re(filename)
def compile_glob(spec):
"""Convert the given glob `spec` to a compiled regex."""
parsed = "".join(parse_glob(spec))
regex = "^{0}$".format(parsed)
return re.compile(regex)
class Pattern():
"""A single pattern for either inclusion or exclusion."""
def __init__(self, spec, inclusive):
"""Create regex-based pattern matcher from glob `spec`."""
self.compiled = compile_glob(spec.rstrip('/'))
self.inclusive = inclusive
self.is_dir = spec.endswith('/')
def __str__(self):
"""Return inclusiveness indicator and original glob pattern."""
return ('+' if self.inclusive else '-') + self.compiled.pattern
def matches(self, path):
"""Check this pattern against given `path`."""
return bool(self.compiled.match(path))
class FileSet():
""" Ant-style file and directory matching.
Produces an iterator of all of the files that match the provided patterns.
Note that directory matches must end with a slash, and if they're exclusions,
they won't be scanned (which prunes anything in that directory that would
otherwise match).
Directory specifiers:
** matches zero or more directories.
/ path separator.
File specifiers:
* glob style wildcard.
[chars] inclusive character sets.
[^chars] exclusive character sets.
Examples:
**/*.py recursively match all python files.
foo/**/*.py recursively match all python files in the 'foo' directory.
*.py match all the python files in the current directory.
*/*.txt match all the text files in top-level directories.
foo/**/* all files under directory 'foo'.
*/ top-level directories.
foo/ the directory 'foo' itself.
**/foo/ any directory named 'foo'.
**/.* hidden files.
**/.*/ hidden directories.
"""
def __init__(self, root, patterns):
if isinstance(patterns, string_types):
patterns = [patterns]
self.root = root
self.patterns = [i if hasattr(i, 'inclusive') else includes(i) for i in patterns]
def __repr__(self):
return "<FileSet at {0} {1}>".format(repr(self.root), ' '.join(str(i) for i in self. patterns))
def included(self, path, is_dir=False):
"""Check patterns in order, last match that includes or excludes `path` wins. Return `None` on undecided."""
inclusive = None
for pattern in self.patterns:
if pattern.is_dir == is_dir and pattern.matches(path):
inclusive = pattern.inclusive
#print('+++' if inclusive else '---', path, pattern)
return inclusive
def __iter__(self):
for path in self.walk():
yield path
def __or__(self, other):
return set(self) | set(other)
def __ror__(self, other):
return self | other
def __and__(self, other):
return set(self) & set(other)
def __rand__(self, other):
return self & other
def walk(self, **kwargs):
""" Like `os.walk` and taking the same keyword arguments,
but generating paths relative to the root.
Starts in the fileset's root and filters based on its patterns.
If ``with_root=True`` is passed in, the generated paths include
the root path.
"""
lead = ''
if 'with_root' in kwargs and kwargs.pop('with_root'):
lead = self.root.rstrip(os.sep) + os.sep
for base, dirs, files in os.walk(self.root, **kwargs):
prefix = base[len(self.root):].lstrip(os.sep)
bits = prefix.split(os.sep) if prefix else []
for dirname in dirs[:]:
path = '/'.join(bits + [dirname])
inclusive = self.included(path, is_dir=True)
if inclusive:
yield lead + path + '/'
elif inclusive is False:
dirs.remove(dirname)
for filename in files:
path = '/'.join(bits + [filename])
if self.included(path):
yield lead + path
def includes(pattern):
"""A single inclusive glob pattern."""
return Pattern(pattern, inclusive=True)
def excludes(pattern):
"""A single exclusive glob pattern."""
return Pattern(pattern, inclusive=False)
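# A minimal usage sketch (an assumption, not part of the original module):
# list Python sources under the current tree while pruning .git directories.
if __name__ == '__main__':
    tree = FileSet('.', [includes('**/*.py'), excludes('**/.git/')])
    for relpath in tree:
        print(relpath)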
|
gpl-2.0
| 4,840,534,147,977,032,000 | 33.862944 | 116 | 0.600903 | false | 4.265839 | false | false | false |
DigiThinkIT/stem
|
test/unit/version.py
|
1
|
8982
|
"""
Unit tests for the stem.version.Version parsing and class.
"""
import unittest
import stem.util.system
import stem.version
from stem.version import Version
try:
# added in python 3.3
from unittest.mock import patch
except ImportError:
from mock import patch
TOR_VERSION_OUTPUT = """Mar 22 23:09:37.088 [notice] Tor v0.2.2.35 \
(git-73ff13ab3cc9570d). This is experimental software. Do not rely on it for \
strong anonymity. (Running on Linux i686)
Tor version 0.2.2.35 (git-73ff13ab3cc9570d)."""
class TestVersion(unittest.TestCase):
@patch('stem.util.system.call')
@patch.dict(stem.version.VERSION_CACHE)
def test_get_system_tor_version(self, call_mock):
call_mock.return_value = TOR_VERSION_OUTPUT.splitlines()
version = stem.version.get_system_tor_version()
self.assert_versions_match(version, 0, 2, 2, 35, None, 'git-73ff13ab3cc9570d')
self.assertEqual('73ff13ab3cc9570d', version.git_commit)
call_mock.assert_called_once_with('tor --version')
self.assertEqual({'tor': version}, stem.version.VERSION_CACHE)
def test_parsing(self):
"""
Tests parsing by the Version class constructor.
"""
    # valid versions with various numbers of components in the version
version = Version('0.1.2.3-tag')
self.assert_versions_match(version, 0, 1, 2, 3, 'tag', None)
version = Version('0.1.2.3')
self.assert_versions_match(version, 0, 1, 2, 3, None, None)
version = Version('0.1.2-tag')
self.assert_versions_match(version, 0, 1, 2, None, 'tag', None)
version = Version('0.1.2')
self.assert_versions_match(version, 0, 1, 2, None, None, None)
# checks an empty tag
version = Version('0.1.2.3-')
self.assert_versions_match(version, 0, 1, 2, 3, '', None)
version = Version('0.1.2-')
self.assert_versions_match(version, 0, 1, 2, None, '', None)
    # check with extra information
version = Version('0.1.2.3-tag (git-73ff13ab3cc9570d)')
self.assert_versions_match(version, 0, 1, 2, 3, 'tag', 'git-73ff13ab3cc9570d')
self.assertEqual('73ff13ab3cc9570d', version.git_commit)
version = Version('0.1.2.3-tag ()')
self.assert_versions_match(version, 0, 1, 2, 3, 'tag', '')
version = Version('0.1.2 (git-73ff13ab3cc9570d)')
self.assert_versions_match(version, 0, 1, 2, None, None, 'git-73ff13ab3cc9570d')
# checks invalid version strings
self.assertRaises(ValueError, stem.version.Version, '')
self.assertRaises(ValueError, stem.version.Version, '1.2.3.4nodash')
self.assertRaises(ValueError, stem.version.Version, '1.2.3.a')
self.assertRaises(ValueError, stem.version.Version, '1.2.a.4')
self.assertRaises(ValueError, stem.version.Version, '1x2x3x4')
self.assertRaises(ValueError, stem.version.Version, '12.3')
self.assertRaises(ValueError, stem.version.Version, '1.-2.3')
def test_comparison(self):
"""
    Tests comparison between Version instances.
"""
# check for basic incrementing in each portion
self.assert_version_is_greater('1.1.2.3-tag', '0.1.2.3-tag')
self.assert_version_is_greater('0.2.2.3-tag', '0.1.2.3-tag')
self.assert_version_is_greater('0.1.3.3-tag', '0.1.2.3-tag')
self.assert_version_is_greater('0.1.2.4-tag', '0.1.2.3-tag')
self.assert_version_is_greater('0.1.2.3-ugg', '0.1.2.3-tag')
self.assert_version_is_equal('0.1.2.3-tag', '0.1.2.3-tag')
# check with common tags
self.assert_version_is_greater('0.1.2.3-beta', '0.1.2.3-alpha')
self.assert_version_is_greater('0.1.2.3-rc', '0.1.2.3-beta')
# checks that a missing patch level equals zero
self.assert_version_is_equal('0.1.2', '0.1.2.0')
self.assert_version_is_equal('0.1.2-tag', '0.1.2.0-tag')
# checks for missing patch or status
self.assert_version_is_greater('0.1.2.3-tag', '0.1.2.3')
self.assert_version_is_greater('0.1.2.3-tag', '0.1.2-tag')
self.assert_version_is_greater('0.1.2.3-tag', '0.1.2')
self.assert_version_is_equal('0.1.2.3', '0.1.2.3')
self.assert_version_is_equal('0.1.2', '0.1.2')
def test_nonversion_comparison(self):
"""
Checks that we can be compared with other types.
    In python 3 only equality comparisons work; greater than and less than
    comparisons result in a TypeError.
"""
test_version = Version('0.1.2.3')
self.assertNotEqual(test_version, None)
self.assertNotEqual(test_version, 5)
def test_string(self):
"""
Tests the Version -> string conversion.
"""
# checks conversion with various numbers of arguments
self.assert_string_matches('0.1.2.3-tag')
self.assert_string_matches('0.1.2.3')
self.assert_string_matches('0.1.2')
def test_requirements_greater_than(self):
"""
Checks a VersionRequirements with a single greater_than rule.
"""
requirements = stem.version._VersionRequirements()
requirements.greater_than(Version('0.2.2.36'))
self.assertTrue(Version('0.2.2.36') >= requirements)
self.assertTrue(Version('0.2.2.37') >= requirements)
self.assertTrue(Version('0.2.3.36') >= requirements)
self.assertFalse(Version('0.2.2.35') >= requirements)
self.assertFalse(Version('0.2.1.38') >= requirements)
requirements = stem.version._VersionRequirements()
requirements.greater_than(Version('0.2.2.36'), False)
self.assertFalse(Version('0.2.2.35') >= requirements)
self.assertFalse(Version('0.2.2.36') >= requirements)
self.assertTrue(Version('0.2.2.37') >= requirements)
def test_requirements_less_than(self):
"""
Checks a VersionRequirements with a single less_than rule.
"""
requirements = stem.version._VersionRequirements()
requirements.less_than(Version('0.2.2.36'))
self.assertTrue(Version('0.2.2.36') >= requirements)
self.assertTrue(Version('0.2.2.35') >= requirements)
self.assertTrue(Version('0.2.1.38') >= requirements)
self.assertFalse(Version('0.2.2.37') >= requirements)
self.assertFalse(Version('0.2.3.36') >= requirements)
requirements = stem.version._VersionRequirements()
requirements.less_than(Version('0.2.2.36'), False)
self.assertFalse(Version('0.2.2.37') >= requirements)
self.assertFalse(Version('0.2.2.36') >= requirements)
self.assertTrue(Version('0.2.2.35') >= requirements)
def test_requirements_in_range(self):
"""
Checks a VersionRequirements with a single in_range rule.
"""
requirements = stem.version._VersionRequirements()
requirements.in_range(Version('0.2.2.36'), Version('0.2.2.38'))
self.assertFalse(Version('0.2.2.35') >= requirements)
self.assertTrue(Version('0.2.2.36') >= requirements)
self.assertTrue(Version('0.2.2.37') >= requirements)
self.assertFalse(Version('0.2.2.38') >= requirements)
# rule for 'anything in the 0.2.2.x series'
requirements = stem.version._VersionRequirements()
requirements.in_range(Version('0.2.2.0'), Version('0.2.3.0'))
for index in xrange(0, 100):
self.assertTrue(Version('0.2.2.%i' % index) >= requirements)
def test_requirements_multiple_rules(self):
"""
    Checks that a VersionRequirements is the logical 'or' when it has multiple rules.
"""
# rule to say 'anything but the 0.2.2.x series'
requirements = stem.version._VersionRequirements()
requirements.greater_than(Version('0.2.3.0'))
requirements.less_than(Version('0.2.2.0'), False)
self.assertTrue(Version('0.2.3.0') >= requirements)
self.assertFalse(Version('0.2.2.0') >= requirements)
for index in xrange(0, 100):
self.assertFalse(Version('0.2.2.%i' % index) >= requirements)
def assert_versions_match(self, version, major, minor, micro, patch, status, extra):
"""
Asserts that the values for a types.Version instance match the given
values.
"""
self.assertEqual(major, version.major)
self.assertEqual(minor, version.minor)
self.assertEqual(micro, version.micro)
self.assertEqual(patch, version.patch)
self.assertEqual(status, version.status)
self.assertEqual(extra, version.extra)
if extra is None:
self.assertEqual(None, version.git_commit)
def assert_version_is_greater(self, first_version, second_version):
"""
    Asserts that the parsed version of the first version is greater than the
second (also checking the inverse).
"""
version1 = Version(first_version)
version2 = Version(second_version)
self.assertEqual(version1 > version2, True)
self.assertEqual(version1 < version2, False)
def assert_version_is_equal(self, first_version, second_version):
"""
Asserts that the parsed version of the first version equals the second.
"""
version1 = Version(first_version)
version2 = Version(second_version)
self.assertEqual(version1, version2)
def assert_string_matches(self, version):
"""
Parses the given version string then checks that its string representation
matches the input.
"""
self.assertEqual(version, str(Version(version)))
|
lgpl-3.0
| -1,618,298,573,717,635,800 | 34.223529 | 86 | 0.676353 | false | 3.205567 | true | false | false |
cadrev/Titanic-Prediction
|
data-munging.py
|
1
|
3412
|
#
# Title  : Data munging (AKA cleaning) the Titanic Data
# Author : Felan Carlo Garcia
#
# Notes:
# -- Code is based on the Kaggle Python Tutorial
# -- data cleaning prior to implementing a machine learning algorithm.
import numpy as np
import pandas as pd
def processdata(filename, outputname):
df = pd.read_csv(filename,header=0)
# Make a new column 'Gender' and EmbarkedNum to convert the string
# information into an integer value.
# We do this because general machine learning algorithms do not
# work on string values.
df['Gender'] = df['Sex'].map({'female': 0, 'male': 1}).astype(int)
df['EmbarkedNum'] = df['Embarked'].map({'S': 0, 'C': 1, 'Q': 1}).astype(int)
# Executing the code:
  # --print df[df['Age'].isnull()]['Sex']-- shows that the Titanic data contains
# some null values of the ages of the passengers.
# In this case, we can either drop the row or we can assign an arbitrary
# value to fill the missing data.
# For this code, arbitrary age data is obtained by using the median
# age data of the passengers. We make a new column 'AgeFill' and place
# the median data on the missing values instead of directly modifying
# the 'Age' column
df['AgeFill'] = df['Age']
for i in range(0, 2):
for j in range(0, 3):
median = df[(df['Gender'] == i) & (df['Pclass'] == j+1)]['Age'].dropna().median()
df.loc[ (df.Age.isnull()) & (df.Gender == i) & (df.Pclass == j+1),'AgeFill'] = median
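  # Illustrative example (hypothetical passenger, not taken from the data above):
  # a male 3rd-class passenger (Gender == 1, Pclass == 3) with a missing Age would
  # receive the median age computed over all male 3rd-class passengers.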
# We add a new column 'AgeIsNull' to know which records has a missing
# values previously.
# We then interpolate the missing values from the 'Fare' column.
df['AgeIsNull'] = pd.isnull(df.Age).astype(int)
df['Fare'] = df['Fare'].interpolate()
# ------------- Feature Engineering Part --------------------
# Feature Engineering is the process of using domain/expert
# knowledge of the data to create features that make machine
# learning algorithms work better.
#
# In this case, studying the data shows that women and children
# have higher survival rates compared to men. Thus we add
# two additional features: 'Female' and 'Children', in an attempt
# to assist our learning model in its prediction.
# At the same time we add features Age*Class and FamilySize
# as additional engineered feature that may help our learning
# model
df['Children'] = df['AgeFill'].map(lambda x: 1 if x < 6.0 else 0)
df['Female'] = df['Gender'].map(lambda x: 1 if x == 0 else 0)
df['FamilySize'] = df['SibSp'] + df['Parch']
df['Age*Class'] = df['AgeFill'] * df['Pclass']
# Since most machine learning algorithms don't work on strings,
# we drop the columns in our pandas dataframe containing object
# datatypes.
# The code:
# --print df.dtypes[df.dtypes.map(lambda x: x=='object')]--
# will show which columns are made of object datatypes.
#
# In this case these are the following columns containing
# object.string:
# Age, Name, Sex, Ticket, Cabin, Embarked, Fare
#
# We drop the following objects columns along with the other data
# since they wont likely contribute to our machine learning
# prediction
df = df.drop(['Age','Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'], axis=1)
df.to_csv(outputname, sep=',', index=False)
return df
def main():
print processdata('titanic-data-shuffled.csv', 'final-data.csv')
if __name__ == '__main__':
main()
|
mit
| 6,481,765,875,969,964,000 | 36.911111 | 91 | 0.663247 | false | 3.412 | false | false | false |
bfirsh/docker-py
|
setup.py
|
1
|
2327
|
#!/usr/bin/env python
import codecs
import os
import sys
from setuptools import setup, find_packages
ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)
requirements = [
'requests >= 2.5.2, != 2.11.0',
'six >= 1.4.0',
'websocket-client >= 0.32.0',
'docker-pycreds >= 0.2.1'
]
if sys.platform == 'win32':
requirements.append('pypiwin32 >= 219')
extras_require = {
':python_version < "3.5"': 'backports.ssl_match_hostname >= 3.5',
# While not imported explicitly, the ipaddress module is required for
# ssl_match_hostname to verify hosts match with certificates via
# ServerAltname: https://pypi.python.org/pypi/backports.ssl_match_hostname
':python_version < "3.3"': 'ipaddress >= 1.0.16',
}
version = None
exec(open('docker/version.py').read())
with open('./test-requirements.txt') as test_reqs_txt:
test_requirements = [line for line in test_reqs_txt]
long_description = ''
try:
with codecs.open('./README.rst', encoding='utf-8') as readme_rst:
long_description = readme_rst.read()
except IOError:
# README.rst is only generated on release. Its absence should not prevent
# setup.py from working properly.
pass
setup(
name="docker",
version=version,
description="A Python library for the Docker Engine API.",
long_description=long_description,
url='https://github.com/docker/docker-py',
packages=find_packages(exclude=["tests.*", "tests"]),
install_requires=requirements,
tests_require=test_requirements,
extras_require=extras_require,
zip_safe=False,
test_suite='tests',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Utilities',
'License :: OSI Approved :: Apache Software License',
],
maintainer='Joffrey F',
maintainer_email='[email protected]',
)
|
apache-2.0
| -7,816,205,614,684,412,000 | 30.026667 | 78 | 0.646755 | false | 3.699523 | true | false | false |
NERC-CEH/ecomaps
|
ecomaps/controllers/wmsviz.py
|
1
|
9385
|
# Copyright (C) 2007 STFC & NERC (Science and Technology Facilities Council).
# This software may be distributed under the terms of the
# Q Public License, version 1.0 or later.
# http://ndg.nerc.ac.uk/public_docs/QPublic_license.txt
#
"""
Controller for the 'View' tab - allowing the display of WMC map layers
@author C Byrom Feb 08, Modified D Lowe, May 09
"""
import logging
import urllib2
import urlparse
from cStringIO import StringIO
import xml.sax.saxutils as saxutils
#from ows_server.models import Utilities
from paste.httpexceptions import HTTPNotFound
from paste.request import parse_querystring
import ecomaps.lib.utils as utils
# ecomaps imports
from ecomaps.model import selectedItem
from ecomaps.lib.base import BaseController, response, config, request, c, session, render, abort
from ecomaps.lib.base import app_globals as g
from ecomaps.lib.wmc_util import GetWebMapContext, GetWebMapCapabilities, GetLegend, GetLegendUrl, GetFeatureInfo, openURL, GetResponse, parseEndpointString, getQueryParameter
from ecomaps.lib.build_figure import build_figure
from ecomaps.lib.status_builder import StatusBuilder
from ecomaps.lib.base import request
log = logging.getLogger(__name__)
class WmsvizController(BaseController):
_pilImageFormats = {
'image/png': 'PNG',
'image/jpg': 'JPEG',
'image/jpeg': 'JPEG',
'image/gif': 'GIF',
'image/tiff': 'TIFF'
}
indexTemplate = 'wmsviz.html'
def index(self):
"""
Default controller method to handle the initial requests to the page
"""
log.debug('entered wmsviz controller index action')
return HTTPNotFound
g.helpIcon='layout/icons/help.png' #needs to go in config
self.inputs=dict(parse_querystring(request.environ))
log.info(self.inputs)
c.wmcURL = ""
# check if all we're doing is removing a view item
if 'removeItem' in self.inputs:
return self.removeViewItem(self.inputs['removeItem'])
# check if we're doing an AJAX callback to get some WMC data
if 'REQUEST' in self.inputs:
if self.inputs['REQUEST'] == 'GetWebMapContext':
wmc= GetWebMapContext(self)
log.debug("finished wmsviz controller index action, req = GetWebMapContext")
return wmc
if self.inputs['REQUEST'] == 'GetWebMapCapabilities':
wmcDoc = GetWebMapCapabilities(self.inputs['ENDPOINT'])
response.headers['Content-Type'] = 'text/xml'
log.debug("finished wmsviz controller index action, req = GetWebMapCapabilities")
return wmcDoc
elif self.inputs['REQUEST'] == 'GetLegend':
resp = GetLegend(self)
log.debug("finished wmsviz controller index action, req = GetLegend")
return resp
elif self.inputs['REQUEST'] == 'GetLegendUrl':
resp = GetLegendUrl(self)
log.debug("finished wmsviz controller index action, req = GetLegendUrl")
return resp
if self.inputs['REQUEST'] == 'GetDisplayOptions':
jsonTxt = GetResponse(self.inputs['URL'])
response.headers['Content-Type'] = 'application/json'
log.debug("finished wmsviz controller index action, req = GetDisplayOptions")
return jsonTxt
if self.inputs['REQUEST'] == 'GetAxisConfig':
respText = GetResponse(self.inputs['URL'])
response.headers['Content-Type'] = 'text/xml'
return respText
if self.inputs['REQUEST'] == 'proxy':
# Client is requesting to use server as a proxy. Only forward the request if the
# request parameter value is for an acceptable request type.
url = self.inputs['URL']
requestType = getQueryParameter(url, 'request')
if requestType.lower() == 'getfeatureinfo':
try:
info = GetFeatureInfo(url)
except Exception, exc:
log.info("REQUEST:proxy Error making request to %s: %s" % (self.inputs['URL'], exc.__str__()))
info = "<p>Information is not available for this layer or position.</p>"
log.debug("finished wmsviz controller index action, req = GetFeatureInfo")
return "<FeatureInfo>" + saxutils.escape(info) + "</FeatureInfo>"
else:
log.info("Proxy forwarding refused for request of type %s to URL %s" % (requestType, url))
return None
#get server information from config file
g.server=config['app_conf']['serverurl']
statusBuilder = StatusBuilder()
status = statusBuilder.getCurrentStatus('wmsviz')
initialSetup = self._buildInitialSetup(self.inputs.get('ENDPOINT'))
session.save()
log.info('SAVED SESSION')
c.initialSetupJSON = utils.toJSON(initialSetup)
c.initialStatus = utils.toJSON(status)
log.debug("request.params = %s" % (request.params,))
log.debug("request.headers = %s" % (request.headers,))
log.debug("finished wmsviz controller index action")
return render(self.indexTemplate)
def _buildInitialSetup(self, endpointParam):
initialSetup = []
if endpointParam != None:
for ep in self.inputs['ENDPOINT'].split(','):
endpoint = {}
o = urlparse.urlparse(ep)
if o.path.find(':') > 0:
path = o.path[:o.path.find(':')]
url = "%(scheme)s://%(hostname)s%(port)s%(path)s" % {
'scheme' : o.scheme if o.scheme != None else '',
'hostname' : o.hostname if o.hostname != None else '',
'port' : ':' + str(o.port) if o.port != None else '',
'path': path,
}
layers = o.path[o.path.find(':')+1:].split('|')
endpoint['layers'] = layers
else:
url = ep
layers = ""
endpoint['url'] = url
initialSetup.append(endpoint)
return initialSetup
def addViewItem(self,endpoint):
"""
Add a selected item to the session
- if this is the first item, then display the selections tab
@param endpoint: WMC endpoint
"""
item = selectedItem.SelectedItem(None, None, None, endpoint)
selections = [item,]
# avoid duplicates
if 'viewItems' in session:
for selection in session['viewItems']:
if selection.wmcURL != endpoint:
selections.append(selection)
session['viewItems'] = selections
session.save()
def removeViewItem(self,endpoint):
"""
Remove view item from session data
- NB, do this by rebuilding the session data without the input data included
@param endpoint: The WMC endpoint of the view item to remove
"""
selections = []
for selection in session['viewItems']:
if selection.wmcURL != endpoint:
selections.append(selection)
# if the new list is empty, remove the session variable
if len(selections) == 0:
del session['viewItems']
c.UpdatePageTabs=1
else:
session['viewItems'] = selections
session.save()
def removeAllViewItems(self):
"""
Remove all old view items - clears out old endpoints
"""
session['viewItems']=[]
session.save()
def get_figure(self):
log.debug("running wmsvis.get_figure")
# Use .copy() on params to get a *writeable* MultiDict instance
params = request.params.copy()
log.debug("params = %s" % (params,))
# The response headers must be strings, not unicode, for modwsgi -
# ensure that the format is a string, omitting any non-ASCII
# characters.
format = params.pop('figFormat', 'image/png')
formatStr = format.encode('ascii', 'ignore')
finalImage = build_figure(params)
buffer = StringIO()
finalImage.save(buffer, self._pilImageFormats[formatStr])
response.headers['Content-Type'] = formatStr
# Remove headers that prevent browser caching, otherwise IE will not
# allow the image to be saved in its original format.
if 'Cache-Control' in response.headers:
del response.headers['Cache-Control']
if 'Pragma' in response.headers:
del response.headers['Pragma']
return buffer.getvalue()
|
gpl-2.0
| 131,777,740,538,252,640 | 36.242063 | 175 | 0.558977 | false | 4.593735 | false | false | false |
EtienneCmb/tensorpac
|
tensorpac/methods/meth_erpac.py
|
1
|
5370
|
"""Individual methods for assessing ERPAC."""
import numpy as np
from scipy.stats import chi2
from joblib import Parallel, delayed
from tensorpac.gcmi import nd_mi_gg
from tensorpac.config import CONFIG
def pearson(x, y, st='i...j, k...j->ik...'):
"""Pearson correlation for multi-dimensional arrays.
Parameters
----------
x, y : array_like
Compute pearson correlation between the multi-dimensional arrays
x and y.
    st : string | 'i...j, k...j->ik...'
The string to pass to the np.einsum function.
Returns
-------
cov: array_like
The pearson correlation array.
"""
n = x.shape[-1]
# Distribution center :
mu_x = x.mean(-1, keepdims=True)
mu_y = y.mean(-1, keepdims=True)
# Distribution deviation :
s_x = x.std(-1, ddof=n - 1, keepdims=True)
s_y = y.std(-1, ddof=n - 1, keepdims=True)
# Compute correlation coefficient :
cov = np.einsum(st, x, y)
mu_xy = np.einsum(st, mu_x, mu_y)
cov -= n * mu_xy
cov /= np.einsum(st, s_x, s_y)
return cov
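# Minimal usage sketch (illustrative only; the shapes below are assumptions read
# off the code above, not taken from the library's documentation). The default
# einsum string contracts over the last (epoch) axis:
#   >>> x = np.random.rand(2, 50, 100)   # (n_x, n_times, n_epochs)
#   >>> y = np.random.rand(3, 50, 100)   # (n_y, n_times, n_epochs)
#   >>> pearson(x, y).shape
#   (2, 3, 50)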
def erpac(pha, amp):
"""Event-Related Phase Amplitude Coupling.
    This function computes the correlation coefficient between a circular and a
    linear random variable at each time point and across trials. Adapted from
    the circ_corrcc function of the Circular Statistics Toolbox for
    Matlab by Philipp Berens, 2009 :cite:`berens2009circstat`. This function is
    an adaptation of Voytek, 2013 :cite:`voytek2013method` for tensors.
Parameters
----------
pha, amp : array_like
Respectively the arrays of phases of shape (n_pha, ..., n_epochs) and
the array of amplitudes of shape (n_amp, ..., n_epochs).
Returns
-------
rho : array_like
Array of correlation coefficients of shape (n_amp, n_pha, ...)
pval : array_like
Array of p-values of shape (n_amp, n_pha, ...).
References
----------
Voytek et al. 2013 :cite:`voytek2013method`
"""
# Compute correlation coefficient for sin and cos independently
n = pha.shape[-1]
sa, ca = np.sin(pha), np.cos(pha)
rxs = pearson(amp, sa)
rxc = pearson(amp, ca)
rcs = pearson(sa, ca, st='i...j, k...j->i...')
rcs = rcs[np.newaxis, ...]
# Compute angular-linear correlation (equ. 27.47)
rho = np.sqrt((rxc**2 + rxs**2 - 2 * rxc * rxs * rcs) / (1 - rcs**2))
# Compute pvalue :
pval = 1. - chi2.cdf(n * rho**2, 2)
return rho, pval
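# Minimal usage sketch (illustrative only; shapes follow the docstring above and
# the random inputs are assumptions, not part of the package):
#   >>> pha = np.random.uniform(-np.pi, np.pi, (1, 50, 200))  # (n_pha, n_times, n_epochs)
#   >>> amp = np.random.rand(1, 50, 200)                      # (n_amp, n_times, n_epochs)
#   >>> rho, pval = erpac(pha, amp)
#   >>> rho.shape, pval.shape
#   ((1, 1, 50), (1, 1, 50))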
def ergcpac(pha, amp, smooth=None, n_jobs=-1):
"""Event Related PAC using the Gaussian Copula Mutual Information.
This function assumes that phases and amplitudes have already been
prepared i.e. phases should be represented in a unit circle
(np.c_[np.sin(pha), np.cos(pha)]) and both inputs should also have been
copnormed.
Parameters
----------
pha, amp : array_like
Respectively arrays of phases of shape (n_pha, n_times, 2, n_epochs)
and the array of amplitudes of shape (n_amp, n_times, 1, n_epochs).
Returns
-------
erpac : array_like
Array of correlation coefficients of shape (n_amp, n_pha, n_times)
References
----------
Ince et al. 2017 :cite:`ince2017statistical`
"""
# get shapes
(n_pha, n_times, _, n_epochs), n_amp = pha.shape, amp.shape[0] # noqa
# compute mutual information across trials
ergcpac = np.zeros((n_amp, n_pha, n_times))
if isinstance(smooth, int):
# define the temporal smoothing vector
vec = np.arange(smooth, n_times - smooth, 1)
times = [slice(k - smooth, k + smooth + 1) for k in vec]
# move time axis to avoid to do it inside parallel
pha, amp = np.moveaxis(pha, 1, -2), np.moveaxis(amp, 1, -2)
# function to run in parallel across times
def _fcn(t): # noqa
_erpac = np.zeros((n_amp, n_pha), dtype=float)
xp, xa = pha[..., t, :], amp[..., t, :]
for a in range(n_amp):
_xa = xa.reshape(n_amp, 1, -1)
for p in range(n_pha):
_xp = xp.reshape(n_pha, 2, -1)
_erpac[a, p] = nd_mi_gg(_xp[p, ...], _xa[a, ...])
return _erpac
# run the function across time points
_ergcpac = Parallel(n_jobs=n_jobs, **CONFIG['JOBLIB_CFG'])(delayed(
_fcn)(t) for t in times)
# reconstruct the smoothed array
for a in range(n_amp):
for p in range(n_pha):
mean_vec = np.zeros((n_times,), dtype=float)
for t, _gc in zip(times, _ergcpac):
ergcpac[a, p, t] += _gc[a, p]
mean_vec[t] += 1
ergcpac[a, p, :] /= mean_vec
else:
for a in range(n_amp):
for p in range(n_pha):
ergcpac[a, p, ...] = nd_mi_gg(pha[p, ...], amp[a, ...])
return ergcpac
def swap_erpac_trials(pha):
"""Swap trials across the last dimension."""
tr_ = np.random.permutation(pha.shape[-1])
return pha[..., tr_]
def _ergcpac_perm(pha, amp, smooth=None, n_jobs=-1, n_perm=200):
def _ergcpac_single_perm():
p = swap_erpac_trials(pha)
return ergcpac(p, amp, smooth=smooth, n_jobs=1)
out = Parallel(n_jobs=n_jobs, **CONFIG['JOBLIB_CFG'])(delayed(
_ergcpac_single_perm)() for _ in range(n_perm))
return np.stack(out)
|
bsd-3-clause
| -5,151,203,058,610,020,000 | 32.987342 | 79 | 0.577281 | false | 3.27439 | false | false | false |
tobiasgoecke/django-messages
|
tests/settings.py
|
1
|
1335
|
import os.path
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django_messages'
]
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
SITE_ID = 1
SECRET_KEY = '+zzix-&k$afk-k0d0s7v01w0&15z#ne$71qf28#e$$c*@g742z'
ROOT_URLCONF = "urls"
DEBUG = True
STATIC_URL = '/static/'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(os.path.dirname(__file__), 'database.db'),
}
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
|
bsd-3-clause
| -2,423,822,688,440,526,300 | 25.7 | 71 | 0.632959 | false | 3.608108 | false | true | false |
bruckhaus/challenges
|
python_challenges/project_euler/p012_highly_divisible.py
|
1
|
2097
|
__author__ = 'tilmannbruckhaus'
import numpy
import sys
class HighlyDivisible:
# Highly divisible triangular number
# Problem 12
# The sequence of triangle numbers is generated by adding the natural numbers.
# So the 7th triangle number would be 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28.
# The first ten terms would be:
#
# 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
#
# Let us list the factors of the first seven triangle numbers:
#
# 1: 1
# 3: 1,3
# 6: 1,2,3,6
# 10: 1,2,5,10
# 15: 1,3,5,15
# 21: 1,3,7,21
# 28: 1,2,4,7,14,28
# We can see that 28 is the first triangle number to have over five divisors.
#
# What is the value of the first triangle number to have over five hundred divisors?
#
# See: http://www.wikihow.com/Determine-the-Number-of-Divisors-of-an-Integer
def __init__(self):
pass
@staticmethod
def find(limit):
index = 1
candidate = 1
while True:
factors = HighlyDivisible.factor(candidate)
if HighlyDivisible.num_divisors(factors) > limit:
return candidate
index += 1
candidate += index
@staticmethod
def factor(candidate):
factors = []
for i in range(2, int(numpy.ma.sqrt(candidate)) + 1):
exponent = 0
while candidate % i == 0:
# i is a factor
exponent += 1
candidate /= i
if exponent > 0:
factors.append([i, exponent])
if candidate > 1:
# we are left with a prime:
factors.append([candidate, 1])
return factors
@staticmethod
def num_divisors(factors):
num_divisors = 1
for (divisor, exponent) in factors:
# see wikiHow link above
num_divisors *= exponent + 1
return num_divisors
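    # Worked example of the divisor-count rule used above (illustrative):
    # 28 = 2**2 * 7**1, so factor(28) -> [[2, 2], [7, 1]] and
    # num_divisors -> (2 + 1) * (1 + 1) = 6, matching the divisors
    # 1, 2, 4, 7, 14, 28 listed in the problem statement.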
if __name__ == '__main__':
count = 500
result = HighlyDivisible.find(count)
print "\nThe value of the first triangle number to have over", count, "divisors is", result
|
mit
| -1,834,102,411,324,664,800 | 27.337838 | 95 | 0.556986 | false | 3.731317 | false | false | false |
nathanielvarona/airflow
|
airflow/jobs/local_task_job.py
|
1
|
8884
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import signal
from typing import Optional
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.jobs.base_job import BaseJob
from airflow.models.taskinstance import TaskInstance
from airflow.stats import Stats
from airflow.task.task_runner import get_task_runner
from airflow.utils import timezone
from airflow.utils.net import get_hostname
from airflow.utils.session import provide_session
from airflow.utils.state import State
class LocalTaskJob(BaseJob):
"""LocalTaskJob runs a single task instance."""
__mapper_args__ = {'polymorphic_identity': 'LocalTaskJob'}
def __init__(
self,
task_instance: TaskInstance,
ignore_all_deps: bool = False,
ignore_depends_on_past: bool = False,
ignore_task_deps: bool = False,
ignore_ti_state: bool = False,
mark_success: bool = False,
pickle_id: Optional[str] = None,
pool: Optional[str] = None,
*args,
**kwargs,
):
self.task_instance = task_instance
self.dag_id = task_instance.dag_id
self.ignore_all_deps = ignore_all_deps
self.ignore_depends_on_past = ignore_depends_on_past
self.ignore_task_deps = ignore_task_deps
self.ignore_ti_state = ignore_ti_state
self.pool = pool
self.pickle_id = pickle_id
self.mark_success = mark_success
self.task_runner = None
        # terminating state is used so that a job doesn't try to
        # terminate multiple times
self.terminating = False
super().__init__(*args, **kwargs)
def _execute(self):
self.task_runner = get_task_runner(self)
# pylint: disable=unused-argument
def signal_handler(signum, frame):
"""Setting kill signal handler"""
self.log.error("Received SIGTERM. Terminating subprocesses")
self.on_kill()
self.task_instance.refresh_from_db()
if self.task_instance.state not in State.finished:
self.task_instance.set_state(State.FAILED)
self.task_instance._run_finished_callback( # pylint: disable=protected-access
error="task received sigterm"
)
raise AirflowException("LocalTaskJob received SIGTERM signal")
# pylint: enable=unused-argument
signal.signal(signal.SIGTERM, signal_handler)
if not self.task_instance.check_and_change_state_before_execution(
mark_success=self.mark_success,
ignore_all_deps=self.ignore_all_deps,
ignore_depends_on_past=self.ignore_depends_on_past,
ignore_task_deps=self.ignore_task_deps,
ignore_ti_state=self.ignore_ti_state,
job_id=self.id,
pool=self.pool,
):
self.log.info("Task is not able to be run")
return
try:
self.task_runner.start()
heartbeat_time_limit = conf.getint('scheduler', 'scheduler_zombie_task_threshold')
# task callback invocation happens either here or in
# self.heartbeat() instead of taskinstance._run_raw_task to
# avoid race conditions
#
# When self.terminating is set to True by heartbeat_callback, this
# loop should not be restarted. Otherwise self.handle_task_exit
# will be invoked and we will end up with duplicated callbacks
while not self.terminating:
# Monitor the task to see if it's done. Wait in a syscall
# (`os.wait`) for as long as possible so we notice the
# subprocess finishing as quick as we can
max_wait_time = max(
0, # Make sure this value is never negative,
min(
(
heartbeat_time_limit
- (timezone.utcnow() - self.latest_heartbeat).total_seconds() * 0.75
),
self.heartrate,
),
)
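                # Illustrative numbers (assumed, not taken from any real config):
                # with a 300s zombie threshold, a 5s heartrate and 20s since the
                # last heartbeat, this waits min(300 - 20 * 0.75, 5) = 5s before
                # polling the task runner again.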
return_code = self.task_runner.return_code(timeout=max_wait_time)
if return_code is not None:
self.handle_task_exit(return_code)
return
self.heartbeat()
# If it's been too long since we've heartbeat, then it's possible that
# the scheduler rescheduled this task, so kill launched processes.
# This can only really happen if the worker can't read the DB for a long time
time_since_last_heartbeat = (timezone.utcnow() - self.latest_heartbeat).total_seconds()
if time_since_last_heartbeat > heartbeat_time_limit:
Stats.incr('local_task_job_prolonged_heartbeat_failure', 1, 1)
self.log.error("Heartbeat time limit exceeded!")
raise AirflowException(
"Time since last heartbeat({:.2f}s) "
"exceeded limit ({}s).".format(time_since_last_heartbeat, heartbeat_time_limit)
)
finally:
self.on_kill()
def handle_task_exit(self, return_code: int) -> None:
"""Handle case where self.task_runner exits by itself"""
self.log.info("Task exited with return code %s", return_code)
self.task_instance.refresh_from_db()
# task exited by itself, so we need to check for error file
# in case it failed due to runtime exception/error
error = None
if self.task_instance.state == State.RUNNING:
# This is for a case where the task received a sigkill
# while running
self.task_instance.set_state(State.FAILED)
if self.task_instance.state != State.SUCCESS:
error = self.task_runner.deserialize_run_error()
self.task_instance._run_finished_callback(error=error) # pylint: disable=protected-access
def on_kill(self):
self.task_runner.terminate()
self.task_runner.on_finish()
@provide_session
def heartbeat_callback(self, session=None):
"""Self destruct task if state has been moved away from running externally"""
if self.terminating:
# ensure termination if processes are created later
self.task_runner.terminate()
return
self.task_instance.refresh_from_db()
ti = self.task_instance
if ti.state == State.RUNNING:
fqdn = get_hostname()
same_hostname = fqdn == ti.hostname
if not same_hostname:
self.log.warning(
"The recorded hostname %s " "does not match this instance's hostname " "%s",
ti.hostname,
fqdn,
)
raise AirflowException("Hostname of job runner does not match")
current_pid = self.task_runner.process.pid
same_process = ti.pid == current_pid
if ti.pid is not None and not same_process:
self.log.warning("Recorded pid %s does not match " "the current pid %s", ti.pid, current_pid)
raise AirflowException("PID of job runner does not match")
elif self.task_runner.return_code() is None and hasattr(self.task_runner, 'process'):
self.log.warning(
"State of this instance has been externally set to %s. " "Terminating instance.", ti.state
)
self.task_runner.terminate()
if ti.state == State.SUCCESS:
error = None
else:
# if ti.state is not set by taskinstance.handle_failure, then
# error file will not be populated and it must be updated by
                # external source such as the web UI
error = self.task_runner.deserialize_run_error() or "task marked as failed externally"
ti._run_finished_callback(error=error) # pylint: disable=protected-access
self.terminating = True
|
apache-2.0
| 6,241,240,399,067,721,000 | 41.711538 | 109 | 0.605583 | false | 4.376355 | false | false | false |
ykoga-kyutech/nippou_proj
|
nippou_app/migrations/0001_initial.py
|
1
|
4531
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
import django.utils.timezone
import django.contrib.auth.models
import datetime
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('auth', '0006_require_contenttypes_0002'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('password', models.CharField(verbose_name='password', max_length=128)),
('last_login', models.DateTimeField(blank=True, verbose_name='last login', null=True)),
('is_superuser', models.BooleanField(help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status', default=False)),
('username', models.CharField(help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', verbose_name='username', validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], error_messages={'unique': 'A user with that username already exists.'}, unique=True, max_length=30)),
('first_name', models.CharField(blank=True, verbose_name='first name', max_length=30)),
('last_name', models.CharField(blank=True, verbose_name='last name', max_length=30)),
('email', models.EmailField(blank=True, verbose_name='email address', max_length=254)),
('is_staff', models.BooleanField(help_text='Designates whether the user can log into this admin site.', verbose_name='staff status', default=False)),
('is_active', models.BooleanField(help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active', default=True)),
('date_joined', models.DateTimeField(verbose_name='date joined', default=django.utils.timezone.now)),
('user_dev', models.CharField(verbose_name='所属', max_length=512)),
('groups', models.ManyToManyField(help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', blank=True, verbose_name='groups', related_query_name='user', to='auth.Group')),
('user_permissions', models.ManyToManyField(help_text='Specific permissions for this user.', related_name='user_set', blank=True, verbose_name='user permissions', related_query_name='user', to='auth.Permission')),
],
options={
'abstract': False,
'verbose_name_plural': 'users',
'verbose_name': 'user',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='nippou_data',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('title', models.CharField(verbose_name='タイトル', max_length=512)),
('text', models.TextField(verbose_name='本文')),
('date', models.DateTimeField(verbose_name='投稿日時', default=datetime.datetime.now)),
('open', models.BooleanField(verbose_name='公開', default=False)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('task_name', models.CharField(verbose_name='タスク名', max_length=512)),
('time_yotei', models.IntegerField(verbose_name='予定時間')),
('time_jitsu', models.IntegerField(verbose_name='実時間')),
('task_y', models.TextField(verbose_name='Y:やったこと')),
('task_w', models.TextField(verbose_name='W:わかったこと')),
('task_t', models.TextField(verbose_name='T:次やること')),
('nippou', models.ForeignKey(to='nippou_app.nippou_data')),
],
),
]
|
mit
| -7,798,382,819,515,398,000 | 62.557143 | 432 | 0.612272 | false | 4.063014 | false | false | false |
sapphon/minecraftpython
|
src/main/resources/assets/minecraftpython/scripts/base/items.py
|
1
|
4641
|
IRON_SHOVEL = 'iron_shovel'
IRON_PICKAXE = 'iron_pickaxe'
IRON_AXE = 'iron_axe'
FLINT_AND_STEEL = 'flint_and_steel'
APPLE = 'apple'
BOW = 'bow'
ARROW = 'arrow'
COAL = 'coal'
DIAMOND = 'diamond'
IRON_INGOT = 'iron_ingot'
GOLD_INGOT = 'gold_ingot'
IRON_SWORD = 'iron_sword'
WOODEN_SWORD = 'wooden_sword'
WOODEN_SHOVEL = 'wooden_shovel'
WOODEN_PICKAXE = 'wooden_pickaxe'
WOODEN_AXE = 'wooden_axe'
STONE_SWORD = 'stone_sword'
STONE_SHOVEL = 'stone_shovel'
STONE_PICKAXE = 'stone_pickaxe'
STONE_AXE = 'stone_axe'
DIAMOND_SWORD = 'diamond_sword'
DIAMOND_SHOVEL = 'diamond_shovel'
DIAMOND_PICKAXE = 'diamond_pickaxe'
DIAMOND_AXE = 'diamond_axe'
STICK = 'stick'
BOWL = 'bowl'
MUSHROOM_STEW = 'mushroom_stew'
GOLDEN_SWORD = 'golden_sword'
GOLDEN_SHOVEL = 'golden_shovel'
GOLDEN_PICKAXE = 'golden_pickaxe'
GOLDEN_AXE = 'golden_axe'
STRING = 'string'
FEATHER = 'feather'
GUNPOWDER = 'gunpowder'
WOODEN_HOE = 'wooden_hoe'
STONE_HOE = 'stone_hoe'
IRON_HOE = 'iron_hoe'
DIAMOND_HOE = 'diamond_hoe'
GOLDEN_HOE = 'golden_hoe'
WHEAT_SEEDS = 'wheat_seeds'
WHEAT = 'wheat'
BREAD = 'bread'
LEATHER_HELMET = 'leather_helmet'
LEATHER_CHESTPLATE = 'leather_chestplate'
LEATHER_LEGGINGS = 'leather_leggings'
LEATHER_BOOTS = 'leather_boots'
CHAINMAIL_HELMET = 'chainmail_helmet'
CHAINMAIL_CHESTPLATE = 'chainmail_chestplate'
CHAINMAIL_LEGGINGS = 'chainmail_leggings'
CHAINMAIL_BOOTS = 'chainmail_boots'
IRON_HELMET = 'iron_helmet'
IRON_CHESTPLATE = 'iron_chestplate'
IRON_LEGGINGS = 'iron_leggings'
IRON_BOOTS = 'iron_boots'
DIAMOND_HELMET = 'diamond_helmet'
DIAMOND_CHESTPLATE = 'diamond_chestplate'
DIAMOND_LEGGINGS = 'diamond_leggings'
DIAMOND_BOOTS = 'diamond_boots'
GOLDEN_HELMET = 'golden_helmet'
GOLDEN_CHESTPLATE = 'golden_chestplate'
GOLDEN_LEGGINGS = 'golden_leggings'
GOLDEN_BOOTS = 'golden_boots'
FLINT = 'flint'
PORKCHOP = 'porkchop'
COOKED_PORKCHOP = 'cooked_porkchop'
PAINTING = 'painting'
GOLDEN_APPLE = 'golden_apple'
SIGN = 'sign'
WOODEN_DOOR = 'wooden_door'
BUCKET = 'bucket'
WATER_BUCKET = 'water_bucket'
LAVA_BUCKET = 'lava_bucket'
MINECART = 'minecart'
SADDLE = 'saddle'
IRON_DOOR = 'iron_door'
REDSTONE = 'redstone'
SNOWBALL = 'snowball'
BOAT = 'boat'
LEATHER = 'leather'
MILK_BUCKET = 'milk_bucket'
BRICK = 'brick'
CLAY_BALL = 'clay_ball'
REEDS = 'reeds'
PAPER = 'paper'
BOOK = 'book'
SLIME_BALL = 'slime_ball'
CHEST_MINECART = 'chest_minecart'
FURNACE_MINECART = 'furnace_minecart'
EGG = 'egg'
COMPASS = 'compass'
FISHING_ROD = 'fishing_rod'
CLOCK = 'clock'
GLOWSTONE_DUST = 'glowstone_dust'
FISH = 'fish'
COOKED_FISHED = 'cooked_fished'
DYE = 'dye'
BONE = 'bone'
SUGAR = 'sugar'
CAKE = 'cake'
BED = 'bed'
REPEATER = 'repeater'
COOKIE = 'cookie'
FILLED_MAP = 'filled_map'
SHEARS = 'shears'
MELON = 'melon'
PUMPKIN_SEEDS = 'pumpkin_seeds'
MELON_SEEDS = 'melon_seeds'
BEEF = 'beef'
COOKED_BEEF = 'cooked_beef'
CHICKEN = 'chicken'
COOKED_CHICKEN = 'cooked_chicken'
ROTTEN_FLESH = 'rotten_flesh'
ENDER_PEARL = 'ender_pearl'
BLAZE_ROD = 'blaze_rod'
GHAST_TEAR = 'ghast_tear'
GOLD_NUGGET = 'gold_nugget'
NETHER_WART = 'nether_wart'
POTION = 'potion'
GLASS_BOTTLE = 'glass_bottle'
SPIDER_EYE = 'spider_eye'
FERMENTED_SPIDER_EYE = 'fermented_spider_eye'
BLAZE_POWDER = 'blaze_powder'
MAGMA_CREAM = 'magma_cream'
BREWING_STAND = 'brewing_stand'
CAULDRON = 'cauldron'
ENDER_EYE = 'ender_eye'
SPECKLED_MELON = 'speckled_melon'
SPAWN_EGG = 'spawn_egg'
EXPERIENCE_BOTTLE = 'experience_bottle'
FIRE_CHARGE = 'fire_charge'
WRITABLE_BOOK = 'writable_book'
WRITTEN_BOOK = 'written_book'
EMERALD = 'emerald'
ITEM_FRAME = 'item_frame'
FLOWER_POT = 'flower_pot'
CARROT = 'carrot'
POTATO = 'potato'
BAKED_POTATO = 'baked_potato'
POISONOUS_POTATO = 'poisonous_potato'
MAP = 'map'
GOLDEN_CARROT = 'golden_carrot'
SKULL = 'skull'
CARROT_ON_A_STICK = 'carrot_on_a_stick'
NETHER_STAR = 'nether_star'
PUMPKIN_PIE = 'pumpkin_pie'
FIREWORKS = 'fireworks'
FIREWORK_CHARGE = 'firework_charge'
ENCHANTED_BOOK = 'enchanted_book'
TRIPWIRE_HOOK = 'tripwire_hook'
COMPARATOR = 'comparator'
NETHERBRICK = 'netherbrick'
QUARTZ = 'quartz'
TNT_MINECART = 'tnt_minecart'
HOPPER_MINECART = 'hopper_minecart'
IRON_HORSE_ARMOR = 'iron_horse_armor'
GOLDEN_HORSE_ARMOR = 'golden_horse_armor'
DIAMOND_HORSE_ARMOR = 'diamond_horse_armor'
LEAD = 'lead'
NAME_TAG = 'name_tag'
COMMAND_BLOCK_MINECART = 'command_block_minecart'
RECORD_13 = 'record_13'
RECORD_CAT = 'record_cat'
RECORD_BLOCKS = 'record_blocks'
RECORD_CHIRP = 'record_chirp'
RECORD_FAR = 'record_far'
RECORD_MALL = 'record_mall'
RECORD_MELLOHI = 'record_mellohi'
RECORD_STAL = 'record_stal'
RECORD_STRAD = 'record_strad'
RECORD_WARD = 'record_ward'
RECORD_11 = 'record_11'
RECORD_WAIT = 'record_wait'
CL_00000044 = 'CL_00000044'
|
gpl-3.0
| 2,584,095,510,397,654,000 | 25.83237 | 49 | 0.721827 | false | 2.132813 | false | true | false |
rocky/python-spark
|
example/python2/py2_parser.py
|
1
|
20426
|
# Copyright (c) 2016-2017 Rocky Bernstein
"""
More complex expression parsing
"""
# from __future__ import print_function
import sys
from spark_parser.ast import AST
from py2_scan import Python2Scanner, ENDMARKER
from spark_parser import GenericASTBuilder
DEFAULT_DEBUG = {'rules': False, 'transition': False, 'reduce' : False,
'errorstack': 'full', 'context': True, 'dups': True}
class PythonParser(GenericASTBuilder):
"""A more complete spark example: a Python 2 Parser.
Note: function parse() comes from GenericASTBuilder
"""
def __init__(self, start='file_input', debug=DEFAULT_DEBUG):
super(PythonParser, self).__init__(AST, start, debug=debug)
self.start = start
self.debug = debug
# Put left-recursive list non-terminals:
# x ::= x y
# x ::=
self.collect = frozenset(('stmts', 'comments', 'dot_names', 'dots',
'comp_op_exprs', 'newline_or_stmts',
'comma_names', 'comma_fpdef_opt_eqtests',)
)
def debug_reduce(self, rule, tokens, parent, i):
"""Customized format and print for our kind of tokens
which gets called in debugging grammar reduce rules
"""
prefix = ' '
if parent and tokens:
p_token = tokens[parent]
if hasattr(p_token, 'line'):
prefix = 'L.%3d.%03d: ' % (p_token.line, p_token.column)
pass
pass
print("%s%s ::= %s" % (prefix, rule[0], ' '.join(rule[1])))
def nonterminal(self, nt, args):
# nonterminal with a (reserved) single word derivation
no_skip = ('pass_stmt', 'continue_stmt', 'break_stmt', 'return_stmt')
has_len = hasattr(args, '__len__')
if nt in self.collect and len(args) > 1:
#
# Collect iterated thingies together.
#
rv = args[0]
for arg in args[1:]:
rv.append(arg)
elif (has_len and len(args) == 1 and
hasattr(args[0], '__len__') and args[0] not in no_skip and
len(args[0]) == 1):
# Remove singleton derivations
rv = GenericASTBuilder.nonterminal(self, nt, args[0])
del args[0] # save memory
elif (has_len and len(args) == 2 and
hasattr(args[1], '__len__') and len(args[1]) == 0):
# Remove trailing epsilon rules, but only when there
# are two items.
if hasattr(args[0], '__len__') and len(args[0]) == 1:
# Remove singleton derivation
rv = args[0]
else:
rv = GenericASTBuilder.nonterminal(self, nt, args[:1])
del args[1] # save memory
else:
rv = GenericASTBuilder.nonterminal(self, nt, args)
return rv
##########################################################
# Python 2 grammar rules. Grammar rule functions
# start with the name p_ and are collected automatically
##########################################################
def p_python_grammar(self, args):
'''
### Note: comment rules that start ## are rules from python26.gr
## We use them to assist checking translation to a SPARK-format grammar.
single_input ::= NEWLINE
single_input ::= simple_stmt
single_input ::= compound_stmt NEWLINE
file_input ::= newline_or_stmts ENDMARKER
newline_or_stmts ::= newline_or_stmt*
# Grammar uses NEWLINE instead of 'sep', but ; does separate statements.
# The grammar is vague on how NEWLINE, INDENT, and DEDENT are computed.
newline_or_stmt ::= sep
newline_or_stmt ::= stmt_plus
newline_or_stmt ::= comment sep
stmts ::= stmt*
stmts ::= stmt sep
stmt_plus ::= stmt+
eval_input ::= testlist newlines ENDMARKER
newlines ::= NEWLINE+
decorator ::= AT dotted_name arglist_opt NEWLINE
arglist_opt ::= arglist?
## arglist ::= (argument ',')*
## (argument [','] | '*' test (',' argument)* [',' '**' test] | '**' test)
arglist ::= argument_commas arglist2
argument_commas ::= argument_commas argument_comma
argument_commas ::=
argument_comma ::= argument COMMA
## (argument [','] | '*' test (',' argument)* [',' '**' test] | '**' test)
arglist2 ::= argument comma_opt
arglist2 ::= START test comma_arguments comma_starstar_test_opt
arglist2 ::= STARSTAR test
comma_arguments ::= comma_argument*
comma_argument ::= COMMA argument
comma_starstar_test_opt ::= COMMA STARSTAR test
comma_starstar_test_opt ::=
## Really [keyword '='] test
## argument ::= test [gen_for] | test '=' test
argument ::= test gen_for_opt
argument ::= test EQUAL test
## list_iter ::= list_for | list_if
list_iter ::= list_for
list_iter ::= list_if
## list_for ::= 'for' exprlist 'in' testlist_safe [list_iter]
list_for ::= FOR exprlist IN testlist_safe list_iter_opt
list_iter_opt ::= list_iter?
## list_if ::= 'if' old_test [list_iter]
list_if ::= IF old_test list_iter_opt
gen_for_opt ::= gen_for?
## gen_iter ::= gen_for | gen_if
gen_iter ::= gen_for
gen_iter ::= gen_if
## gen_for ::= 'for' exprlist 'in' or_test [gen_iter]
gen_for ::= FOR exprlist IN or_test gen_iter_opt
gen_iter_opt ::= gen_iter?
## gen_if ::= 'if' old_test [gen_iter]
gen_if ::= IF old_test gen_iter_opt
## testlist1 ::= test (',' test)*
testlist1 ::= test comma_tests
decorators ::= decorator+
decorated ::= decorators classdef_or_funcdef
classdef_or_funcdef ::= classdef
classdef_or_funcdef ::= funcdef
funcdef ::= DEF NAME parameters COLON suite
parameters ::= LPAREN varargslist_opt RPAREN
varargslist_opt ::= varargslist?
# FILL IN
        ## varargslist ::= (fpdef ['=' test] ',')* ('*' NAME [',' '**' NAME] | '**' NAME)
## varargslist ::= fpdef ['=' test] (',' fpdef ['=' test])* [',']
varargslist ::= fpdef eq_test_opt comma_fpdef_opt_eqtests comma_opt
## (',' fpdef ['=' test])*
comma_fpdef_opt_eqtests ::= comma_fpdef_opt_eqtests COMMA fpdef eq_test_opt
comma_fpdef_opt_eqtests ::=
star_names ::= star_names STAR NAME star_star_opt
star_names ::= star_names star_star_opt
star_names ::=
eq_tests ::= eq_tests eq_test
eq_tests ::=
eq_test_opt ::= eq_test?
eq_test ::= EQUAL test
star_star_opt ::= COMMA STAR_STAR NAME
star_star_opt ::=
## fpdef ::= NAME | '(' fplist ')'
fpdef ::= NAME
fpdef ::= LPAREN fplist RPAREN
## fplist ::= fpdef (',' fpdef)* [',']
fplist ::= fpdef fplist1 comma_opt
## (',' fpdef)* [',']
fplist1 ::= fplist COMMA fpdef
fplist1 ::=
comma_opt ::= COMMA?
stmt ::= simple_stmt
stmt ::= compound_stmt
simple_stmt ::= small_stmt
small_stmt ::= expr_stmt
small_stmt ::= print_stmt
small_stmt ::= del_stmt
small_stmt ::= pass_stmt
small_stmt ::= flow_stmt
small_stmt ::= import_stmt
small_stmt ::= global_stmt
small_stmt ::= exec_stmt
small_stmt ::= assert_stmt
## expr_stmt ::= testlist (augassign (yield_expr|testlist)
## | ('=' (yield_expr|testlist))*)
expr_stmt ::= testlist AUGASSIGN yield_expr_or_testlist
expr_stmt ::= testlist EQUAL yield_expr_or_testlists
yield_expr_or_testlists ::= yield_expr_or_testlists yield_expr_or_testlist
yield_expr_or_testlists ::= yield_expr_or_testlist
yield_expr_or_testlist ::= yield_expr
yield_expr_or_testlist ::= testlist
## yield_expr ::= 'yield' [testlist]
yield_expr ::= YIELD testlist_opt
print_stmt ::= PRINT test_params_or_redirect
test_params_or_redirect ::= test comma_test_opt comma_opt
        # FIXME: go over. Not quite right, as there is one or more...
test_params_or_redirect ::= REDIRECT test comma_test_opt comma_opt
comma_test_opt ::= COMMA test
comma_test_opt ::=
del_stmt ::= DEL exprlist
pass_stmt ::= PASS
flow_stmt ::= break_stmt
flow_stmt ::= continue_stmt
flow_stmt ::= return_stmt
flow_stmt ::= raise_stmt
flow_stmt ::= yield_stmt
break_stmt ::= BREAK
continue_stmt ::= CONTINUE
# return_stmt ::= 'return' [testlist]
return_stmt ::= RETURN testlist_opt
testlist_opt ::= testlist?
yield_stmt ::= yield_expr
raise_stmt ::= RAISE test_opt3
test_opt3 ::= test COMMA test COMMA test
test_opt3 ::= test COMMA test
test_opt3 ::= test
global_stmt ::= GLOBAL NAME comma_names
comma_names ::= comma_name*
comma_name ::= COMMA NAME
exec_stmt ::= EXEC expr
exec_stmt ::= EXEC expr IN test
exec_stmt ::= EXEC expr IN test COMMA test
assert_stmt ::= ASSERT test
assert_stmt ::= ASSERT test COMMA test
test_opt ::= test?
## exprlist ::= expr (',' expr)* [',']
exprlist ::= expr comma_exprs comma_opt
## (',' expr)*
comma_exprs ::= comma_exprs COMMA expr
comma_exprs ::=
# testlist ::= test (',' test)* [',']
testlist ::= test comma_tests comma_opt
# (',' test)*
comma_tests ::= comma_tests COMMA test
comma_tests ::=
## Backward compatibility cruft to support:
## [ x for x in lambda : True, lambda : False if x() ]
## even while also allowing:
## lambda x : 5 if x else 2
## (But not a mix of the two)
## testlist_safe ::= old_test [(',' old_test)+ [',']]
testlist_safe ::= old_test testlist_safe1_opt
testlist_safe1_opt ::= comma_old_tests comma_opt
testlist_safe1_opt ::=
## (',' old_test)+
comma_old_tests ::= comma_old_tests comma_old_test
comma_old_tests ::= comma_old_test
comma_old_test ::= COMMA old_test
## old_test ::= or_test | old_lambdef
old_test ::= or_test
old_test ::= old_lambdef
## old_lambdef ::= 'lambda' [varargslist] ':' old_test
old_lambdef ::= LAMBDA varargslist_opt COLON old_test
test ::= or_test IF or_test ELSE test
test ::= or_test
test ::= lambdef
or_test ::= and_test or_and_tests
## ('or' and_test)*
or_and_tests ::= or_and_test*
or_and_test ::= OR and_test
## and_test ::= not_test ('and' not_test)*
and_test ::= not_test and_not_tests
## ('and' not_test)*
and_not_tests ::= and_not_tests AND not_test
and_not_tests ::=
## not_test ::= 'not' not_test | comparison
not_test ::= NOT not_test
not_test ::= comparison
## comparison ::= expr (comp_op expr)*
comparison ::= expr comp_op_exprs
## (comp_op expr)*
comp_op_exprs ::= comp_op_exprs comp_op expr
comp_op_exprs ::=
comp_op ::= COMP_OP
comp_op ::= IN
comp_op ::= IS
comp_op ::= IS NOT
# Condensation of this
## expr ::= xor_expr ('|' xor_expr)*
## xor_expr ::= and_expr ('^' and_expr)*
## and_expr ::= shift_expr ('&' shift_expr)*
## shift_expr ::= arith_expr (('<<'|'>>') arith_expr)*
## arith_expr ::= term (('+'|'-') term)*
## term ::= factor (('*'|'/'|'%'|'//') factor)*
        ## We don't care about operator precedence
expr ::= factor binop_arith_exprs
binop_arith_exprs ::= binop_arith_exprs binop factor
binop_arith_exprs ::=
binop ::= BINOP
binop ::= PLUS
binop ::= MINUS
binop ::= STAR
## factor ::= ('+'|'-'|'~') factor | power
factor ::= op_factor factor
factor ::= power
op_factor ::= PLUS
op_factor ::= MINUS
op_factor ::= TILDE
power ::= atom trailers starstar_factor_opt
## atom ::= ('(' [yield_expr|testlist_gexp] ')' | '[' [listmaker] ']'
## | '{' [dictmaker] '}' | '`' testlist1 '`'
## | NAME | NUMBER | STRING+)
atom ::= LPAREN yield_expr_or_testlist_gexp_opt RPAREN
atom ::= LBRACKET listmaker_opt RBRACKET
atom ::= LBRACE dictmaker_opt RBRACE
atom ::= BACKTICK testlist1 BACKTICK
atom ::= NUMBER
atom ::= NAME
atom ::= strings
dictmaker_opt ::= dictmaker?
## [yield_expr|testlist_gexp]
yield_expr_or_testlist_gexp_opt ::= yield_expr
yield_expr_or_testlist_gexp_opt ::= testlist_gexp
yield_expr_or_testlist_gexp_opt ::=
listmaker_opt ::= listmaker?
## listmaker ::= test ( list_for | (',' test)* [','] )
listmaker ::= test list_for_or_comma_tests_comma_opt
list_for_or_comma_tests_comma_opt ::= list_for
list_for_or_comma_tests_comma_opt ::= comma_tests comma_opt
## testlist_gexp ::= test ( gen_for | (',' test)* [','] )
testlist_gexp ::= test gen_for_or_comma_tests_comma_opt
gen_for_or_comma_tests_comma_opt ::= gen_for
gen_for_or_comma_tests_comma_opt ::= comma_tests comma_opt
lambdef ::= LAMBDA varargslist_opt COLON test
trailers ::= trailer*
## trailer ::= '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
trailer ::= LPAREN arglist_opt RPAREN
trailer ::= LBRACKET subscriptlist RBRACKET
trailer ::= DOT NAME
## subscriptlist ::= subscript (',' subscript)* [',']
subscriptlist ::= subscript comma_subscripts comma_opt
## (',' subscript)*
comma_subscripts ::= comma_subscripts comma_subscript
comma_subscripts ::=
## ',' subscript
comma_subscript ::= COMMA subscript
## subscript ::= '.' '.' '.' | test | [test] ':' [test] [sliceop]
subscript ::= DOT DOT DOT
subscript ::= test
subscript ::= test_opt COLON test_opt sliceop_opt
sliceop_opt ::= sliceop?
## sliceop ::= ':' [test]
sliceop ::= COLON test_opt
starstar_factor_opt ::= STARSTAR factor
starstar_factor_opt ::=
## dictmaker ::= test ':' test (',' test ':' test)* [',']
dictmaker ::= test COLON comma_test_colon_tests comma_opt
## (',' test ':' test)*
comma_test_colon_tests ::= comma_test_colon_tests comma_test_colon_test
comma_test_colon_tests ::=
## (',' test ':' test)
comma_test_colon_test ::= COMMA test COLON test
classdef ::= CLASS NAME class_subclass_opt COLON suite
class_subclass_opt ::= LPAREN testlist_opt RPAREN
class_subclass_opt ::=
strings ::= STRING+
sep ::= comments
sep ::= NEWLINE
sep ::= SEMICOLON
comments ::= comment+
comment ::= COMMENT
comment ::= COMMENT NEWLINE
'''
# Import-related grammar
def p_import(self, args):
"""
## import_stmt ::= import_name | import_from
import_stmt ::= import_name
import_stmt ::= import_from
## import_name ::= IMPORT dotted_as_names
import_name ::= IMPORT dotted_as_names
## import_from ::= ('from' ('.'* dotted_name | '.'+)
## 'import' ('*' | '(' import_as_names ')' | import_as_names))
import_from ::= FROM dots_dotted_name_or_dots import_list
import_as_name ::= NAME
import_as_name ::= NAME AS NAME
dotted_as_name ::= dotted_name
dotted_as_name ::= dotted_name AS NAME
dots_dotted_name_or_dots ::= dots dotted_name
dots_dotted_name_or_dots ::= DOT dots
dots ::= DOT*
## 'import' ('*' | '(' import_as_names ')' | import_as_names))
import_list ::= IMPORT STAR
import_list ::= IMPORT LPAREN import_as_names RPAREN
import_list ::= IMPORT import_as_names
## import_as_names ::= import_as_name ((',' import_as_name)+\) [',']
# Note: we don't do the opt comma at the end
import_as_names ::= import_as_name comma_import_as_names
## (',' import_as_name)+
comma_import_as_names ::= comma_import_as_names comma_import_as_name
comma_import_as_names ::=
## ',' import_as_name
comma_import_as_name ::= COMMA import_as_name
comma_dotted_as_names ::= dotted_as_name+
dotted_as_names ::= dotted_as_name comma_dotted_as_names
comma_dotted_as_names ::= comma_dotted_as_names COMMA dotted_as_name
comma_dotted_as_names ::=
dotted_name ::= NAME dot_names
dot_names ::= dot_names DOT NAME
dot_names ::=
"""
def p_compund_stmt(self, args):
"""
compound_stmt ::= if_stmt
compound_stmt ::= while_stmt
compound_stmt ::= for_stmt
compound_stmt ::= try_stmt
compound_stmt ::= with_stmt
compound_stmt ::= funcdef
compound_stmt ::= classdef
compound_stmt ::= decorated
if_stmt ::= IF test COLON suite elif_suites else_suite_opt
if_stmt ::= IF test COLON NEWLINE suite elif_suites else_suite_opt
elif_suites ::= elif_suites ELIF test COLON suite
elif_suites ::=
else_suite_opt ::= ELSE COLON suite
else_suite_opt ::=
## while_stmt ::= 'while' test ':' suite ['else' ':' suite]
while_stmt ::= WHILE test COLON suite else_suite_opt
## for_stmt ::= 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
for_stmt ::= FOR exprlist IN testlist COLON suite else_colon_suite_opt
## ['else' ':' suite]
else_colon_suite_opt ::= ELSE COLON suite
else_colon_suite_opt ::=
## try_stmt ::= ('try' ':' suite
## ((except_clause ':' suite)+
## ['else' ':' suite]
## ['finally' ':' suite] |
## 'finally' ':' suite))
## with_stmt ::= with' test [ with_var ] ':' suite
with_stmt ::= WITH test with_var_opt COLON suite
with_var_opt ::= with_var?
## with_var ::= 'as' expr
with_var ::= AS expr
suite ::= stmt_plus
suite ::= NEWLINE indent stmt_plus NEWLINE DEDENT
suite ::= NEWLINE indent stmt_plus DEDENT
indent ::= INDENT comments
indent ::= INDENT
"""
def parse_python2(python_stmts, start='file_input',
show_tokens=False, parser_debug=DEFAULT_DEBUG, check=False):
assert isinstance(python_stmts, str)
tokens = Python2Scanner().tokenize(python_stmts)
if show_tokens:
for t in tokens:
print(t)
# For heavy grammar debugging:
# parser_debug = {'rules': True, 'transition': True, 'reduce': True,
# 'errorstack': 'full', 'context': True, 'dups': True}
# Normal debugging:
# parser_debug = {'rules': False, 'transition': False, 'reduce': True,
# 'errorstack': 'full', 'context': True, 'dups': True}
parser = PythonParser(start=start, debug=parser_debug)
if check:
parser.check_grammar()
return parser.parse(tokens)
if __name__ == '__main__':
if len(sys.argv) == 1:
for python2_stmts in (
# # "if True: pass",
# """
# while True:
# if False:
# continue
# """,
# "if True: pass",
"""return f()""",
):
print(python2_stmts)
print('-' * 30)
ast = parse_python2(python2_stmts + ENDMARKER,
start='file_input', show_tokens=False, check=True)
print(ast)
print('=' * 30)
else:
python2_stmts = " ".join(sys.argv[1:])
parse_python2(python2_stmts, show_tokens=False, check=True)
|
mit
| -2,205,583,393,401,705,700 | 31.015674 | 88 | 0.527808 | false | 3.892149 | true | false | false |
maru/fiubar
|
fiubar/config/settings/local.py
|
1
|
2473
|
# -*- coding: utf-8 -*-
"""
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
"""
import socket
from django.contrib.messages import constants as message_constants
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = get_secret('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/2.0/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = get_secret('DJANGO_SECRET_KEY',
default='Z&r}+t&ZTLV`*M3`i|50FWCPWfdyuPigh8')
# DATABASE CONFIGURATION
DATABASES['default'] = get_secret('DATABASE_DEFAULT', DATABASES['default'])
MESSAGE_LEVEL = message_constants.DEBUG
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_PORT = 1025
EMAIL_HOST = 'localhost'
EMAIL_BACKEND = get_secret('DJANGO_EMAIL_BACKEND',
'django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware']
INSTALLED_APPS += ['debug_toolbar']
INTERNAL_IPS = ['127.0.0.1', ]
# tricks to have debug toolbar when developing with docker
if get_secret('USE_DOCKER', default='no') == 'yes':
ip = socket.gethostbyname(socket.gethostname())
INTERNAL_IPS += [ip[:-1] + "1"]
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
ACCOUNT_DEFAULT_HTTP_PROTOCOL = get_secret('ACCOUNT_DEFAULT_HTTP_PROTOCOL',
default='http')
# ACCOUNT_ADAPTER = 'fiubar.models.SignupClosedAdapter'
ALLOWED_HOSTS = get_secret('DJANGO_ALLOWED_HOSTS',
default=['127.0.0.1', 'localhost'])
|
mit
| -7,611,459,301,191,556,000 | 29.158537 | 80 | 0.534169 | false | 4.101161 | false | false | false |
NicovincX2/Python-3.5
|
Génie logiciel/Architecture logicielle/Patron de conception/Patron de structure/flyweight.py
|
1
|
2973
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""http://codesnipers.com/?q=python-flyweights"""
import weakref
class FlyweightMeta(type):
def __new__(mcs, name, parents, dct):
"""
:param name: class name
:param parents: class parents
:param dct: dict: includes class attributes, class methods,
static methods, etc
:return: new class
"""
# set up instances pool
dct['pool'] = weakref.WeakValueDictionary()
return super(FlyweightMeta, mcs).__new__(mcs, name, parents, dct)
@staticmethod
def _serialize_params(cls, *args, **kwargs):
"""Serialize input parameters to a key.
Simple implementation is just to serialize it as a string
"""
args_list = map(str, args)
args_list.extend([str(kwargs), cls.__name__])
key = ''.join(args_list)
return key
def __call__(cls, *args, **kwargs):
key = FlyweightMeta._serialize_params(cls, *args, **kwargs)
pool = getattr(cls, 'pool', {})
instance = pool.get(key)
if not instance:
instance = super(FlyweightMeta, cls).__call__(*args, **kwargs)
pool[key] = instance
return instance
class Card(object):
"""The object pool. Has builtin reference counting"""
_CardPool = weakref.WeakValueDictionary()
"""Flyweight implementation. If the object exists in the
pool just return it (instead of creating a new one)"""
def __new__(cls, value, suit):
obj = Card._CardPool.get(value + suit)
if not obj:
obj = object.__new__(cls)
Card._CardPool[value + suit] = obj
obj.value, obj.suit = value, suit
return obj
# def __init__(self, value, suit):
# self.value, self.suit = value, suit
def __repr__(self):
return "<Card: %s%s>" % (self.value, self.suit)
class Card2(object):
__metaclass__ = FlyweightMeta
def __init__(self, *args, **kwargs):
# print('Init {}: {}'.format(self.__class__, (args, kwargs)))
pass
if __name__ == '__main__':
# comment __new__ and uncomment __init__ to see the difference
c1 = Card('9', 'h')
c2 = Card('9', 'h')
print(c1, c2)
print(c1 == c2, c1 is c2)
print(id(c1), id(c2))
c1.temp = None
c3 = Card('9', 'h')
print(hasattr(c3, 'temp'))
c1 = c2 = c3 = None
c3 = Card('9', 'h')
print(hasattr(c3, 'temp'))
# Tests with metaclass
instances_pool = getattr(Card2, 'pool')
cm1 = Card2('10', 'h', a=1)
cm2 = Card2('10', 'h', a=1)
cm3 = Card2('10', 'h', a=2)
assert (cm1 == cm2) != cm3
assert (cm1 is cm2) is not cm3
assert len(instances_pool) == 2
del cm1
assert len(instances_pool) == 2
del cm2
assert len(instances_pool) == 1
del cm3
assert len(instances_pool) == 0
### OUTPUT ###
# (<Card: 9h>, <Card: 9h>)
# (True, True)
# (31903856, 31903856)
# True
# False
|
gpl-3.0
| -3,190,265,489,521,152,500 | 24.62931 | 74 | 0.557686 | false | 3.329227 | false | false | false |
drivefast/pycipherwallet
|
example/http_router.py
|
1
|
3024
|
import time
import bottle
import bcrypt
import sqlalchemy
from sqlalchemy.sql import text as sql_statement
import cipherwallet.api_router
ROOT = '/path/to/pycipherwallet/example'
@bottle.route('/<folder:re:css>/<filename:re:.*\.css>')
@bottle.route('/<folder:re:js>/<filename:re:.*\.js>')
@bottle.route('/<folder:re:img>/<filename:re:.*\.(png|jpg|ico)>')
def static_css(folder, filename):
return bottle.static_file(folder + "/" + filename, root=ROOT)
@bottle.route('/<filename:re:.*\.html>')
def static(filename):
return bottle.static_file(filename, root=ROOT)
@bottle.route('/js/cipherwallet.js')
def cipherwalletjs():
return bottle.static_file("js/cipherwallet.js", root=ROOT)
@bottle.post('/user/<user_id>')
def create_user(user_id):
"""
This sample web service is created to look similar to what is called with a POST method
by your signup web page when the user presses the "create user" submit button. Form
data is POSTed from the signup page.
If data signup page data was loaded from the mobile app (QR code scanning), we also
register the user to use cipherwallet (QR code scanning) for the logins
This should mostly be *your* procedure to create an user record, and should work regardless
of whether cipherwallet is active or not
"""
try:
# connect to the database (normally, the cipherwallet sdk will connect to the same database)
# we use a sqlite database here as an example
db_engine = sqlalchemy.create_engine('sqlite:///your.db', echo=True)
db = db_engine.connect()
except:
bottle.abort(503, "Service Unavailable")
# make sure we have valid data
firstname = bottle.request.POST.get('firstname', "").strip()
password1 = bottle.request.POST.get('password1', "").strip()
if (
user_id is None or len(user_id) < 5 or len(user_id) > 64 or
len(firstname) < 1 or len(firstname) > 64 or
len(password1) < 5 or len(password1) > 64
):
bottle.abort(400, "Bad Request")
# encrypt the password (you DO store the passwords in encrypted form, dont you)
password = bcrypt.hashpw(password1, bcrypt.gensalt())
# if the user already exists, delete it
# (obviously, you wouldn't do that on your real website)
db.execute(
sql_statement("DELETE FROM users WHERE email = :user_id;"),
user_id=user_id
)
# now add the user
ret = db.execute(
sql_statement(
"INSERT INTO users(firstname, email, password, created_on) " +
"VALUES (:firstname, :email, :password, :now);"
),
firstname=firstname,
email=user_id,
password=password,
now=time.time()
)
if ret.rowcount:
return {
'firstname': firstname,
'email': user_id,
}
else:
bottle.abort(503, "Service Unavailable")
if __name__ == "__main__":
bottle.debug(True)
bottle.run(host="127.0.0.1", port=8070, reloader=True)
|
mit
| -8,174,461,802,536,404,000 | 34.576471 | 100 | 0.642526 | false | 3.669903 | false | false | false |
zhlinh/leetcode
|
0038.Count and Say/solution.py
|
1
|
1175
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
*****************************************
Author: zhlinh
Email: [email protected]
Version: 0.0.1
Created Time: 2016-02-17
Last_modify: 2016-02-17
******************************************
'''
'''
The count-and-say sequence is the sequence of integers beginning as follows:
1, 11, 21, 1211, 111221, ...
1 is read off as "one 1" or 11.
11 is read off as "two 1s" or 21.
21 is read off as "one 2, then one 1" or 1211.
Given an integer n, generate the nth sequence.
Note: The sequence of integers will be represented as a string.
'''
class Solution(object):
def countAndSay(self, n):
"""
:type n: int
:rtype: str
"""
curr = "1"
for i in range(n - 1):
count = 1
prev = curr
say = prev[0]
curr = ""
for j in range(1, len(prev)):
if prev[j] == say:
count += 1
else:
curr += str(count) + say
count = 1
say = prev[j]
curr += str(count) + say
return curr
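# Illustrative self-check (added sketch, not part of the original solution):
# the expected values below simply restate the sequence from the docstring above.
if __name__ == '__main__':
    solution = Solution()
    for n, expected in [(1, "1"), (2, "11"), (3, "21"), (4, "1211"), (5, "111221")]:
        assert solution.countAndSay(n) == expected
    print(solution.countAndSay(6))  # -> 312211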
|
apache-2.0
| -3,460,842,501,020,716,500 | 25.111111 | 76 | 0.455319 | false | 3.706625 | false | false | false |
vitorio/ocropodium
|
ocradmin/nodelib/nodes/cuneiform.py
|
1
|
1938
|
"""
Cuneiform Recogniser
"""
from __future__ import absolute_import
import os
import codecs
import shutil
import tempfile
import subprocess as sp
import numpy
from nodetree import node
from . import base
from .. import stages, types, utils
class CuneiformRecognizer(base.CommandLineRecognizerNode):
"""
Recognize an image using Cuneiform.
"""
binary = "cuneiform"
stage = stages.RECOGNIZE
intypes = [numpy.ndarray]
parameters = [
dict(name="single_column", type="bool", value=False)
]
def get_command(self, outfile, image):
"""
Cuneiform command line. Simplified for now.
"""
args = [self.binary, "-f", "hocr", "-o", outfile]
if self._params.get("single_column", False):
args.extend(["--singlecolumn"])
return args + [image]
def process(self, binary):
"""
Convert a full page.
"""
hocr = None
with tempfile.NamedTemporaryFile(delete=False) as tmp:
tmp.close()
with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as btmp:
btmp.close()
self.write_binary(btmp.name, binary)
args = self.get_command(tmp.name, btmp.name)
self.logger.debug("Running: '%s'", " ".join(args))
proc = sp.Popen(args, stderr=sp.PIPE)
err = proc.stderr.read()
if proc.wait() != 0:
print err
return u"!!! %s CONVERSION ERROR %d: %s !!!" % (
os.path.basename(self.binary).upper(),
proc.returncode, err)
with codecs.open(tmp.name, "r", "utf8") as tread:
hocr = tread.read()
os.unlink(tmp.name)
os.unlink(btmp.name)
utils.set_progress(self.logger, self.progress_func, 100, 100)
return hocr
|
apache-2.0
| -6,946,050,462,958,837,000 | 27.925373 | 82 | 0.54644 | false | 3.923077 | false | false | false |
adamgilman/tourbillon
|
tourbillon/streams.py
|
1
|
1427
|
from datetime import datetime
from threading import Timer
class TourbillonStream(object):
def __init__(self, tb, stream_name):
self.tb = tb
self.r = self.tb.r
self.stream_name = stream_name
self.channel = None
# self.message_queue
self.halt_next = False
self.seconds_delay = 1
def add(self, tick_tuple):
if type(tick_tuple) is not tuple:
raise Exception("Tick data must be a tuple (datetime, data)")
if type(tick_tuple[0]) is not datetime:
raise Exception("Tick data must be a tuple (datetime, data)")
self.r.rpush(self.stream_name, tick_tuple)
def format_message(self, message):
return "%s: %s" % (self.stream_name, message)
def length(self):
if self.channel is not None:
return self.r.llen(self.stream_name)
else:
return None
def output_channel(self, output_channel):
self.channel = output_channel
def announce(self, message):
self.r.publish(self.channel, message)
def set_delay(self, seconds_delay):
self.seconds_delay = seconds_delay
def start(self):
if self.channel is None:
raise Exception("Channel must be set before starting")
self.queueNextEmit()
def stop(self):
self.halt_next = True
def queueNextEmit(self):
self.timer = Timer(self.seconds_delay, self.emitter)
self.timer.start()
def emitter(self):
#self.announce("test emitter")
self.announce( self.r.lpop(self.stream_name) )
if not self.halt_next:
self.queueNextEmit()
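# Illustrative usage sketch (added, not part of the original module). It assumes
# the `tb` argument only needs an `r` attribute holding a redis client, which is
# how it is used above; the stream and channel names here are made up.
def _example_stream_usage():
	import redis
	tb = type('TB', (object,), {})()
	tb.r = redis.StrictRedis()
	stream = TourbillonStream(tb, "example_stream")
	stream.output_channel("example_channel")
	stream.set_delay(2)
	stream.add((datetime.now(), 21.5))
	stream.start()
	return stream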
|
mit
| 9,180,900,712,580,681,000 | 22.8 | 64 | 0.704275 | false | 2.979123 | false | false | false |
cmos3511/cmos_linux
|
python/op/nop/core/report/report.py
|
1
|
5302
|
"""
Generate report module of OnePiece Platform
Usage:
a) dic :
1. instantiation
2. call method: read_dic
b) json :
1. instantiation
2. call method: read_json
API:
Input: gen_rpt_path
gen_rpt_path: generate report path
Ouput:
1) stdout: show the report on the stdout-screen
2) log: save the report to the specified file(.rpt)
1.json file and dic format:
{"R1": {"C1": "V11",
"C2": "V12"},
"R2": {"C1": "V21",
"C2": "V22"},
"R3": {"C1": "V31",
"C2": "V32"},
}
2. generated ASCII table example:
        ------------------------
        |      |  C1   |  C2   |
        ------------------------
        |  R1  |  V11  |  V12  |
        ------------------------
        |  R2  |  V21  |  V22  |
        ------------------------
        |  R3  |  V31  |  V32  |
        ------------------------
"""
import os
import json
import math
import texttable
from utils import pcom
LOG = pcom.gen_logger(__name__)
class Report:
"""generate specified ASCII table"""
def __init__(self, rpt_path):
"""rpt_path: generated report file path
self._data_dic: generated table need data"""
self._rpt_path = rpt_path
self._data_dic = {}
def _set_dic(self, data_dic):
"""set the report need dic data"""
self._data_dic = data_dic
@classmethod
def _check_json(cls, json_path):
"""check if the given json file exists?"""
if os.path.exists(json_path):
return True
LOG.warning("The json file %s used to generate report does not exist", json_path)
return False
def _gen_rpt_dir(self):
"""if not exists: mkdir generated report dir
else: pass
"""
base_dir = os.path.dirname(os.path.abspath(self._rpt_path))
pcom.mkdir(LOG, base_dir)
@classmethod
def _gen_porper_width(cls, lst):
"""Generate the appropriate width based on the list of column
['lavall', 'kevinf', 'wyatt_wang', 'guanyu']
--> [6, 6, 10, 6]
title = 6(first)
average = 28 / 4 = 7(sum/num)
1) if average >= title: return average
2) else: return title
--> return 7
"""
width_lst = [len(item) for item in lst]
title_width = width_lst[0]
average_width = sum(width_lst) / len(width_lst)
average_width = math.ceil(average_width)
if average_width >= title_width:
return average_width
return title_width
def _auto_adjust_col_width(self, nested_lst):
"""optimize texttable's output
        Get the generated ASCII table's column width list based on
the nested list.
The nested list is the texttable needed data
"""
col_width_lst = []
for index, _ in enumerate(nested_lst[0]):
tmp_lst = []
for lst in nested_lst:
tmp_lst.append(lst[index])
col_width_lst.append(self._gen_porper_width(tmp_lst))
return col_width_lst
def _work_json_data(self):
"""convert json dic data to list type
json format:
{"R1": {"C1": "V11",
"C2": "V12"},
"R2": {"C1": "V21",
"C2": "V22"},
"R3": {"C1": "V31"},
"C2": "V32"}
nested list format:
[['', 'C1', 'C2'],
['R1', 'V11', 'V12'],
['R2', 'V21', 'V22'],
['R3', 'V31', 'V32']]
"""
data_lst = []
row_lst = list(self._data_dic.keys())
col_lst = []
col_lst.insert(0, '')
col_lst.extend(self._data_dic[row_lst[0]])
data_lst.append(col_lst)
for row_title in row_lst:
tmp_lst = [row_title,]
for col_title in self._data_dic[row_title].keys():
tmp_lst.append(self._data_dic[row_title][col_title])
data_lst.append(tmp_lst)
return data_lst
def _gen_table(self):
"""generate the proper ASCII table by texttable
in: the specified dic format
out: the ASCII table
"""
data_lst = self._work_json_data()
width_lst = self._auto_adjust_col_width(data_lst)
table = texttable.Texttable()
table.set_cols_width(width_lst)
table.add_rows(data_lst)
report_table = table.draw()
return report_table
def _show_rpt(self, rpt_table):
"""show the report to stdout and save it to the specified file"""
LOG.info("The report for this sub_stage: %s%s", os.linesep, rpt_table)
with open(self._rpt_path, 'w') as w_rpt:
w_rpt.write(rpt_table)
def read_dic(self, data_dic):
"""generate report from a dic"""
self._set_dic(data_dic)
self._gen_rpt_dir()
rpt_table = self._gen_table()
self._show_rpt(rpt_table)
def read_json(self, json_path):
"""generaged report from the specified format json file"""
if self._check_json(json_path):
with open(json_path) as rd_json:
data_dic = json.load(rd_json)
self.read_dic(data_dic)
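# Illustrative usage sketch (added, not part of the original module); the report
# path and the row/column values below are made up and follow the module docstring.
if __name__ == '__main__':
    demo = Report('demo.rpt')
    demo.read_dic({
        "R1": {"C1": "V11", "C2": "V12"},
        "R2": {"C1": "V21", "C2": "V22"},
    })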
|
gpl-3.0
| 7,415,528,286,573,856,000 | 30.372781 | 89 | 0.50132 | false | 3.572776 | false | false | false |
smart-cities/reading_aginova_sensors
|
scripts/parse_log.py
|
1
|
2425
|
#!/usr/bin/env python
"""Parse a log file and submit last values
Parse a specified log file looking for the most recent data being inserted into
the 'NOVA_LASTDATA_T' table and submit this over http. MAX_BYTES is used to
limit the volume of data that is read from the log file into memory.
"""
# beware this is horrible hacked together python, continue at your own risk...
import os
import re
import sys
import json
import urllib2
MIN_EPOCH = 0
MAX_BYTES = 1*1024*1024 #1MB
SUBMIT_URL = 'http://smartcities.switchsystems.co.uk/api/reading/send/%s'
def f_to_c(value):
""" Convert Fahrenheit to Celsius"""
return (value - 32) / 1.8
def send_data(device_id, epoch, value, sensor_name='TEMP'):
"""Send sensor data over http"""
data = {
'deviceId': device_id,
'sensorName': sensor_name,
'dataFloat': f_to_c(float(value)), # convert to Celsius
'timestamp': int(epoch)/1000, #timestamp in seconds
}
url = SUBMIT_URL % urllib2.quote(json.dumps(data))
#print url
return urllib2.urlopen(url).read()
def tail(handle, max_bytes=None):
"""Return the lines contined in the last n bytes"""
try:
if max_bytes:
handle.seek((-1 * max_bytes), os.SEEK_END)
else:
handle.seek(0)
except OSError:
handle.seek(0)
return ''.join(handle.read().decode('utf-8', 'ignore')).splitlines()[1:]
def scan_file(filename):
"""Scan through lines looking for INSERTS into NOVA_LASTDATA_T"""
data = {}
log_file = open(filename,'r')
for line in reversed(tail(log_file, MAX_BYTES)):
result = re.search(r"^INSERT INTO NOVA_LASTDATA_T VALUES\(\d,(\d*),(\d*),'temp',(\d*\.\d*).*$", line)
if result and result.group(1) not in data:
data[result.group(1)] = (result.group(2), result.group(3))
log_file.close()
return data
if __name__ == '__main__':
if len(sys.argv) > 1:
if len(sys.argv) > 2:
MIN_EPOCH = int(sys.argv[2])
DATA = scan_file(sys.argv[1])
#print DATA
for sensor_id in DATA:
            if int(DATA[sensor_id][0]) > MIN_EPOCH:
send_data(sensor_id, DATA[sensor_id][0], DATA[sensor_id][1])
else:
print "Skipping data too old: %s, %s, %s" % (sensor_id,
DATA[sensor_id][0], DATA[sensor_id][1])
else:
print "USAGE: parse_log.py FILENAME [MIN_EPOCH]"
|
gpl-2.0
| 3,628,607,066,047,131,000 | 29.696203 | 109 | 0.602474 | false | 3.246319 | false | false | false |
google-research/language
|
language/capwap/utils/text_utils.py
|
1
|
11097
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for dealing with text."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import string
from bert import tokenization
from language.capwap.utils import nltk_utils
from language.capwap.utils import tensor_utils
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.contrib import lookup as contrib_lookup
TextInputs = collections.namedtuple(
"TextInputs", ["token_ids", "mask", "segment_ids", "positions"])
TextOutputs = collections.namedtuple("TextLabels", ["token_ids", "mask"])
# ------------------------------------------------------------------------------
#
# General purpose text functions for masking/unmasking.
#
# ------------------------------------------------------------------------------
class Vocab(object):
"""Wrapper around the BERT tokenizer and vocabulary."""
PAD = "[PAD]"
UNK = "[UNK]"
SEP = "[SEP]"
CLS = "[CLS]"
IMG = "[IMG]"
ANS = "[A]"
QUE = "[Q]"
def __init__(self, vocab_file, do_lower_case):
# Load BERT tokenizer.
self._tokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case)
if self.IMG not in self:
# Override [unused0] to point to IMG.
idx = self._tokenizer.vocab.pop("[unused0]")
self._tokenizer.vocab[self.IMG] = idx
self._tokenizer.inv_vocab[idx] = self.IMG
if self.ANS not in self:
# Override [unused1] to point to ANS.
idx = self._tokenizer.vocab.pop("[unused1]")
self._tokenizer.vocab[self.ANS] = idx
self._tokenizer.inv_vocab[idx] = self.ANS
if self.QUE not in self:
# Override [unused2] to point to QUE.
idx = self._tokenizer.vocab.pop("[unused2]")
self._tokenizer.vocab[self.QUE] = idx
self._tokenizer.inv_vocab[idx] = self.QUE
# Validate
for i in range(len(self)):
assert i in self._tokenizer.inv_vocab
for special_token in [self.PAD, self.UNK, self.SEP, self.CLS]:
assert special_token in self
def __len__(self):
return len(self._tokenizer.vocab)
def __contains__(self, token):
return token in self._tokenizer.vocab
def t2i(self, token):
return self._tokenizer.vocab[token]
def i2t(self, index):
return self._tokenizer.inv_vocab[index]
def tokenize(self, text):
"""Convert text to word pieces."""
return self._tokenizer.tokenize(text)
@staticmethod
def clean(wordpieces):
"""Clean word pieces."""
if Vocab.CLS in wordpieces:
idx = wordpieces.index(Vocab.CLS)
wordpieces = wordpieces[idx + 1:]
if Vocab.SEP in wordpieces:
idx = wordpieces.index(Vocab.SEP)
wordpieces = wordpieces[:idx]
if Vocab.PAD in wordpieces:
wordpieces = [w for w in wordpieces if w != Vocab.PAD]
# Various adhoc hacks.
adjusted = []
for w in wordpieces:
# Remove non-ascii.
try:
w.encode(encoding="utf-8").decode("ascii")
except UnicodeDecodeError:
continue
# Remove [unused*]
if w.startswith("[unused"):
continue
# Remove repeated word.
if not w.startswith("##") and adjusted and adjusted[-1] == w:
continue
adjusted.append(w)
return adjusted
@staticmethod
def detokenize(wordpieces):
"""Convert word pieces to text."""
wordpieces = Vocab.clean(wordpieces)
tokens = []
for w in wordpieces:
if w.startswith("##") and len(tokens):
tokens[-1] = tokens[-1] + w.lstrip("##")
else:
tokens.append(w)
return " ".join(tokens)
def get_string_lookup_table(self):
unk_idx = self._tokenizer.vocab[self.UNK]
ordered = [self.i2t(i) for i in range(len(self))]
return contrib_lookup.index_table_from_tensor(
np.array(ordered), default_value=unk_idx)
@classmethod
def load(cls, path):
do_lower_case = "uncased" in path or "cased" not in path
return cls(path, do_lower_case)
# ------------------------------------------------------------------------------
#
# General purpose text functions for masking/unmasking.
#
# ------------------------------------------------------------------------------
def get_token_mask(token_ids, stop_id):
"""Create mask for all ids past stop_id (inclusive)."""
batch_size = tensor_utils.shape(token_ids, 0)
num_tokens = tensor_utils.shape(token_ids, 1)
# Create position matrix.
idx_range = tf.expand_dims(tf.range(num_tokens), 0)
idx_range = tf.tile(idx_range, [batch_size, 1])
# Find positions of stop_id.
stop_positions = tf.where(
condition=tf.equal(token_ids, stop_id),
x=idx_range,
y=tf.fill([batch_size, num_tokens], num_tokens))
# Find earliest stop position (length).
stop_positions = tf.reduce_min(stop_positions, -1)
# Mask out all tokens at positions > stop_id.
mask = tf.less_equal(idx_range, tf.expand_dims(stop_positions, -1))
return tf.cast(mask, tf.int32)
def get_random_span(text, p, max_span_len, max_iter=10):
"""Get random subspan from text token sequence, following heuristics.
Heuristics:
1) Should not start or end mid-wordpiece.
2) Must contain at least one non-stopword token.
3) Length should be drawn from Geo(p) and less than max_span_len.
Args:
text: <string> [], space-separated token string.
p: <float32> Geometric distribution parameter.
max_span_len: Length to pad or truncate to.
max_iter: Maximum rejection sampling iterations.
Returns:
span_wid: <string>
"""
# Split text into tokens.
tokens = tf.string_split([text]).values
seq_len = tf.size(tokens)
def reject(start, end):
"""Reject span sample."""
span = tokens[start:end + 1]
wordpiece_boundary = tf.logical_or(
tf.strings.regex_full_match(span[0], r"^##.*"),
tf.strings.regex_full_match(span[-1], r"^##.*"))
span = tokens[start:end]
stopwords = list(nltk_utils.get_stopwords() | set(string.punctuation))
non_stopword = tf.setdiff1d(span, stopwords)
all_stopword = tf.equal(tf.size(non_stopword.out), 0)
length = tf.equal(tf.size(span), 0)
return tf.reduce_any([wordpiece_boundary, all_stopword, length])
def sample(start, end):
"""Sample length from truncated Geo(p)."""
# Sample from truncated geometric distribution.
geometric = lambda k: (1 - p)**(k - 1) * p
probs = np.array([geometric(k) for k in range(1, max_span_len + 1)])
probs /= probs.sum()
length = tf.distributions.Categorical(probs=probs).sample() + 1
# Sample start uniformly.
max_offset = tf.maximum(1, seq_len - length + 1)
start = tf.random.uniform([], 0, max_offset, dtype=tf.int32)
end = start + length
# Return span.
return [start, end]
# Rejection sample. Start with dummy span variable.
start = tf.constant(0)
end = tf.constant(0)
start, end = tf.while_loop(
reject, sample, [start, end], maximum_iterations=max_iter)
span = tf.strings.reduce_join(tokens[start:end], separator=" ")
return span
# ------------------------------------------------------------------------------
#
# General purpose text functions for masking/unmasking.
#
# ------------------------------------------------------------------------------
def build_text_inputs(
text,
length,
lookup_table,
segment_id=0,
start_token=None,
end_token=None,
):
"""Convert text to TextInputs.
Args:
text: <string>, space-separated token string.
length: Length to pad or truncate to.
lookup_table: Instance of contrib.lookup.index_table_from_tensor.
segment_id: Integer denoting segment type.
start_token: Optional start token.
end_token: Optional end token.
Returns:
Instance of TextInputs.
"""
# Tokenize and truncate.
tokens = tf.string_split([text]).values
length_offset = sum([0 if i is None else 1 for i in [start_token, end_token]])
tokens = tokens[:length - length_offset]
if start_token is not None:
tokens = tf.concat([[start_token], tokens], axis=0)
if end_token is not None:
tokens = tf.concat([tokens, [end_token]], axis=0)
token_ids = tf.cast(lookup_table.lookup(tokens), tf.int32)
mask = tf.ones_like(token_ids)
segment_ids = tf.fill(tf.shape(token_ids), segment_id)
pad = [[0, length - tf.size(token_ids)]]
token_ids = tf.pad(token_ids, pad)
mask = tf.pad(mask, pad)
segment_ids = tf.pad(segment_ids, pad)
positions = tf.range(length)
text_input = TextInputs(
token_ids=tf.ensure_shape(token_ids, [length]),
mask=tf.ensure_shape(mask, [length]),
segment_ids=tf.ensure_shape(segment_ids, [length]),
positions=tf.ensure_shape(positions, [length]))
return text_input
def build_planner_inputs(question, answer, length, lookup_table):
"""Convert text to TextInputs for conditional text planner.
Args:
question: <string>, space-separated token string.
answer: <string>, space-separated token string.
length: Length to pad or truncate to.
lookup_table: Instance of contrib.lookup.index_table_from_tensor.
Returns:
Instance of TextInputs.
"""
# Build question.
q_tokens = tf.string_split([question]).values
q_tokens = tf.concat([["[Q]"], q_tokens], axis=0)
q_token_ids = tf.cast(lookup_table.lookup(q_tokens), tf.int32)
q_len = tensor_utils.shape(q_token_ids, 0)
q_positions = tf.range(q_len)
# Build answer.
a_tokens = tf.string_split([answer]).values
a_tokens = tf.concat([["[A]"], a_tokens], axis=0)
a_token_ids = tf.cast(lookup_table.lookup(a_tokens), tf.int32)
a_len = tensor_utils.shape(a_token_ids, 0)
a_positions = tf.range(a_len)
# Combine.
token_ids = tf.concat([q_token_ids, a_token_ids], axis=0)
segment_ids = tf.concat([tf.fill([q_len], 2), tf.fill([a_len], 1)], axis=0)
positions = tf.concat([q_positions, a_positions], axis=0)
q_mask = tf.ones_like(q_token_ids)
mask = tf.concat([q_mask, tf.ones_like(a_token_ids)], axis=0)
# Truncate.
token_ids = token_ids[:length]
segment_ids = segment_ids[:length]
mask = mask[:length]
positions = positions[:length]
# Pad.
pad = [[0, length - tf.size(token_ids)]]
token_ids = tf.pad(token_ids, pad)
mask = tf.pad(mask, pad)
segment_ids = tf.pad(segment_ids, pad)
positions = tf.pad(positions, pad)
text_input = TextInputs(
token_ids=tf.ensure_shape(token_ids, [length]),
mask=tf.ensure_shape(mask, [length]),
segment_ids=tf.ensure_shape(segment_ids, [length]),
positions=tf.ensure_shape(positions, [length]))
return text_input
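# Illustrative check of the pure-Python helpers above (added sketch, not part of
# the original module); the wordpiece list is made up and needs no TF session.
if __name__ == "__main__":
  example = [Vocab.CLS, "a", "fluffy", "##cat", Vocab.SEP, Vocab.PAD]
  print(Vocab.clean(example))       # -> ['a', 'fluffy', '##cat']
  print(Vocab.detokenize(example))  # -> a fluffycat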
|
apache-2.0
| -2,312,943,053,695,171,000 | 30.796562 | 80 | 0.635217 | false | 3.464564 | false | false | false |
haisland0909/Denoising-Dirty-Documents
|
script/prediction.py
|
1
|
5057
|
# coding: UTF8
from sklearn.pipeline import FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn import preprocessing
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
import sklearn.linear_model
import img_to_pickle as i_p
import features as f
import classify
import preprocessing as pre
import pickle
import numpy as np
import pandas as pd
import datetime
import os
ROOT = os.path.abspath(os.path.dirname(__file__))
SUBMISSION_DIR = ROOT.replace("script", "tmp/submission")
clf_dict = {
'LR': {
"name": 'L2 Logistic Regression',
"clf": sklearn.linear_model.LogisticRegression(penalty='l2', dual=False, C=0.01),
},
'GB2': {
"name": 'Gradient Boosting New',
"clf": GradientBoostingRegressor(random_state=1, learning_rate=0.05,
n_estimators=3000, subsample=0.8,
max_features=0.3, min_samples_split=2,
min_samples_leaf=1, max_depth=7)
},
"RF": {
"name": "RandomForest",
"clf": RandomForestRegressor(max_depth=7, max_features=0.4,
min_samples_leaf=10, min_samples_split=2,
n_jobs=-1, n_estimators=1000)
},
'SGDR': {
"name": 'SGD Regression',
"clf": sklearn.linear_model.SGDRegressor(penalty='l2'),
}
}
def zero_one(x):
return min(max(x, 0.), 1.)
def convert_testdata(test_gray_data):
data_df = f.make_test_df(test_gray_data)
fu = FeatureUnion(transformer_list=f.feature_transformer_rule)
Std = preprocessing.StandardScaler()
X_test = fu.fit_transform(data_df)
#X_test = Std.fit_transform(X_test)
return X_test
def convert_traindata(train_gray_data, labels):
data_df = f.make_data_df(train_gray_data, labels)
fu = FeatureUnion(transformer_list=f.feature_transformer_rule)
Std = preprocessing.StandardScaler()
X_train = fu.fit_transform(data_df)
y_train = np.concatenate(data_df["label"].apply(lambda x: x.flatten()))
X_train = Std.fit_transform(X_train)
return X_train, y_train
def prediction(clf_name):
print "****************classifier****************"
print clf_dict[clf_name]["clf"]
clf = clf_dict[clf_name]["clf"]
_, _, _, train_gray_data, test_gray_data, _, labels = i_p.load_data()
train_keys = train_gray_data.keys()
test_keys = test_gray_data.keys()
train_df = f.make_data_df(train_gray_data, labels)
test_df = f.make_test_df(test_gray_data)
train_df = train_df.reset_index()
test_df = test_df.reset_index()
train_df.columns = ["pngname", "input", "label"]
test_df.columns = ["pngname", "input"]
# operation check
if clf_name == "SGDB":
# train_df, train_keys, test_df, test_keys = pre.make_checkdata(mode="df")
# train_df, train_keys, _, _ = pre.make_checkdata(mode="df")
for i in xrange(len(train_keys)):
train_X, train_y = classify.set_traindata(train_df, train_keys[i])
clf.partial_fit(train_X, train_y)
else:
# operation check
# train_df, train_keys, _, _ = pre.make_checkdata(mode="df")
fu = FeatureUnion(transformer_list=f.feature_transformer_rule)
train_X = fu.fit_transform(train_df)
train_y = np.concatenate(train_df["label"].apply(lambda x: x.flatten()))
train_X, train_y = classify.downsampling_data(train_X, train_y, 0.2)
clf.fit(train_X, train_y)
clf_dir = os.path.abspath(os.path.dirname(__file__)) +\
"/../tmp/fit_instance/"
now = datetime.datetime.now()
savefile = clf_dir + clf_name + now.strftime("%Y_%m_%d_%H_%M_%S") + ".pickle"
fi = open(savefile, "w")
pickle.dump(clf, fi)
fi.close()
for i in xrange(len(test_keys)):
test_img = test_df[(test_df["pngname"] == test_keys[i])]["input"].as_matrix()[0]
imgname = test_keys[i]
shape = test_img.shape
test_img = {test_keys[i]: test_img}
X_test = convert_testdata(test_img)
output = clf.predict(X_test)
output = np.asarray(output)
zo = np.vectorize(zero_one)
output = zo(output).reshape(shape)
tmp = []
for row in xrange(len(output)):
for column in xrange(len(output[row])):
id_ = imgname + "_" + str(row + 1) + "_" + str(column + 1)
value = output[row][column]
pix = [id_, value]
tmp.append(pix)
if i == 0:
predict_df = pd.DataFrame(tmp)
else:
tmp_df = pd.DataFrame(tmp)
predict_df = pd.concat([predict_df, tmp_df])
predict_df.columns = ["id", "value"]
now = datetime.datetime.now()
submission_path = SUBMISSION_DIR + "/submission_" + now.strftime("%Y_%m_%d_%H_%M_%S") + ".csv"
predict_df.to_csv(submission_path, header=True, index=False)
if __name__ == '__main__':
clf_name = "RF"
prediction(clf_name)
|
apache-2.0
| -8,497,978,010,576,337,000 | 29.281437 | 98 | 0.58513 | false | 3.34679 | true | false | false |
tgbugs/hypush
|
hyputils/memex/models/user.py
|
1
|
8621
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import re
import sqlalchemy as sa
from sqlalchemy.ext.hybrid import Comparator, hybrid_property
from sqlalchemy.ext.declarative import declared_attr
from hyputils.memex._compat import string_types
from hyputils.memex.db import Base
from hyputils.memex.util.user import split_user
from hyputils.memex.security import security
USERNAME_MIN_LENGTH = 3
USERNAME_MAX_LENGTH = 30
USERNAME_PATTERN = "(?i)^[A-Z0-9._]+$"
EMAIL_MAX_LENGTH = 100
DISPLAY_NAME_MAX_LENGTH = 30
def _normalise_username(username):
    # We normalise usernames by removing dots and ignoring case in order to
    # discourage attempts at impersonation.
return sa.func.lower(sa.func.replace(username, sa.text("'.'"), sa.text("''")))
class UsernameComparator(Comparator):
"""
Custom comparator for :py:attr:`~h.models.user.User.username`.
This ensures that all lookups against the username property, such as
session.query(User).filter_by(username='juanwood')
use the normalised username for the lookup and appropriately normalise the
RHS of the query. This means that a query like the one above will
correctly find a user with a username of "Juan.Wood", for example.
"""
def operate(self, op, other, **kwargs):
return op(
_normalise_username(self.__clause_element__()),
_normalise_username(other),
**kwargs
)
class UserIDComparator(Comparator):
"""
Custom comparator for :py:attr:`~h.models.user.User.userid`.
A user's userid is a compound property which depends on their username
and their authority. A naive comparator for this property would generate
SQL like the following:
... WHERE 'acct:' || username || '@' || authority = ...
This would be slow, due to the lack of an index on the LHS expression.
While we could add a functional index on this expression, we can also take
advantage of the existing index on (normalised_username, authority), which
is what this comparator does.
A query such as
session.query(User).filter_by(userid='acct:[email protected]')
will instead generate
WHERE
(lower(replace(username, '.', '')), authority ) =
(lower(replace('luis.silva', '.', '')), 'example.com')
"""
def __init__(self, username, authority):
self.username = username
self.authority = authority
def __clause_element__(self):
return sa.tuple_(_normalise_username(self.username), self.authority)
def __eq__(self, other):
"""
Compare the userid for equality with `other`.
`other` can be anything plausibly on the RHS of a comparison, which
can include other SQL clause elements or expressions, as in
User.userid == sa.tuple_(User.username, Group.authority)
or literals, as in
User.userid == 'acct:[email protected]'
We treat the literal case specially, and split the string into
username and authority ourselves. If the string is not a well-formed
userid, the comparison will always return False.
"""
if isinstance(other, string_types):
try:
val = split_user(other)
except ValueError:
# The value being compared isn't a valid userid
return False
else:
other = sa.tuple_(_normalise_username(val["username"]), val["domain"])
return self.__clause_element__() == other
def in_(self, userids):
others = []
for userid in userids:
try:
val = split_user(userid)
except ValueError:
continue
other = sa.tuple_(_normalise_username(val["username"]), val["domain"])
others.append(other)
if not others:
return False
return self.__clause_element__().in_(others)
class User(Base):
__tablename__ = "user"
@declared_attr
def __table_args__(cls): # noqa: N805
return (
# (email, authority) must be unique
sa.UniqueConstraint("email", "authority"),
# (normalised username, authority) must be unique. This index is
# also critical for making user lookups fast.
sa.Index(
"ix__user__userid",
_normalise_username(cls.username),
cls.authority,
unique=True,
),
)
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
#: Username as chosen by the user on registration
_username = sa.Column("username", sa.UnicodeText(), nullable=False)
#: The "authority" for this user. This represents the "namespace" in which
#: this user lives. By default, all users are created in the namespace
#: corresponding to `request.domain`, but this can be overridden with the
#: `h.authority` setting.
authority = sa.Column("authority", sa.UnicodeText(), nullable=False)
#: The display name which will be used when rendering an annotation.
display_name = sa.Column(sa.UnicodeText())
#: A short user description/bio
description = sa.Column(sa.UnicodeText())
#: A free-form column to allow the user to say where they are
location = sa.Column(sa.UnicodeText())
#: The user's URI/link on the web
uri = sa.Column(sa.UnicodeText())
#: The user's ORCID ID
orcid = sa.Column(sa.UnicodeText())
identities = sa.orm.relationship(
"UserIdentity", backref="user", cascade="all, delete-orphan"
)
@hybrid_property
def username(self):
return self._username
@username.setter
def username(self, value):
self._username = value
@username.comparator
def username(cls): # noqa: N805
return UsernameComparator(cls._username)
@hybrid_property
def userid(self):
return "acct:{username}@{authority}".format(
username=self.username, authority=self.authority
)
@userid.comparator
def userid(cls): # noqa: N805
return UserIDComparator(cls.username, cls.authority)
email = sa.Column(sa.UnicodeText())
last_login_date = sa.Column(
sa.TIMESTAMP(timezone=False),
default=datetime.datetime.utcnow,
server_default=sa.func.now(),
nullable=False,
)
registered_date = sa.Column(
sa.TIMESTAMP(timezone=False),
default=datetime.datetime.utcnow,
server_default=sa.func.now(),
nullable=False,
)
@sa.orm.validates("email")
def validate_email(self, key, email):
if email is None:
return email
if len(email) > EMAIL_MAX_LENGTH:
raise ValueError(
"email must be less than {max} characters "
"long".format(max=EMAIL_MAX_LENGTH)
)
return email
@sa.orm.validates("_username")
def validate_username(self, key, username):
if not USERNAME_MIN_LENGTH <= len(username) <= USERNAME_MAX_LENGTH:
raise ValueError(
"username must be between {min} and {max} "
"characters long".format(
min=USERNAME_MIN_LENGTH, max=USERNAME_MAX_LENGTH
)
)
if not re.match(USERNAME_PATTERN, username):
raise ValueError(
"username must have only letters, numbers, " "periods, and underscores."
)
return username
@classmethod
def get_by_email(cls, session, email, authority):
"""Fetch a user by email address."""
if email is None:
return None
return (
session.query(cls)
.filter(
sa.func.lower(cls.email) == email.lower(), cls.authority == authority
)
.first()
)
@classmethod
def get_by_username(cls, session, username, authority):
"""Fetch a user by username."""
return (
session.query(cls)
.filter(cls.username == username, cls.authority == authority)
.first()
)
def __acl__(self):
terms = []
# auth_clients that have the same authority as the user
# may update the user
user_update_principal = "client_authority:{}".format(self.authority)
terms.append((security.Allow, user_update_principal, "update"))
terms.append(security.DENY_ALL)
return terms
def __repr__(self):
return "<User: %s>" % self.username
|
mit
| -137,838,271,922,611,360 | 30.122744 | 88 | 0.609094 | false | 4.259387 | false | false | false |
kriberg/eve-armada
|
armada/logistics/models.py
|
1
|
2472
|
from django.db import models
from django.contrib import admin
from armada.capsuler.models import UserPilot, \
UserCorporation, \
UserAPIKey, \
Capsuler
class LogisticsTeam(models.Model):
name = models.CharField(max_length=200)
corporation = models.ForeignKey(UserCorporation)
creator = models.ForeignKey(UserPilot)
team_type = models.CharField(max_length=30, choices=(
('STOCKER', 'Stocking'),
('HAULER', 'Hauling'),
('MANUFACTUER', 'Manufacturing'),
('FUELER', 'Fueling')))
def get_members(self):
return LogisticsTeamMember.objects.filter(team=self).order_by('pilot__public_info__name')
def get_capsuler_members(self, user):
pilots = user.get_active_pilots()
return LogisticsTeamMember.objects.filter(team=self, pilot__in=pilots)
def get_managers(self):
return LogisticsTeamMember.objects.filter(team=self,
manager=True,
accepted=True)
def is_member(self, capsuler):
if self.get_capsuler_members(capsuler).count() > 0 or capsuler == self.manager:
return True
else:
return False
def is_manager(self, capsuler):
memberships = LogisticsTeamMember.objects.filter(team=self,
pilot__in=capsuler.get_pilots_in_corporation(self.corporation),
accepted=True,
manager=True)
return memberships.count() > 0
def is_creator(self, capsuler):
for membership in self.get_capsuler_members(capsuler):
if membership.pilot == self.creator:
return True
return False
def __unicode__(self):
return self.name
def get_page_link(self):
return '/%s/%s/%s/' % (self.team_type.lower(), self.corporation, self.name)
class Meta:
unique_together = ('corporation', 'name')
class LogisticsTeamMember(models.Model):
team = models.ForeignKey(LogisticsTeam)
pilot = models.ForeignKey(UserPilot, related_name='pilot_userpilot')
accepted = models.BooleanField(default=False, editable=False)
manager = models.BooleanField(default=False)
god = models.ForeignKey(Capsuler, related_name='god_capsuler', editable=False)
class Meta:
unique_together = ('team', 'pilot')
def __unicode__(self):
return '%s: %s' % (self.team.name, self.pilot)
admin.site.register(LogisticsTeam)
admin.site.register(LogisticsTeamMember)
|
agpl-3.0
| -895,496,008,196,292,100 | 32.863014 | 97 | 0.64644 | false | 3.662222 | false | false | false |
vivyly/fancast
|
fancast/casting/views.py
|
1
|
6503
|
import simplejson
from django.views import generic
from django.http import HttpResponse
from django.views.decorators.csrf import (csrf_exempt,
requires_csrf_token)
from rest_framework import (viewsets,
generics,
)
from rest_framework.renderers import JSONRenderer
from .models import (Project,
Character,
Actor,
Prospect)
from .serializers import (ProjectSerializer,
CharacterSerializer,
ProspectSerializer,
ActorSerializer)
from .forms import (AddActor,
AddVote,
AddCharacter)
class JSONResponse(HttpResponse):
"""
An HttpResponse that renders its content into JSON.
"""
def __init__(self, data, **kwargs):
content = JSONRenderer().render(data)
kwargs['content_type'] = 'application/json'
super(JSONResponse, self).__init__(content, **kwargs)
class ProjectListView(generic.ListView):
#template_name = "project_list.html"
template_name = "projects.html"
def get_queryset(self):
return Project.objects.all().order_by('published')
class ProjectDetailView(generic.DetailView):
model = Project
#template_name = "cast_list.html"
template_name = "casting.html"
context_object_name = "slug"
def get_context_data(self, **kwargs):
context = super(ProjectDetailView, self).get_context_data(**kwargs)
context['sessionID'] = self.request.COOKIES.get('sessionid')
return context
class ProjectViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows projects to be viewed or edited
"""
queryset = Project.objects.all().order_by('published')
serializer_class = ProjectSerializer
class CharacterViewSet(generics.ListCreateAPIView):
model = Character
serializer_class = CharacterSerializer
def get_queryset(self):
slug = self.kwargs.get('slug')
if slug:
return Character.objects.filter(project__slug=slug)
return Character.objects.none()
class CharacterDetail(generics.RetrieveAPIView):
model = Character
serializer_class = CharacterSerializer
def get_queryset(self):
slug = self.kwargs.get('slug')
if slug:
return Character.objects.filter(slug=slug)
return Character.objects.none()
class ActorViewSet(generics.ListAPIView):
model = Actor
serializer_class = ActorSerializer
def get_queryset(self):
slug = self.kwargs.get('slug')
if slug:
character = Character.objects.get(slug=slug)
eligible_actors = []
for actor in character.actors:
try:
prospect = Prospect.objects.get(actor = actor,
character = character)
actor.upvotes = prospect.upvotes
actor.downvotes = prospect.downvotes
actor.total = actor.total
except Prospect.DoesNotExist:
actor.upvotes = 0
actor.downvotes = 0
actor.total = 0
eligible_actors.append(actor)
return eligible_actors
else:
return Character.objects.none()
class ActorDetail(generics.RetrieveAPIView):
serializer_class = ActorSerializer
def get_queryset(self):
slug = self.kwargs.get('slug')
if slug:
try:
return Actor.objects.filter(slug=slug)
except Actor.DoesNotExist:
pass
return Actor.objects.none()
@csrf_exempt
@requires_csrf_token
def vote(request, slug):
if request.method == "POST" or request.method == "PUT":
#this is probably not the right way to do it, need
#to figure out why post params are coming in as a string
#instead of a QueryDict
params = simplejson.loads(request.body)
params['sessionid'] = request.session.session_key
params['prospect_id'] = slug
form = AddVote(params)
if form.is_valid():
_vote = form.save()
try:
prospect = Prospect.objects.get(slug=slug)
prospects = Prospect.objects.filter(character=prospect.character)
serializer = ProspectSerializer(prospects, many=True,
context = {'request':request})
serializer.is_valid()
return JSONResponse(serializer.data)
except Prospect.DoesNotExist:
return JSONResponse({})
@csrf_exempt
@requires_csrf_token
def add_actor(request):
if request.method == "POST":
#this is probably not the right way to do it, need
#to figure out why post params are coming in as a string
        #instead of a QueryDict
params = simplejson.loads(request.body)
form = AddActor(params)
if form.is_valid():
_actor = form.save()
character = Character.objects.get(slug=params.get('character_id'))
prospects = Prospect.objects.filter(character=character)
serializer = ProspectSerializer(prospects, many=True,
context = {'request':request})
serializer.is_valid()
return JSONResponse(serializer.data)
else:
errors = [(k, v[0]) for k, v in
form.errors.items()]
return JSONResponse({'errors':errors})
return JSONResponse({})
@csrf_exempt
@requires_csrf_token
def add_character(request):
if request.method == "POST":
#this is probably not the right way to do it, need
#to figure out why post params are coming in as a string
        #instead of a QueryDict
print request
params = simplejson.loads(request.body)
print params
form = AddCharacter(params)
if form.is_valid():
character = form.save()
serializer = CharacterSerializer([character], many=True,
context = {'request':request})
serializer.is_valid()
return JSONResponse(serializer.data)
else:
errors = [(k, v[0]) for k, v in
form.errors.items()]
return JSONResponse(errors)
return JSONResponse({})
|
bsd-3-clause
| -2,488,046,979,807,079,400 | 33.775401 | 78 | 0.584807 | false | 4.576355 | false | false | false |
blab/nextstrain-augur
|
builds/dengue/dengue.process.py
|
1
|
9677
|
from __future__ import print_function
import os, sys
# we assume (and assert) that this script is running from the virus directory, i.e. inside H7N9 or zika
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import base.process
from base.process import process
import argparse
import numpy as np
from dengue_titers import titer_model, titer_export ## Set up and parameterize the titer model separately for tidiness
##### Define references and metadata #####
sanofi_vaccine_strains = {
'denv1': 'DENV1/THAILAND/PUO359/1980',
'denv2': 'DENV2/THAILAND/PUO218/1980',
'denv3': 'DENV3/THAILAND/PAH88188/1988',
'denv4': 'DENV4/INDONESIA/S1228/1978',
'all': None}
regions = ['africa', 'europe', 'north_america', 'china', 'south_asia',
'japan_korea', 'south_pacific', 'oceania', 'south_america',
'southeast_asia', 'west_asia']
##### Parse args, set up config #####
def collect_args():
"""Returns a dengue-specific argument parser."""
parser = base.process.collect_args()
# parser.add_argument('-j', '--json', default=None, nargs='+', type=str, help="Accepts path to prepared JSON(s); overrides -s argument")
parser.add_argument('-s', '--serotypes', default=["multiple"], nargs='+', type=str, choices=['denv1', 'denv2', 'denv3', 'denv4', 'all', 'multiple'],
help="Look for prepared JSON(s) like ./prepared/dengue_SEROTYPE.json; 'multiple' will run all five builds. Default='multiple'")
parser.add_argument('--no_mut_freqs', default=True, action='store_true', help="skip mutation frequencies")
parser.add_argument('--no_tree_freqs', default=False, action='store_true', help="skip tree (clade) frequencies")
parser.add_argument('--no_titers', default=False, action='store_true', help="skip titer models")
parser.set_defaults(json = None)
return parser
def make_config (prepared_json, args):
"""
Configure your analysis here.
Parsed as a function to enable running multiple builds with one cmd.
"""
return {
"dir": "dengue",
"in": prepared_json,
"geo_inference": ['region'], # what traits to perform this on; don't run country (too many demes, too few sequences per deme to be reliable)
"auspice": { ## settings for auspice JSON export
"extra_attr": ['serum', 'clade', 'dTiter_sanofi'], # keys from tree.tree.clade['attr'] to include in export
"color_options": { # which traits to color the tree by in auspice; titer colorbys are added in dengue_titers
"country":{"key":"country", "legendTitle":"Country", "menuItem":"country", "type":"discrete"},
"region":{"key":"region", "legendTitle":"Region", "menuItem":"region", "type":"discrete"},
"gt":{"key":"genotype", "legendTitle":"Genotype", "menuItem":"genotype", "type":"discrete"},
},
"defaults": {'geoResolution': 'region', 'colorBy': 'region', 'distanceMeasure': 'div', 'mapTriplicate': True}
},
"timetree_options": {"Tc": False},
"fit_titer_model": not args.no_titers,
"titers": { # regularization parameter values and cross-validation fraction
"lam_avi":0.0,
"lam_pot":0.5,
"lam_drop":1.0,
"training_fraction":0.9,
},
"estimate_mutation_frequencies": not args.no_mut_freqs,
"estimate_tree_frequencies": not args.no_tree_freqs,
"clean": args.clean,
"pivot_spacing": 1.0/4, # pivots = time points; 1/N timepoints per year
"newick_tree_options":{
"raxml": not args.no_raxml # for dev work only
}
}
##### Parse input files/params and run #####
if __name__=="__main__":
parser = collect_args()
args = parser.parse_args()
### Find the right input files ###
if args.json: # If provided, a specified JSON path overrides serotype argument
args.json = [args.json]
else: # Look for serotype-specific JSONs in the ./prepared/ directory
if 'multiple' in args.serotypes: # "multiple" = run all 5 builds
args.serotypes = ['denv1', 'denv2', 'denv3', 'denv4', 'all']
else:
args.serotypes = args.serotypes
args.json = ['./prepared/dengue_%s.json'%s for s in args.serotypes] # Look for ./prepared/dengue_SEROTYPE.json if no file paths given
for j in args.json: # validate input JSONs exist
assert os.path.isfile(j)
### Run analyses ###
for prepared_json in args.json:
try:
print("Processing %s"%prepared_json)
runner = process(make_config(prepared_json, args)) # parse
runner.align() # run alignment with mafft
runner.build_tree() # build tree with fasttree -> raxml
runner.timetree_setup_filter_run() # infer ML ancestral states (geo traits, node dates, mutations)
runner.run_geo_inference() # run mugration model to infer transmissions
# estimate mutation frequencies here.
if runner.config["estimate_mutation_frequencies"]:
pivots = runner.get_pivots_via_spacing()
runner.estimate_mutation_frequencies(pivots=pivots, min_freq=0.02, inertia=np.exp(-1.0/12), stiffness=2)
# estimate tree frequencies here.
if runner.config["estimate_tree_frequencies"]: # methods @ [ref]
pivots = runner.get_pivots_via_spacing()
runner.estimate_tree_frequencies(pivots=pivots, stiffness=2) # stiffness ~= amount of smoothing
for region in ['southeast_asia', 'south_america']: #regions:
try:
runner.estimate_tree_frequencies(region=region, stiffness=2)
except:
continue
# titers
if runner.config["fit_titer_model"] and runner.config["titers"]: # methods @ Neher et al., PNAS 2016
titer_model(runner,
lam_pot = runner.config['titers']['lam_pot'],
lam_avi = runner.config['titers']['lam_avi'],
lam_drop = runner.config['titers']['lam_drop'],
training_fraction = runner.config['titers']['training_fraction'],
sanofi_strain = sanofi_vaccine_strains[runner.info['lineage']], # vaccine strain for each serotype-specific build
plot=False,
                            criterium = lambda node: True, # calculate dTiter for all branches
                            cross_validate=3)
titer_export(runner)
### Export for visualization in auspice
runner.auspice_export()
except:
continue
##### Extra code bank #####
'''
genotypes = {
'denv1': {'I': [('E', 461, 'V'), ('E', 484, 'L'), ('M', 107, 'T')],
'II': [('E', 345, 'A'), ('E', 432, 'M'), ('E', 439, 'V')],
'IV': [('E', 339, 'S'), ('M', 72, 'E'), ('E', 88, 'T')],
'V': [('E', 297, 'T'), ('NS5', 135, 'M')]},
'denv2': {'AMERICAN': [('E', 71, 'D'), ('E', 81, 'T'), ('E', 129, 'I')],
'ASIANAMERICAN': [('E', 491, 'A'), ('M', 15, 'G'), ('M', 39, 'I')],
'ASIANI': [('E', 484, 'I'), ('NS5', 688, 'I'), ('NS1', 222, 'N')],
'COSMOPOLITAN': [('E', 71, 'A'), ('E', 149, 'N'), ('E', 462, 'V')],
'SYLVATIC': [('E', 59, 'F'), ('E', 236, 'M'), ('E', 432, 'V')]},
'denv3': {'I': [('E', 233, 'K'), ('M', 128, 'F'), ('E', 68, 'V')],
'II': [('M', 57, 'A'), ('NS5', 750, 'K')],
'III': [('E', 303, 'T'), ('E', 454, 'V'), ('E', 132, 'Y')],
'IV': [('E', 22, 'E'), ('E', 50, 'V'), ('E', 62, 'G')]},
'denv4': {'I': [('E', 494, 'H'), ('NS1', 5, 'A')],
'II': [('E', 265, 'A'), ('E', 46, 'T'), ('NS1', 246, 'S')],
'SYLVATIC': [('E', 132, 'V'), ('E', 154, 'S'), ('E', 162, 'T')]},
'all': {}
}
for i in ['denv1', 'denv2', 'denv3', 'denv4']:
for k,v in genotypes[i].items():
genotypes['all'][i.upper()+'_'+k] = v
# Label named clades based on mutations/genotypes at defining sites
runner.matchClades(genotypes[runner.info['lineage']])
# this is tricky with dengue because the canonical genotypes
# don't really represent the present-day viral diversity.
# I'll get around to redefining these soon-ish hopefully.
### Comparison: force dTiter values to be non-zero only on interserotype brances
def is_interserotype(node):
descendents = node.get_terminals()
serotypes = [k.name.split('/')[0] for k in descendents if 'DENV' in k.name]
serotypes = [s for s in serotypes if s != 'DENV']
return len(set(serotypes)) > 1
interserotype_branches = []
for node in runner.tree.tree.find_clades():
if is_interserotype(node):
interserotype_branches.append(node)
for child in node.clades:
interserotype_branches.append(child)
for node in runner.tree.tree.find_clades():
if node in interserotype_branches:
node.interserotype = True
else:
node.interserotype = False
titer_model(runner,
lam_pot = runner.config['titers']['lam_pot'],
lam_avi = runner.config['titers']['lam_avi'],
lam_drop = runner.config['titers']['lam_drop'],
training_fraction = runner.config['titers']['training_fraction'],
plot=False,
criterium = lambda node: node.interserotype == True,
csv_fname='~/Users/Sidney/Dropbox/dengue/data/titer-model/interserotype-branch-effects/model_predictions.csv')
'''
|
agpl-3.0
| -5,209,273,781,426,590,000 | 48.372449 | 152 | 0.571045 | false | 3.406195 | true | false | false |
LowerSilesians/geo-squizzy
|
build_big_data/main.py
|
1
|
2041
|
import random
import json
from models import CITY
class Duplicates:
def __init__(self):
self.storage = dict()
pass
class Feature:
def __init__(self, *args, **kwargs):
self.data = dict({"type": "Feature", "properties": dict(), "geometry": {"type": kwargs['type'], "coordinates": []}})
self.data['properties'] = kwargs['model']().get_random_data()
def add_coordinates(self, coordinates=None):
self.data['geometry']['coordinates'] = coordinates
class DataStructure:
def __init__(self, *args, **kwargs):
self.data = dict({'type': kwargs['type'], 'features': []})
self.duplicates = Duplicates()
self._range = kwargs['coordinates_range']
self.feature_model = kwargs['feature_model']
self.feature_type = kwargs['feature_type']
self.__run__(number=kwargs['features_number'])
pass
def __run__(self, number=None):
self.data['features'] = [self.feature() for x in range(0, number, 1)]
pass
def coordinates(self):
x = random.uniform(self._range[0], self._range[1])
case = self.duplicates.storage.get(x, None)
while case is not None:
x = random.uniform(self._range[0], self._range[1])
case = self.duplicates.storage.get(x, None)
self.duplicates.storage[x] = x
return x
def feature(self):
feature = Feature(type=self.feature_type, model=self.feature_model)
feature.add_coordinates(coordinates=[self.coordinates(), self.coordinates()])
return feature.data
if __name__ == "__main__":
geo = DataStructure(type="FeatureCollection",
feature_type="Point",
coordinates_range=[float(-200), float(200)],
features_number=100000,
feature_model=CITY)
geo_json = json.dumps(geo.data)
f = open("/home/ing/PycharmProjects/geo-squizzy/geosquizzy/build_big_data/data/dump100000.json", "w")
f.write(geo_json)
f.close()
|
mit
| -4,119,395,978,009,548,000 | 33.033333 | 124 | 0.590397 | false | 3.872865 | false | false | false |
mrh1997/cymu
|
libclang/build-and-test-clang.py
|
1
|
1065
|
import subprocess
import sys
from os.path import dirname, join
prj_path = dirname(sys.argv[0])
if len(sys.argv) == 1 or sys.argv[1] != 'no-rebuild':
subprocess.check_call(['vagrant', 'powershell', '-c', 'cmd.exe', '-c',
r'C:\vagrant\build.cmd'])
sys.path.append(join(prj_path, r'src\tools\clang\bindings\python'))
import clang.cindex
clang.cindex.Config.set_library_path(join(prj_path, r'build\Release\bin'))
c_src = """
int main(void)
{
int a;
int * b;
a = (3 + 4) * -(3 + 1);
b = &a;
return a;
}
"""
def print_node(node, indentation=0):
print indentation*' ', node.kind.name, node.spelling, node.operator_kind.name if node.operator_kind != clang.cindex.OperatorKind.NULL else ""
for subnode in node.get_children():
print_node(subnode, indentation+1)
transunit = clang.cindex.TranslationUnit.from_source(
'test.c', unsaved_files=[('test.c', c_src)])
if len(list(transunit.diagnostics)) > 0:
for diag in transunit.diagnostics:
print diag
else:
print_node(transunit.cursor)
|
gpl-3.0
| 7,588,159,631,043,952,000 | 31.272727 | 148 | 0.647887 | false | 2.97486 | false | false | false |
jhauberg/cards.py
|
cards/templatefield.py
|
1
|
2868
|
# coding=utf-8
"""
This module provides functions for working with template fields.
"""
import re
from typing import Iterator
class TemplateField: # pylint: disable=too-few-public-methods
""" Represents a field in a template. """
def __init__(self,
name: str=None,
context: str=None,
inner_content: str=None,
indices: range=None):
self.name = name # the name of the field
self.context = context # the context passed to the field name
self.inner_content = inner_content # the inner content between the field braces
self.indices = indices # the indices ranging from the first wrapping '{' to the last '}'
if self.inner_content is None:
if self.name is not None:
if self.context is not None:
self.inner_content = self.name + ' ' + self.context
else:
self.inner_content = self.name
def __str__(self):
return '{{ ' + (self.inner_content or '') + ' }}'
def has_row_reference(self) -> bool:
""" Determine whether a field holds a row reference. """
return (self.context.startswith('#')
if self.context is not None
else False)
def fields(content: str,
with_name_like: str=None,
with_context_like: str=None,
strictly_matching: bool=True) -> Iterator[TemplateField]:
""" Return an iterator for all fields (e.g. '{{ a_field }}') that occur in a template. """
pattern = r'{{\s?(([^}}\s]*)\s?(.*?))\s?}}'
for match in re.finditer(pattern, content):
inner_content = match.group(1).strip()
name = match.group(2).strip()
context = match.group(3).strip()
inner_content = inner_content if len(inner_content) > 0 else None
name = name if len(name) > 0 else None
context = context if len(context) > 0 else None
field = TemplateField(
name, context, inner_content, indices=range(
match.start(), match.end()))
satisfies_name_filter = (with_name_like is None or
(with_name_like is not None and field.name is not None
and re.search(with_name_like, field.name) is not None))
satisfies_context_filter = (with_context_like is None or
(with_context_like is not None and field.context is not None
and re.search(with_context_like, field.context) is not None))
satisfies_filter = (satisfies_name_filter and satisfies_context_filter
if strictly_matching
else satisfies_name_filter or satisfies_context_filter)
if satisfies_filter:
yield field
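# Minimal usage sketch (added for illustration, not part of the original module); the
# template string and field names below are made up.
if __name__ == '__main__':
    sample = 'Front: {{ title }} / count: {{ amount #1 }}'
    for sample_field in fields(sample):
        # prints e.g. "title None False" followed by "amount #1 True"
        print(sample_field.name, sample_field.context, sample_field.has_row_reference())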
|
mit
| -7,489,422,013,993,804,000 | 36.246753 | 98 | 0.561018 | false | 4.280597 | false | false | false |
mrunge/horizon_lib
|
horizon_lib/tables/actions.py
|
1
|
38071
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import defaultdict
import logging
import types
import warnings
from django.conf import settings
from django.core import urlresolvers
from django import shortcuts
from django.template.loader import render_to_string # noqa
from django.utils.datastructures import SortedDict
from django.utils.functional import Promise # noqa
from django.utils.http import urlencode # noqa
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
import six
from horizon_lib import exceptions
from horizon_lib import messages
from horizon_lib.utils import functions
from horizon_lib.utils import html
LOG = logging.getLogger(__name__)
# For Bootstrap integration; can be overridden in settings.
ACTION_CSS_CLASSES = ("btn", "btn-default", "btn-sm")
STRING_SEPARATOR = "__"
class BaseActionMetaClass(type):
"""Metaclass for adding all actions options from inheritance tree
to action.
This way actions can inherit from each other but still use
the class attributes DSL. Meaning, all attributes of Actions are
defined as class attributes, but in the background, it will be used as
parameters for the initializer of the object. The object is then
initialized clean way. Similar principle is used in DataTableMetaclass.
"""
def __new__(mcs, name, bases, attrs):
        # Options of action are set as class attributes, loading them.
options = {}
if attrs:
options = attrs
# Iterate in reverse to preserve final order
for base in bases[::-1]:
# It actually throws all super classes away except immediate
# superclass. But it's fine, immediate super-class base_options
            # includes everything because superclasses were also created by
# this metaclass. Same principle is used in DataTableMetaclass.
if hasattr(base, 'base_options') and base.base_options:
base_options = {}
# Updating options by superclasses.
base_options.update(base.base_options)
# Updating superclass options by actual class options.
base_options.update(options)
options = base_options
# Saving all options to class attribute, this will be used for
# instantiating of the specific Action.
attrs['base_options'] = options
return type.__new__(mcs, name, bases, attrs)
def __call__(cls, *args, **kwargs):
cls.base_options.update(kwargs)
# Adding cls.base_options to each init call.
klass = super(BaseActionMetaClass, cls).__call__(
*args, **cls.base_options)
return klass
@six.add_metaclass(BaseActionMetaClass)
class BaseAction(html.HTMLElement):
"""Common base class for all ``Action`` classes."""
def __init__(self, **kwargs):
super(BaseAction, self).__init__()
self.datum = kwargs.get('datum', None)
self.table = kwargs.get('table', None)
self.handles_multiple = kwargs.get('handles_multiple', False)
self.requires_input = kwargs.get('requires_input', False)
self.preempt = kwargs.get('preempt', False)
self.policy_rules = kwargs.get('policy_rules', None)
def data_type_matched(self, datum):
"""Method to see if the action is allowed for a certain type of data.
Only affects mixed data type tables.
"""
if datum:
action_data_types = getattr(self, "allowed_data_types", [])
# If the data types of this action is empty, we assume it accepts
# all kinds of data and this method will return True.
if action_data_types:
datum_type = getattr(datum, self.table._meta.data_type_name,
None)
if datum_type and (datum_type not in action_data_types):
return False
return True
def get_policy_target(self, request, datum):
"""Provide the target for a policy request.
This method is meant to be overridden to return target details when
one of the policy checks requires them. E.g., {"user_id": datum.id}
"""
return {}
def allowed(self, request, datum):
"""Determine whether this action is allowed for the current request.
This method is meant to be overridden with more specific checks.
"""
return True
def _allowed(self, request, datum):
policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None)
if policy_check and self.policy_rules:
target = self.get_policy_target(request, datum)
return (policy_check(self.policy_rules, request, target) and
self.allowed(request, datum))
return self.allowed(request, datum)
def update(self, request, datum):
"""Allows per-action customization based on current conditions.
This is particularly useful when you wish to create a "toggle"
action that will be rendered differently based on the value of an
attribute on the current row's data.
By default this method is a no-op.
"""
pass
def get_default_classes(self):
"""Returns a list of the default classes for the action. Defaults to
``["btn", "btn-default", "btn-sm"]``.
"""
return getattr(settings, "ACTION_CSS_CLASSES", ACTION_CSS_CLASSES)
def get_default_attrs(self):
"""Returns a list of the default HTML attributes for the action.
Defaults to returning an ``id`` attribute with the value
``{{ table.name }}__action_{{ action.name }}__{{ creation counter }}``.
"""
if self.datum is not None:
bits = (self.table.name,
"row_%s" % self.table.get_object_id(self.datum),
"action_%s" % self.name)
else:
bits = (self.table.name, "action_%s" % self.name)
return {"id": STRING_SEPARATOR.join(bits)}
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.name)
def associate_with_table(self, table):
self.table = table
class Action(BaseAction):
"""Represents an action which can be taken on this table's data.
.. attribute:: name
Required. The short name or "slug" representing this
action. This name should not be changed at runtime.
.. attribute:: verbose_name
A descriptive name used for display purposes. Defaults to the
value of ``name`` with the first letter of each word capitalized.
.. attribute:: verbose_name_plural
Used like ``verbose_name`` in cases where ``handles_multiple`` is
``True``. Defaults to ``verbose_name`` with the letter "s" appended.
.. attribute:: method
The HTTP method for this action. Defaults to ``POST``. Other methods
may or may not succeed currently.
.. attribute:: requires_input
Boolean value indicating whether or not this action can be taken
without any additional input (e.g. an object id). Defaults to ``True``.
.. attribute:: preempt
Boolean value indicating whether this action should be evaluated in
the period after the table is instantiated but before the data has
been loaded.
This can allow actions which don't need access to the full table data
to bypass any API calls and processing which would otherwise be
required to load the table.
.. attribute:: allowed_data_types
A list that contains the allowed data types of the action. If the
datum's type is in this list, the action will be shown on the row
for the datum.
Default to be an empty list (``[]``). When set to empty, the action
will accept any kind of data.
.. attribute:: policy_rules
list of scope and rule tuples to do policy checks on, the
composition of which is (scope, rule)
scope: service type managing the policy for action
rule: string representing the action to be checked
for a policy that requires a single rule check:
policy_rules should look like
"(("compute", "compute:create_instance"),)"
for a policy that requires multiple rule checks:
rules should look like
"(("identity", "identity:list_users"),
("identity", "identity:list_roles"))"
At least one of the following methods must be defined:
.. method:: single(self, data_table, request, object_id)
Handler for a single-object action.
.. method:: multiple(self, data_table, request, object_ids)
Handler for multi-object actions.
.. method:: handle(self, data_table, request, object_ids)
If a single function can work for both single-object and
multi-object cases then simply providing a ``handle`` function
will internally route both ``single`` and ``multiple`` requests
to ``handle`` with the calls from ``single`` being transformed
into a list containing only the single object id.
"""
def __init__(self, single_func=None, multiple_func=None, handle_func=None,
attrs=None, **kwargs):
super(Action, self).__init__(**kwargs)
self.method = kwargs.get('method', "POST")
self.requires_input = kwargs.get('requires_input', True)
self.verbose_name = kwargs.get('verbose_name', self.name.title())
self.verbose_name_plural = kwargs.get('verbose_name_plural',
"%ss" % self.verbose_name)
self.allowed_data_types = kwargs.get('allowed_data_types', [])
self.icon = kwargs.get('icon', None)
if attrs:
self.attrs.update(attrs)
# Don't set these if they're None
if single_func:
self.single = single_func
if multiple_func:
self.multiple = multiple_func
if handle_func:
self.handle = handle_func
# Ensure we have the appropriate methods
has_handler = hasattr(self, 'handle') and callable(self.handle)
has_single = hasattr(self, 'single') and callable(self.single)
has_multiple = hasattr(self, 'multiple') and callable(self.multiple)
if has_handler or has_multiple:
self.handles_multiple = True
if not has_handler and (not has_single or has_multiple):
cls_name = self.__class__.__name__
raise NotImplementedError('You must define either a "handle" '
'method or a "single" or "multiple" '
'method on %s.' % cls_name)
if not has_single:
def single(self, data_table, request, object_id):
return self.handle(data_table, request, [object_id])
self.single = types.MethodType(single, self)
if not has_multiple and self.handles_multiple:
def multiple(self, data_table, request, object_ids):
return self.handle(data_table, request, object_ids)
self.multiple = types.MethodType(multiple, self)
def get_param_name(self):
"""Returns the full POST parameter name for this action.
Defaults to
``{{ table.name }}__{{ action.name }}``.
"""
return "__".join([self.table.name, self.name])
class LinkAction(BaseAction):
"""A table action which is simply a link rather than a form POST.
.. attribute:: name
Required. The short name or "slug" representing this
action. This name should not be changed at runtime.
.. attribute:: verbose_name
A string which will be rendered as the link text. (Required)
.. attribute:: url
A string or a callable which resolves to a url to be used as the link
target. You must either define the ``url`` attribute or override
the ``get_link_url`` method on the class.
.. attribute:: allowed_data_types
A list that contains the allowed data types of the action. If the
datum's type is in this list, the action will be shown on the row
for the datum.
Defaults to be an empty list (``[]``). When set to empty, the action
will accept any kind of data.
"""
# class attribute name is used for ordering of Actions in table
name = "link"
ajax = False
def __init__(self, attrs=None, **kwargs):
super(LinkAction, self).__init__(**kwargs)
self.method = kwargs.get('method', "GET")
self.bound_url = kwargs.get('bound_url', None)
self.name = kwargs.get('name', self.name)
self.verbose_name = kwargs.get('verbose_name', self.name.title())
self.url = kwargs.get('url', None)
self.allowed_data_types = kwargs.get('allowed_data_types', [])
self.icon = kwargs.get('icon', None)
self.kwargs = kwargs
if not kwargs.get('verbose_name', None):
raise NotImplementedError('A LinkAction object must have a '
'verbose_name attribute.')
if attrs:
self.attrs.update(attrs)
if self.ajax:
self.classes = list(self.classes) + ['ajax-update']
def get_ajax_update_url(self):
table_url = self.table.get_absolute_url()
params = urlencode(
SortedDict([("action", self.name), ("table", self.table.name)])
)
return "%s?%s" % (table_url, params)
def render(self):
return render_to_string(("horizon_lib/common/"
"_data_table_table_action.html"),
{"action": self})
def associate_with_table(self, table):
super(LinkAction, self).associate_with_table(table)
if self.ajax:
self.attrs['data-update-url'] = self.get_ajax_update_url()
def get_link_url(self, datum=None):
"""Returns the final URL based on the value of ``url``.
If ``url`` is callable it will call the function.
If not, it will then try to call ``reverse`` on ``url``.
Failing that, it will simply return the value of ``url`` as-is.
When called for a row action, the current row data object will be
passed as the first parameter.
"""
if not self.url:
raise NotImplementedError('A LinkAction class must have a '
'url attribute or define its own '
'get_link_url method.')
if callable(self.url):
return self.url(datum, **self.kwargs)
try:
if datum:
obj_id = self.table.get_object_id(datum)
return urlresolvers.reverse(self.url, args=(obj_id,))
else:
return urlresolvers.reverse(self.url)
except urlresolvers.NoReverseMatch as ex:
LOG.info('No reverse found for "%s": %s' % (self.url, ex))
return self.url
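# Illustrative sketch (not part of the original module): a LinkAction whose ``url``
# names a URL pattern to be reversed by get_link_url(); the pattern name and icon
# are assumptions.
class ExampleIndexLink(LinkAction):
    name = "example_index"
    verbose_name = "View Example Index"
    url = "horizon:example:index"
    icon = "list"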
class FilterAction(BaseAction):
"""A base class representing a filter action for a table.
.. attribute:: name
The short name or "slug" representing this action. Defaults to
``"filter"``.
.. attribute:: verbose_name
A descriptive name used for display purposes. Defaults to the
value of ``name`` with the first letter of each word capitalized.
.. attribute:: param_name
A string representing the name of the request parameter used for the
search term. Default: ``"q"``.
    .. attribute:: filter_type
A string representing the type of this filter. If this is set to
``"server"`` then ``filter_choices`` must also be provided.
Default: ``"query"``.
    .. attribute:: filter_choices
Required for server type filters. A tuple of tuples representing the
filter options. Tuple composition should evaluate to (string, string,
boolean), representing the filter parameter, display value, and whether
or not it should be applied to the API request as an API query
attribute. API type filters do not need to be accounted for in the
filter method since the API will do the filtering. However, server
type filters in general will need to be performed in the filter method.
By default this attribute is not provided.
    .. attribute:: needs_preloading
If True, the filter function will be called for the initial
GET request with an empty ``filter_string``, regardless of the
value of ``method``.
"""
# TODO(gabriel): The method for a filter action should be a GET,
# but given the form structure of the table that's currently impossible.
# At some future date this needs to be reworked to get the filter action
# separated from the table's POST form.
# class attribute name is used for ordering of Actions in table
name = "filter"
def __init__(self, **kwargs):
super(FilterAction, self).__init__(**kwargs)
self.method = kwargs.get('method', "POST")
self.name = kwargs.get('name', self.name)
self.verbose_name = kwargs.get('verbose_name', _("Filter"))
self.filter_type = kwargs.get('filter_type', "query")
self.filter_choices = kwargs.get('filter_choices')
self.needs_preloading = kwargs.get('needs_preloading', False)
self.param_name = kwargs.get('param_name', 'q')
self.icon = "search"
if self.filter_type == 'server' and self.filter_choices is None:
raise NotImplementedError(
'A FilterAction object with the '
'filter_type attribute set to "server" must also have a '
'filter_choices attribute.')
def get_param_name(self):
"""Returns the full query parameter name for this action.
Defaults to
``{{ table.name }}__{{ action.name }}__{{ action.param_name }}``.
"""
return "__".join([self.table.name, self.name, self.param_name])
def assign_type_string(self, table, data, type_string):
for datum in data:
setattr(datum, table._meta.data_type_name, type_string)
def data_type_filter(self, table, data, filter_string):
filtered_data = []
for data_type in table._meta.data_types:
func_name = "filter_%s_data" % data_type
filter_func = getattr(self, func_name, None)
if not filter_func and not callable(filter_func):
# The check of filter function implementation should happen
# in the __init__. However, the current workflow of DataTable
# and actions won't allow it. Need to be fixed in the future.
cls_name = self.__class__.__name__
raise NotImplementedError("You must define a %s method "
"for %s data type in %s." %
(func_name, data_type, cls_name))
_data = filter_func(table, data, filter_string)
self.assign_type_string(table, _data, data_type)
filtered_data.extend(_data)
return filtered_data
def filter(self, table, data, filter_string):
"""Provides the actual filtering logic.
This method must be overridden by subclasses and return
the filtered data.
"""
return data
def is_api_filter(self, filter_field):
"""Determine if the given filter field should be used as an
API filter.
"""
if self.filter_type == 'server':
for choice in self.filter_choices:
if (choice[0] == filter_field and len(choice) > 2 and
choice[2] is True):
return True
return False
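# Illustrative sketch (not part of the original module): a server-type filter whose
# filter_choices follow the (parameter, label, is-api-filter) tuples described above;
# the choice values and the name-based fallback matching are assumptions.
class ExampleServerFilter(FilterAction):
    filter_type = "server"
    filter_choices = (("name", _("Name"), True),
                      ("status", _("Status"), False))
    def filter(self, table, data, filter_string):
        # Minimal client-side fallback: substring match on a ``name`` attribute.
        query = (filter_string or '').lower()
        return [datum for datum in data
                if query in getattr(datum, 'name', '').lower()]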
class FixedFilterAction(FilterAction):
"""A filter action with fixed buttons."""
def __init__(self, **kwargs):
super(FixedFilterAction, self).__init__(**kwargs)
self.filter_type = kwargs.get('filter_type', "fixed")
self.needs_preloading = kwargs.get('needs_preloading', True)
self.fixed_buttons = self.get_fixed_buttons()
self.filter_string = ''
def filter(self, table, images, filter_string):
self.filter_string = filter_string
categories = self.categorize(table, images)
self.categories = defaultdict(list, categories)
for button in self.fixed_buttons:
button['count'] = len(self.categories[button['value']])
if not filter_string:
return images
return self.categories[filter_string]
def get_fixed_buttons(self):
"""Returns a list of dictionaries describing the fixed buttons
to use for filtering.
Each list item should be a dict with the following keys:
* ``text``: Text to display on the button
* ``icon``: Icon class for icon element (inserted before text).
* ``value``: Value returned when the button is clicked. This value is
passed to ``filter()`` as ``filter_string``.
"""
return []
def categorize(self, table, images):
"""Override to separate images into categories.
Return a dict with a key for the value of each fixed button,
and a value that is a list of images in that category.
"""
return {}
class BatchAction(Action):
"""A table action which takes batch action on one or more
objects. This action should not require user input on a
per-object basis.
.. attribute:: name
An internal name for this action.
.. method:: action_present
Method accepting an integer/long parameter and returning the display
forms of the name properly pluralised (depending on the integer) and
translated in a string or tuple/list.
.. attribute:: action_present (PendingDeprecation)
String or tuple/list. The display forms of the name.
Should be a transitive verb, capitalized and translated. ("Delete",
"Rotate", etc.) If tuple or list - then setting
self.current_present_action = n will set the current active item
from the list(action_present[n])
You can pass a complete action name including 'data_type' by specifying
'%(data_type)s' substitution in action_present ("Delete %(data_type)s").
Otherwise a complete action name is a format of "<action> <data_type>".
<data_type> is determined based on the number of items.
By passing a complete action name you allow translators to control
the order of words as they want.
NOTE: action_present attribute is bad for translations and should be
avoided. Please use the action_present method instead.
This form is kept for legacy.
.. method:: action_past
Method accepting an integer/long parameter and returning the display
forms of the name properly pluralised (depending on the integer) and
translated in a string or tuple/list.
.. attribute:: action_past (PendingDeprecation)
String or tuple/list. The past tense of action_present. ("Deleted",
"Rotated", etc.) If tuple or list - then
setting self.current_past_action = n will set the current active item
from the list(action_past[n])
NOTE: action_past attribute is bad for translations and should be
avoided. Please use the action_past method instead.
This form is kept for legacy.
.. attribute:: data_type_singular
Optional display name (if the data_type method is not defined) for the
type of data that receives the action. ("Key Pair", "Floating IP", etc.)
.. attribute:: data_type_plural
Optional plural word (if the data_type method is not defined) for the
type of data being acted on. Defaults to appending 's'. Relying on the
    default is bad for translations and should not be done, so its absence
will raise a DeprecationWarning. It is currently kept as optional for
legacy code.
NOTE: data_type_singular and data_type_plural attributes are bad for
translations and should be avoided. Please use the action_present and
action_past methods. This form is kept for legacy.
.. attribute:: success_url
Optional location to redirect after completion of the delete
action. Defaults to the current page.
"""
def __init__(self, **kwargs):
super(BatchAction, self).__init__(**kwargs)
action_present_method = False
if hasattr(self, 'action_present'):
if callable(self.action_present):
action_present_method = True
else:
warnings.warn(PendingDeprecationWarning(
'The %s BatchAction class must have an action_present '
'method instead of attribute.' % self.__class__.__name__
))
action_past_method = False
if hasattr(self, 'action_past'):
if callable(self.action_past):
action_past_method = True
else:
warnings.warn(PendingDeprecationWarning(
'The %s BatchAction class must have an action_past '
'method instead of attribute.' % self.__class__.__name__
))
action_methods = action_present_method and action_past_method
has_action_method = action_present_method or action_past_method
if has_action_method and not action_methods:
raise NotImplementedError(
                'The %s BatchAction class must have both action_past and '
'action_present methods.' % self.__class__.__name__
)
if not action_methods:
if not kwargs.get('data_type_singular'):
raise NotImplementedError(
'The %s BatchAction class must have a data_type_singular '
'attribute when action_past and action_present attributes '
'are used.' % self.__class__.__name__
)
self.data_type_singular = kwargs.get('data_type_singular')
self.data_type_plural = kwargs.get('data_type_plural',
self.data_type_singular + 's')
# TODO(ygbo): get rid of self.use_action_method once action_present and
# action_past are changed to methods handling plurals.
self.use_action_method = action_methods
self.success_url = kwargs.get('success_url', None)
# If setting a default name, don't initialize it too early
self.verbose_name = kwargs.get('verbose_name', self._get_action_name)
self.verbose_name_plural = kwargs.get(
'verbose_name_plural',
lambda: self._get_action_name('plural'))
self.current_present_action = 0
self.current_past_action = 0
# Keep record of successfully handled objects
self.success_ids = []
def _allowed(self, request, datum=None):
# Override the default internal action method to prevent batch
# actions from appearing on tables with no data.
if not self.table.data and not datum:
return False
return super(BatchAction, self)._allowed(request, datum)
def _get_action_name(self, items=None, past=False):
"""Builds combinations like 'Delete Object' and 'Deleted
Objects' based on the number of items and `past` flag.
:param items:
A list or tuple of items (or container with a __len__ method) to
count the number of concerned items for which this method is
called.
When this method is called for a single item (by the BatchAction
itself), this parameter can be omitted and the number of items
will be considered as "one".
If we want to evaluate to "zero" this parameter must not be omitted
(and should be an empty container).
:param past:
Boolean flag indicating if the action took place in the past.
By default a present action is considered.
"""
action_type = "past" if past else "present"
if items is None:
# Called without items parameter (by a single instance.)
count = 1
else:
count = len(items)
# TODO(ygbo): get rid of self.use_action_method once action_present and
# action_past are changed to methods handling plurals.
action_attr = getattr(self, "action_%s" % action_type)
if self.use_action_method:
action_attr = action_attr(count)
if isinstance(action_attr, (basestring, Promise)):
action = action_attr
else:
toggle_selection = getattr(self, "current_%s_action" % action_type)
action = action_attr[toggle_selection]
if self.use_action_method:
return action
        # TODO(ygbo): get rid of all this below once action_present and
# action_past are changed to methods handling plurals.
data_type = ungettext_lazy(
self.data_type_singular,
self.data_type_plural,
count
)
if '%(data_type)s' in action:
# If full action string is specified, use action as format string.
msgstr = action
else:
if action_type == "past":
msgstr = pgettext_lazy("past", "%(action)s %(data_type)s")
else:
msgstr = pgettext_lazy("present", "%(action)s %(data_type)s")
return msgstr % {'action': action, 'data_type': data_type}
def action(self, request, datum_id):
"""Required. Accepts a single object id and performs the specific
action.
Return values are discarded, errors raised are caught and logged.
"""
def update(self, request, datum):
"""Switches the action verbose name, if needed."""
if getattr(self, 'action_present', False):
self.verbose_name = self._get_action_name()
self.verbose_name_plural = self._get_action_name('plural')
def get_success_url(self, request=None):
"""Returns the URL to redirect to after a successful action."""
if self.success_url:
return self.success_url
return request.get_full_path()
def handle(self, table, request, obj_ids):
action_success = []
action_failure = []
action_not_allowed = []
for datum_id in obj_ids:
datum = table.get_object_by_id(datum_id)
datum_display = table.get_object_display(datum) or _("N/A")
if not table._filter_action(self, request, datum):
action_not_allowed.append(datum_display)
LOG.info('Permission denied to %s: "%s"' %
(self._get_action_name(past=True).lower(),
datum_display))
continue
try:
self.action(request, datum_id)
# Call update to invoke changes if needed
self.update(request, datum)
action_success.append(datum_display)
self.success_ids.append(datum_id)
LOG.info('%s: "%s"' %
(self._get_action_name(past=True), datum_display))
except Exception as ex:
# Handle the exception but silence it since we'll display
# an aggregate error message later. Otherwise we'd get
# multiple error messages displayed to the user.
if getattr(ex, "_safe_message", None):
ignore = False
else:
ignore = True
action_failure.append(datum_display)
exceptions.handle(request, ignore=ignore)
# Begin with success message class, downgrade to info if problems.
success_message_level = messages.success
if action_not_allowed:
msg = _('You are not allowed to %(action)s: %(objs)s')
params = {"action":
self._get_action_name(action_not_allowed).lower(),
"objs": functions.lazy_join(", ", action_not_allowed)}
messages.error(request, msg % params)
success_message_level = messages.info
if action_failure:
msg = _('Unable to %(action)s: %(objs)s')
params = {"action": self._get_action_name(action_failure).lower(),
"objs": functions.lazy_join(", ", action_failure)}
messages.error(request, msg % params)
success_message_level = messages.info
if action_success:
msg = _('%(action)s: %(objs)s')
params = {"action":
self._get_action_name(action_success, past=True),
"objs": functions.lazy_join(", ", action_success)}
success_message_level(request, msg % params)
return shortcuts.redirect(self.get_success_url(request))
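# Illustrative sketch (not part of the original module): a BatchAction using the
# method form of action_present/action_past recommended above; the action itself and
# its logging-only body are assumptions.
class ExamplePauseAction(BatchAction):
    name = "example_pause"
    def action_present(self, count):
        return ungettext_lazy(u"Pause Instance", u"Pause Instances", count)
    def action_past(self, count):
        return ungettext_lazy(u"Paused Instance", u"Paused Instances", count)
    def action(self, request, datum_id):
        # A real implementation would call the service API here.
        LOG.info('Example pause requested for %s', datum_id)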
class DeleteAction(BatchAction):
"""A table action used to perform delete operations on table data.
.. attribute:: name
A short name or "slug" representing this action.
Defaults to 'delete'
.. method:: action_present
Method accepting an integer/long parameter and returning the display
forms of the name properly pluralised (depending on the integer) and
translated in a string or tuple/list.
.. attribute:: action_present (PendingDeprecation)
A string containing the transitive verb describing the delete action.
Defaults to 'Delete'
NOTE: action_present attribute is bad for translations and should be
avoided. Please use the action_present method instead.
This form is kept for legacy.
.. method:: action_past
Method accepting an integer/long parameter and returning the display
forms of the name properly pluralised (depending on the integer) and
translated in a string or tuple/list.
.. attribute:: action_past (PendingDeprecation)
A string set to the past tense of action_present.
Defaults to 'Deleted'
NOTE: action_past attribute is bad for translations and should be
avoided. Please use the action_past method instead.
This form is kept for legacy.
.. attribute:: data_type_singular (PendingDeprecation)
A string used to name the data to be deleted.
.. attribute:: data_type_plural (PendingDeprecation)
Optional. Plural of ``data_type_singular``.
Defaults to ``data_type_singular`` appended with an 's'. Relying on
    the default is bad for translations and should not be done, so its
absence will raise a DeprecationWarning. It is currently kept as
optional for legacy code.
NOTE: data_type_singular and data_type_plural attributes are bad for
translations and should be avoided. Please use the action_present and
action_past methods. This form is kept for legacy.
"""
name = "delete"
def __init__(self, **kwargs):
super(DeleteAction, self).__init__(**kwargs)
self.name = kwargs.get('name', self.name)
if not hasattr(self, "action_present"):
self.action_present = kwargs.get('action_present', _("Delete"))
if not hasattr(self, "action_past"):
self.action_past = kwargs.get('action_past', _("Deleted"))
self.icon = "remove"
def action(self, request, obj_id):
"""Action entry point. Overrides base class' action method.
Accepts a single object id passing it over to the delete method
responsible for the object's destruction.
"""
return self.delete(request, obj_id)
def delete(self, request, obj_id):
"""Required. Deletes an object referenced by obj_id.
Override to provide delete functionality specific to your data.
"""
def get_default_classes(self):
"""Appends ``btn-danger`` to the action's default css classes.
This method ensures the corresponding button is highlighted
as a trigger for a potentially dangerous action.
"""
classes = super(DeleteAction, self).get_default_classes()
classes += ("btn-danger",)
return classes
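# Illustrative sketch (not part of the original module): a concrete DeleteAction that
# overrides delete() as described above; the "Volume" wording and logging-only body
# are assumptions.
class ExampleDeleteVolume(DeleteAction):
    def action_present(self, count):
        return ungettext_lazy(u"Delete Volume", u"Delete Volumes", count)
    def action_past(self, count):
        return ungettext_lazy(u"Deleted Volume", u"Deleted Volumes", count)
    def delete(self, request, obj_id):
        # A real implementation would call the service API here.
        LOG.info('Example delete requested for volume %s', obj_id)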
class UpdateAction(object):
"""A table action for cell updates by inline editing."""
name = "update"
action_present = _("Update")
action_past = _("Updated")
data_type_singular = "update"
def action(self, request, datum, obj_id, cell_name, new_cell_value):
self.update_cell(request, datum, obj_id, cell_name, new_cell_value)
def update_cell(self, request, datum, obj_id, cell_name, new_cell_value):
"""Method for saving data of the cell.
        This method must implement the saving logic of the inline edited table
cell.
"""
def allowed(self, request, datum, cell):
"""Determine whether updating is allowed for the current request.
This method is meant to be overridden with more specific checks.
Data of the row and of the cell are passed to the method.
"""
return True
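# Illustrative sketch (not part of the original module): an UpdateAction whose
# update_cell() persists a single edited attribute; storing it on the datum and the
# log call are assumptions made for the example.
class ExampleNameUpdate(UpdateAction):
    def update_cell(self, request, datum, obj_id, cell_name, new_cell_value):
        # A real implementation would push the new value to the backing service.
        setattr(datum, cell_name, new_cell_value)
        LOG.info('Example update of %s on %s', cell_name, obj_id)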
|
apache-2.0
| 7,165,437,380,666,320,000 | 38.906709 | 79 | 0.614168 | false | 4.479995 | false | false | false |
marwoodandrew/superdesk-aap
|
server/aap/publish/formatters/anpa_formatter_test.py
|
1
|
15888
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from datetime import datetime
import io
from apps.publish import init_app
from superdesk.publish.subscribers import SUBSCRIBER_TYPES
from superdesk.tests import TestCase
from .aap_formatter_common import map_priority
from .anpa_formatter import AAPAnpaFormatter
class ANPAFormatterTest(TestCase):
subscribers = [{"_id": "1", "name": "notes", "subscriber_type": SUBSCRIBER_TYPES.WIRE, "media_type": "media",
"is_active": True, "sequence_num_settings": {"max": 10, "min": 1},
"destinations": [{"name": "ANPA", "delivery_type": "email", "format": "ANPA",
"config": {"recipients": "[email protected]"}
}]
}]
article = {
'source': 'AAP',
'_updated': datetime.strptime('2015-05-29 05:46', '%Y-%m-%d %H:%M'),
'anpa_category': [{'qcode': 'a'}],
'headline': 'This is a test headline',
'slugline': 'slugline',
'subject': [{'qcode': '02011001'}],
'anpa_take_key': 'take_key',
'urgency': 5,
'unique_id': '1',
'body_html': '<p>The story body</p>',
'type': 'text',
'word_count': '1',
'priority': 1,
'task': {'desk': 1},
'body_footer': '<p>call helpline 999 if you are planning<br>to quit smoking</p>'
}
desks = [{'_id': 1, 'name': 'National'},
{'_id': 2, 'name': 'Sports'},
{'_id': 3, 'name': 'Finance'}]
vocab = [{'_id': 'categories', 'items': [
{'is_active': True, 'name': 'Overseas Sport', 'qcode': 'S', 'subject': '15000000'},
{'is_active': True, 'name': 'Finance', 'qcode': 'F', 'subject': '04000000'},
{'is_active': True, 'name': 'General News', 'qcode': 'A'},
{'is_active': True, 'name': 'bogus', 'qcode': 'b'}]}]
def setUp(self):
self.app.data.insert('subscribers', self.subscribers)
self.app.data.insert('desks', self.desks)
self.app.data.insert('vocabularies', self.vocab)
init_app(self.app)
def testANPAFormatter(self):
subscriber = self.app.data.find('subscribers', None, None)[0]
f = AAPAnpaFormatter()
resp = f.format(self.article.copy(), subscriber, ['axx'])[0]
seq = resp['published_seq_num']
item = resp['encoded_item']
self.assertGreater(int(seq), 0)
lines = io.StringIO(item.decode())
line = lines.readline()
self.assertTrue('axx' in line[1:])
line = lines.readline()
self.assertEqual(line[:3], '') # Skip the sequence
line = lines.readline()
self.assertEqual(line[0:20], 'f a bc-slugline ') # skip the date
line = lines.readline()
self.assertEqual(line.strip(), 'This is a test headline')
line = lines.readline()
self.assertEqual(line.strip(), 'slugline take_key')
line = lines.readline()
self.assertEqual(line.strip(), 'The story body')
line = lines.readline()
self.assertEqual(line.strip(), 'call helpline 999 if you are planning')
line = lines.readline()
self.assertEqual(line.strip(), 'to quit smoking')
lines.readline()
line = lines.readline()
self.assertEqual(line.strip(), 'AAP')
def testANPAWithNoSelectorsFormatter(self):
subscriber = self.app.data.find('subscribers', None, None)[0]
subscriber['name'] = 'not notes'
f = AAPAnpaFormatter()
resp = f.format(self.article.copy(), subscriber)[0]
seq = resp['published_seq_num']
item = resp['encoded_item']
self.assertGreater(int(seq), 0)
lines = io.StringIO(item.decode())
line = lines.readline()
self.assertEqual(line[:3], '') # Skip the sequence
line = lines.readline()
self.assertEqual(line[0:20], 'f a bc-slugline ') # skip the date
line = lines.readline()
self.assertEqual(line.strip(), 'This is a test headline')
line = lines.readline()
self.assertEqual(line.strip(), 'slugline take_key')
line = lines.readline()
self.assertEqual(line.strip(), 'The story body')
line = lines.readline()
self.assertEqual(line.strip(), 'call helpline 999 if you are planning')
line = lines.readline()
self.assertEqual(line.strip(), 'to quit smoking')
lines.readline()
line = lines.readline()
self.assertEqual(line.strip(), 'AAP')
def testANPAWithBylineFormatter(self):
subscriber = self.app.data.find('subscribers', None, None)[0]
subscriber['name'] = 'not notes'
byline_article = dict(self.article.copy())
byline_article['byline'] = '<p>Joe Blogs</p>'
f = AAPAnpaFormatter()
resp = f.format(byline_article, subscriber)[0]
seq = resp['published_seq_num']
item = resp['encoded_item']
self.assertGreater(int(seq), 0)
lines = io.StringIO(item.decode())
line = lines.readline()
self.assertEqual(line[:3], '') # Skip the sequence
line = lines.readline()
self.assertEqual(line[0:20], 'f a bc-slugline ') # skip the date
line = lines.readline()
self.assertEqual(line.strip(), 'This is a test headline')
line = lines.readline()
self.assertEqual(line.strip(), 'slugline take_key')
line = lines.readline()
self.assertEqual(line.strip(), 'Joe Blogs')
line = lines.readline()
self.assertEqual(line.strip(), 'The story body')
line = lines.readline()
self.assertEqual(line.strip(), 'call helpline 999 if you are planning')
line = lines.readline()
self.assertEqual(line.strip(), 'to quit smoking')
lines.readline()
line = lines.readline()
self.assertEqual(line.strip(), 'AAP')
def testServiceLevelFormatter(self):
subscriber = self.app.data.find('subscribers', None, None)[0]
subscriber['name'] = 'not notes'
service_level_article = dict(self.article.copy())
service_level_article['genre'] = [{'qcode': 'Results (sport)'}]
service_level_article['anpa_category'] = [{'qcode': 'S'}]
f = AAPAnpaFormatter()
resp = f.format(service_level_article, subscriber)[0]
seq = resp['published_seq_num']
item = resp['encoded_item']
self.assertGreater(int(seq), 0)
lines = io.StringIO(item.decode())
line = lines.readline()
self.assertEqual(line[:3], '') # Skip the sequence
line = lines.readline()
self.assertEqual(line[0:20], 'f s bc-slugline ') # skip the date
def testMultipleCategoryFormatter(self):
subscriber = self.app.data.find('subscribers', None, None)[0]
multi_article = dict(self.article.copy())
multi_article.pop('anpa_category')
multi_article['anpa_category'] = [{'qcode': 'a'}, {'qcode': 'b'}]
f = AAPAnpaFormatter()
docs = f.format(multi_article, subscriber, ['Axy', 'Bkl'])
docs = f.format(multi_article, subscriber, ['Axy', 'Bkl'])
self.assertEqual(len(docs), 2)
cat = 'a'
for doc in docs:
item = doc['encoded_item']
lines = io.StringIO(item.decode())
line = lines.readline()
line = lines.readline()
line = lines.readline()
self.assertEqual(line[2:3], cat) # skip the date
cat = 'b'
def test_process_headline_empty_sequence_short_headline(self):
f = AAPAnpaFormatter()
article = {'headline': '1234567890' * 5}
anpa = []
f._process_headline(anpa, article, b'a')
self.assertEqual(anpa[0], b'12345678901234567890123456789012345678901234567890')
def test_headline_with_markup(self):
f = AAPAnpaFormatter()
article = {'headline': '<p>headline</p>'}
anpa = []
f._process_headline(anpa, article, b'a')
self.assertEqual(anpa[0], b'headline')
def test_process_headline_empty_sequence_long_headline(self):
f = AAPAnpaFormatter()
article = {'headline': '1234567890' * 7}
anpa = []
f._process_headline(anpa, article, b'a')
self.assertEqual(anpa[0], b'1234567890123456789012345678901234567890123456789012345678901234')
def test_process_headline_with_sequence_short_headline(self):
f = AAPAnpaFormatter()
article = {'headline': '1234567890=7', 'sequence': 7}
anpa = []
f._process_headline(anpa, article, b'a')
self.assertEqual(anpa[0], b'1234567890=7')
def test_process_headline_with_sequence_long_headline(self):
f = AAPAnpaFormatter()
article1 = {'headline': '1234567890' * 7 + '=7', 'sequence': 7}
anpa = []
f._process_headline(anpa, article1, b'a')
self.assertEqual(anpa[0], b'12345678901234567890123456789012345678901234567890123456789012=7')
article2 = {'headline': '1234567890' * 7 + '=7', 'sequence': 17}
anpa = []
f._process_headline(anpa, article2, b'a')
self.assertEqual(anpa[0], b'1234567890123456789012345678901234567890123456789012345678901=17')
def test_process_headline_locator_inject(self):
f = AAPAnpaFormatter()
article3 = {'headline': '1234567890' * 3, 'place': [{'qcode': 'VIC', 'name': 'VIC'}]}
anpa = []
f._process_headline(anpa, article3, b'a')
self.assertEqual(anpa[0], b'VIC:123456789012345678901234567890')
def test_map_priority(self):
self.assertEqual('f', map_priority(1))
self.assertEqual('u', map_priority(2))
self.assertEqual('b', map_priority(3))
self.assertEqual('r', map_priority(4))
self.assertEqual('r', map_priority(5))
self.assertEqual('r', map_priority(6))
self.assertEqual('r', map_priority(None))
self.assertEqual('r', map_priority(7))
self.assertEqual('r', map_priority(''))
def test_dateline_with_empty_text(self):
f = AAPAnpaFormatter()
subscriber = self.app.data.find('subscribers', None, None)[0]
item = self.article.copy()
item.update({'dateline': {'text': None}})
resp = f.format(item, subscriber)[0]
self.assertTrue('The story body' in resp['encoded_item'].decode('ascii'))
def test_dateline_injection(self):
f = AAPAnpaFormatter()
subscriber = self.app.data.find('subscribers', None, None)[0]
item = self.article.copy()
item.update({'dateline': {'text': 'SYDNEY, June 27 AAP -'}})
resp = f.format(item, subscriber)[0]
out = resp['encoded_item']
lines = io.StringIO(out.decode())
self.assertTrue(lines.getvalue().find('SYDNEY, June 27 AAP - The story body') > 0)
def test_ednote_injection(self):
f = AAPAnpaFormatter()
subscriber = self.app.data.find('subscribers', None, None)[0]
item = self.article.copy()
item.update({'ednote': 'Note this'})
resp = f.format(item, subscriber)[0]
out = resp['encoded_item']
lines = io.StringIO(out.decode())
self.assertTrue(lines.getvalue().find('Note this') > 0)
def test_div_body(self):
f = AAPAnpaFormatter()
subscriber = self.app.data.find('subscribers', None, None)[0]
item = self.article.copy()
item.update({
'body_html': '<div>Kathmandu Holdings has lodged a claim in the New Zealand High'
' Court for the recovery of costs associated with last year\'s takeover bid from Briscoe'
' Group.</div><div>Kathmandu Holdings has lodged a claim in the New Zealand High Court for '
'the recovery of costs associated with last year\'s takeover bid from Briscoe Group.'
'</div><div><br></div><div>Kathmandu incurred costs in relation to the takeover bid. '
'After an initial request for payment on November 20, 2015 and subsequent correspondence, '
'Briscoe made a payment of $637,711.65 on May 25, 2016 without prejudice to its position on '
'what sum Kathmandu is entitled to recover.</div><div><br></div><div>Kathmandu considers the '
'full amount claimed is recoverable and has issued legal proceedings for the balance of monies'
' owed.</div>'})
resp = f.format(item, subscriber)[0]
out = resp['encoded_item']
lines = io.StringIO(out.decode())
self.assertTrue(lines.getvalue().split('\n')[7].find(' Kathmandu incurred costs in relation') == 0)
def test_span_body(self):
f = AAPAnpaFormatter()
subscriber = self.app.data.find('subscribers', None, None)[0]
item = self.article.copy()
item.update({
'body_html': '<p>Dental materials maker and marketer SDI has boosted its shares after reporting a lift in'
' sales, with improvements across Europe, Brazil and North America.</p>'
'<p>SDI <span style=\"background-color: transparent;\">reported a 7.8 per cent lift in unaudited'
' sales to $74 million for the year to June 30, 2016 on Monday, up from $68.7 million a year '
'earlier.</span></p><p>The company said it expected to report a post-tax profit of between $7.2 million '
'and $7.8 million when it releases its full-year results on August 29.</p><p>Shares in SDI gained '
'6.5 cents - a 12.2 per cent increase - to close at 59.5 cents on Monday.</p>'})
resp = f.format(item, subscriber)[0]
out = resp['encoded_item']
lines = io.StringIO(out.decode())
self.assertTrue(lines.getvalue().split('\n')[5].find(' SDI reported a 7.8 per cent lift in unaudited') == 0)
def test_br_body(self):
f = AAPAnpaFormatter()
subscriber = self.app.data.find('subscribers', None, None)[0]
item = self.article.copy()
item.update({
'body_html': '<p>Dental materials maker and marketer SDI<br> has boosted its shares after '
'reporting a lift in'
' sales, with improvements across Europe, Brazil and North America.</p>'
'<p>SDI <span style=\"background-color: transparent;\">reported a 7.8 per cent lift in unaudited'
' sales to $74 million for the year to June 30, 2016 on Monday, up from $68.7 million a year '
'earlier.</span></p><p>The company said it expected to report a post-tax profit of between $7.2 million '
'and $7.8 million when it releases its full-year results on August 29.</p><p>Shares in SDI gained '
'6.5 cents - a 12.2 per cent increase - to close at 59.5 cents on Monday.</p>'})
resp = f.format(item, subscriber)[0]
out = resp['encoded_item']
lines = io.StringIO(out.decode())
self.assertTrue(lines.getvalue().split('\n')[4].find(' Dental materials maker and marketer SDI') == 0)
self.assertTrue(lines.getvalue().split('\n')[5].find(' has boosted its shares after reporting') == 0)
def test_none_body(self):
f = AAPAnpaFormatter()
subscriber = self.app.data.find('subscribers', None, None)[0]
item = self.article.copy()
item.update({
'anpa_take_key': None, 'byline': None, 'abstract': None})
resp = f.format(item, subscriber)[0]
self.assertTrue('encoded_item' in resp)
|
agpl-3.0
| -7,921,156,450,565,238,000 | 41.368 | 120 | 0.592837 | false | 3.659143 | true | false | false |
repotvsupertuga/repo
|
plugin.video.zen/resources/lib/indexers/navigator.py
|
1
|
14704
|
# -*- coding: utf-8 -*-
'''
zen Add-on
Copyright (C) 2016 zen
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os,sys,urlparse
from resources.lib.modules import control
from resources.lib.modules import trakt
sysaddon = sys.argv[0]
syshandle = int(sys.argv[1])
artPath = control.artPath()
addonFanart = control.addonFanart()
imdbCredentials = False if control.setting('imdb.user') == '' else True
traktCredentials = trakt.getTraktCredentialsInfo()
traktIndicators = trakt.getTraktIndicatorsInfo()
queueMenu = control.lang(32065).encode('utf-8')
movielist1 = control.setting('tmdb.movielist_name1')
movielist2 = control.setting('tmdb.movielist_name2')
movielist3 = control.setting('tmdb.movielist_name3')
movielist4 = control.setting('tmdb.movielist_name4')
movielist5 = control.setting('tmdb.movielist_name5')
movielist6 = control.setting('tmdb.movielist_name6')
movielist7 = control.setting('tmdb.movielist_name7')
movielist8 = control.setting('tmdb.movielist_name8')
movielist9 = control.setting('tmdb.movielist_name9')
movielist10 = control.setting('tmdb.movielist_name10')
tvlist1 = control.setting('tmdb.tvlist_name1')
tvlist2 = control.setting('tmdb.tvlist_name2')
tvlist3 = control.setting('tmdb.tvlist_name3')
tvlist4 = control.setting('tmdb.tvlist_name4')
tvlist5 = control.setting('tmdb.tvlist_name5')
tvlist6 = control.setting('tmdb.tvlist_name6')
tvlist7 = control.setting('tmdb.tvlist_name7')
tvlist8 = control.setting('tmdb.tvlist_name8')
tvlist9 = control.setting('tmdb.tvlist_name9')
tvlist10 = control.setting('tmdb.tvlist_name10')
class navigator:
def root(self):
self.addDirectoryItem(32001, 'movieNavigator', 'movies.png', 'DefaultMovies.png')
self.addDirectoryItem(32002, 'tvNavigator', 'channels.png', 'DefaultTVShows.png')
if not control.setting('movie.widget') == '0': self.addDirectoryItem('Spotlight', 'movieWidget', 'latest-movies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Movies Watchlist', 'movieFavourites', 'mymovies.png', 'DefaultMovies.png')
self.addDirectoryItem('TV Shows Watchlist', 'tvFavourites', 'mymovies.png', 'DefaultMovies.png')
self.addDirectoryItem('New Movies', 'movies&url=premiere', 'trending.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(32026, 'tvshows&url=premiere', 'years.png', 'DefaultTVShows.png')
self.addDirectoryItem('My Lists', 'lists_navigator', 'trending.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(32027, 'calendars', 'networks.png', 'DefaultRecentlyAddedEpisodes.png')
self.addDirectoryItem(32007, 'channels', 'channels.png', 'DefaultMovies.png')
self.addDirectoryItem(32008, 'toolNavigator', 'tools.png', 'DefaultAddonProgram.png')
downloads = True if control.setting('downloads') == 'true' and (len(control.listDir(control.setting('movie.download.path'))[0]) > 0) else False
if downloads == True: self.addDirectoryItem(32009, 'downloadNavigator', 'downloads.png', 'DefaultFolder.png')
self.addDirectoryItem(32010, 'searchNavigator', 'search.png', 'DefaultFolder.png')
self.addDirectoryItem('Changelog', 'ShowChangelog', 'icon.png', 'DefaultFolder.png')
self.endDirectory()
def movies(self, lite=False):
self.addDirectoryItem('Featured', 'movies&url=featured', 'featured.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Trending', 'movies&url=trending', 'trending.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Populars', 'movies&url=popular', 'populars.png', 'DefaultMovies.png')
self.addDirectoryItem('New Movies', 'movies&url=premiere', 'trending.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Top Rated', 'movies&url=views', 'most-viewed.png', 'DefaultMovies.png')
self.addDirectoryItem('In Theaters', 'movies&url=theaters', 'in-theaters.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Marvel Universe', 'movies&url=tmdbmarvel', 'marvel.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Oscar Winners', 'movies&url=tmdboscars', 'oscars.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Disney Collection', 'movies&url=tmdbdisney', 'disney.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Genres', 'movieGenres', 'genres.png', 'DefaultMovies.png')
self.addDirectoryItem('Years', 'movieYears', 'years.png', 'DefaultMovies.png')
self.addDirectoryItem('Persons', 'moviePersons', 'people.png', 'DefaultMovies.png')
self.addDirectoryItem('Certificates', 'movieCertificates', 'certificates.png', 'DefaultMovies.png')
self.addDirectoryItem(32028, 'moviePerson', 'people-search.png', 'DefaultMovies.png')
self.addDirectoryItem(32010, 'movieSearch', 'search.png', 'DefaultMovies.png')
self.endDirectory()
def lists_navigator(self):
self.addDirectoryItem('Movies', 'movielist', 'movies.png', 'DefaultMovies.png')
self.addDirectoryItem('Tv Shows', 'tvlist', 'channels.png', 'DefaultTVShows.png')
self.endDirectory()
def mymovies(self):
self.addDirectoryItem(movielist1, 'movies&url=mycustomlist1', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist2, 'movies&url=mycustomlist2', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist3, 'movies&url=mycustomlist3', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist4, 'movies&url=mycustomlist4', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist5, 'movies&url=mycustomlist5', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist6, 'movies&url=mycustomlist6', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist7, 'movies&url=mycustomlist7', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist8, 'movies&url=mycustomlist8', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist9, 'movies&url=mycustomlist9', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist10, 'movies&url=mycustomlist10', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.endDirectory()
def mytv(self):
self.addDirectoryItem(tvlist1, 'tvshows&url=mycustomlist1', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist2, 'tvshows&url=mycustomlist2', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist3, 'tvshows&url=mycustomlist3', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist4, 'tvshows&url=mycustomlist4', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist5, 'tvshows&url=mycustomlist5', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist6, 'tvshows&url=mycustomlist6', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist7, 'tvshows&url=mycustomlist7', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist8, 'tvshows&url=mycustomlist8', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist9, 'tvshows&url=mycustomlist9', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist10, 'tvshows&url=mycustomlist10', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.endDirectory()
def tvshows(self, lite=False):
self.addDirectoryItem('Featured', 'tvshows&url=featured', 'populars.png', 'DefaultRecentlyAddedEpisodes.png')
self.addDirectoryItem('Populars', 'tvshows&url=popular', 'most-viewed.png', 'DefaultTVShows.png')
self.addDirectoryItem(32019, 'tvshows&url=views', 'most-viewed.png', 'DefaultTVShows.png')
self.addDirectoryItem(32026, 'tvshows&url=premiere', 'years.png', 'DefaultTVShows.png')
self.addDirectoryItem(32025, 'tvshows&url=active', 'years.png', 'DefaultTVShows.png')
self.addDirectoryItem(32023, 'tvshows&url=rating', 'featured.png', 'DefaultTVShows.png')
self.addDirectoryItem(32011, 'tvGenres', 'genres.png', 'DefaultTVShows.png')
self.addDirectoryItem(32016, 'tvNetworks', 'networks.png', 'DefaultTVShows.png')
self.addDirectoryItem(32024, 'tvshows&url=airing', 'airing-today.png', 'DefaultTVShows.png')
self.addDirectoryItem(32027, 'calendars', 'networks.png', 'DefaultRecentlyAddedEpisodes.png')
self.addDirectoryItem(32010, 'tvSearch', 'search.png', 'DefaultTVShows.png')
self.endDirectory()
def tools(self):
self.addDirectoryItem('[B]URL RESOLVER[/B]: Settings', 'urlresolversettings', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem(32043, 'openSettings&query=0.0', 'tools.png', 'DefaultAddonProgram.png')
# self.addDirectoryItem(32044, 'openSettings&query=3.1', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem(32045, 'openSettings&query=1.0', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem('[B]SETTINGS[/B]: Accounts', 'openSettings&query=2.0', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem(32047, 'openSettings&query=3.0', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem(32046, 'openSettings&query=5.0', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem('[B]SETTINGS[/B]: Downloads', 'openSettings&query=4.0', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem('[B]SETTINGS[/B]: Watchlist', 'openSettings&query=6.0', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem('[B]SETTINGS[/B]: Lists', 'openSettings&query=7.0', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem('[B]ZEN[/B]: Views', 'viewsNavigator', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem('[B]ZEN[/B]: Clear Providers', 'clearSources', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem('[B]ZEN[/B]: Clear Cache', 'clearCache', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem('[B]BACKUP[/B]: Watchlist', 'backupwatchlist', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem('[B]RESTORE[/B]: Watchlist', 'restorewatchlist', 'tools.png', 'DefaultAddonProgram.png')
self.endDirectory()
def downloads(self):
movie_downloads = control.setting('movie.download.path')
# tv_downloads = control.setting('tv.download.path')
if len(control.listDir(movie_downloads)[0]) > 0:
self.addDirectoryItem(32001, movie_downloads, 'movies.png', 'DefaultMovies.png', isAction=False)
self.endDirectory()
def search(self):
self.addDirectoryItem(32001, 'movieSearch', 'search.png', 'DefaultMovies.png')
self.addDirectoryItem(32002, 'tvSearch', 'search.png', 'DefaultTVShows.png')
# self.addDirectoryItem(32029, 'moviePerson', 'people-search.png', 'DefaultMovies.png')
# self.addDirectoryItem(32030, 'tvPerson', 'people-search.png', 'DefaultTVShows.png')
self.endDirectory()
def views(self):
try:
control.idle()
items = [ (control.lang(32001).encode('utf-8'), 'movies'), (control.lang(32002).encode('utf-8'), 'tvshows'), (control.lang(32054).encode('utf-8'), 'seasons'), (control.lang(32038).encode('utf-8'), 'episodes') ]
select = control.selectDialog([i[0] for i in items], control.lang(32049).encode('utf-8'))
if select == -1: return
content = items[select][1]
title = control.lang(32059).encode('utf-8')
url = '%s?action=addView&content=%s' % (sys.argv[0], content)
poster, banner, fanart = control.addonPoster(), control.addonBanner(), control.addonFanart()
item = control.item(label=title)
item.setInfo(type='Video', infoLabels = {'title': title})
item.setArt({'icon': poster, 'thumb': poster, 'poster': poster, 'tvshow.poster': poster, 'season.poster': poster, 'banner': banner, 'tvshow.banner': banner, 'season.banner': banner})
item.setProperty('Fanart_Image', fanart)
control.addItem(handle=int(sys.argv[1]), url=url, listitem=item, isFolder=False)
control.content(int(sys.argv[1]), content)
control.directory(int(sys.argv[1]), cacheToDisc=True)
from resources.lib.modules import views
views.setView(content, {})
except:
return
def accountCheck(self):
if traktCredentials == False and imdbCredentials == False:
control.idle()
control.infoDialog(control.lang(32042).encode('utf-8'), sound=True, icon='WARNING')
sys.exit()
def clearCache(self):
control.idle()
yes = control.yesnoDialog(control.lang(32056).encode('utf-8'), '', '')
if not yes: return
from resources.lib.modules import cache
cache.clear()
control.infoDialog(control.lang(32057).encode('utf-8'), sound=True, icon='INFO')
def addDirectoryItem(self, name, query, thumb, icon, queue=False, isAction=True, isFolder=True):
try: name = control.lang(name).encode('utf-8')
except: pass
url = '%s?action=%s' % (sysaddon, query) if isAction == True else query
thumb = os.path.join(artPath, thumb) if not artPath == None else icon
cm = []
if queue == True: cm.append((queueMenu, 'RunPlugin(%s?action=queueItem)' % sysaddon))
item = control.item(label=name)
item.addContextMenuItems(cm)
item.setArt({'icon': thumb, 'thumb': thumb})
if not addonFanart == None: item.setProperty('Fanart_Image', addonFanart)
control.addItem(handle=syshandle, url=url, listitem=item, isFolder=isFolder)
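# Illustration (added; the plugin base url below is an assumption, not taken
# from this file): with sysaddon being something like
# 'plugin://plugin.video.zen/' and artPath pointing at the skin's media
# directory, a call such as
#     self.addDirectoryItem(32001, 'movieSearch', 'search.png', 'DefaultMovies.png')
# resolves the label via control.lang(32001), builds
#     url   = 'plugin://plugin.video.zen/?action=movieSearch'
#     thumb = os.path.join(artPath, 'search.png')
# and registers the entry with control.addItem; passing isAction=False (as
# downloads() does) keeps the query argument as a literal path, and a missing
# artPath falls back to the supplied icon name.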
def endDirectory(self):
# control.do_block_check(False)
control.directory(syshandle, cacheToDisc=True)
|
gpl-2.0
| -5,556,857,216,013,222,000 | 55.122137 | 222 | 0.696477 | false | 3.348668 | false | false | false |
crccheck/atx-bandc
|
bandc/apps/agenda/tests/test_utils.py
|
1
|
3059
|
import datetime
import os.path
from unittest import mock
from django.test import TestCase
from ..factories import BandCFactory
from ..utils import (
MeetingCancelled,
parse_date,
clean_text,
process_page,
get_number_of_pages,
_save_page,
)
from .. import scrape_logger
BASE_DIR = os.path.dirname(__file__)
class UtilsTests(TestCase):
def test_parse_date_works(self):
date = parse_date("January 13, 2014")
self.assertEqual(date, datetime.date(2014, 1, 13))
with self.assertRaises(MeetingCancelled):
date = parse_date("January 28, 2014 (Cancelled)")
def test_clean_text(self):
fixture = (
("", ""),
("test", "test"),
("- May 27, 2014 PARB Agenda", "May 27, 2014 PARB Agenda"),
)
for input, expected in fixture:
self.assertEqual(clean_text(input), expected)
def test_process_page_works(self):
html = open(os.path.join(BASE_DIR, "samples/music.html")).read()
meeting_data, doc_data = process_page(html)
self.assertEqual(len(doc_data), 9)
self.assertEqual(doc_data[0]["date"], datetime.date(2014, 6, 2))
def test_get_number_of_pages_works(self):
html = open(os.path.join(BASE_DIR, "samples/music.html")).read()
self.assertEqual(get_number_of_pages(html), 1)
html = open(os.path.join(BASE_DIR, "samples/parks.html")).read()
self.assertEqual(get_number_of_pages(html), 2)
@mock.patch("bandc.apps.agenda.models.Document.refresh")
def test_save_page_works(self, mock_task):
html = open(os.path.join(BASE_DIR, "samples/music.html")).read()
meeting_data, doc_data = process_page(html)
bandc = BandCFactory()
# Sanity check
self.assertEqual(bandc.latest_meeting, None)
process_next = _save_page(meeting_data, doc_data, bandc)
self.assertFalse(process_next)
self.assertEqual(bandc.latest_meeting.date.isoformat(), "2014-02-03")
self.assertEqual(bandc.latest_meeting.documents.all()[0].edims_id, 204789)
self.assertTrue(mock_task.called)
def test_save_page_handles_no_data(self):
meeting_data, doc_data = [], []
bandc = BandCFactory()
# Sanity check
self.assertEqual(bandc.latest_meeting, None)
process_next = _save_page(meeting_data, doc_data, bandc)
self.assertFalse(process_next)
self.assertEqual(bandc.latest_meeting, None)
@mock.patch("bandc.apps.agenda.models.Document.refresh")
def test_save_page_logs_to_scrape_logger(self, mock_task):
html = open(os.path.join(BASE_DIR, "samples/music.html")).read()
meeting_data, doc_data = process_page(html)
bandc = BandCFactory()
# Sanity check
self.assertEqual(bandc.latest_meeting, None)
with scrape_logger.init() as context:
_save_page(meeting_data, doc_data, bandc)
self.assertEqual(len(context.meetings), 4)
self.assertEqual(len(context.documents), 9)
|
bsd-3-clause
| -7,105,842,265,565,726,000 | 33.761364 | 82 | 0.632887 | false | 3.44482 | true | false | false |
wyrdmeister/OnlineAnalysis
|
OAGui/src/GuiBase.py
|
1
|
5421
|
# -*- coding: utf-8 -*-
"""
Online Analysis Configuration Editor - Base class with logging functions
Version 2.0
Michele Devetta (c) 2013
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import logging
from PyQt4 import QtCore
from PyQt4 import QtGui
def declare_trUtf8(name):
""" Declare a UTF-8 translation function with the given module name. """
def function(string):
try:
return unicode(QtGui.QApplication.translate(name, string, None, QtGui.QApplication.UnicodeUTF8))
except:
return unicode(string)
return function
class GuiBase(QtCore.QObject):
""" Base OA GUI class with logging facilities. """
def __init__(self, name='OAGui'):
""" Constructor. """
# Parent constructor
QtCore.QObject.__init__(self)
# Parse command line args
ap = argparse.ArgumentParser(prog=name, add_help=False)
ap.add_argument('-d', '--debug', dest="debug", action="store_const", const=Logger.DEBUG, default=Logger.INFO)
out = ap.parse_args()
# Init logger
self.logger = Logger(name, out.debug)
class WidgetHandler(logging.Handler):
""" Logging handler that send formatted output to QListWidget. """
def __init__(self, signal, level=logging.NOTSET):
""" Constructor. """
logging.Handler.__init__(self, level)
self._signal = signal
def emit(self, record):
""" Stores a record. """
self._signal.emit(unicode(self.format(record)))
class Logger(object):
""" Logger class. """
def __init__(self, name="Default", level=logging.INFO):
""" Constructor. """
self._name = name
self._level = level
self._init_logger()
def _init_logger(self):
""" Initialize the logger object. """
# Setup logger
self._logger = logging.getLogger(self._name)
self._logger.setLevel(self._level)
# Add standard handler if not present
for h in self._logger.handlers:
try:
if h.name == self._name + "_handler":
return
except:
pass
_handler = logging.StreamHandler()
_handler.setFormatter(logging.Formatter('[%(asctime)s] %(name)s:%(levelname)s:%(message)s', '%b %d, %H:%M:%S'))
_handler.setLevel(self._level)
_handler.name = self._name + "_handler"
self._logger.addHandler(_handler)
def setupWidgetLogger(self, signal):
""" Add a widget handler to the current logger. """
for h in self._logger.handlers:
try:
if h.name == self._name + "_WidgetLogger":
return
except:
pass
handler = WidgetHandler(signal, self._level)
handler.setFormatter(logging.Formatter('[%(asctime)s] %(levelname)s:%(message)s', '%b %d, %H:%M:%S'))
handler.name = "OAEditor_WidgetLogger"
self._logger.addHandler(handler)
def __getstate__(self):
""" Enable the logger object to be pickled. """
odict = self.__dict__.copy() # copy the dict since we change it
del odict['_logger'] # remove logger entry
return odict
def __setstate__(self, idict):
""" Enable the logger object to be unpickled. """
self.__dict__.update(idict) # restore dict
self._init_logger()
def level(self):
""" Return logger level. """
return self._level
def critical(self, msg, *args, **kwargs):
""" Equivalent to logging.critical """
if 'exc_info' in kwargs and self._logger.level != logging.DEBUG:
kwargs['exc_info'] = False
self._logger.log(logging.CRITICAL, msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
""" Equivalent to logging.error """
if 'exc_info' in kwargs and self._logger.level != logging.DEBUG:
kwargs['exc_info'] = False
self._logger.log(logging.ERROR, msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
""" Equivalent to logging.warning """
if 'exc_info' in kwargs and self._logger.level != logging.DEBUG:
kwargs['exc_info'] = False
self._logger.log(logging.WARN, msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
""" Equivalent to logging.info """
if 'exc_info' in kwargs and self._logger.level != logging.DEBUG:
kwargs['exc_info'] = False
self._logger.log(logging.INFO, msg, *args, **kwargs)
def debug(self, msg, *args, **kwargs):
""" Equivalent to logging.debug """
self._logger.log(logging.DEBUG, msg, *args, **kwargs)
# Log levels
DEBUG = logging.DEBUG
INFO = logging.INFO
WARN = logging.WARN
ERROR = logging.ERROR
CRITICAL = logging.CRITICAL
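# Usage sketch (added; not part of the original module, and the subclass name
# below is only illustrative): a concrete GUI is expected to subclass GuiBase
# and log through self.logger.
if __name__ == '__main__':
    class DemoGui(GuiBase):
        """Minimal subclass that only exercises the logging setup."""
        def __init__(self):
            GuiBase.__init__(self, name='OADemo')
    gui = DemoGui()
    gui.logger.info("Logger ready (level %d)", gui.logger.level())
    gui.logger.debug("This line only appears when run with -d")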
|
gpl-3.0
| 5,279,471,035,327,872,000 | 32.060976 | 119 | 0.608006 | false | 4.116173 | false | false | false |
gpodder/gpodder
|
share/gpodder/extensions/notification-win32.py
|
1
|
6754
|
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Notification implementation for Windows
# Sean Munkel; 2012-12-29
"""
Current state (2018/07/29 ELL):
- I can't get pywin32 to work in msys2 (the platform used for this python3/gtk3 installer)
so existing code using COM doesn't work.
- Gio.Notification is not implemented on windows yet.
see https://bugzilla.gnome.org/show_bug.cgi?id=776583
- Gtk.StatusIcon with a context works but is deprecated. Showing a balloon using set_tooltip_markup
doesn't work.
See https://github.com/afiskon/py-gtk-example
- hexchat have implemented a solid c++ solution.
See https://github.com/hexchat/hexchat/tree/master/src/fe-gtk/notifications
I've chosen to implement notifications by calling a PowerShell script invoking
Windows Toast Notification API or Balloon Notification as fallback.
It's tested on Win7 32bit and Win10 64bit VMs from modern.ie
So we have a working solution until Gio.Notification is implemented on Windows.
"""
import logging
import os
import os.path
import subprocess
import sys
import tempfile
import gpodder
import gi # isort:skip
gi.require_version('Gtk', '3.0') # isort:skip
from gi.repository import Gtk # isort:skip
logger = logging.getLogger(__name__)
_ = gpodder.gettext
__title__ = _('Notification Bubbles for Windows')
__description__ = _('Display notification bubbles for different events.')
__authors__ = 'Sean Munkel <[email protected]>'
__category__ = 'desktop-integration'
__mandatory_in__ = 'win32'
__only_for__ = 'win32'
class gPodderExtension(object):
def __init__(self, *args):
gpodder_script = sys.argv[0]
gpodder_script = os.path.realpath(gpodder_script)
self._icon = os.path.join(os.path.dirname(gpodder_script), "gpodder.ico")
def on_notification_show(self, title, message):
script = """
try {{
if ([Environment]::OSVersion.Version -ge (new-object 'Version' 10,0,10240)) {{
# use Windows 10 Toast notification
[Windows.UI.Notifications.ToastNotificationManager, Windows.UI.Notifications, ContentType = WindowsRuntime] | Out-Null
[Windows.UI.Notifications.ToastNotification, Windows.UI.Notifications, ContentType = WindowsRuntime] | Out-Null
[Windows.Data.Xml.Dom.XmlDocument, Windows.Data.Xml.Dom.XmlDocument, ContentType = WindowsRuntime] | Out-Null
# Need a real AppID (see https://stackoverflow.com/q/46814858)
# use gPodder app id if it's the installed, otherwise use PowerShell's AppID
try {{
$gpo_appid = Get-StartApps -Name "gpodder"
}} catch {{
write-host "Get-StartApps not available"
$gpo_appid = $null
}}
if ($gpo_appid -ne $null) {{
$APP_ID = $gpo_appid[0].AppID
}} else {{
$APP_ID = '{{1AC14E77-02E7-4E5D-B744-2EB1AE5198B7}}\\WindowsPowerShell\\v1.0\\powershell.exe'
}}
$template = @"
<toast activationType="protocol" launch="" duration="long">
<visual>
<binding template="ToastGeneric">
<image placement="appLogoOverride" src="{icon}" />
<text><![CDATA[{title}]]></text>
<text><![CDATA[{message}]]></text>
</binding>
</visual>
<audio silent="true" />
</toast>
"@
$xml = New-Object Windows.Data.Xml.Dom.XmlDocument
$xml.LoadXml($template)
$toast = New-Object Windows.UI.Notifications.ToastNotification $xml
[Windows.UI.Notifications.ToastNotificationManager]::CreateToastNotifier($APP_ID).Show($toast)
Remove-Item -LiteralPath $MyInvocation.MyCommand.Path -Force # Delete this script temp file.
}} else {{
# use older Baloon notification when not on Windows 10
[System.Reflection.Assembly]::LoadWithPartialName("System.Windows.Forms")
$o = New-Object System.Windows.Forms.NotifyIcon
$o.Icon = "{icon}"
$o.BalloonTipIcon = "None"
$o.BalloonTipText = @"
{message}
"@
$o.BalloonTipTitle = @"
{title}
"@
$o.Visible = $True
$Delay = 10 # Delay value in seconds.
$o.ShowBalloonTip($Delay*1000)
Start-Sleep -s $Delay
$o.Dispose()
Remove-Item -LiteralPath $MyInvocation.MyCommand.Path -Force # Delete this script temp file.
}}
}} catch {{
write-host "Caught an exception:"
write-host "Exception Type: $($_.Exception.GetType().FullName)"
write-host "Exception Message: $($_.Exception.Message)"
exit 1
}}
""".format(icon=self._icon, message=message, title=title)
fh, path = tempfile.mkstemp(suffix=".ps1")
with open(fh, "w", encoding="utf_8_sig") as f:
f.write(script)
try:
# hide powershell command window using startupinfo
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = subprocess.CREATE_NEW_CONSOLE | subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
# to run 64bit powershell on Win10 64bit when running from 32bit gPodder
# (we need 64bit powershell on Win10 otherwise Get-StartApps is not available)
powershell = r"{}\sysnative\WindowsPowerShell\v1.0\powershell.exe".format(os.environ["SystemRoot"])
if not os.path.exists(powershell):
powershell = "powershell.exe"
subprocess.Popen([powershell,
"-ExecutionPolicy", "Bypass", "-File", path],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
startupinfo=startupinfo)
except subprocess.CalledProcessError as e:
logger.error("Error in on_notification_show(title=%r, message=%r):\n"
"\t%r exit code %i\n\tstdout=%s\n\tstderr=%s",
title, message, e.cmd, e.returncode, e.stdout, e.stderr)
except FileNotFoundError:
logger.error("Error in on_notification_show(title=%r, message=%r): %s not found",
title, message, powershell)
def on_unload(self):
pass
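# Illustration (added): gPodder itself constructs this extension and invokes
# its hooks, so a manual smoke test would only look roughly like
#     ext = gPodderExtension()
#     ext.on_notification_show('gPodder', 'New episodes are available')
# on_unload stays a no-op here; the generated PowerShell script removes its
# own temp file once the notification has been shown.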
|
gpl-3.0
| -7,643,252,481,788,382,000 | 40.950311 | 126 | 0.659165 | false | 3.652785 | false | false | false |
chencoyote/owasp-pysec
|
pysec/load.py
|
1
|
11062
|
# Python Security Project (PySec) and its related class files.
#
# PySec is a set of tools for secure application development under Linux
#
# Copyright 2014 PySec development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: ascii -*-
"""Module to manage more efficently import of modules"""
# ASCII_LETTERS = <str>
# DIGITS = <str>
# HEXDIGITS = <str>
# ModuleType = <type>
# _CACHE = {(<str>,<tuple>): <module>}
# _FIRST_LETTERS = <str>
# _HASHES = {<str>: <built-in function>}
# _OTHER_LETTERS = <str>
# _TAB = {<str>: <dict>}
# base64 = <module base64>
# fd = <module pysec.io.fd>
# hashlib = <module hashlib>
# imp = <module imp>
# os = <module os>
import imp
import os
import hashlib
import base64
from types import ModuleType
from pysec.core import Object
from pysec.io import fd
from pysec import log
from pysec import lang
__name__ = 'pysec.load'
__all__ = 'load_tab', 'importlib', 'make_line'
# set actions
log.register_actions('LOAD_TAB', 'IMPORT_LIB')
ASCII_LETTERS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
DIGITS = '0123456789'
HEXDIGITS = '0123456789abcdefABCDEF'
_HASHES = {
# raise NameError: undefined: getattr
'md5': getattr(hashlib, 'md5'),
# raise NameError: undefined: getattr
'sha1': getattr(hashlib, 'sha1'),
# raise NameError: undefined: getattr
'sha256': getattr(hashlib, 'sha256'),
# raise NameError: undefined: getattr
'sha512': getattr(hashlib, 'sha512'),
}
_FIRST_LETTERS = '_%s' % ASCII_LETTERS
_OTHER_LETTERS = '_%s%s' % (ASCII_LETTERS, DIGITS)
def is_hex(string):
"""Returns True if string is a valid hexadecimal number, otherwise False"""
# string = <str>
# ch = <str>
# return <bool>
return all(ch in HEXDIGITS for ch in string)
def check_libname(name):
"""Returns True if name is a valid string for a library, other False"""
# name = <str>
# ch = <str>
# return <int>|<bool>
name = str(name)
if not name:
return 0
return (name[0] in _FIRST_LETTERS and
all(ch in _OTHER_LETTERS for ch in name[1:]))
def parse_version(version):
"""Parse version string in a tuple, if version is an invalid string
returns None"""
# version = <str>
# vs = <str>
# return <NoneType>|(*<int>)
version = str(version).split('.')
if len(version) != 3:
return None
if all(vs.isdigit() for vs in version):
return tuple(int(vs) for vs in version)
return None
def parse_hashes(hashes):
"""Parse hashes' string in hashes' dict, if it's invalid returns None"""
# hashes = <str>
# _hashes = {<NoneType>: ?}
# hname = <str>
# hs_field = <str>
# hval = <str>
# return {<str>: <str>}|<NoneType>
_hashes = {}
if hashes:
for hs_field in hashes.split(' '):
hname, _, hval = hs_field.strip().partition(':')
hs_field = _HASHES.get(hname, None)
if hs_field is None:
return None
if not is_hex(hval):
return None
if hs_field in _hashes:
return None
_hashes[hs_field] = hval
return _hashes
def _hash(path, hs_obj):
"""Calculate the hash of path using hs_obj (a Hash Object)"""
# path = <str>
# hs_obj = <HASH object>
# chunk = <str>
# fmod = <file>
# return <NoneType>
with fd.File.open(path, fd.FO_READEX) as fmod:
chunk = fmod.read(4096)
while chunk:
hs_obj.update(chunk)
chunk = fmod.read(4096)
def get_hash(path, hs_maker):
"""Calculates the hash of module in path"""
# path = <str>
# hs_maker = <function>
# dirpath = <str>
# filenames = [<str>]
# files = [<str>]
# fname = <str>
# fpath = <str>
# hs_mod = <HASH object>
# return <str>
hs_mod = hs_maker()
if os.path.isfile(path):
files = [path]
elif os.path.isdir(path):
files = sorted([os.path.join(dirpath, fname)
for dirpath, _, filenames in os.walk(path)
for fname in filenames
if os.path.isfile(os.path.join(dirpath, fname))])
else:
# raise <instance ImportError>
raise ImportError("invalid file type %r" % path)
for fpath in files:
_hash(fpath, hs_mod)
return hs_mod.hexdigest()
_CACHE = {}
_TAB = {}
class _LazyModule(Object, ModuleType):
"""_LazyModule is a module that instances their attribute in lazy mode"""
# instance.module = <NoneType>|<module>
# instance.name = <str>
def __init__(self, name, version):
# self = <instance load._LazyModule>
# name = <str>
# version = (*<int>)
# return <NoneType>
self.name = str(name)
self.version = version
self.module = None
def __getattr__(self, name):
# self = <instance load._LazyModule>
# name = <str>
# return <module>
# raise NameError: undefined: getattr
return getattr(self.module or importlib(self.name, self.version), name)
def __setattr__(self, name, value):
# self = <instance load._LazyModule>
# name = <str>
# value = ?
# return <NoneType>
setattr(self.module or importlib(self.name, self.version), name, value)
def __delattr__(self, name):
# self = <instance load._LazyModule>
# name = <str>
# return <NoneType>
delattr(self.module or importlib(self.name, self.version), name)
@log.wrap(log.actions.LOAD_TAB, fields=('path',), lib=__name__)
def load_tab(path):
"""Updates internal tab of modules"""
# path = <str>
# _tab = {<str>: <dict>}
# fields = <str>
# ftab = <instance pysec.io.fd.File>
# hashes = {<str>: <str>}|<NoneType>
# line = <str>
# lineno = <int>
# mod_vers = {<str>: <dict>}
# name = <str>
# version = <NoneType>|(*<int>)
# return <NoneType>
path = os.path.abspath(str(path))
_tab = {}
with fd.File.open(path, fd.FO_READEX) as ftab:
for lineno, line in enumerate(ftab.lines()):
fields = line.strip().split(';')
# name, version, path, hashes
if len(fields) != 4:
# raise <instance ImportError>
raise ImportError(lang.LOAD_WRONG_FIELDS % lineno)
name, version, path, hashes = fields
# name
if not check_libname(name):
# raise <instance ImportError>
raise ImportError(lang.LOAD_WRONG_LIB_NAME % lineno)
# version
version = parse_version(version)
if version is None:
# raise <instance ImportError>
raise ImportError(lang.LOAD_WRONG_VERSION_FORMAT % lineno)
# path
path = os.path.abspath(base64.b64decode(path))
# hashes
hashes = parse_hashes(hashes)
if hashes is None:
# raise <instance ImportError>
raise ImportError(lang.LOAD_WRONG_HASH_FORMAT % lineno)
# update tab
mod_vers = _tab.setdefault(name, {})
if version in mod_vers:
# raise <instance ImportError>
raise ImportError(lang.LOAD_DUP_LIB
% (name, version[0], version[1], version[2]))
mod_vers[version] = {'path': path, 'hash': hashes}
_TAB.update(_tab)
@log.wrap(log.actions.IMPORT_LIB,
fields=('name', 'version', 'lazy', '_reload'),
result='module', lib=__name__)
def importlib(name, version=None, lazy=0, _reload=0):
"""Load a library and return it.
name library's name
version if it's None it load lastest library, otherwise load the
version specified
lazy if false it returns normal module, otherwise it returns a
module placeholder and it will be loaded the first time that
it will be used
_reload if false search library in cache and returns it if exists
otherwise it load it. If _reload is true load library anse save
it in cache
"""
# name = <str>
# version = <NoneType>
# lazy = <int>
# _reload = <int>
# desc = <tuple>
# fdir = <str>
# fname = <str>
# fobj = <file>
# hs_maker = <function>
# hval = <str>
# mod = <NoneType>
# mod_info = {<function>: <str>}
# path = <str>
# vers = <NoneType>
# return <instance load._LazyModule>
name = str(name)
vers = _TAB.get(name, None)
if vers is None:
# raise <instance ImportError>
raise ImportError(lang.LOAD_LIB_NOT_FOUND % name)
if version is None:
version = max(vers.iterkeys())
elif version not in vers:
# raise <instance ImportError>
raise ImportError(lang.LOAD_LIB_VER_NOT_FOUND % (name, version))
if not _reload and (name, version) in _CACHE:
return _CACHE[(name, version)]
mod_info = vers.get(version)
try:
imp.acquire_lock()
path = mod_info['path']
if lazy:
return _LazyModule(name, version)
else:
fdir, fname = os.path.split(path)
for hs_maker, hval in mod_info['hash'].iteritems():
if get_hash(path, hs_maker) != hval:
# raise <instance ImportError>
raise ImportError(lang.LOAD_INVALID_HASH
% (name, version, path, hval))
# raise <instance ImportError>
fobj, path, desc = imp.find_module(os.path.splitext(fname)[0],
[fdir])
# raise <instance ImportError>
mod = imp.load_module(name, fobj, path, desc)
_CACHE[(name, version)] = mod
return mod
finally:
imp.release_lock()
def make_line(path, name, version):
"""Makes a complete string for loader's file"""
# path = <str>
# name = <str>
# version = (*<int>)
# hashes = [<str>]
# hs_func = <function>
# hs_name = <str>
# path64 = <str>
# vs = <int>
# return <str>
path = os.path.abspath(path)
path64 = base64.b64encode(path)
name = str(name)
version = tuple(version)
hashes = []
for hs_name, hs_func in _HASHES.iteritems():
hashes.append('%s:%s' % (hs_name, get_hash(path, hs_func)))
return '%s;%s;%s;%s' % (str(name), '.'.join(str(vs) for vs in version),
path64, ' '.join(hashes))
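# Usage sketch (added; the file names and module name are illustrative
# assumptions): make_line() produces one loader-tab record, load_tab() ingests
# the tab file, and importlib() then resolves a library only while its hashes
# still match.
#     line = make_line('/opt/libs/mylib.py', 'mylib', (1, 0, 0))
#     with open('/opt/libs/loader.tab', 'w') as ftab:
#         ftab.write(line + '\n')
#     load_tab('/opt/libs/loader.tab')
#     mylib = importlib('mylib', version=(1, 0, 0))  # or lazy=1 for a proxy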
|
apache-2.0
| 2,443,859,515,778,118,700 | 30.696275 | 79 | 0.568975 | false | 3.666556 | false | false | false |
kedz/cuttsum
|
old/python/cuttsum/readers.py
|
1
|
2494
|
import codecs
import numpy as np
from sklearn.feature_extraction import DictVectorizer
def gold_reader(bow_file, l_file, sim_idx, vector=u'latent'):
op = codecs.open
sims = []
vectors = []
labels = []
unicodes = []
last_hour = None
with op(bow_file, u'r', u'utf-8') as bf, op(l_file, u'r', u'utf-8') as lf:
header = lf.readline().strip()
b_line = bf.readline()
l_line = lf.readline()
while b_line and l_line:
b_datum = b_line.strip().split(u'\t')
b_hour, b_stream_id, b_sent_id, b_unicode = b_datum[0:4]
bow = {x:1 for x in b_datum[4].split(u' ')}
l_datum = l_line.strip().split(u'\t')
l_hour, l_stream_id, l_sent_id = l_datum[0:3]
sim = float(l_datum[sim_idx])
lvec = [float(x) for x in l_datum[6:]]
b_label = (b_hour, b_stream_id, b_sent_id)
l_label = (l_hour, l_stream_id, l_sent_id)
assert b_label == l_label
if b_hour != last_hour:
if last_hour is not None:
n_points = len(sims)
sims = np.array(sims)
if vector == u'latent':
vectors = np.array(vectors)
elif vector == u'bow':
vctr = DictVectorizer()
vectors = vctr.fit_transform(vectors)
unicodes = np.array(unicodes, dtype=(unicode, 1000))
yield (last_hour, labels, unicodes, sims, vectors)
sims = []
vectors = []
labels = []
unicodes = []
last_hour = b_hour
sims.append(sim)
if vector == u'latent':
vectors.append(lvec)
elif vector == u'bow':
vectors.append(bow)
labels.append(b_label)
unicodes.append(b_unicode)
b_line = bf.readline()
l_line = lf.readline()
if len(vectors) > 0:
n_points = len(sims)
sims = np.array(sims)
if vector == u'latent':
vectors = np.array(vectors)
elif vector == u'bow':
vctr = DictVectorizer()
vectors = vctr.fit_transform(vectors)
unicodes = np.array(unicodes, dtype=(unicode, 1000))
yield (last_hour, labels, unicodes, sims, vectors)
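# Usage sketch (added; paths and the similarity column index are illustrative
# assumptions): gold_reader is a generator that yields one batch per hour, so
# callers typically iterate it directly.
#     for hour, labels, texts, sims, vectors in gold_reader(
#             'gold.bow.tsv', 'gold.latent.tsv', sim_idx=4, vector=u'latent'):
#         print hour, len(labels), vectors.shape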
|
apache-2.0
| -8,457,494,943,271,030,000 | 32.702703 | 78 | 0.468324 | false | 3.667647 | false | false | false |
Karajlug/karajlug
|
dbot/views.py
|
1
|
2174
|
# -----------------------------------------------------------------------------
# Karajlug.org
# Copyright (C) 2010 Karajlug community
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# -----------------------------------------------------------------------------
import hashlib
import socket
from django.http import Http404, HttpResponse, HttpResponseForbidden
from django.contrib.auth.models import User
from django.views.decorators.csrf import csrf_exempt
from forms import WebServiceForm
@csrf_exempt
def webservice(request):
"""
Simple HTTP POST service.
"""
if not request.method == "POST":
raise Http404()
form = WebServiceForm(request.POST)
if form.is_valid():
try:
user = User.objects.get(username=form.cleaned_data["user"])
except User.DoesNotExist:
raise Http404()
m = hashlib.sha1()
m.update("%s%s" % (form.cleaned_data["msg"],
user.password))
hash_ = m.hexdigest()
if not hash_ == form.cleaned_data["hash"]:
return HttpResponseForbidden()
sock = socket.socket(socket.AF_UNIX,
socket.SOCK_STREAM)
try:
sock.connect("/tmp/socket")
sock.send(form.cleaned_data["msg"])
sock.recv(1024)
except socket.error:
pass
sock.close()
return HttpResponse("0")
else:
return HttpResponse(form.errors)
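# Client-side sketch (added; the URL, username and stored_password_hash are
# placeholders): the caller signs the message with sha1(msg + the user's
# stored password hash) and posts user, msg and hash as form fields.
#     import hashlib, urllib, urllib2
#     msg = "deploy finished"
#     signer = hashlib.sha1()
#     signer.update("%s%s" % (msg, stored_password_hash))
#     data = urllib.urlencode({"user": "karajlug", "msg": msg,
#                              "hash": signer.hexdigest()})
#     urllib2.urlopen("http://example.org/dbot/webservice/", data)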
|
gpl-2.0
| 284,063,036,326,837,600 | 33.507937 | 79 | 0.596136 | false | 4.464066 | false | false | false |
tos-kamiya/agec2
|
src/gen_ngram.py
|
1
|
25223
|
#!/usr/bin/env python
#coding: utf-8
__author__ = 'Toshihiro Kamiya <[email protected]>'
__status__ = 'experimental'
import collections
import os
import sys
import datetime
from _utilities import sort_uniq
import asm_manip as am
import ope_manip as om
import precomp_manip as pm
UNTRACKED_CLAZS = frozenset([
"java/lang/StringBuilder",
"java/util/Iterator"
])
UNDIGGED_METHODS = frozenset([
'getClass:()Ljava/lang/Class;',
'equals:(Ljava/lang/Object;)Z',
'hashCode:()I',
'compareTo:(Ljava/lang/Object;)I',
'toString:()Ljava/lang/String;',
'get:(Ljava/lang/Object;)Ljava/lang/Object;',
'put:(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;',
'getProperty:(Ljava/lang/Object;)Ljava/lang/Object;',
])
def to_ngram_tuples_iter(found_ngrams):
for head, tails in sorted(found_ngrams.iteritems(), key=lambda h_ts: h_ts[0]):
ngram_tuples = []
for tail in tails:
ngram = (head,) + tail
buf = []
for c_m, frame in ngram:
claz, method = frame.claz_method
c, m = c_m
s = m if c is None else "%s.%s" % c_m
buf.append((s, "%s.%s,%d" % (claz, method, frame.index), frame.depth))
ngram_tuple = tuple(buf)
ngram_tuples.append(ngram_tuple)
yield sort_uniq(ngram_tuples)
# Drop the intermediate path data from each n-gram and merge the n-grams.
# Printed n-grams do not carry such intermediate path data, so two n-grams
# that start at the same position and depth but follow distinct paths cannot
# be distinguished in the output and would just look like duplicates there.
def make_method2claz2code(sig2code):
method2claz2code = {}
for (claz, sig), code in sorted(sig2code.iteritems()):
method2claz2code.setdefault(sig, {})[claz] = code
return method2claz2code
def make_claz2methods(method2claz2code):
claz2methods = {}
for m, c2c in method2claz2code.iteritems():
for c in c2c.iterkeys():
claz2methods.setdefault(c, []).append(m)
return claz2methods
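# Shape note (added): with a toy signature table the two helpers above invert
# the mapping, e.g.
#     sig2code = {('pkg/A', 'run:()V'): code_a, ('pkg/B', 'run:()V'): code_b}
#     make_method2claz2code(sig2code)
#         -> {'run:()V': {'pkg/A': code_a, 'pkg/B': code_b}}
#     make_claz2methods(make_method2claz2code(sig2code))
#         -> {'pkg/A': ['run:()V'], 'pkg/B': ['run:()V']}
# where code_a / code_b stand for the opcode lists built elsewhere in this
# module.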
class StackFrame(object):
def __init__(self, claz_method, index, prev_frame):
self.claz_method = claz_method
self.index = index
self.prev_frame = prev_frame
self.depth = 0 if prev_frame is None else prev_frame.depth + 1
def __eq__(self, other):
return other is not None and \
self.claz_method == other.claz_method and self.index == other.index and \
self.depth == other.depth and self.prev_frame == other.prev_frame
def __lt__(self, other):
if other is None:
return False
if self.claz_method < other.claz_method:
return True
elif self.claz_method == other.claz_method:
if self.index < other.index:
return True
elif self.index == other.index:
if self.depth < other.depth:
return True
elif self.depth == other.depth:
return self.prev_frame < other.prev_frame
def __hash__(self):
return hash(self.claz_method) + hash(self.index) + hash(self.depth) # prev_frame is not used in hash computation
def copy(self, index=None):
return StackFrame(self.claz_method, index if index is not None else self.index, self.prev_frame)
def __repr__(self):
return "StackFrame(%s,%s,*,depth=%d)" % (repr(self.claz_method), repr(self.index), self.depth) # prev_frame is not printed
class CodeNgramGenerator:
def __init__(self, method2claz2precomp):
self.method2claz2precomp = method2claz2precomp
self.ngram_size = 6
self.max_call_depth = -1
self.allow_repetitive_ngram = False
self.no_branch_ngram = False
self.no_returning_execution_path = False
self.use_undigg_method_list = False
self.count_branch_in_surface_level = False
self.clear_temp()
def clear_temp(self):
self._claz_method0 = None
self._max_call_depth = None
self._stack_already_printed_on_raise = False
def _remove_repetition(self, cur_gram):
if self.allow_repetitive_ngram:
return 0
for replen in range(1, min(len(cur_gram), self.ngram_size) // 2 + 1):
for j in range(-1, -1 - replen, -1):
c, r = cur_gram[j], cur_gram[j - replen]
if c[0] != r[0]:
break # for j
else:
del cur_gram[-replen:]
return replen
return 0
def _setup_temp(self, claz, method):
self._claz_method0 = claz, method
self._max_call_depth = self.max_call_depth if self.max_call_depth >= 0 else \
self.ngram_size * (-self.max_call_depth)
self._stack_already_printed_on_raise = False
self._found_grams = {} # head item -> set of tuples of tail items
# here, head is the first item of the n-gram and tail is the remaining items
def gen_ngrams(self, claz, method):
self._setup_temp(claz, method)
self._dig_method(self._max_call_depth, [], (claz, method), None, None)
self.clear_temp()
return self._found_grams
def _dig_method(self, dig_count, cur_gram, claz_method, prev_frame, prev_footmarks_frame,
start_cell=None, is_return_dig=False):
if not is_return_dig:
p = self.method2claz2precomp[claz_method[1]][claz_method[0]]
if start_cell is None:
start_cell = p.start_cell
cur_frame = StackFrame(claz_method, start_cell[0], prev_frame)
cur_footmarks_frame = [], prev_footmarks_frame
else:
assert claz_method is None
assert start_cell is None
cur_frame = prev_frame
claz_method = cur_frame.claz_method
p = self.method2claz2precomp[claz_method[1]][claz_method[0]]
start_cell = p.cells[cur_frame.index][1]
cur_footmarks_frame = prev_footmarks_frame[0][:], prev_footmarks_frame[1]
cur_block_entrance_cells = p.bent_cells
depth = cur_frame.depth
try:
branches = []
def dig_branch(dig_count, cur_gram, cur_cell, cur_footmarks_frame):
footmarks = cur_footmarks_frame[0]
while True:
index, next_cell, precomp_cmd, precomp_arg = cur_cell
if index in cur_block_entrance_cells:
if index in footmarks:
break # while True
footmarks.append(index)
# at levels deeper than the surface, branches are counted
# in order to avoid interpreting overly complex control dependencies
if self.count_branch_in_surface_level or depth > 0:
if dig_count <= 0:
break # while True
dig_count -= 1
if precomp_cmd == pm.INVOKE:
stk = cur_frame.copy(index)
c_m = c, m = precomp_arg
if cur_gram and dig_count > 0 and self._is_method_digg_target(c, m, cur_gram):
c2p = self.method2claz2precomp.get(m)
if c2p:
cs = sorted(c2p.iterkeys()) if c is None else \
[c] if c in c2p else \
[]
for c2 in cs:
c2_m = (c2, m)
if not CodeNgramGenerator.is_recursion(c2_m, cur_frame):
self._dig_method(dig_count - 1, cur_gram[-self.ngram_size:], c2_m, stk, cur_footmarks_frame)
cur_gram.append((c_m, stk))
if self._remove_repetition(cur_gram) == 0 and len(cur_gram) >= self.ngram_size:
if self._is_escaping(cur_gram[-self.ngram_size][1]):
break # while True
cand_gram = tuple(cur_gram[-self.ngram_size:])
if not self._store_if_new_ngram(cand_gram):
break # while True
elif precomp_cmd == pm.RETURN:
if self.no_returning_execution_path:
break # while True
if cur_frame.prev_frame is not None:
self._dig_method(dig_count, cur_gram, None,
cur_frame.prev_frame, cur_footmarks_frame[1], is_return_dig=True)
break # while True
elif precomp_cmd == pm.GOTO:
if not self.no_branch_ngram:
next_cell = precomp_arg
elif precomp_cmd == pm.BRANCHS:
if not self.no_branch_ngram:
branches.extend((dig_count, cur_gram[-self.ngram_size:], dc, (footmarks[:], prev_footmarks_frame)) \
for dc in precomp_arg)
elif precomp_cmd == pm.THROW:
break # while True
else:
assert False
cur_cell = next_cell
dig_branch(dig_count, cur_gram, start_cell, cur_footmarks_frame)
while branches:
b = branches.pop()
dig_branch(*b)
except:
self._print_stack(cur_frame)
raise
@staticmethod
def is_recursion(claz_method, frame):
method = claz_method[1]
while frame:
if method == frame.claz_method[1]:
return True
frame = frame.prev_frame
return False
def _store_if_new_ngram(self, cand_gram):
assert len(cand_gram) >= 1
tails = self._found_grams.setdefault(cand_gram[0], set())
tail = tuple(cand_gram[1:])
if tail in tails:
return False
tails.add(tail)
return True
def _is_method_digg_target(self, c, method, cur_gram):
assert method
if self.use_undigg_method_list and method in UNDIGGED_METHODS:
return False
if c is None and method.endswith(":()V"):
return False
for i in xrange(0, min(len(cur_gram), self.ngram_size - 1)):
if cur_gram[-i-1][0][1] == method:
return False
return True
def _is_escaping(self, head_frame):
return head_frame.depth != 0 # escaped from the original method?
# the head item of an n-gram always comes from the original method.
# if not (that is, the head item comes from some method called by the original method),
# such a code fragment is not part of the original method, but part of the called method.
def _print_stack(self, frame):
if self._stack_already_printed_on_raise:
return
buf = []
while frame:
buf.append((frame.claz_method[0], frame.claz_method[1], frame.index))
frame = frame.prev_frame
sys.stderr.write("debug info> cur_call_stack = [%s]\n" % ", ".join("%s.%s:%d" % f for f in buf))
self._stack_already_printed_on_raise = True
class CodeNgramGeneratorWStartIndices(CodeNgramGenerator):
def clear_temp(self):
CodeNgramGenerator.clear_temp(self)
self._start_index = None
def gen_ngrams(self, claz, method, start_indices):
self._setup_temp(claz, method)
for start_index in start_indices:
self._start_index = start_index
claz2precomp = self.method2claz2precomp[method]
precomp_cells = claz2precomp[claz].cells
head_cell = precomp_cells[start_index]
self._dig_method(self._max_call_depth, [], (claz, method), None, None, head_cell)
self.clear_temp()
return self._found_grams
def _is_escaping(self, head_frame):
if self._start_index is not None:
if head_frame.claz_method == self._claz_method0 and head_frame.index != self._start_index:
return True
return CodeNgramGenerator._is_escaping(self, head_frame)
def gen_code_ngrams(claz, method, method2claz2precomp, ngram_size, start_indices=None,
max_call_depth=-1, allow_repetitive_ngram=False, no_branch_ngram=False,
no_returning_execution_path=False, use_undigg_method_list=False,
count_branch_in_surface_level=False):
if start_indices:
cng = CodeNgramGeneratorWStartIndices(method2claz2precomp)
else:
cng = CodeNgramGenerator(method2claz2precomp)
cng.ngram_size = ngram_size
cng.max_call_depth = max_call_depth
cng.allow_repetitive_ngram = allow_repetitive_ngram
cng.no_branch_ngram = no_branch_ngram
cng.no_returning_execution_path = no_returning_execution_path
cng.use_undigg_method_list = use_undigg_method_list
cng.count_branch_in_surface_level = count_branch_in_surface_level
if start_indices:
return cng.gen_ngrams(claz, method, start_indices)
else:
return cng.gen_ngrams(claz, method)
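# Call sketch (added; the class and method names are placeholders): main()
# below drives this once per target method, roughly as
#     found = gen_code_ngrams('org/example/Foo', 'run:()V', method2claz2precomp,
#                             ngram_size=6, max_call_depth=-2)
#     for ngrams in to_ngram_tuples_iter(found):
#         ...
# where method2claz2precomp maps method signature -> class -> code
# precompiled with pm.precompile_code.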
def identify_claz(method2claz2code, class_patterns):
exclude_clazs = frozenset([e for e in class_patterns if not e.endswith('/*')])
exclude_packages = frozenset([e[:-1] for e in class_patterns if e.endswith('/*')])
clazs = set()
for method, claz2code in method2claz2code.iteritems():
clazs.update(claz2code.iterkeys())
clazs_tobe_excluded = set()
for claz in sorted(clazs):
if claz in exclude_clazs:
clazs_tobe_excluded.add(claz)
else:
p = claz.rfind('/')
if p >= 0:
package = claz[:p + 1] # include trailing '/'
if package in exclude_packages:
clazs_tobe_excluded.add(claz)
return clazs_tobe_excluded
def identify_target_claz_method(method2claz2code, entry_class_patterns):
if entry_class_patterns:
claz_set = frozenset(identify_claz(method2claz2code, entry_class_patterns))
claz_method_list = sorted((claz, method) for method, claz2pre in method2claz2code.iteritems() \
for claz in claz2pre.iterkeys() if claz in claz_set)
else:
claz_method_list = sorted((claz, method) for method, claz2pre in method2claz2code.iteritems() \
for claz in claz2pre.iterkeys())
claz_method_count = collections.Counter()
for claz, method in claz_method_list:
claz_method_count[claz] += 1
return claz_method_list, claz_method_count
def exclude_clazs(method2claz2code, excludeded_class_patterns):
removed_clazs = identify_claz(method2claz2code, excludeded_class_patterns)
for method, claz2code in method2claz2code.items():
for c in removed_clazs.intersection(claz2code.iterkeys()):
del claz2code[c]
if len(claz2code) == 0:
del method2claz2code[method]
return removed_clazs
def exclude_ctors(method2claz2code):
ctors = [m for m in method2claz2code.iterkeys() \
if m.startswith('"<init>"') or m.startswith("access$")]
for m in ctors:
del method2claz2code[m]
return ctors
def remove_too_many_definition_methods(method2claz2code, max_method_definition):
assert max_method_definition > 0
too_many_definition_methods = [method \
for method, claz2code in method2claz2code.iteritems() \
if len(claz2code) > max_method_definition]
for m in too_many_definition_methods:
del method2claz2code[m]
return too_many_definition_methods
def do_filtering_clazs(write, method2claz2code, excluded_class_patterns):
if excluded_class_patterns:
removed_clazs = exclude_clazs(method2claz2code, excluded_class_patterns)
write("removed classes by --exclude option(s): %d\n" % \
len(removed_clazs))
def do_filtering_methods(write, method2claz2code, include_ctors, max_method_definition):
if not include_ctors:
ctors = exclude_ctors(method2claz2code)
write("removed ctors: %d\n" % len(ctors))
if max_method_definition > 0:
too_many_definition_methods = remove_too_many_definition_methods(
method2claz2code, max_method_definition)
write("removed methods by option --max-definition=%d: %d\n" % \
(max_method_definition, len(too_many_definition_methods)))
def gen_argpsr():
from argparse import ArgumentParser
from _version_data import VERSION
psr = ArgumentParser(description='Generate n-grams of method calls')
psr.add_argument('-a', '--asm-directory', action='store', required=True)
psr.add_argument('-n', '--ngram-size', action='store', type=int, default=6)
psr.add_argument('-v', '--verbose', action='store_true')
psr.add_argument('--max-call-depth', action='store', type=int, default=-2,
help='max depth in expanding method calls. a negative number means a scale factor of the n-gram size. (default is -2, that is, 2 * n-gram size.)')
psr.add_argument('--max-method-definition', action='store', type=int, default=-1,
help='max method definitions for a signature. -1 means unlimited')
psr.add_argument('--allow-repetitive-ngram', action='store_true')
psr.add_argument('--no-branch-ngram', action='store_true')
psr.add_argument('-e', '--exclude', action='append',
help="specify class in fully-qualified name, e.g. org/myapp/MyClass$AInnerClass. a wildcard '*' can be used as class name, e.g. org/myapp/*")
psr.add_argument('--entry', action='append',
help="class to be a entry point of abstract interpretation. specify class in fully-qualified name. wildcard can be used.")
psr.add_argument('--include-ctors', action='store_true',
help='include "<init>" and access$... methods as targets')
grp = psr.add_mutually_exclusive_group(required=False)
grp.add_argument('--mode-diagnostic', action='store_true',
help='show bytecode info and the filtering results')
grp.add_argument('--mode-method-signature', action='store_true',
help='show method signatures')
grp.add_argument('--mode-method-body', action='store_true',
help='show method bodies (byte code)')
psr.add_argument('--debug-wo-leaf-class-dispatch-optimization', action='store_true')
psr.add_argument('--debug-no-returning-execution-path', action='store_true')
psr.add_argument('--debug-count-branch-in-surface-level', action='store_true')
psr.add_argument('--version', action='version', version='%(prog)s ' + VERSION)
return psr
def is_untracked_method_call(c, m):
return c in UNTRACKED_CLAZS or m.find("access$") >= 0
def main(argv):
psr = gen_argpsr()
args = psr.parse_args(argv[1:])
max_method_definition = max(-1, args.max_method_definition)
excluded_class_patterns = frozenset(args.exclude if args.exclude else [])
entry_class_patterns = frozenset(args.entry if args.entry else [])
verbose = args.verbose
debug_wo_leaf_class_dispatch_optimization = args.debug_wo_leaf_class_dispatch_optimization
debug_no_returning_execution_path = args.debug_no_returning_execution_path
if verbose:
def verbose_write(mes): sys.stderr.write("> %s" % mes)
else:
def verbose_write(mes): pass
if not os.path.isdir(args.asm_directory):
sys.exit("error: fail to access asm_directory: %s" % args.asm_directory)
sig2code = {}
#sig2exceptiontable = {}
#sig2linenumbertable = {}
claz2deriving = {} # claz -> list of the clazs that inherit it
for typ, values in am.get_asm_info_iter(args.asm_directory):
if typ == am.METHOD_CODE:
claz_sig, code, etbl, ltbl = values
sig2code[claz_sig] = tuple(code)
#sig2exceptiontable[sig] = etbl
#sig2linenumbertable[sig] = ltbl
elif typ == am.INHERITANCE:
claz, imps, exts = values
for e in exts:
claz2deriving.setdefault(e, []).append(claz)
if args.mode_method_signature:
for claz_sig in sorted(sig2code.iterkeys()):
sys.stdout.write('%s.%s\n' % claz_sig.encode('utf-8'))
elif args.mode_method_body:
for claz_sig, method_body in sorted(sig2code.iteritems()):
ol = om.body_text_to_ope_list(method_body, claz_sig)
try:
om.verify_branch_ope(ol)
except om.InvalidOpe as e:
raise om.InvalidOpe("%s.%s: %s" % (claz_sig[0], claz_sig[1], str(e)))
sys.stdout.write('%s.%s\n' % claz_sig)
for L in om.format_ope_list(ol): #, fields=om.FORMAT_FIELD.OPE):
sys.stdout.write('%s\n' % L)
else:
sig2oplist = {}
for claz_sig, method_body in sorted(sig2code.iteritems()):
ol = om.body_text_to_ope_list(method_body, claz_sig)
sig2oplist[claz_sig] = ol
del sig2code
method2claz2code = make_method2claz2code(sig2oplist)
del sig2oplist
claz2methods = make_claz2methods(method2claz2code)
do_filtering_clazs(verbose_write, method2claz2code, excluded_class_patterns)
do_filtering_methods(verbose_write, method2claz2code, args.include_ctors, max_method_definition)
claz_method_list, claz_method_count = identify_target_claz_method(method2claz2code, entry_class_patterns)
if args.mode_diagnostic:
sys.stdout.write("classes: %d\n" % len(claz_method_count))
sys.stdout.write("method bodies: %d\n" % sum(claz_method_count.itervalues()))
m2ccount = collections.Counter()
for m, c2c in method2claz2code.iteritems():
m2ccount[m] += len(c2c)
mccounts = sorted(((m, c) for m, c in m2ccount.iteritems()), key=lambda m_c: m_c[1], reverse=True)
sys.stdout.write("method having many definitions:\n")
for m, c in mccounts:
if c < 50: break # for m, c
sys.stdout.write(" %4d %s\n" % (c, m))
return
if debug_wo_leaf_class_dispatch_optimization:
claz2methods = claz2deriving = None
method2claz2precomp = {}
claz_method_tables = pm.ClazMethodTables(claz2methods, claz2deriving, is_untracked_method_call)
for method, c2c in method2claz2code.iteritems():
for claz, ope_list in c2c.iteritems():
# if claz == "org/gjt/sp/jedit/bufferio/BufferSaveRequest" and method == "run:()V":
# assert True
precomp = pm.precompile_code(claz, ope_list,
claz_method_tables=claz_method_tables,
remove_repetition=not args.allow_repetitive_ngram)
method2claz2precomp.setdefault(method, {})[claz] = precomp
del claz_method_tables
del method2claz2code
sys.stdout.write("# --ngram-size=%d\n" % args.ngram_size)
sys.stdout.write("# --max-call-depth=%d\n" % args.max_call_depth)
sys.stdout.write("# --max-method-definition=%d\n" % max_method_definition)
if args.allow_repetitive_ngram:
sys.stdout.write("# --allow-repetitive-ngram\n")
if args.no_branch_ngram:
sys.stdout.write("# --no-branch-ngram\n")
if args.include_ctors:
sys.stdout.write("# --include-ctors\n")
for e in excluded_class_patterns:
sys.stdout.write("# --exclude=%s\n" % e)
for e in entry_class_patterns:
sys.stdout.write("# --entry=%s\n" % e)
sys.stdout.write('\n')
prev_claz = None
for i, (claz, method) in enumerate(claz_method_list):
code = method2claz2precomp[method][claz]
if verbose and claz != prev_claz:
t = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
s = '%s (%d-%d of %d) %s\n' % (t, i+1, i+1 + claz_method_count[claz] - 1, len(claz_method_list), claz)
verbose_write(s.encode('utf-8'))
prev_claz = claz
found_ngrams = gen_code_ngrams(claz, method, method2claz2precomp, args.ngram_size,
max_call_depth=args.max_call_depth, allow_repetitive_ngram=args.allow_repetitive_ngram,
no_branch_ngram=args.no_branch_ngram, no_returning_execution_path=debug_no_returning_execution_path,
use_undigg_method_list=debug_wo_leaf_class_dispatch_optimization,
count_branch_in_surface_level=args.debug_count_branch_in_surface_level)
for ngrams in to_ngram_tuples_iter(found_ngrams):
for ngram in ngrams:
sys.stdout.write(''.join("%s\t%s\t%d\n" % op_loc_dep for op_loc_dep in ngram))
sys.stdout.write('\n')
if __name__ == '__main__':
main(sys.argv)
|
mit
| 5,821,878,241,560,408,000 | 43.960784 | 153 | 0.587638 | false | 3.543552 | false | false | false |
mattclark/osf.io
|
addons/github/models.py
|
1
|
16200
|
# -*- coding: utf-8 -*-
import os
import urlparse
import markupsafe
from addons.base.models import (BaseOAuthNodeSettings, BaseOAuthUserSettings,
BaseStorageAddon)
from django.db import models
from framework.auth import Auth
from github3 import GitHubError
from osf.models.external import ExternalProvider
from osf.models.files import File, Folder, BaseFileNode
from osf.utils.datetime_aware_jsonfield import DateTimeAwareJSONField
from website import settings
from addons.base import exceptions
from addons.github import settings as github_settings
from addons.github import utils
from addons.github.api import GitHubClient
from addons.github.exceptions import ApiError, NotFoundError
from addons.github.serializer import GitHubSerializer
from website.util import web_url_for
hook_domain = github_settings.HOOK_DOMAIN or settings.DOMAIN
class GithubFileNode(BaseFileNode):
_provider = 'github'
class GithubFolder(GithubFileNode, Folder):
pass
class GithubFile(GithubFileNode, File):
version_identifier = 'ref'
@property
def _hashes(self):
try:
return {'fileSha': self.history[-1]['extra']['hashes']['git']}
except (IndexError, KeyError):
return None
def touch(self, auth_header, revision=None, ref=None, branch=None, **kwargs):
revision = revision or ref or branch
return super(GithubFile, self).touch(auth_header, revision=revision, **kwargs)
class GitHubProvider(ExternalProvider):
name = 'GitHub'
short_name = 'github'
client_id = github_settings.CLIENT_ID
client_secret = github_settings.CLIENT_SECRET
auth_url_base = github_settings.OAUTH_AUTHORIZE_URL
callback_url = github_settings.OAUTH_ACCESS_TOKEN_URL
default_scopes = github_settings.SCOPE
def handle_callback(self, response):
"""View called when the OAuth flow is completed. Adds a new GitHubUserSettings
record to the user and saves the account info.
"""
client = GitHubClient(
access_token=response['access_token']
)
user_info = client.user()
return {
'provider_id': str(user_info.id),
'profile_url': user_info.html_url,
'display_name': user_info.login
}
class UserSettings(BaseOAuthUserSettings):
"""Stores user-specific github information
"""
oauth_provider = GitHubProvider
serializer = GitHubSerializer
def revoke_remote_oauth_access(self, external_account):
"""Overrides default behavior during external_account deactivation.
Tells GitHub to remove the grant for the OSF associated with this account.
"""
connection = GitHubClient(external_account=external_account)
try:
connection.revoke_token()
except GitHubError:
pass
# Required for importing username from social profile configuration page
# Assumes oldest connected account is primary.
@property
def public_id(self):
gh_accounts = self.owner.external_accounts.filter(provider=self.oauth_provider.short_name)
if gh_accounts:
return gh_accounts[0].display_name
return None
class NodeSettings(BaseOAuthNodeSettings, BaseStorageAddon):
oauth_provider = GitHubProvider
serializer = GitHubSerializer
user = models.TextField(blank=True, null=True)
repo = models.TextField(blank=True, null=True)
hook_id = models.TextField(blank=True, null=True)
hook_secret = models.TextField(blank=True, null=True)
registration_data = DateTimeAwareJSONField(default=dict, blank=True, null=True)
user_settings = models.ForeignKey(UserSettings, null=True, blank=True, on_delete=models.CASCADE)
@property
def folder_id(self):
return self.repo or None
@property
def folder_name(self):
if self.complete:
return '{}/{}'.format(self.user, self.repo)
return None
@property
def folder_path(self):
return self.repo or None
@property
def complete(self):
return self.has_auth and self.repo is not None and self.user is not None
def authorize(self, user_settings, save=False):
self.user_settings = user_settings
self.owner.add_log(
action='github_node_authorized',
params={
'project': self.owner.parent_id,
'node': self.owner._id,
},
auth=Auth(user_settings.owner),
)
if save:
self.save()
def clear_settings(self):
self.user = None
self.repo = None
self.hook_id = None
self.hook_secret = None
self.registration_data = None
def deauthorize(self, auth=None, log=True):
self.delete_hook(save=False)
self.clear_settings()
if log:
self.owner.add_log(
action='github_node_deauthorized',
params={
'project': self.owner.parent_id,
'node': self.owner._id,
},
auth=auth,
)
self.clear_auth()
def delete(self, save=False):
super(NodeSettings, self).delete(save=False)
self.deauthorize(log=False)
if save:
self.save()
@property
def repo_url(self):
if self.user and self.repo:
return 'https://github.com/{0}/{1}/'.format(
self.user, self.repo
)
@property
def short_url(self):
if self.user and self.repo:
return '/'.join([self.user, self.repo])
@property
def is_private(self):
connection = GitHubClient(external_account=self.external_account)
try:
return connection.repo(user=self.user, repo=self.repo).private
except GitHubError:
return
def get_folders(self, **kwargs):
if not self.has_auth:
raise exceptions.InvalidAuthError()
else:
connection = GitHubClient(external_account=self.external_account)
# Since /user/repos excludes organization repos to which the
# current user has push access, we have to make extra requests to
# find them
try:
repo_data = [
{
'addon': 'github',
'kind': 'repo',
'id': repo.id,
'name': repo.name,
'path': os.path.join(repo.owner.login, repo.name)
}
for repo in connection.repos()]
except GitHubError:
repo_data = []
return repo_data
# TODO: Delete me and replace with serialize_settings / Knockout
def to_json(self, user):
ret = super(NodeSettings, self).to_json(user)
user_settings = user.get_addon('github')
ret.update({
'user_has_auth': user_settings and user_settings.has_auth,
'is_registration': self.owner.is_registration,
})
if self.has_auth:
owner = self.user_settings.owner
if owner == user:
ret.update({'repo_names': self.get_folders()})
ret.update({
'node_has_auth': True,
'github_user': self.user or '',
'github_repo': self.repo or '',
'github_repo_full_name': '{0}/{1}'.format(self.user, self.repo) if (self.user and self.repo) else '',
'auth_osf_name': owner.fullname,
'auth_osf_url': owner.url,
'auth_osf_id': owner._id,
'github_user_name': self.external_account.display_name,
'github_user_url': self.external_account.profile_url,
'is_owner': owner == user,
'valid_credentials': GitHubClient(external_account=self.external_account).check_authorization(),
'addons_url': web_url_for('user_addons'),
'files_url': self.owner.web_url_for('collect_file_trees')
})
return ret
def serialize_waterbutler_credentials(self):
if not self.complete or not self.repo:
raise exceptions.AddonError('Addon is not authorized')
return {'token': self.external_account.oauth_key}
def serialize_waterbutler_settings(self):
if not self.complete:
raise exceptions.AddonError('Repo is not configured')
return {
'owner': self.user,
'repo': self.repo,
}
def create_waterbutler_log(self, auth, action, metadata):
path = metadata['path']
url = self.owner.web_url_for('addon_view_or_download_file', path=path, provider='github')
sha, urls = None, {}
try:
sha = metadata['extra']['commit']['sha']
urls = {
'view': '{0}?ref={1}'.format(url, sha),
'download': '{0}?action=download&ref={1}'.format(url, sha)
}
except KeyError:
pass
self.owner.add_log(
'github_{0}'.format(action),
auth=auth,
params={
'project': self.owner.parent_id,
'node': self.owner._id,
'path': path,
'urls': urls,
'github': {
'user': self.user,
'repo': self.repo,
'sha': sha,
},
},
)
#############
# Callbacks #
#############
def before_page_load(self, node, user):
"""
:param Node node:
:param User user:
:return str: Alert message
"""
messages = []
# Quit if not contributor
if not node.is_contributor_or_group_member(user):
return messages
# Quit if not configured
if self.user is None or self.repo is None:
return messages
# Quit if no user authorization
if self.user_settings is None:
return messages
connect = GitHubClient(external_account=self.external_account)
try:
repo = connect.repo(self.user, self.repo)
except (ApiError, GitHubError):
return
node_permissions = 'public' if node.is_public else 'private'
repo_permissions = 'private' if repo.private else 'public'
if repo_permissions != node_permissions:
message = (
'Warning: This OSF {category} is {node_perm}, but the GitHub '
'repo {user} / {repo} is {repo_perm}.'.format(
category=markupsafe.escape(node.project_or_component),
node_perm=markupsafe.escape(node_permissions),
repo_perm=markupsafe.escape(repo_permissions),
user=markupsafe.escape(self.user),
repo=markupsafe.escape(self.repo),
)
)
if repo_permissions == 'private':
message += (
' Users can view the contents of this private GitHub '
'repository through this public project.'
)
else:
message += (
' The files in this GitHub repo can be viewed on GitHub '
'<u><a href="https://github.com/{user}/{repo}/">here</a></u>.'
).format(
user=self.user,
repo=self.repo,
)
messages.append(message)
return messages
def before_remove_contributor_message(self, node, removed):
"""
:param Node node:
:param User removed:
:return str: Alert message
"""
try:
message = (super(NodeSettings, self).before_remove_contributor_message(node, removed) +
'You can download the contents of this repository before removing '
'this contributor <u><a href="{url}">here</a></u>.'.format(
url=node.api_url + 'github/tarball/'
))
except TypeError:
# super call returned None due to lack of user auth
return None
else:
return message
def after_remove_contributor(self, node, removed, auth=None):
"""
:param Node node:
:param User removed:
:return str: Alert message
"""
if self.user_settings and self.user_settings.owner == removed:
# Delete OAuth tokens
self.user_settings = None
self.save()
message = (
u'Because the GitHub add-on for {category} "{title}" was authenticated '
u'by {user}, authentication information has been deleted.'
).format(
category=markupsafe.escape(node.category_display),
title=markupsafe.escape(node.title),
user=markupsafe.escape(removed.fullname)
)
if not auth or auth.user != removed:
url = node.web_url_for('node_setting')
message += (
u' You can re-authenticate on the <u><a href="{url}">Settings</a></u> page.'
).format(url=url)
#
return message
def after_fork(self, node, fork, user, save=True):
"""
:param Node node: Original node
:param Node fork: Forked node
:param User user: User creating fork
:param bool save: Save settings after callback
:return the cloned settings
"""
clone = super(NodeSettings, self).after_fork(
node, fork, user, save=False
)
# Copy authentication if authenticated by forking user
if self.user_settings and self.user_settings.owner == user:
clone.user_settings = self.user_settings
if save:
clone.save()
return clone
def before_make_public(self, node):
try:
is_private = self.is_private
except NotFoundError:
return None
if is_private:
return (
'This {cat} is connected to a private GitHub repository. Users '
'(other than contributors) will not be able to see the '
'contents of this repo unless it is made public on GitHub.'
).format(
cat=node.project_or_component,
)
def after_delete(self, user):
self.deauthorize(Auth(user=user), log=True)
#########
# Hooks #
#########
# TODO: Should Events be added here?
# TODO: Move hook logic to service
def add_hook(self, save=True):
if self.user_settings:
connect = GitHubClient(external_account=self.external_account)
secret = utils.make_hook_secret()
hook = connect.add_hook(
self.user, self.repo,
'web',
{
'url': urlparse.urljoin(
hook_domain,
os.path.join(
self.owner.api_url, 'github', 'hook/'
)
),
'content_type': github_settings.HOOK_CONTENT_TYPE,
'secret': secret,
},
events=github_settings.HOOK_EVENTS,
)
if hook:
self.hook_id = hook.id
self.hook_secret = secret
if save:
self.save()
def delete_hook(self, save=True):
"""
:return bool: Hook was deleted
"""
if self.user_settings and self.hook_id:
connection = GitHubClient(external_account=self.external_account)
try:
response = connection.delete_hook(self.user, self.repo, self.hook_id)
except (GitHubError, NotFoundError):
return False
if response:
self.hook_id = None
if save:
self.save()
return True
return False
|
apache-2.0
| 8,612,263,753,383,808,000 | 32.333333 | 117 | 0.553457 | false | 4.422604 | false | false | false |
rhyolight/hypersearch
|
setup.py
|
1
|
1371
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""TODO"""
from distutils.core import setup
setup(name="hypersearch",
version="0.0.1",
description=("A particle swarm optimization library created by Numenta "
"for hyperparameter optimization."),
author="Numenta",
author_email="[email protected]",
url="http://numenta.org",
packages=["hypersearch"],
)
|
gpl-3.0
| 1,950,163,098,414,486,500 | 36.054054 | 78 | 0.644785 | false | 4.554817 | false | false | false |
tboggs/fretboard
|
fretboard/displays/common.py
|
1
|
1370
|
from __future__ import division, print_function, unicode_literals
class Fret(object):
'''Represents an individual fret on a fretboard.'''
def __init__(self, string, fret, note):
self.string = string
self.number = fret
self.note = note
self.text = None
class FretboardDisplay(object):
'''Base class for fretboard displays.'''
def __init__(self, tuning='E A D G B E', nfrets=19):
'''
ARGUMENTS:
tuning (str):
The string tuning for the display. Should be a space-separated
string of note names.
nfrets (int):
Number of frets in the display.
'''
self.nfrets = nfrets
self.create_tuning(tuning)
self.create_strings()
def create_tuning(self, tuning_str):
from ..notes import Note
names = tuning_str.split()
tuning = [Note(n) for n in names]
# Adjust tone across octaves
for i in range(1, len(tuning)):
tuning[i] = tuning[i - 1] + tuning[i - 1].interval(tuning[i])
self.tuning = tuning
def create_strings(self):
tuning = list(reversed(self.tuning))
self.strings = [[Fret(i + 1, j, tuning[i] + j) for j in range(self.nfrets + 1)]
for i in range(len(tuning))]
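# Illustrative usage sketch (not part of the original module); `display` and
# the alternate tuning below are placeholders:
#
#     display = FretboardDisplay(tuning='D A D G B E', nfrets=22)
#     open_notes = [string[0].note for string in display.strings]
#     twelfth_fret_notes = [string[12].note for string in display.strings]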
|
gpl-3.0
| -8,315,247,631,235,190,000 | 29.444444 | 87 | 0.545985 | false | 3.702703 | false | false | false |
omji/django-tabbed-admin
|
tabbed_admin/templatetags/tabbed_admin_tags.py
|
1
|
1878
|
# -*- coding: utf-8 -*-
from django import template
from django.contrib.admin.helpers import Fieldset
from django.template.loader import render_to_string
from django.core.exceptions import ImproperlyConfigured
register = template.Library()
@register.simple_tag(takes_context=True)
def render_tab_fieldsets_inlines(context, entry):
"""
Render the fieldsets and inlines for a tab.
"""
template = "admin/includes/fieldset.html"
admin_form = context['adminform']
if 'request' not in context:
raise ImproperlyConfigured(
'"request" missing from context. Add django.core.context'
            '_processors.request to your '
'TEMPLATE_CONTEXT_PROCESSORS')
request = context['request']
obj = context.get('original', None)
readonly_fields = admin_form.model_admin.get_readonly_fields(request, obj)
inline_matching = {}
if "inline_admin_formsets" in context:
inline_matching = dict((inline.opts.__class__.__name__, inline)
for inline in context["inline_admin_formsets"])
if entry['type'] == 'fieldset':
name = entry['name']
f = Fieldset(
admin_form.form,
name,
readonly_fields=readonly_fields,
model_admin=admin_form.model_admin,
**entry['config']
)
context["fieldset"] = f
return render_to_string(template, context.flatten(), request=request)
elif entry['type'] == 'inline':
try:
inline_admin_formset = inline_matching[entry["name"]]
context["inline_admin_formset"] = inline_admin_formset
return render_to_string(inline_admin_formset.opts.template,
context.flatten(), request=request)
except KeyError: # The user does not have the permission
pass
return ''
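# Illustrative template usage sketch (not part of the original module); the
# example `entry` dict is hypothetical and only shows the keys this tag reads:
#
#     {% load tabbed_admin_tags %}
#     {% render_tab_fieldsets_inlines entry %}
#
# with entry = {'type': 'fieldset', 'name': 'General',
#               'config': {'fields': ('title',)}}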
|
bsd-3-clause
| 6,588,136,163,030,727,000 | 37.326531 | 78 | 0.620873 | false | 4.327189 | false | false | false |
fredgj/lazy
|
lazy/lazy.py
|
1
|
1420
|
class LazyRef(object):
def __init__(self, fget):
self.fget = fget
self.name = fget.__name__
def __get__(self, instance, cls):
value = self.fget(instance)
instance.__dict__[self.name] = value
return value
class LazyProperty(object):
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
if doc is None and fget is not None and hasattr(fget, '__doc__'):
doc = fget.__doc__
self.fget = fget
self.fset = fset
self.fdel = fdel
self.__doc__ = doc
self.value = None
def __get__(self, instance, cls):
if self.fget is None:
raise AttributeError('unreadable attribute')
if self.value is None:
self.value = self.fget(instance)
return self.value
def __set__(self, instance, value):
if self.fset is None:
raise AttributeError('can\'t set attribute')
self.value = None
return self.fset(instance, value)
def __delete__(self, instance):
if self.fdel is None:
raise AttributeError('can\'t delete attribute')
self.value = None
return self.fdel(instance)
def getter(self, func):
self.fget = func
return self
def setter(self, func):
self.fset = func
return self
def deleter(self, func):
self.fdel = func
return self
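# Illustrative usage sketch (not part of the original module); `Circle` is a
# hypothetical class showing how both descriptors defer work until first use:
#
#     class Circle(object):
#         def __init__(self, radius):
#             self.radius = radius
#
#         @LazyRef
#         def diameter(self):
#             # computed once, then cached in the instance __dict__
#             return 2 * self.radius
#
#         @LazyProperty
#         def area(self):
#             # cached until the property is set or deleted
#             return 3.14159 * self.radius ** 2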
|
mit
| -501,561,031,706,150,700 | 26.307692 | 73 | 0.557042 | false | 3.944444 | false | false | false |
frePPLe/frePPLe
|
contrib/odoo/addons_v8/frepple/res_company.py
|
1
|
1239
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 by frePPLe bv
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from openerp.osv import osv
from openerp.osv import fields
class res_company(osv.osv):
_name = 'res.company'
_inherit = 'res.company'
_columns = {
        'manufacturing_warehouse': fields.many2one('stock.warehouse', 'Manufacturing warehouse', ondelete='set null'),
'calendar': fields.many2one('resource.calendar', 'Calendar', ondelete='set null'),
'cmdline': fields.char('Command line', size=128)
}
_defaults = {
'cmdline': lambda *a: 'frepplectl --env=odoo_read,odoo_write'
}
res_company()
|
agpl-3.0
| -7,058,372,471,046,832,000 | 34.4 | 114 | 0.719935 | false | 3.731928 | false | false | false |
cpcloud/numpy
|
numpy/distutils/command/autodist.py
|
3
|
1650
|
"""This module implements additional tests ala autoconf which can be useful.
"""
from __future__ import division, absolute_import, print_function
# We put them here since they could be easily reused outside numpy.distutils
def check_inline(cmd):
"""Return the inline identifier (may be empty)."""
cmd._check_compiler()
body = """
#ifndef __cplusplus
static %(inline)s int static_func (void)
{
return 0;
}
%(inline)s int nostatic_func (void)
{
return 0;
}
#endif"""
for kw in ['inline', '__inline__', '__inline']:
st = cmd.try_compile(body % {'inline': kw}, None, None)
if st:
return kw
return ''
def check_compiler_gcc4(cmd):
"""Return True if the C compiler is GCC 4.x."""
cmd._check_compiler()
body = """
int
main()
{
#if (! defined __GNUC__) || (__GNUC__ < 4)
#error gcc >= 4 required
#endif
}
"""
return cmd.try_compile(body, None, None)
def check_gcc_function_attribute(cmd, attribute, name):
"""Return True if the given function attribute is supported."""
cmd._check_compiler()
body = """
#pragma GCC diagnostic error "-Wattributes"
#pragma clang diagnostic error "-Wattributes"
int %s %s(void*);
int
main()
{
}
""" % (attribute, name)
return cmd.try_compile(body, None, None) != 0
def check_gcc_variable_attribute(cmd, attribute):
"""Return True if the given variable attribute is supported."""
cmd._check_compiler()
body = """
#pragma GCC diagnostic error "-Wattributes"
#pragma clang diagnostic error "-Wattributes"
int %s foo;
int
main()
{
return 0;
}
""" % (attribute, )
return cmd.try_compile(body, None, None) != 0
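# Illustrative usage sketch (not part of the original module): these checks are
# normally run from a numpy.distutils config command inside setup(); the
# `config_cmd` instance below is assumed, not defined here.
#
#     inline_kw = check_inline(config_cmd)          # '' if inline unsupported
#     is_gcc4 = check_compiler_gcc4(config_cmd)
#     has_attr = check_gcc_function_attribute(
#         config_cmd, '__attribute__((optimize("O3")))', 'attribute_optimize')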
|
bsd-3-clause
| 3,202,266,910,410,981,400 | 20.428571 | 76 | 0.638788 | false | 3.416149 | false | false | false |
GeoMop/GeoMop
|
src/LayerEditor/ui/dialogs/layers/split_layer.py
|
1
|
4224
|
"""
Dialog for splitting an existing layer.
"""
import PyQt5.QtWidgets as QtWidgets
import PyQt5.QtGui as QtGui
from .layers_helpers import LayersHelpers
from LayerEditor.leconfig import cfg
import gm_base.icon as icon
class SplitLayerDlg(QtWidgets.QDialog):
def __init__(self, min, max, copy_block, parent=None):
super(SplitLayerDlg, self).__init__(parent)
self.setWindowTitle("Split Layer")
grid = QtWidgets.QGridLayout(self)
d_layer_name = QtWidgets.QLabel("Layer Name:", self)
self.layer_name = QtWidgets.QLineEdit()
self.layer_name.setToolTip("New Layer name (New layer is in the bottom)")
self.have_default_name = True
self.set_default_name()
self.layer_name.textChanged.connect(self.lay_name_changed)
self.image = QtWidgets.QLabel(self)
self.image.setMinimumWidth(self.layer_name.sizeHint().height())
self.image.setPixmap(icon.get_app_icon("sign-check").pixmap(self.layer_name.sizeHint().height()))
self.image.setToolTip('Layer name is unique, everything is fine.')
grid.addWidget(d_layer_name, 0, 0)
grid.addWidget(self.layer_name, 0, 1)
grid.addWidget(self.image, 0, 2)
d_split_type = QtWidgets.QLabel("Split Interface Type:", self)
self.split_type = LayersHelpers.split_type_combo(copy_block)
grid.addWidget(d_split_type, 1, 0)
grid.addWidget(self.split_type, 1, 1)
d_surface = QtWidgets.QLabel("Split in Surface:", self)
grid.addWidget(d_surface, 2, 0)
i = LayersHelpers.add_surface_to_grid(self, grid, 3)
self.validator = QtGui.QDoubleValidator()
self.validator.setBottom(min)
self.validator.setTop(max)
self.elevation.setValidator(self.validator)
self.elevation.setText(str((min+max)/2))
self._tranform_button = QtWidgets.QPushButton("Split", self)
self._tranform_button.clicked.connect(self.accept)
self._cancel_button = QtWidgets.QPushButton("Cancel", self)
self._cancel_button.clicked.connect(self.reject)
button_box = QtWidgets.QDialogButtonBox()
button_box.addButton(self._tranform_button, QtWidgets.QDialogButtonBox.AcceptRole)
button_box.addButton(self._cancel_button, QtWidgets.QDialogButtonBox.RejectRole)
grid.addWidget(button_box, i, 1, 1, 2)
self.setLayout(grid)
@classmethod
def is_unique_layer_name(self, lay_name):
""" Return False in the case of colision with an existing region name."""
for _, layer in cfg.diagram.regions.layers.items():
if lay_name == layer:
return False
return True
def lay_name_changed(self, name):
""" Called when Region Line Edit is changed."""
self.have_default_name = False
if self.is_unique_layer_name(name):
self.image.setPixmap(
icon.get_app_icon("sign-check").pixmap(self.layer_name.sizeHint().height())
)
self.image.setToolTip('Unique name is OK.')
self._tranform_button.setEnabled(True)
else:
self.image.setPixmap(
icon.get_app_icon("warning").pixmap(self.layer_name.sizeHint().height())
)
self.image.setToolTip('Name is not unique!')
self._tranform_button.setEnabled(False)
def set_default_name(self):
""" Set default name if it seems to be default name. """
if self.have_default_name:
lay_id = 0
name = cfg.diagram.regions.layers[0]
while not self.is_unique_layer_name(name):
lay_id += 1
name = "Layer_" + str(lay_id)
self.layer_name.setText(name)
self.have_default_name = True
def accept(self):
"""
Accepts the form if elevation data fields are valid.
:return: None
"""
if LayersHelpers.validate_depth(self.elevation, self.validator, self):
super(SplitLayerDlg, self).accept()
def fill_surface(self, interface):
"""Fill set surface"""
return LayersHelpers.fill_surface(self, interface)
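# Illustrative usage sketch (not part of the original module); the elevation
# bounds, block and parent widget are placeholders supplied by the caller:
#
#     dlg = SplitLayerDlg(min_elevation, max_elevation, copy_block, parent)
#     if dlg.exec_() == QtWidgets.QDialog.Accepted:
#         new_layer_name = dlg.layer_name.text()
#         split_type_index = dlg.split_type.currentIndex()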
|
gpl-3.0
| 2,672,201,235,913,936,000 | 38.111111 | 105 | 0.624763 | false | 3.885925 | false | false | false |
MartinThoma/hwrt
|
hwrt/features_plugin.py
|
1
|
2007
|
"""Features in development."""
# Core Library modules
import os
import urllib.request
# Local modules
from . import handwritten_data, utils
class Bitmap:
"""n × n grayscale bitmap of the recording."""
normalize = True
def __init__(self, n=28):
self.n = n # Size of the bitmap (n x n)
def __repr__(self):
return ("Bitmap (n=%i)\n") % (self.n)
def __str__(self):
return repr(self)
def get_dimension(self):
"""Get the dimension of the returned feature. This equals the number
of elements in the returned list of numbers."""
return self.n ** 2
def __call__(self, hwr_obj):
assert isinstance(
hwr_obj, handwritten_data.HandwrittenData
), "handwritten data is not of type HandwrittenData, but of %r" % type(hwr_obj)
x = []
url = "http://localhost/write-math/website/raw-data/"
raw_data_id = hwr_obj.raw_data_id
project_root = utils.get_project_root()
        foldername = os.path.join(project_root, "bitmaps")
f = urllib.request.urlopen(f"{url}{raw_data_id}.svg")
with open("%s%i.svg" % (foldername, raw_data_id), "wb") as imgFile:
imgFile.write(f.read())
command = (
"convert -size 28x28 {folder}{id}.svg -resize {n}x{n} "
"-gravity center -extent {n}x{n} "
"-monochrome {folder}{id}.png"
).format(
id=raw_data_id,
n=self.n,
folder=foldername,
)
os.system(command)
# Third party modules
from PIL import Image
im = Image.open("%s%i.png" % (foldername, raw_data_id))
pix = im.load()
for i in range(28):
for j in range(28):
x.append(pix[i, j])
assert self.get_dimension() == len(
x
), "Dimension of %s should be %i, but was %i" % (
str(self),
self.get_dimension(),
len(x),
)
return x
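# Illustrative usage sketch (not part of the original module); `recording` is
# assumed to be a handwritten_data.HandwrittenData instance with a valid
# raw_data_id:
#
#     feature = Bitmap(n=28)
#     pixels = feature(recording)      # flat list of 28*28 grayscale values
#     assert len(pixels) == feature.get_dimension()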
|
mit
| -5,350,278,061,378,514,000 | 28.072464 | 87 | 0.536391 | false | 3.575758 | false | false | false |
openstack/heat
|
heat/policies/build_info.py
|
1
|
1502
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import versionutils
from oslo_policy import policy
from heat.policies import base
DEPRECATED_REASON = """
The build API now supports system scope and default roles.
"""
POLICY_ROOT = 'build_info:%s'
deprecated_build_info = policy.DeprecatedRule(
name=POLICY_ROOT % 'build_info',
check_str=base.RULE_DENY_STACK_USER,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY
)
build_info_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'build_info',
check_str=base.SYSTEM_OR_PROJECT_READER,
scope_types=['system', 'project'],
description='Show build information.',
operations=[
{
'path': '/v1/{tenant_id}/build_info',
'method': 'GET'
}
],
deprecated_rule=deprecated_build_info
)
]
def list_rules():
return build_info_policies
|
apache-2.0
| 7,344,856,664,631,625,000 | 29.04 | 78 | 0.673768 | false | 3.901299 | false | false | false |
levibostian/VSAS
|
VSAS system/VSAS/vsasGUI/EmailScreen.py
|
1
|
4232
|
"""
Test of an Email Screen
Author: Kristen Nielsen [email protected]
Modeled after tkSimpleDialog.py from pythonware.com
"""
from Tkinter import *
import tkMessageBox as MsgBox
from multilistbox import MultiListbox
from emailInputNew import EmailInput
class EmailSettings(Toplevel):
def __init__(self, parent):
Toplevel.__init__(self, parent, height=400, width=700)
#self.pack_propagate(0)
self.transient(parent)
self.title("VSAS - Email Settings")
self._parent = parent
self.adminEmail=""
emailFile = open("vsasGUI/emailTester.txt","r")
self.emailList = emailFile.readlines()
emailFile.close()
body = Frame(self, bg="black")
self._initialFocus = self.body(body)
body.pack_propagate(0)
body.pack(padx=5,pady=5)
self.buttonBox()
self.current=None
self.grab_set()
self.bind("<F1>",self.displayHelp)
if not self._initialFocus:
self._initialFocus = self
self.protocol("WM_DELETE_WINDOW", self.cancel)
self.geometry("+%d+%d" % (parent.winfo_rootx()+50,
parent.winfo_rooty()+50))
self._initialFocus.focus_set()
self._parent.wait_window(self)
def body(self, master):
# create canvas to hold scrollbar and listbox objects
emailListCanvas = Canvas(master, width=350, height=400)
emailListCanvas.config(scrollregion=emailListCanvas.bbox(ALL))
emailListCanvas.grid(column=0, sticky=W)
# create multiListbox to hold email list
self._emailListbox = MultiListbox(emailListCanvas,
(('Email', 160),("",1)),
command = self.deleteEmail)
for item in self.emailList:
self._emailListbox.insert(END, (item,""))
self._emailListbox.grid(column = 0,columnspan=3, sticky=W)
addButton = Button(emailListCanvas, text="Add",command=self.addEmail)
addButton.grid(row=1,column=0)
deleteButton = Button(emailListCanvas, text="Delete",command=self.deleteEmail)
deleteButton.grid(row=1,column=1)
helpButton = Button(emailListCanvas, text="Help", command = self.displayHelp)
helpButton.grid(row=1,column=2)
#Label(master, text="The administrator email will receive\nall information regarding all alerts",
#fg="green",bg="black").grid(column=1, row=0)
#self.adminEmailDisplay = Label(master, text=self.adminEmail)
#self.adminEmailDisplay.grid(column=1, row=1)
def buttonBox(self):
pass
def addEmail(self):
email = EmailInput(self, title="Add Email").get()
if len(email)>0:
emailFile = open("vsasGUI/emailTester.txt","a")
#emailComposite = email.split(",")
#emailTuple = (emailComposite[0], emailComposite[1])
print email
email = email+"\n"
self.emailList.append(email)
emailFile.write(email)
emailFile.close()
self._emailListbox.insert(END, (email,""))
self.update()
def deleteEmail(self, event=None):
if MsgBox.askyesno("Delete Email?","Are you sure you want to delete selected email?"):
index = self.emailList[eval(self._emailListbox.curselection()[0])]
self.emailList.remove(index)
self._emailListbox.delete(0,END)
emailFile = open("vsasGUI/emailTester.txt","w")
for item in self.emailList:
emailFile.write(item)
self._emailListbox.insert(END, (item,""))
emailFile.close()
def displayHelp(self, event=None):
helpText = open("vsasGUI/EmailScreenHelp.txt","r").read()
MsgBox.showinfo(title="VSAS Email Settings - Help", message=helpText)
def cancel(self, event=None):
if MsgBox.askyesno("Done?",
"All changes have been saved.\nReturn to VSAS Main?"):
self._parent.focus_set()
self.destroy()
|
mit
| 3,505,993,686,349,974,500 | 34.17094 | 105 | 0.587902 | false | 3.925788 | false | false | false |
Lana-B/Pheno4T
|
madanalysis/enumeration/observable_type.py
|
1
|
6721
|
################################################################################
#
# Copyright (C) 2012-2013 Eric Conte, Benjamin Fuks
# The MadAnalysis development team, email: <[email protected]>
#
# This file is part of MadAnalysis 5.
# Official website: <https://launchpad.net/madanalysis5>
#
# MadAnalysis 5 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MadAnalysis 5 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MadAnalysis 5. If not, see <http://www.gnu.org/licenses/>
#
################################################################################
from madanalysis.enumeration.ma5_running_type import MA5RunningType
import math
class ObservableType(object):
# name : accept_particles
values = { 'UNKNOWN' : [False,'','','','',0,0,0,False,False],\
'SQRTS' : [False,'PHYSICS->SqrtS(event.mc())','PHYSICS->SqrtS(event.mc())','','GeV',100,0.,1000., True, False],\
'TET' : [False,'PHYSICS->Transverse->EventTET(event.mc())','PHYSICS->Transverse->EventTET(event.mc())','PHYSICS->Transverse->EventTET(event.rec())','GeV',100,0.,1000., True,False],\
'MET' : [False,'PHYSICS->Transverse->EventMET(event.mc())','PHYSICS->Transverse->EventMET(event.mc())','PHYSICS->Transverse->EventMET(event.rec())','GeV',100,0.,1000., True,False],\
'THT' : [False,'PHYSICS->Transverse->EventTHT(event.mc())','PHYSICS->Transverse->EventTHT(event.mc())','PHYSICS->Transverse->EventTHT(event.rec())','GeV',100,0.,1000., True,False],\
'MHT' : [False,'PHYSICS->Transverse->EventMHT(event.mc())','PHYSICS->Transverse->EventMHT(event.mc())','PHYSICS->Transverse->EventMHT(event.rec())','GeV',100,0.,1000.,True,False],\
'NPID': [False,'NPID','NPID','NPID','',100,0.,100.,False,False],\
'NAPID': [False,'NAPID','NAPID','NAPID','',100,0.,100.,False,False],\
'E' : [True,'e()','e()','e()','GeV',100,0.,1000.,True,True],\
'M' : [True,'m()','m()','m()','GeV/c^{2}',100,0.,1000.,True,True],\
'P' : [True,'p()','p()','p()','GeV/c',100,0.,1000.,True,True],\
'ET' : [True,'et()','et()','et()','GeV',100,0.,1000.,True,True],\
'MT' : [True,'mt()','mt()','mt()','GeV/c^{2}',100,0.,1000.,True,True],\
'PT' : [True,'pt()','pt()','pt()','GeV/c',100,0.,1000.,True,True],\
'PX' : [True,'px()','px()','px()','GeV/c',100,-1000.,1000.,True,True],\
'PY' : [True,'py()','py()','py()','GeV/c',100,-1000.,1000.,True,True],\
'PZ' : [True,'pz()','pz()','pz()','GeV/c',100,-1000.,1000.,True,True],\
'R' : [True,'r()','r()','r()','',100,0.,1000.,True,True],\
'THETA' : [True,'theta()','theta()','theta()','',100,0.,2*math.pi+0.01,True,True],\
'ETA' : [True,'eta()','eta()','eta()','',100,-8.0,+8.0,True,True],\
'PHI' : [True,'phi()','phi()','phi()','',100,0.,2*math.pi+0.01,True,True],\
'Y' : [True,'y()','y()','y()','',100,-8.0,+8.0,True,True],\
'BETA' : [True,'beta()','beta()','beta()','',100,0.,1.,True,True],\
'GAMMA': [True,'gamma()','gamma()','gamma()','',100,1.,1000.,True,True],\
'N' : [True,'N()','N()','N()','',20,0.,20.,True,True],\
'ISOL' : [True,'','','isolated()','',2,0,1,True,False],\
'HE_EE': [True,'','','HEoverEE()','',100,0,100,True,False],\
'NTRACKS': [True,'','','ntracks()','',100,0,100,True,False] }
class __metaclass__(type):
def __getattr__(self, name):
if name in self.values.keys():
return self.values.keys().index(name)
else:
return self.values.keys().index('UNKNOWN')
def accept_particles(self, index):
name = self.values.keys()[index]
return self.values[name][0]
def convert2string(self,index):
return self.values.keys()[index]
def convert2job_string(self,index,level):
name = self.values.keys()[index]
if level==MA5RunningType.PARTON:
return self.values[name][1]
elif level==MA5RunningType.HADRON:
return self.values[name][2]
elif level==MA5RunningType.RECO:
return self.values[name][3]
return ""
def convert2unit(self,index):
name = self.values.keys()[index]
return self.values[name][4]
def convert2nbins(self,index):
name = self.values.keys()[index]
return self.values[name][5]
def convert2xmin(self,index):
name = self.values.keys()[index]
return self.values[name][6]
def convert2xmax(self,index):
name = self.values.keys()[index]
return self.values[name][7]
def isCuttable(self,index):
name = self.values.keys()[index]
return self.values[name][8]
def prefix(self,index):
name = self.values.keys()[index]
return self.values[name][9]
def get_list(self,level=MA5RunningType.PARTON):
output = []
for item in self.values.keys():
x = ObservableType.convert2job_string(self.values.keys().index(item),level)
if x=="":
continue
output.append(item)
if self.values[item][0] and self.values[item][9]:
output.append('s'+item)
output.append('v'+item)
output.append('sd'+item)
output.append('ds'+item)
output.append('d'+item)
output.append('dv'+item)
output.append('vd'+item)
output.append('r'+item)
return output
def get_cutlist1(self,level=MA5RunningType.PARTON):
output = []
for item in self.values.keys():
if item=="N":
output.append(item)
continue
x = ObservableType.convert2job_string(self.values.keys().index(item),level)
if x=="":
continue
if not self.values[item][8]:
continue
if self.values[item][0]:
continue
output.append(item)
return output
def get_cutlist2(self,level=MA5RunningType.PARTON):
output = []
for item in self.values.keys():
x = ObservableType.convert2job_string(self.values.keys().index(item),level)
if item=="N":
continue
if x=="":
continue
if not self.values[item][8]:
continue
if not self.values[item][0]:
continue
output.append(item)
if not self.values[item][9]:
continue
output.append('s'+item)
output.append('v'+item)
output.append('sd'+item)
output.append('ds'+item)
output.append('d'+item)
output.append('dv'+item)
output.append('vd'+item)
output.append('r'+item)
return output
|
gpl-3.0
| 7,029,142,806,390,245,000 | 37.626437 | 190 | 0.586371 | false | 2.84547 | false | false | false |
mganeva/mantid
|
scripts/Muon/GUI/Common/context_example/context_example_widget.py
|
1
|
1841
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
from Muon.GUI.Common.context_example.context_example_view import ContextExampleView
from Muon.GUI.Common.context_example.context_example_presenter import ContextExamplePresenter
from Muon.GUI.Common.context_example.context_example_model import ContextExampleModel
class ContextExampleWidget(object):
"""
An example of how to use the context with a widget class.
The widget class exposes the MVP to the rest of the GUI
"""
def __init__(self, context, parent=None):
model = ContextExampleModel(context)
sub_context = model.getSubContext()
view = ContextExampleView(sub_context, parent)
self._presenter = ContextExamplePresenter(view, model)
@property
def presenter(self):
return self._presenter
@property
def widget(self):
return self._presenter.widget
# interact with context
def setUpdateContext(self, slot):
"""
This function is to set the update
method from the main GUI to the signals
from this GUI
"""
view = self._presenter.widget
view.updateSignal.connect(slot)
def updateContext(self):
self._presenter.updateContext()
def loadFromContext(self):
# extract relevant info from context via model
model = self._presenter.model
sub_context = model.getSubContext()
# update the view with the subcontext
view = self._presenter.widget
view.loadFromContext(sub_context)
|
gpl-3.0
| -1,136,197,275,128,112,800 | 31.875 | 93 | 0.689299 | false | 4.127803 | false | false | false |
cumc-dbmi/pmi_sprint_reporter
|
file_transfer.py
|
1
|
3848
|
from base64 import b64encode
from io import BytesIO
from uuid import uuid4
import requests
import time
import wddx
import settings
TRANSFER_API_URL_FMT = 'https://transfer.nyp.org/seos/1000/%s.api'
TRANSFER_LOGIN_URL = TRANSFER_API_URL_FMT % 'login'
TRANSFER_FIND_URL = TRANSFER_API_URL_FMT % 'find'
TRANSFER_PUT_URL = TRANSFER_API_URL_FMT % 'put'
TRANSFER_SEND_URL = TRANSFER_API_URL_FMT % 'send'
SEND_DELAY_SECONDS = 1.5 # Accellion recommends 5 seconds, ain't nobody got time for that
UPLOAD_TIMEOUT_SECONDS = 60 * 2
def get_tokens():
"""
Retrieve Accellion API tokens
:return:
"""
data = {'auth_type': 'pwd',
'uid': settings.accellion['username'],
'pwd': settings.accellion['password'],
'api_token': 1,
'output': 'json'}
response = requests.post(TRANSFER_LOGIN_URL, data=data)
return response.json()
def parse_response(content):
items = wddx.loads(content)
return items[0]
def upload(filename, file_contents, recipients, mime_type='text/plain', subject=None, message=None, expire_days=21):
"""
Upload a file to the Accellion file system
:param filename: user-friendly filename
:param file_contents: binary data; this supports streamed data to prevent reading into memory
:param recipients: comma-separated list of e-mail addresses
:param subject: subject of e-mail
:param message: body of e-mail
:param mime_type: type of file
:param expire_days: number of days until link expires
:return: details from put and send api calls
"""
tokens = get_tokens()
uid = uuid4().__str__()
file_handle = '%s/files/%s/%s' % (tokens['client_id'], uid, filename)
data = {'token': tokens['put_token'], 'file_handle': file_handle}
put_response = requests.post(TRANSFER_PUT_URL, data=data, files={'file': (filename, file_contents, mime_type)})
put_details = parse_response(put_response.content)
# create e-mail package with links to file (short-url mode)
time.sleep(SEND_DELAY_SECONDS)
meta_file_handle = '%s/files/%s-list' % (tokens['client_id'], uid)
file_handle = put_details['file_handle']
file_size = put_details['file_size']
file_handle_hash = b64encode(file_handle)
file_list = '%s\n|%s\n|%s\n|\n' % (b64encode(filename), file_handle_hash, b64encode(file_size))
data = {'token': tokens['send_token'],
'short_token': 1,
'sender': b64encode(settings.accellion['username']),
'recipients': b64encode(recipients),
'meta_file_handle': meta_file_handle,
'file_list1': file_list,
'link_validity': expire_days,
'email_options1': 'vr'} # only allow the original recipient to download
if subject is not None:
data['subject'] = b64encode(subject)
if message is not None:
data['message'] = b64encode(message)
send_response = requests.post(TRANSFER_SEND_URL, data=data, timeout=UPLOAD_TIMEOUT_SECONDS)
send_details = parse_response(send_response.content)
response_details = dict(put=put_details, send=send_details)
return response_details
def download(url):
"""
Download file from secure file transfer and save it to specified location
:param url: url of file in secure file transfer
:return: file contents as str
"""
tokens = get_tokens()
cookie_value = 'user&%s&cs&%s' % (settings.accellion['username'], tokens['inbox_cs'])
r = requests.get(url, cookies=dict(a1000c1s1=cookie_value))
bs = BytesIO(r.content)
return bs.read()
def inbox():
"""
Retrieve list of e-mail packages
:return:
"""
tokens = get_tokens()
data = dict(token=tokens['inbox_token'], mailbody=1)
inbox_response = requests.post(TRANSFER_FIND_URL, data=data)
return parse_response(inbox_response.content)
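# Illustrative usage sketch (not part of the original module); the file name
# and recipient address are placeholders:
#
#     with open('report.csv', 'rb') as f:
#         details = upload('report.csv', f, '[email protected]',
#                          mime_type='text/csv', subject='Weekly report')
#     packages = inbox()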
|
mit
| -8,684,232,296,754,300,000 | 35.301887 | 116 | 0.661123 | false | 3.451121 | false | false | false |
funkring/fdoo
|
addons-funkring/shop_delivery/__openerp__.py
|
1
|
1296
|
# -*- coding: utf-8 -*-
#############################################################################
#
# Copyright (c) 2007 Martin Reisenhofer <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Shop Delivery Default",
"description":"""
Shop Delivery
=============
Define shop delivery defaults
""",
"version" : "1.0",
"author" : "oerp.at",
"website" : "http://oerp.at",
"depends" : ["delivery",
"at_purchase_sale"],
"data" : ["view/shop_view.xml"],
"auto_install" : False,
"installable": True
}
|
agpl-3.0
| -7,352,487,846,455,987,000 | 34.054054 | 78 | 0.574074 | false | 3.95122 | false | false | false |
giraldeau/autovm
|
autovm/helpers.py
|
1
|
3970
|
#!/usr/bin/env python
# coding=utf-8
import os
from os.path import join, dirname, exists
from os import makedirs, utime, unlink, walk, rmdir
from heapq import heappush, heappop, heappushpop
import platform
import sys
def default_dist():
(a, b, c) = platform.dist()
return c
def default_arch():
mapping = { 'x86_64': 'amd64', 'i386': 'i386' }
arch = platform.machine()
return mapping.get(arch, 'amd64')
class NullProgressMonitor(object):
def __init__(self, msg="progress"):
pass
def update(self, percent):
pass
class CmdProgressMonitor(object):
def __init__(self, msg="progress"):
self.msg = msg
self.width = 10
self.percent = 0.0
def update(self, percent):
if (percent - self.percent < 0.001):
return
self.percent = percent
ticks = ((int(percent * 100) + 5) / self.width)
blank = self.width - ticks
sys.stdout.write("%s [%s%s] %.1f%%\r" % (self.msg, '#' * ticks, ' ' * blank, self.percent * 100))
sys.stdout.flush()
null_progress = NullProgressMonitor()
def copyfileobj_progress(fsrc, fdst, size, length=16*1024, progress=null_progress):
"""copy data from file-like object fsrc to file-like object fdst"""
sum = 0.0
if size == 0:
size = 1
while 1:
progress.update(sum / size)
buf = fsrc.read(length)
sum += len(buf)
if not buf:
break
fdst.write(buf)
progress.update(1.0)
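# Illustrative usage sketch (not part of the original module); paths are
# placeholders:
#
#     size = os.path.getsize('disk.img')
#     with open('disk.img', 'rb') as fsrc, open('copy.img', 'wb') as fdst:
#         copyfileobj_progress(fsrc, fdst, size,
#                              progress=CmdProgressMonitor("copying disk.img"))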
# http://stackoverflow.com/questions/12654772/create-empty-file-using-python
def touch(path):
d = dirname(path)
if not exists(d):
makedirs(d)
with open(path, 'a'):
utime(path, None)
class AbstractWalkerVisitor(object):
def visit_file(self, root, name):
pass
def visit_dir(self, root, name):
pass
class PrintWalkerVisitor(AbstractWalkerVisitor):
def visit_file(self, root, name):
print "f %s" % repr(join(root, name))
def visit_dir(self, root, name):
print "d %s" % repr(join(root, name))
class EntriesWalkerVisitor(AbstractWalkerVisitor):
def __init__(self):
self.entries = []
def visit_file(self, root, name):
self.entries.append(join(root, name))
class CountWalkerVisitor(AbstractWalkerVisitor):
def __init__(self):
self.files = 0
self.directories = 0
def visit_file(self, root, name):
self.files += 1
def visit_dir(self, root, name):
self.directories += 1
class FileEntry(object):
def __init__(self, path):
self.path = path
self.st = os.stat(path)
def __cmp__(self, other):
if (self.st.st_mtime < other.st.st_mtime):
return 1
elif (self.st.st_mtime == other.st.st_mtime):
return 0
return -1
def __repr__(self):
return "%s %s" % (str(self.st.st_mtime), self.path)
class LRUWalkerVisitor(AbstractWalkerVisitor):
'make the list of least used files'
def __init__(self, max_item=100):
self.heap = []
self.max_item = max_item
def visit_file(self, root, name):
item = FileEntry(join(root, name))
if len(self.heap) < self.max_item:
heappush(self.heap, item)
else:
heappushpop(self.heap, item)
def get_entries(self):
return [heappop(self.heap) for i in range(len(self.heap))]
class DeleteWalkerVisitor(AbstractWalkerVisitor):
def visit_file(self, root, name):
unlink(join(root, name))
def visit_dir(self, root, name):
        rmdir(join(root, name))  # directories are reached after their files (topdown=False)
class Walker(object):
'Scan directory and feed visitor'
def process(self, path, *visitor):
for root, dirs, files in walk(path, topdown=False):
for name in files:
for v in visitor:
v.visit_file(root, name)
for name in dirs:
for v in visitor:
v.visit_dir(root, name)
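# Illustrative usage sketch (not part of the original module); the cache path
# is a placeholder:
#
#     counter = CountWalkerVisitor()
#     lru = LRUWalkerVisitor(max_item=10)
#     Walker().process('/var/cache/autovm', counter, lru)
#     print counter.files, counter.directories
#     for entry in lru.get_entries():
#         print entry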
|
gpl-3.0
| -6,377,656,115,338,019,000 | 28.857143 | 106 | 0.589673 | false | 3.491645 | false | false | false |
endlessm/chromium-browser
|
chrome/android/java/src/PRESUBMIT.py
|
1
|
8949
|
# Copyright (c) 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Android Java code.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
This presubmit checks for the following:
- No new calls to Notification.Builder or NotificationCompat.Builder
constructors. Callers should use ChromeNotificationBuilder instead.
- No new calls to AlertDialog.Builder. Callers should use ModalDialogView
instead.
"""
import re
NEW_NOTIFICATION_BUILDER_RE = re.compile(
r'\bnew\sNotification(Compat)?\.Builder\b')
IMPORT_APP_COMPAT_ALERTDIALOG_RE = re.compile(
r'\bimport\sandroid\.support\.v7\.app\.AlertDialog;')
NEW_COMPATIBLE_ALERTDIALOG_BUILDER_RE = re.compile(
r'\bnew\s+(UiUtils\s*\.)?CompatibleAlertDialogBuilder\b')
NEW_ALERTDIALOG_BUILDER_RE = re.compile(
r'\bnew\sAlertDialog\.Builder\b')
COMMENT_RE = re.compile(r'^\s*(//|/\*|\*)')
BROWSER_ROOT = 'chrome/android/java/src/org/chromium/chrome/browser/'
def CheckChangeOnUpload(input_api, output_api):
return _CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return _CommonChecks(input_api, output_api)
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
result = []
result.extend(_CheckNotificationConstructors(input_api, output_api))
result.extend(_CheckAlertDialogBuilder(input_api, output_api))
result.extend(_CheckCompatibleAlertDialogBuilder(input_api, output_api))
# Add more checks here
return result
def _CheckNotificationConstructors(input_api, output_api):
# "Blacklist" because the following files are excluded from the check.
blacklist = (
'chrome/android/java/src/org/chromium/chrome/browser/notifications/'
'NotificationBuilder.java',
'chrome/android/java/src/org/chromium/chrome/browser/notifications/'
'NotificationCompatBuilder.java'
)
error_msg = '''
Android Notification Construction Check failed:
Your new code added one or more calls to the Notification.Builder and/or
NotificationCompat.Builder constructors, listed below.
This is banned, please construct notifications using
NotificationBuilderFactory.createChromeNotificationBuilder instead,
specifying a channel for use on Android O.
See https://crbug.com/678670 for more information.
'''
return _CheckReIgnoreComment(input_api, output_api, error_msg, blacklist,
NEW_NOTIFICATION_BUILDER_RE)
def _CheckAlertDialogBuilder(input_api, output_api):
# "Blacklist" because the following files are excluded from the check. In
# general, preference and FRE related UIs are not relevant to VR mode.
blacklist = (
BROWSER_ROOT + 'browserservices/ClearDataDialogActivity.java',
BROWSER_ROOT + 'browsing_data/ConfirmImportantSitesDialogFragment.java',
BROWSER_ROOT + 'browsing_data/OtherFormsOfHistoryDialogFragment.java',
BROWSER_ROOT + 'datareduction/settings/DataReductionStatsPreference.java',
BROWSER_ROOT + 'password_manager/AccountChooserDialog.java',
BROWSER_ROOT + 'password_manager/AutoSigninFirstRunDialog.java',
BROWSER_ROOT + r'settings[\\\/].*',
BROWSER_ROOT + 'signin/AccountPickerDialogFragment.java',
BROWSER_ROOT + 'signin/AccountSigninView.java',
BROWSER_ROOT + 'signin/ConfirmImportSyncDataDialog.java',
BROWSER_ROOT + 'signin/ConfirmManagedSyncDataDialog.java',
BROWSER_ROOT + 'signin/ConfirmSyncDataStateMachineDelegate.java',
BROWSER_ROOT + 'signin/SigninFragmentBase.java',
BROWSER_ROOT + 'signin/SignOutDialogFragment.java',
BROWSER_ROOT + 'site_settings/AddExceptionPreference.java',
BROWSER_ROOT + 'site_settings/ChosenObjectSettings.java',
BROWSER_ROOT + 'site_settings/ManageSpaceActivity.java',
BROWSER_ROOT + 'site_settings/ManageSpaceActivity.java',
BROWSER_ROOT + 'site_settings/SingleCategorySettings.java',
BROWSER_ROOT + 'site_settings/SingleWebsiteSettings.java',
BROWSER_ROOT + 'sync/settings/ManageSyncSettings.java',
BROWSER_ROOT + 'sync/settings/SyncAndServicesSettings.java',
BROWSER_ROOT + 'sync/ui/PassphraseCreationDialogFragment.java',
BROWSER_ROOT + 'sync/ui/PassphraseDialogFragment.java',
BROWSER_ROOT + 'sync/ui/PassphraseTypeDialogFragment.java',
)
error_msg = '''
AlertDialog.Builder Check failed:
Your new code added one or more calls to the AlertDialog.Builder, listed
below.
We recommend you use ModalDialogProperties to show a dialog whenever possible
to support VR mode. You could only keep the AlertDialog if you are certain
that your new AlertDialog is not used in VR mode (e.g. preference, FRE)
If you are in doubt, contact
//src/chrome/android/java/src/org/chromium/chrome/browser/vr/VR_JAVA_OWNERS
'''
error_files = []
result = _CheckReIgnoreComment(input_api, output_api, error_msg, blacklist,
NEW_ALERTDIALOG_BUILDER_RE, error_files)
wrong_builder_errors = []
wrong_builder_error_msg = '''
Android Use of AppCompat AlertDialog.Builder Check failed:
Your new code added one or more calls to the AppCompat AlertDialog.Builder,
file listed below.
If you are keeping the new AppCompat AlertDialog.Builder, please use
CompatibleAlertDialogBuilder instead to work around support library issues.
See https://crbug.com/966101 for more information.
'''
for f in error_files:
contents = input_api.ReadFile(f)
if IMPORT_APP_COMPAT_ALERTDIALOG_RE.search(contents):
wrong_builder_errors.append(' %s' % (f.LocalPath()))
if wrong_builder_errors:
result.extend([output_api.PresubmitError(
wrong_builder_error_msg, wrong_builder_errors)])
return result
def _CheckCompatibleAlertDialogBuilder(input_api, output_api):
# "Blacklist" because the following files are excluded from the check.
blacklist = (
BROWSER_ROOT + 'LoginPrompt.java',
BROWSER_ROOT + 'SSLClientCertificateRequest.java',
BROWSER_ROOT + 'autofill/AutofillPopupBridge.java',
BROWSER_ROOT + 'autofill/keyboard_accessory/'
'AutofillKeyboardAccessoryBridge.java',
BROWSER_ROOT + 'dom_distiller/DistilledPagePrefsView.java',
BROWSER_ROOT + 'dom_distiller/DomDistillerUIUtils.java',
BROWSER_ROOT + 'download/DownloadController.java',
BROWSER_ROOT + 'download/OMADownloadHandler.java',
BROWSER_ROOT + 'externalnav/ExternalNavigationDelegateImpl.java',
BROWSER_ROOT + 'payments/AndroidPaymentApp.java',
BROWSER_ROOT + 'permissions/AndroidPermissionRequester.java',
BROWSER_ROOT + 'share/ShareDelegateImpl.java',
BROWSER_ROOT + 'util/AccessibilityUtil.java',
BROWSER_ROOT + 'webapps/AddToHomescreenDialog.java',
BROWSER_ROOT + 'webapps/WebappOfflineDialog.java',
)
error_msg = '''
Android Use of CompatibleAlertDialogBuilder Check failed:
Your new code added one or more calls to the CompatibleAlertDialogBuilder
constructors, listed below.
We recommend you use ModalDialogProperties to show a dialog whenever possible
to support VR mode. You could only keep the AlertDialog if you are certain
that your new AlertDialog is not used in VR mode (e.g. preference, FRE)
If you are in doubt, contact
//src/chrome/android/java/src/org/chromium/chrome/browser/vr/VR_JAVA_OWNERS
'''
return _CheckReIgnoreComment(input_api, output_api, error_msg, blacklist,
NEW_COMPATIBLE_ALERTDIALOG_BUILDER_RE)
def _CheckReIgnoreComment(input_api, output_api, error_msg, blacklist,
regular_expression, error_files=None):
def CheckLine(current_file, line_number, line, problems, error_files):
"""Returns a boolean whether the line contains an error."""
if (regular_expression.search(line) and not COMMENT_RE.search(line)):
if error_files is not None:
error_files.append(current_file)
problems.append(
' %s:%d\n \t%s' %
(current_file.LocalPath(), line_number, line.strip()))
return True
return False
problems = []
sources = lambda x: input_api.FilterSourceFile(
x, white_list=(r'.*\.java$',), black_list=blacklist)
for f in input_api.AffectedFiles(include_deletes=False,
file_filter=sources):
previous_line = ''
for line_number, line in f.ChangedContents():
if not CheckLine(f, line_number, line, problems, error_files):
if previous_line:
two_lines = '\n'.join([previous_line, line])
CheckLine(f, line_number, two_lines, problems, error_files)
previous_line = line
else:
previous_line = ''
if problems:
return [output_api.PresubmitError(error_msg, problems)]
return []
|
bsd-3-clause
| -43,752,571,225,410,950 | 41.614286 | 80 | 0.721086 | false | 3.66162 | false | false | false |
Uli1/mapnik
|
scons/scons-local-2.4.0/SCons/Tool/ifort.py
|
1
|
3327
|
"""SCons.Tool.ifort
Tool-specific initialization for newer versions of the Intel Fortran Compiler
for Linux/Windows (and possibly Mac OS X).
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/ifort.py rel_2.4.0:3365:9259ea1c13d7 2015/09/21 14:03:43 bdbaddog"
import SCons.Defaults
from SCons.Scanner.Fortran import FortranScan
from FortranCommon import add_all_to_env
def generate(env):
"""Add Builders and construction variables for ifort to an Environment."""
# ifort supports Fortran 90 and Fortran 95
# Additionally, ifort recognizes more file extensions.
fscan = FortranScan("FORTRANPATH")
SCons.Tool.SourceFileScanner.add_scanner('.i', fscan)
SCons.Tool.SourceFileScanner.add_scanner('.i90', fscan)
if 'FORTRANFILESUFFIXES' not in env:
env['FORTRANFILESUFFIXES'] = ['.i']
else:
env['FORTRANFILESUFFIXES'].append('.i')
if 'F90FILESUFFIXES' not in env:
env['F90FILESUFFIXES'] = ['.i90']
else:
env['F90FILESUFFIXES'].append('.i90')
add_all_to_env(env)
fc = 'ifort'
for dialect in ['F77', 'F90', 'FORTRAN', 'F95']:
env['%s' % dialect] = fc
env['SH%s' % dialect] = '$%s' % dialect
if env['PLATFORM'] == 'posix':
env['SH%sFLAGS' % dialect] = SCons.Util.CLVar('$%sFLAGS -fPIC' % dialect)
if env['PLATFORM'] == 'win32':
# On Windows, the ifort compiler specifies the object on the
# command line with -object:, not -o. Massage the necessary
# command-line construction variables.
for dialect in ['F77', 'F90', 'FORTRAN', 'F95']:
for var in ['%sCOM' % dialect, '%sPPCOM' % dialect,
'SH%sCOM' % dialect, 'SH%sPPCOM' % dialect]:
env[var] = env[var].replace('-o $TARGET', '-object:$TARGET')
env['FORTRANMODDIRPREFIX'] = "/module:"
else:
env['FORTRANMODDIRPREFIX'] = "-module "
def exists(env):
return env.Detect('ifort')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
lgpl-2.1
| -6,600,932,587,542,999,000 | 36.806818 | 104 | 0.685903 | false | 3.676243 | false | false | false |
allenai/allennlp
|
allennlp/training/trainer.py
|
1
|
56811
|
import datetime
import logging
import math
import os
import re
import time
import traceback
from contextlib import contextmanager
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Type, Union
from allennlp.common.util import int_to_device
import torch
import torch.distributed as dist
from torch.cuda import amp
import torch.optim.lr_scheduler
from torch.nn.parallel import DistributedDataParallel
from torch.nn.utils import clip_grad_norm_
from allennlp.common import Lazy, Registrable, Tqdm
from allennlp.common import util as common_util
from allennlp.common.checks import ConfigurationError, check_for_gpu
from allennlp.data import DataLoader
from allennlp.data.dataloader import TensorDict
from allennlp.models.model import Model
from allennlp.nn import util as nn_util
from allennlp.training import util as training_util
from allennlp.training.checkpointer import Checkpointer
from allennlp.training.learning_rate_schedulers import LearningRateScheduler
from allennlp.training.metric_tracker import MetricTracker
from allennlp.training.momentum_schedulers import MomentumScheduler
from allennlp.training.moving_average import MovingAverage
from allennlp.training.optimizers import Optimizer
from allennlp.training.tensorboard_writer import TensorboardWriter
logger = logging.getLogger(__name__)
class Trainer(Registrable):
"""
The base class for an AllenNLP trainer. It can do pretty much
anything you want. Your subclass should implement `train`
and also probably `from_params`.
"""
default_implementation = "gradient_descent"
def __init__(
self,
serialization_dir: str = None,
cuda_device: Optional[Union[int, torch.device]] = None,
distributed: bool = False,
local_rank: int = 0,
world_size: int = 1,
) -> None:
if cuda_device is None:
from torch import cuda
if cuda.device_count() > 0:
cuda_device = 0
else:
cuda_device = -1
check_for_gpu(cuda_device)
self._serialization_dir = serialization_dir
if isinstance(cuda_device, list):
raise ConfigurationError(
"In allennlp 1.0, the Trainer can only be assigned a single `cuda_device`. "
"Instead, we use torch's DistributedDataParallel at the command level, meaning "
"our Trainer always uses a single GPU per process."
)
if distributed and world_size <= 1:
raise ConfigurationError(
"Distributed training can be performed only with more than 1 device. Check "
"`cuda_device` key in the experiment configuration."
)
self.cuda_device = int_to_device(cuda_device)
self._distributed = distributed
self._rank = local_rank
self._master = self._rank == 0
self._world_size = world_size
def train(self) -> Dict[str, Any]:
"""
Train a model and return the results.
"""
raise NotImplementedError
@contextmanager
def get_checkpoint_state(self) -> Iterator[Tuple[Dict[str, Any], Dict[str, Any]]]:
"""
Returns a tuple of (model state, training state), where training state could have several
        internal components (e.g., for an optimizer, learning rate scheduler, etc.).
This is a context manager, and should be called as `with trainer.get_checkpoint_state() as
state:`, so that the trainer has the opportunity to change and restore its internal state
for checkpointing. This is used, e.g., for moving averages of model weights.
"""
raise NotImplementedError
class BatchCallback(Registrable):
"""
An optional callback that you can pass to the `GradientDescentTrainer` that will be called at
the end of every batch, during both training and validation. The default implementation
does nothing. You can implement your own callback and do whatever you want, such as saving
predictions to disk or extra logging.
"""
def __call__(
self,
trainer: "GradientDescentTrainer",
batch_inputs: List[List[TensorDict]],
batch_outputs: List[Dict[str, Any]],
batch_metrics: Dict[str, Any],
epoch: int,
batch_number: int,
is_training: bool,
is_master: bool,
) -> None:
pass
@BatchCallback.register("tensorboard-memory-usage")
class TensoboardBatchMemoryUsage(BatchCallback):
"""
Logs the CPU and GPU memory usage to tensorboard on every batch.
This is mainly used for debugging as it can cause a significant slowdown in training.
"""
def __call__(
self,
trainer: "GradientDescentTrainer",
batch_inputs: List[List[TensorDict]],
batch_outputs: List[Dict[str, Any]],
batch_metrics: Dict[str, Any],
epoch: int,
batch_number: int,
is_training: bool,
is_master: bool,
) -> None:
# In the distributed case we need to call this from every worker, since every
# worker reports its own memory usage.
cpu_memory_usage = common_util.peak_cpu_memory()
gpu_memory_usage = common_util.peak_gpu_memory()
# But we only want to log from the master process.
if is_master:
trainer._tensorboard.log_memory_usage(cpu_memory_usage, gpu_memory_usage)
BatchCallback.register("null")(BatchCallback)
class EpochCallback(Registrable):
"""
An optional callback that you can pass to the `GradientDescentTrainer` that will be called at
the end of every epoch (and before the start of training, with `epoch=-1`). The default
implementation does nothing. You can implement your own callback and do whatever you want, such
as additional modifications of the trainer's state in between epochs.
"""
def __call__(
self,
trainer: "GradientDescentTrainer",
metrics: Dict[str, Any],
epoch: int,
is_master: bool,
) -> None:
pass
EpochCallback.register("null")(EpochCallback)
@EpochCallback.register("track_epoch_callback")
class TrackEpochCallback:
"""
A callback that you can pass to the `GradientDescentTrainer` to access the current epoch number
in your model during training. This callback sets `model.epoch`, which can be read inside of
`model.forward()`. Since the EpochCallback passes `epoch=-1`
at the start of the training, we set `model.epoch = epoch + 1` which now denotes the number of
completed epochs at a given training state.
"""
def __init__(self):
super().__init__()
def __call__(
self,
trainer: "GradientDescentTrainer",
metrics: Dict[str, Any],
epoch: int,
is_master: bool,
) -> None:
trainer.model.epoch = epoch + 1
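# Illustrative sketch (not part of the original file): a small helper that a
# model's `forward()` could call to read the value `TrackEpochCallback`
# assigns. The helper name and the default are assumptions for this example;
# the only fact taken from the source is that the callback sets `model.epoch`
# (0 at the start of the first epoch, since it first fires with epoch=-1).
def _example_current_epoch(model: Model, default: int = 0) -> int:
    # `model.epoch` only exists once the callback has been registered and run.
    return getattr(model, "epoch", default)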
_BasicCallback = Union[BatchCallback, EpochCallback]
class _TrainerCallbackMeta(type):
def __new__(cls, name, bases, dct):
"""
Add subclasses that wrap the `TrainerCallback` into other interfaces.
"""
subtype = super().__new__(cls, name, bases, dct)
# These subtypes wrap the `TrainerCallback` into the `_BasicCallback` interfaces.
subtype.Batch = cls._make_callback_type(BatchCallback, subtype.on_batch)
subtype.Epoch = cls._make_callback_type(EpochCallback, subtype.on_epoch)
subtype.End = cls._make_callback_type(EpochCallback, subtype.on_end)
return subtype
@classmethod
def _make_callback_type(
cls,
call_type: Type[_BasicCallback],
call: Callable[[], None],
) -> Type[_BasicCallback]: # type: ignore
class _Wrapper(call_type): # type: ignore
def __init__(self, trainer_callback: "TrainerCallback"):
self.trainer_callback = trainer_callback
def __call__(self, trainer: "GradientDescentTrainer", *args, **kwargs):
call(self.trainer_callback, trainer, *args, **kwargs) # type: ignore
return _Wrapper
class TrainerCallback(Registrable, metaclass=_TrainerCallbackMeta):
"""
A general callback object that wraps all three types of callbacks into one.
Rather than a `__call__` method, this class has `on_batch`, `on_epoch`, and `on_end` methods, corresponding to
each callback type. Each one receives the state of the wrapper object as `self`. This enables easier state
sharing between related callbacks.
Under the hood, this is a metaclass that creates wrapping subclasses each time a subclass is created.
"""
def on_batch(
self,
trainer: "GradientDescentTrainer",
batch_inputs: List[List[TensorDict]],
batch_outputs: List[Dict[str, Any]],
batch_metrics: Dict[str, Any],
epoch: int,
batch_number: int,
is_training: bool,
is_master: bool,
) -> None:
"""
This callback hook is called after the end of each batch. This is equivalent to `BatchCallback`.
"""
pass
def on_epoch(
self,
trainer: "GradientDescentTrainer",
metrics: Dict[str, Any],
epoch: int,
is_master: bool,
) -> None:
"""
This callback hook is called after the end of each epoch. This is equivalent to `EpochCallback`.
"""
pass
def on_end(
self,
trainer: "GradientDescentTrainer",
metrics: Dict[str, Any],
epoch: int,
is_master: bool,
) -> None:
"""
This callback hook is called after the final training epoch. The `epoch` is passed as an argument.
"""
pass
def batch(self):
"""
        Construct a `BatchCallback` wrapper for this `TrainerCallback`.
The `cls.Batch` type is created by the metaclass.
"""
return self.Batch(self)
def epoch(self):
"""
Construct an `EpochCallback` wrapper for this instance.
The `cls.Epoch` type is created by the metaclass.
"""
return self.Epoch(self)
def end(self):
"""
Construct an `EpochCallback` wrapping the `on_end` end-of-training hook.
The `cls.End` type is created by the metaclass.
"""
return self.End(self)
TrainerCallback.register("null")(TrainerCallback)
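# Illustrative sketch (not part of the original file): a `TrainerCallback`
# subclass whose batch- and epoch-level hooks share state through `self`,
# which is exactly what the metaclass wrapping above enables. The class name
# and the logged message are assumptions for this example; the hook signatures
# are taken from the source. An instance would be passed to the trainer via
# `trainer_callbacks=[_ExampleBatchCountingCallback()]`.
class _ExampleBatchCountingCallback(TrainerCallback):
    def __init__(self) -> None:
        self._training_batches_seen = 0

    def on_batch(
        self,
        trainer: "GradientDescentTrainer",
        batch_inputs: List[List[TensorDict]],
        batch_outputs: List[Dict[str, Any]],
        batch_metrics: Dict[str, Any],
        epoch: int,
        batch_number: int,
        is_training: bool,
        is_master: bool,
    ) -> None:
        if is_training:
            self._training_batches_seen += 1

    def on_epoch(
        self,
        trainer: "GradientDescentTrainer",
        metrics: Dict[str, Any],
        epoch: int,
        is_master: bool,
    ) -> None:
        if is_master:
            logger.info(
                "epoch %d done; %d training batches seen so far",
                epoch,
                self._training_batches_seen,
            )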
@Trainer.register("gradient_descent", constructor="from_partial_objects")
class GradientDescentTrainer(Trainer):
"""
A trainer for doing supervised learning with gradient descent. It just takes a labeled dataset
and a `DataLoader`, and uses the supplied `Optimizer` to learn the weights for your model over
some fixed number of epochs. You can also pass in a validation dataloader and enable early
stopping. There are many other bells and whistles as well.
Registered as a `Trainer` with the name "gradient_descent" (and is also the default `Trainer`).
The constructor that is registered is `from_partial_objects` - see the arguments to that
function for the exact keys that should be used, if you are using a configuration file. They
largely match the arguments to `__init__`, and we don't repeat their docstrings in
`from_partial_objects`.
[0]: https://tinyurl.com/y5mv44fw
# Parameters
model : `Model`, required.
An AllenNLP model to be optimized. Pytorch Modules can also be optimized if
their `forward` method returns a dictionary with a "loss" key, containing a
scalar tensor representing the loss function to be optimized.
If you are training your model using GPUs, your model should already be
on the correct device. (If you are using our `train` command this will be
handled for you.)
In a typical AllenNLP configuration file, this parameter does not get an entry under the
"trainer", it gets constructed separately.
optimizer : `torch.nn.Optimizer`, required.
An instance of a Pytorch Optimizer, instantiated with the parameters of the
model to be optimized.
data_loader : `DataLoader`, required.
A `DataLoader` containing your `Dataset`, yielding padded indexed batches.
In a typical AllenNLP configuration file, this parameter does not get an entry under the
"trainer", it gets constructed separately.
patience : `Optional[int] > 0`, optional (default=`None`)
Number of epochs to be patient before early stopping: the training is stopped
after `patience` epochs with no improvement. If given, it must be `> 0`.
If None, early stopping is disabled.
validation_metric : `str`, optional (default=`"-loss"`)
Validation metric to measure for whether to stop training using patience
and whether to serialize an `is_best` model each epoch. The metric name
must be prepended with either "+" or "-", which specifies whether the metric
is an increasing or decreasing function.
validation_data_loader : `DataLoader`, optional (default=`None`)
A `DataLoader` to use for the validation set. If `None`, then
use the training `DataLoader` with the validation data.
In a typical AllenNLP configuration file, this parameter does not get an entry under the
"trainer", it gets constructed separately.
num_epochs : `int`, optional (default = `20`)
Number of training epochs.
serialization_dir : `str`, optional (default=`None`)
Path to directory for saving and loading model files. Models will not be saved if
this parameter is not passed.
In a typical AllenNLP configuration file, this parameter does not get an entry under the
"trainer", it gets constructed separately.
checkpointer : `Checkpointer`, optional (default=`None`)
A `Checkpointer` is responsible for periodically saving model weights. If none is given
here, we will construct one with default parameters.
cuda_device : `int`, optional (default = `-1`)
An integer specifying the CUDA device(s) to use for this process. If -1, the CPU is used.
Data parallelism is controlled at the allennlp train level, so each trainer will have a single
GPU.
grad_norm : `float`, optional, (default = `None`).
If provided, gradient norms will be rescaled to have a maximum of this value.
grad_clipping : `float`, optional (default = `None`).
If provided, gradients will be clipped `during the backward pass` to have an (absolute)
maximum of this value. If you are getting `NaNs` in your gradients during training
that are not solved by using `grad_norm`, you may need this.
learning_rate_scheduler : `LearningRateScheduler`, optional (default = `None`)
If specified, the learning rate will be decayed with respect to
this schedule at the end of each epoch (or batch, if the scheduler implements
the `step_batch` method). If you use `torch.optim.lr_scheduler.ReduceLROnPlateau`,
this will use the `validation_metric` provided to determine if learning has plateaued.
To support updating the learning rate on every batch, this can optionally implement
`step_batch(batch_num_total)` which updates the learning rate given the batch number.
momentum_scheduler : `MomentumScheduler`, optional (default = `None`)
If specified, the momentum will be updated at the end of each batch or epoch
according to the schedule.
tensorboard_writer : `TensorboardWriter`, optional
If this is not provided, we will construct a `TensorboardWriter` with default
parameters and use that.
moving_average : `MovingAverage`, optional, (default = `None`)
If provided, we will maintain moving averages for all parameters. During training, we
employ a shadow variable for each parameter, which maintains the moving average. During
        evaluation, we back up the original parameters and assign the moving averages to corresponding
parameters. Be careful that when saving the checkpoint, we will save the moving averages of
parameters. This is necessary because we want the saved model to perform as well as the validated
model if we load it later. But this may cause problems if you restart the training from checkpoint.
batch_callbacks : `List[BatchCallback]`, optional (default = `None`)
A list of callbacks that will be called at the end of every batch, during both train and
validation.
epoch_callbacks : `List[EpochCallback]`, optional (default = `None`)
A list of callbacks that will be called at the end of every epoch, and at the start of
training (with epoch = -1).
end_callbacks : `List[EpochCallback]`, optional (default = `None`)
A list of callbacks that will be called after the final epoch at the end of training. The type of the
callbacks is the same as `epoch_callbacks`.
trainer_callbacks : `List[TrainerCallback]`, optional (default = `None`)
A list of callbacks that will be called at each batch, epoch, and at the start and end of training.
distributed : `bool`, optional, (default = `False`)
If set, PyTorch's `DistributedDataParallel` is used to train the model in multiple GPUs. This also
requires `world_size` to be greater than 1.
In a typical AllenNLP configuration file, this parameter does not get an entry under the
"trainer", it gets constructed separately (you need a top-level "distributed" key, next to
the "trainer" entry, that specifies a list of "cuda_devices").
local_rank : `int`, optional, (default = `0`)
This is the unique identifier of the `Trainer` in a distributed process group. The GPU device id is
used as the rank.
In a typical AllenNLP configuration file, this parameter does not get an entry under the
"trainer", it gets constructed separately.
world_size : `int`, (default = `1`)
The number of `Trainer` workers participating in the distributed training.
In a typical AllenNLP configuration file, this parameter does not get an entry under the
"trainer", it gets constructed separately.
num_gradient_accumulation_steps : `int`, optional, (default = `1`)
Gradients are accumulated for the given number of steps before doing an optimizer step. This can
        be useful to accommodate batches that are larger than the RAM size. Refer to [Thomas Wolf's
post][0] for details on Gradient Accumulation.
use_amp : `bool`, optional, (default = `False`)
If `True`, we'll train using [Automatic Mixed Precision](https://pytorch.org/docs/stable/amp.html).
"""
def __init__(
self,
model: Model,
optimizer: torch.optim.Optimizer,
data_loader: DataLoader,
patience: Optional[int] = None,
validation_metric: str = "-loss",
validation_data_loader: DataLoader = None,
num_epochs: int = 20,
serialization_dir: Optional[str] = None,
checkpointer: Checkpointer = None,
cuda_device: Optional[Union[int, torch.device]] = None,
grad_norm: Optional[float] = None,
grad_clipping: Optional[float] = None,
learning_rate_scheduler: Optional[LearningRateScheduler] = None,
momentum_scheduler: Optional[MomentumScheduler] = None,
tensorboard_writer: TensorboardWriter = None,
moving_average: Optional[MovingAverage] = None,
batch_callbacks: List[BatchCallback] = None,
epoch_callbacks: List[EpochCallback] = None,
end_callbacks: List[EpochCallback] = None,
trainer_callbacks: List[TrainerCallback] = None,
distributed: bool = False,
local_rank: int = 0,
world_size: int = 1,
num_gradient_accumulation_steps: int = 1,
use_amp: bool = False,
) -> None:
super().__init__(serialization_dir, cuda_device, distributed, local_rank, world_size)
# I am not calling move_to_gpu here, because if the model is
# not already on the GPU then the optimizer is going to be wrong.
self.model = model
self.data_loader = data_loader
self._validation_data_loader = validation_data_loader
self.optimizer = optimizer
if patience is None: # no early stopping
if validation_data_loader is not None:
logger.warning(
"You provided a validation dataset but patience was set to None, "
"meaning that early stopping is disabled"
)
elif (not isinstance(patience, int)) or patience <= 0:
raise ConfigurationError(
'{} is an invalid value for "patience": it must be a positive integer '
"or None (if you want to disable early stopping)".format(patience)
)
# For tracking is_best_so_far and should_stop_early
self._metric_tracker = MetricTracker(patience, validation_metric)
# Get rid of + or -
self._validation_metric = validation_metric[1:]
self._num_epochs = num_epochs
self._checkpointer: Optional[Checkpointer] = checkpointer
if checkpointer is None and serialization_dir is not None:
self._checkpointer = Checkpointer(serialization_dir)
self._grad_norm = grad_norm
self._grad_clipping = grad_clipping
self._learning_rate_scheduler = learning_rate_scheduler
self._momentum_scheduler = momentum_scheduler
self._moving_average = moving_average
self._batch_callbacks = batch_callbacks or []
self._epoch_callbacks = epoch_callbacks or []
self._end_callbacks = end_callbacks or []
for callback in trainer_callbacks or []:
self._batch_callbacks.append(callback.batch())
self._epoch_callbacks.append(callback.epoch())
self._end_callbacks.append(callback.end())
# We keep the total batch number as an instance variable because it
# is used inside a closure for the hook which logs activations in
# `_enable_activation_logging`.
self._batch_num_total = 0
self._tensorboard = tensorboard_writer or TensorboardWriter(serialization_dir)
self._tensorboard.get_batch_num_total = lambda: self._batch_num_total
self._tensorboard.enable_activation_logging(self.model)
self._last_log = 0.0 # time of last logging
self._num_gradient_accumulation_steps = num_gradient_accumulation_steps
# Enable automatic mixed precision training.
self._scaler: Optional[amp.GradScaler] = None
self._use_amp = use_amp
if self._use_amp:
if self.cuda_device == torch.device("cpu"):
raise ValueError("Using AMP requires a cuda device")
self._scaler = amp.GradScaler()
# Using `DistributedDataParallel`(ddp) brings in a quirk wrt AllenNLP's `Model` interface and its
# usage. A `Model` object is wrapped by `ddp`, but assigning the wrapped model to `self.model`
# will break the usages such as `Model.get_regularization_penalty`, `Model.get_metrics`, etc.
#
# Hence a reference to Pytorch's object is maintained in the case of distributed training and in the
# normal case, reference to `Model` is retained. This reference is only used in
# these places: `model.__call__`, `model.train` and `model.eval`.
if self._distributed:
self._pytorch_model = DistributedDataParallel(
self.model,
device_ids=None if self.cuda_device == torch.device("cpu") else [self.cuda_device],
find_unused_parameters=True,
)
else:
self._pytorch_model = self.model
def rescale_gradients(self) -> float:
"""
Performs gradient rescaling. Is a no-op if gradient rescaling is not enabled.
Returns the norm of the gradients.
"""
parameters_to_clip = [p for p in self.model.parameters() if p.grad is not None]
if self._grad_norm:
if self._scaler is not None:
# Need to first unscale gradients in order to clip as usual.
self._scaler.unscale_(self.optimizer)
return clip_grad_norm_(parameters_to_clip, self._grad_norm)
else:
return torch.norm(
torch.stack([torch.norm(p.grad.detach()) for p in parameters_to_clip])
)
def batch_outputs(self, batch: TensorDict, for_training: bool) -> Dict[str, torch.Tensor]:
"""
Does a forward pass on the given batch and returns the output dictionary that the model
returns, after adding any specified regularization penalty to the loss (if training).
"""
batch = nn_util.move_to_device(batch, self.cuda_device)
output_dict = self._pytorch_model(**batch)
if for_training:
try:
assert "loss" in output_dict
regularization_penalty = self.model.get_regularization_penalty()
if regularization_penalty is not None:
output_dict["reg_loss"] = regularization_penalty
output_dict["loss"] += regularization_penalty
except AssertionError:
if for_training:
raise RuntimeError(
"The model you are trying to optimize does not contain a"
" 'loss' key in the output of model.forward(inputs)."
)
return output_dict
def _train_epoch(self, epoch: int) -> Dict[str, float]:
"""
Trains one epoch and returns metrics.
"""
logger.info("Epoch %d/%d", epoch, self._num_epochs - 1)
cpu_memory_usage = []
for worker, memory in common_util.peak_cpu_memory().items():
cpu_memory_usage.append((worker, memory))
logger.info(f"Worker {worker} memory usage: {common_util.format_size(memory)}")
gpu_memory_usage = []
for gpu, memory in common_util.peak_gpu_memory().items():
gpu_memory_usage.append((gpu, memory))
logger.info(f"GPU {gpu} memory usage: {common_util.format_size(memory)}")
regularization_penalty = self.model.get_regularization_penalty()
train_loss = 0.0
batch_loss = 0.0
train_reg_loss = None if regularization_penalty is None else 0.0
batch_reg_loss = None if regularization_penalty is None else 0.0
# Set the model to "train" mode.
self._pytorch_model.train()
# Get tqdm for the training batches
batch_generator = iter(self.data_loader)
batch_group_generator = common_util.lazy_groups_of(
batch_generator, self._num_gradient_accumulation_steps
)
logger.info("Training")
num_training_batches: Union[int, float]
try:
len_data_loader = len(self.data_loader)
num_training_batches = math.ceil(
len_data_loader / self._num_gradient_accumulation_steps
)
except TypeError:
num_training_batches = float("inf")
# Having multiple tqdm bars in case of distributed training will be a mess. Hence only the master's
# progress is shown
if self._master:
batch_group_generator_tqdm = Tqdm.tqdm(
batch_group_generator, total=num_training_batches
)
else:
batch_group_generator_tqdm = batch_group_generator
self._last_log = time.time()
batches_this_epoch = 0
if self._batch_num_total is None:
self._batch_num_total = 0
done_early = False
for batch_group in batch_group_generator_tqdm:
if self._distributed:
# Check whether the other workers have stopped already (due to differing amounts of
# data in each). If so, we can't proceed because we would hang when we hit the
                # barrier implicit in Model.forward. We use an IntTensor instead of a BoolTensor
# here because NCCL process groups apparently don't support BoolTensor.
done = torch.tensor(0, device=self.cuda_device)
torch.distributed.all_reduce(done, torch.distributed.ReduceOp.SUM)
if done.item() > 0:
done_early = True
logger.warning(
f"Worker {torch.distributed.get_rank()} finishing training early! "
"This implies that there is an imbalance in your training "
"data across the workers and that some amount of it will be "
"ignored. A small amount of this is fine, but a major imbalance "
"should be avoided. Note: This warning will appear unless your "
"data is perfectly balanced."
)
break
batches_this_epoch += 1
self._batch_num_total += 1
batch_num_total = self._batch_num_total
# Zero gradients.
# NOTE: this is actually more efficient than calling `self.optimizer.zero_grad()`
# because it avoids a read op when the gradients are first updated below.
for param_group in self.optimizer.param_groups:
for p in param_group["params"]:
p.grad = None
batch_loss = 0.0
batch_group_outputs = []
for batch in batch_group:
with amp.autocast(self._use_amp):
batch_outputs = self.batch_outputs(batch, for_training=True)
batch_group_outputs.append(batch_outputs)
loss = batch_outputs["loss"]
reg_loss = batch_outputs.get("reg_loss")
if torch.isnan(loss):
raise ValueError("nan loss encountered")
loss = loss / len(batch_group)
batch_loss += loss.item()
if reg_loss is not None:
reg_loss = reg_loss / len(batch_group)
batch_reg_loss = reg_loss.item()
train_reg_loss += batch_reg_loss # type: ignore
if self._scaler is not None:
self._scaler.scale(loss).backward()
else:
loss.backward()
train_loss += batch_loss
batch_grad_norm = self.rescale_gradients()
# This does nothing if batch_num_total is None or you are using a
# scheduler which doesn't update per batch.
if self._learning_rate_scheduler:
self._learning_rate_scheduler.step_batch(batch_num_total)
if self._momentum_scheduler:
self._momentum_scheduler.step_batch(batch_num_total)
param_updates = None
if self._tensorboard.should_log_histograms_this_batch() and self._master:
# Get the magnitude of parameter updates for logging. We need to do some
# computation before and after the optimizer step, and it's expensive because of
# GPU/CPU copies (necessary for large models, and for shipping to tensorboard), so
# we don't do this every batch, only when it's requested.
param_updates = {
name: param.detach().cpu().clone()
for name, param in self.model.named_parameters()
}
if self._scaler is not None:
self._scaler.step(self.optimizer)
self._scaler.update()
else:
self.optimizer.step()
for name, param in self.model.named_parameters():
param_updates[name].sub_(param.detach().cpu())
else:
if self._scaler is not None:
self._scaler.step(self.optimizer)
self._scaler.update()
else:
self.optimizer.step()
# Update moving averages
if self._moving_average is not None:
self._moving_average.apply(batch_num_total)
# Update the description with the latest metrics
metrics = training_util.get_metrics(
self.model,
train_loss,
train_reg_loss,
batch_loss,
batch_reg_loss,
batches_this_epoch,
world_size=self._world_size,
cuda_device=self.cuda_device,
)
if self._master:
# Updating tqdm only for the master as the trainers wouldn't have one
description = training_util.description_from_metrics(metrics)
batch_group_generator_tqdm.set_description(description, refresh=False)
self._tensorboard.log_batch(
self.model,
self.optimizer,
batch_grad_norm,
metrics,
batch_group,
param_updates,
)
if self._checkpointer is not None:
self._checkpointer.maybe_save_checkpoint(self, epoch, batches_this_epoch)
for callback in self._batch_callbacks:
callback(
self,
batch_group,
batch_group_outputs,
metrics,
epoch,
batches_this_epoch,
is_training=True,
is_master=self._master,
)
if self._distributed and not done_early:
logger.warning(
f"Worker {torch.distributed.get_rank()} completed its entire epoch (training)."
)
# Indicate that we're done so that any workers that have remaining data stop the epoch early.
done = torch.tensor(1, device=self.cuda_device)
torch.distributed.all_reduce(done, torch.distributed.ReduceOp.SUM)
assert done.item()
# Let all workers finish their epoch before computing
# the final statistics for the epoch.
if self._distributed:
dist.barrier()
metrics = training_util.get_metrics(
self.model,
train_loss,
train_reg_loss,
batch_loss=None,
batch_reg_loss=None,
num_batches=batches_this_epoch,
reset=True,
world_size=self._world_size,
cuda_device=self.cuda_device,
)
for (worker, memory) in cpu_memory_usage:
metrics["worker_" + str(worker) + "_memory_MB"] = memory / (1024 * 1024)
for (gpu_num, memory) in gpu_memory_usage:
metrics["gpu_" + str(gpu_num) + "_memory_MB"] = memory / (1024 * 1024)
return metrics
def _validation_loss(self, epoch: int) -> Tuple[float, Optional[float], int]:
"""
Computes the validation loss. Returns it and the number of batches.
"""
logger.info("Validating")
self._pytorch_model.eval()
# Replace parameter values with the shadow values from the moving averages.
if self._moving_average is not None:
self._moving_average.assign_average_value()
if self._validation_data_loader is not None:
validation_data_loader = self._validation_data_loader
else:
raise ConfigurationError(
"Validation results cannot be calculated without a validation_data_loader"
)
regularization_penalty = self.model.get_regularization_penalty()
# Having multiple tqdm bars in case of distributed training will be a mess. Hence only the master's
# progress is shown
if self._master:
val_generator_tqdm = Tqdm.tqdm(validation_data_loader)
else:
val_generator_tqdm = validation_data_loader
batches_this_epoch = 0
val_loss = 0.0
val_batch_loss = 0.0
val_reg_loss = None if regularization_penalty is None else 0.0
val_batch_reg_loss = None if regularization_penalty is None else 0.0
done_early = False
for batch in val_generator_tqdm:
if self._distributed:
# Check whether the other workers have stopped already (due to differing amounts of
# data in each). If so, we can't proceed because we would hang when we hit the
                # barrier implicit in Model.forward. We use an IntTensor instead of a BoolTensor
# here because NCCL process groups apparently don't support BoolTensor.
done = torch.tensor(0, device=self.cuda_device)
torch.distributed.all_reduce(done, torch.distributed.ReduceOp.SUM)
if done.item() > 0:
done_early = True
logger.warning(
f"Worker {torch.distributed.get_rank()} finishing validation early! "
"This implies that there is an imbalance in your validation "
"data across the workers and that some amount of it will be "
"ignored. A small amount of this is fine, but a major imbalance "
"should be avoided. Note: This warning will appear unless your "
"data is perfectly balanced."
)
break
with amp.autocast(self._use_amp):
batch_outputs = self.batch_outputs(batch, for_training=False)
loss = batch_outputs.get("loss")
reg_loss = batch_outputs.get("reg_loss")
if loss is not None:
# You shouldn't necessarily have to compute a loss for validation, so we allow for
# `loss` to be None. We need to be careful, though - `batches_this_epoch` is
# currently only used as the divisor for the loss function, so we can safely only
# count those batches for which we actually have a loss. If this variable ever
# gets used for something else, we might need to change things around a bit.
batches_this_epoch += 1
val_batch_loss = loss.item()
val_loss += val_batch_loss
if reg_loss is not None:
val_batch_reg_loss = reg_loss.item()
val_reg_loss += val_batch_reg_loss # type: ignore
# Update the description with the latest metrics
val_metrics = training_util.get_metrics(
self.model,
val_loss,
val_reg_loss,
val_batch_loss,
val_batch_reg_loss,
batches_this_epoch,
world_size=self._world_size,
cuda_device=self.cuda_device,
)
description = training_util.description_from_metrics(val_metrics)
if self._master:
val_generator_tqdm.set_description(description, refresh=False)
for callback in self._batch_callbacks:
callback(
self,
[batch],
[batch_outputs],
val_metrics,
epoch,
batches_this_epoch,
is_training=False,
is_master=self._master,
)
if self._distributed and not done_early:
logger.warning(
f"Worker {torch.distributed.get_rank()} completed its entire epoch (validation)."
)
# Indicate that we're done so that any workers that have remaining data stop validation early.
done = torch.tensor(1, device=self.cuda_device)
torch.distributed.all_reduce(done, torch.distributed.ReduceOp.SUM)
assert done.item()
# Now restore the original parameter values.
if self._moving_average is not None:
self._moving_average.restore()
return val_loss, val_reg_loss, batches_this_epoch
def train(self) -> Dict[str, Any]:
"""
Trains the supplied model with the supplied parameters.
"""
try:
return self._try_train()
finally:
# make sure pending events are flushed to disk and files are closed properly
self._tensorboard.close()
def _try_train(self) -> Dict[str, Any]:
try:
epoch_counter = self._restore_checkpoint()
except RuntimeError:
traceback.print_exc()
raise ConfigurationError(
"Could not recover training from the checkpoint. Did you mean to output to "
"a different serialization directory or delete the existing serialization "
"directory?"
)
training_util.enable_gradient_clipping(self.model, self._grad_clipping)
logger.info("Beginning training.")
val_metrics: Dict[str, float] = {}
this_epoch_val_metric: float = 0.0
metrics: Dict[str, Any] = {}
epochs_trained = 0
training_start_time = time.time()
metrics["best_epoch"] = self._metric_tracker.best_epoch
for key, value in self._metric_tracker.best_epoch_metrics.items():
metrics["best_validation_" + key] = value
for callback in self._epoch_callbacks:
callback(self, metrics={}, epoch=-1, is_master=self._master)
for epoch in range(epoch_counter, self._num_epochs):
epoch_start_time = time.time()
train_metrics = self._train_epoch(epoch)
if self._master and self._checkpointer is not None:
self._checkpointer.save_checkpoint(epoch, self, save_model_only=True)
# Wait for the master to finish saving the model checkpoint
if self._distributed:
dist.barrier()
# get peak of memory usage
for key, value in train_metrics.items():
if key.startswith("gpu_") and key.endswith("_memory_MB"):
metrics["peak_" + key] = max(metrics.get("peak_" + key, 0), value)
elif key.startswith("worker_") and key.endswith("_memory_MB"):
metrics["peak_" + key] = max(metrics.get("peak_" + key, 0), value)
if self._validation_data_loader is not None:
with torch.no_grad():
# We have a validation set, so compute all the metrics on it.
val_loss, val_reg_loss, num_batches = self._validation_loss(epoch)
# It is safe again to wait till the validation is done. This is
# important to get the metrics right.
if self._distributed:
dist.barrier()
val_metrics = training_util.get_metrics(
self.model,
val_loss,
val_reg_loss,
batch_loss=None,
batch_reg_loss=None,
num_batches=num_batches,
reset=True,
world_size=self._world_size,
cuda_device=self.cuda_device,
)
# Check validation metric for early stopping
this_epoch_val_metric = val_metrics[self._validation_metric]
self._metric_tracker.add_metric(this_epoch_val_metric)
if self._metric_tracker.should_stop_early():
logger.info("Ran out of patience. Stopping training.")
break
if self._master:
self._tensorboard.log_metrics(
train_metrics, val_metrics=val_metrics, log_to_console=True, epoch=epoch + 1
) # +1 because tensorboard doesn't like 0
# Create overall metrics dict
training_elapsed_time = time.time() - training_start_time
metrics["training_duration"] = str(datetime.timedelta(seconds=training_elapsed_time))
metrics["training_start_epoch"] = epoch_counter
metrics["training_epochs"] = epochs_trained
metrics["epoch"] = epoch
for key, value in train_metrics.items():
metrics["training_" + key] = value
for key, value in val_metrics.items():
metrics["validation_" + key] = value
if self._metric_tracker.is_best_so_far():
# Update all the best_ metrics.
# (Otherwise they just stay the same as they were.)
metrics["best_epoch"] = epoch
for key, value in val_metrics.items():
metrics["best_validation_" + key] = value
self._metric_tracker.best_epoch_metrics = val_metrics
if self._serialization_dir and self._master:
common_util.dump_metrics(
os.path.join(self._serialization_dir, f"metrics_epoch_{epoch}.json"),
metrics,
)
# The Scheduler API is agnostic to whether your schedule requires a validation metric -
# if it doesn't, the validation metric passed here is ignored.
if self._learning_rate_scheduler:
self._learning_rate_scheduler.step(this_epoch_val_metric)
if self._momentum_scheduler:
self._momentum_scheduler.step(this_epoch_val_metric)
if self._master and self._checkpointer is not None:
self._checkpointer.save_checkpoint(
epoch, self, is_best_so_far=self._metric_tracker.is_best_so_far()
)
# Wait for the master to finish saving the checkpoint
if self._distributed:
dist.barrier()
for callback in self._epoch_callbacks:
callback(self, metrics=metrics, epoch=epoch, is_master=self._master)
epoch_elapsed_time = time.time() - epoch_start_time
logger.info("Epoch duration: %s", datetime.timedelta(seconds=epoch_elapsed_time))
if epoch < self._num_epochs - 1:
training_elapsed_time = time.time() - training_start_time
estimated_time_remaining = training_elapsed_time * (
(self._num_epochs - epoch_counter) / float(epoch - epoch_counter + 1) - 1
)
formatted_time = str(datetime.timedelta(seconds=int(estimated_time_remaining)))
logger.info("Estimated training time remaining: %s", formatted_time)
epochs_trained += 1
for callback in self._end_callbacks:
callback(self, metrics=metrics, epoch=epoch, is_master=self._master)
# Load the best model state before returning
best_model_state = (
None if self._checkpointer is None else self._checkpointer.best_model_state()
)
if best_model_state:
self.model.load_state_dict(best_model_state)
return metrics
@contextmanager
def get_checkpoint_state(self) -> Iterator[Tuple[Dict[str, Any], Dict[str, Any]]]:
if self._moving_average is not None:
# Assigning average value to model parameters. The checkpointer will call
# `restore_state_after_checkpointing` when it is done to put this back to what it was.
self._moving_average.assign_average_value()
model_state = self.model.state_dict()
# These are the training states we need to persist.
training_states = {
"metric_tracker": self._metric_tracker.state_dict(),
"optimizer": self.optimizer.state_dict(),
"batch_num_total": self._batch_num_total,
}
# If we have a learning rate or momentum scheduler, we should persist them too.
if self._learning_rate_scheduler is not None:
training_states["learning_rate_scheduler"] = self._learning_rate_scheduler.state_dict()
if self._momentum_scheduler is not None:
training_states["momentum_scheduler"] = self._momentum_scheduler.state_dict()
try:
yield model_state, training_states
finally:
if self._moving_average is not None:
self._moving_average.restore()
def _restore_checkpoint(self) -> int:
"""
Restores the model and training state from the last saved checkpoint.
This includes an epoch count and optimizer state, which is serialized separately
from model parameters. This function should only be used to continue training -
if you wish to load a model for inference/load parts of a model into a new
computation graph, you should use the native Pytorch functions:
        `model.load_state_dict(torch.load("/path/to/model/weights.th"))`
If `self._serialization_dir` does not exist or does not contain any checkpointed weights,
this function will do nothing and return 0.
# Returns
epoch: `int`
The epoch at which to resume training, which should be one after the epoch
in the saved training state.
"""
if self._checkpointer is None:
return 0
model_state, training_state = self._checkpointer.restore_checkpoint()
if not training_state:
# No checkpoint to restore, start at 0
return 0
self.model.load_state_dict(model_state)
self.optimizer.load_state_dict(training_state["optimizer"])
if (
self._learning_rate_scheduler is not None
and "learning_rate_scheduler" in training_state
):
self._learning_rate_scheduler.load_state_dict(training_state["learning_rate_scheduler"])
if self._momentum_scheduler is not None and "momentum_scheduler" in training_state:
self._momentum_scheduler.load_state_dict(training_state["momentum_scheduler"])
training_util.move_optimizer_to_cuda(self.optimizer)
# Currently the `training_state` contains a serialized `MetricTracker`.
if "metric_tracker" in training_state:
self._metric_tracker.load_state_dict(training_state["metric_tracker"])
# It used to be the case that we tracked `val_metric_per_epoch`.
elif "val_metric_per_epoch" in training_state:
self._metric_tracker.clear()
self._metric_tracker.add_metrics(training_state["val_metric_per_epoch"])
# And before that we didn't track anything.
else:
self._metric_tracker.clear()
if isinstance(training_state["epoch"], int):
epoch_to_return = training_state["epoch"] + 1
else:
epoch_to_return = int(training_state["epoch"].split(".")[0]) + 1
# For older checkpoints with batch_num_total missing, default to old behavior where
# it is unchanged.
batch_num_total = training_state.get("batch_num_total")
if batch_num_total is not None:
self._batch_num_total = batch_num_total
return epoch_to_return
@classmethod
def from_partial_objects(
cls,
model: Model,
serialization_dir: str,
data_loader: DataLoader,
validation_data_loader: DataLoader = None,
local_rank: int = 0,
patience: int = None,
validation_metric: str = "-loss",
num_epochs: int = 20,
cuda_device: Optional[Union[int, torch.device]] = None,
grad_norm: float = None,
grad_clipping: float = None,
distributed: bool = False,
world_size: int = 1,
num_gradient_accumulation_steps: int = 1,
use_amp: bool = False,
no_grad: List[str] = None,
optimizer: Lazy[Optimizer] = Lazy(Optimizer.default),
learning_rate_scheduler: Lazy[LearningRateScheduler] = None,
momentum_scheduler: Lazy[MomentumScheduler] = None,
tensorboard_writer: Lazy[TensorboardWriter] = Lazy(TensorboardWriter),
moving_average: Lazy[MovingAverage] = None,
checkpointer: Lazy[Checkpointer] = Lazy(Checkpointer),
batch_callbacks: List[BatchCallback] = None,
epoch_callbacks: List[EpochCallback] = None,
end_callbacks: List[EpochCallback] = None,
trainer_callbacks: List[TrainerCallback] = None,
) -> "Trainer":
"""
This method exists so that we can have a documented method to construct this class using
`FromParams`. If you are not using `FromParams` or config files, you can safely ignore this
method.
The reason we can't just use `__init__` with `FromParams` here is because there are
sequential dependencies to this class's arguments. Anything that has a `Lazy[]` type
annotation needs something from one of the non-`Lazy` arguments. The `Optimizer` needs to
have the parameters from the `Model` before it's constructed, and the `Schedulers` need to
have the `Optimizer`. Because of this, the typical way we construct things `FromParams`
doesn't work, so we use `Lazy` to allow for constructing the objects sequentially.
If you're not using `FromParams`, you can just construct these arguments in the right order
yourself in your code and call the constructor directly.
"""
if cuda_device is None:
from torch import cuda
if cuda.device_count() > 0:
cuda_device = 0
else:
cuda_device = -1
check_for_gpu(cuda_device)
if cuda_device >= 0:
# Moving model to GPU here so that the optimizer state gets constructed on
# the right device.
model = model.cuda(cuda_device)
if no_grad:
for name, parameter in model.named_parameters():
if any(re.search(regex, name) for regex in no_grad):
parameter.requires_grad_(False)
parameters = [[n, p] for n, p in model.named_parameters() if p.requires_grad]
optimizer_ = optimizer.construct(model_parameters=parameters)
common_util.log_frozen_and_tunable_parameter_names(model)
batches_per_epoch: Optional[int]
try:
batches_per_epoch = len(data_loader)
batches_per_epoch = math.ceil(batches_per_epoch / num_gradient_accumulation_steps)
except TypeError:
batches_per_epoch = None
moving_average_ = (
None if moving_average is None else moving_average.construct(parameters=parameters)
)
learning_rate_scheduler_ = (
None
if learning_rate_scheduler is None
else learning_rate_scheduler.construct(
optimizer=optimizer_, num_epochs=num_epochs, num_steps_per_epoch=batches_per_epoch
)
)
momentum_scheduler_ = (
None
if momentum_scheduler is None
else momentum_scheduler.construct(optimizer=optimizer_)
)
checkpointer_ = checkpointer.construct(serialization_dir=serialization_dir)
tensorboard_writer_ = tensorboard_writer.construct(serialization_dir=serialization_dir)
return cls(
model,
optimizer_,
data_loader,
patience=patience,
validation_metric=validation_metric,
validation_data_loader=validation_data_loader,
num_epochs=num_epochs,
serialization_dir=serialization_dir,
cuda_device=cuda_device,
grad_norm=grad_norm,
grad_clipping=grad_clipping,
learning_rate_scheduler=learning_rate_scheduler_,
momentum_scheduler=momentum_scheduler_,
tensorboard_writer=tensorboard_writer_,
checkpointer=checkpointer_,
moving_average=moving_average_,
batch_callbacks=batch_callbacks,
epoch_callbacks=epoch_callbacks,
end_callbacks=end_callbacks,
trainer_callbacks=trainer_callbacks,
distributed=distributed,
local_rank=local_rank,
world_size=world_size,
num_gradient_accumulation_steps=num_gradient_accumulation_steps,
use_amp=use_amp,
)
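# Illustrative sketch (not part of the original file): roughly what the
# "trainer" section of a configuration file maps onto when it is parsed via
# `from_partial_objects`. The optimizer name "adam" and the numeric values are
# assumptions for this example; the remaining keys mirror the parameters
# above. `model`, `data_loader`, and `serialization_dir` are supplied
# separately, as noted in the class docstring.
_example_trainer_config = {
    "type": "gradient_descent",
    "num_epochs": 20,
    "patience": 5,
    "validation_metric": "-loss",
    "num_gradient_accumulation_steps": 1,
    "optimizer": {"type": "adam", "lr": 1e-3},
}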
|
apache-2.0
| 2,535,458,135,862,169,600 | 41.618905 | 114 | 0.608544 | false | 4.433856 | false | false | false |
google/playhvz
|
backend/constants.py
|
1
|
1174
|
#!/usr/bin/python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: High-level file comment."""
import sys
def main(argv):
pass
if __name__ == '__main__':
main(sys.argv)
HUMAN = 'resistance'
ZOMBIE = 'horde'
UNDECLARED = 'undeclared'
ALLEGIANCES = (HUMAN, ZOMBIE, UNDECLARED)
TEST_ENDPOINT = 'http://localhost:8080'
PLAYER_VOLUNTEER_ARGS = (
'advertising', 'logistics', 'communications', 'moderator',
'cleric', 'sorcerer', 'admin', 'photographer', 'chronicler',
'server', 'client', 'android', 'ios')
ACCESS_USER = 'user'
ACCESS_ADMIN = 'admin'
ACCESS_ADMIN_OR_PLAYER = 'adminOrPlayer'
ACCESS_PLAYER = 'player'
|
apache-2.0
| -48,341,932,827,967,384 | 27.634146 | 74 | 0.707836 | false | 3.307042 | false | false | false |
t3dev/odoo
|
addons/stock/tests/test_report.py
|
2
|
1121
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import odoo
import odoo.tests
class TestReports(odoo.tests.TransactionCase):
def test_reports(self):
product1 = self.env['product.product'].create({
'name': 'Mellohi',
'default_code': 'C418',
'type': 'product',
'categ_id': self.env.ref('product.product_category_all').id,
'tracking': 'lot',
'barcode': 'scan_me'
})
lot1 = self.env['stock.production.lot'].create({
'name': 'Volume-Beta',
'product_id': product1.id,
})
report = self.env.ref('stock.label_lot_template')
target = b'\n\n\n^XA\n^FO100,50\n^A0N,44,33^FD[C418]Mellohi^FS\n^FO100,100\n^A0N,44,33^FDLN/SN:Volume-Beta^FS\n^FO100,150^BY3\n^BCN,100,Y,N,N\n^FDVolume-Beta^FS\n^XZ\n\n\n'
rendering, qweb_type = report.render_qweb_text(lot1.id)
self.assertEqual(target, rendering.replace(b' ', b''), 'The rendering is not good')
self.assertEqual(qweb_type, 'text', 'the report type is not good')
|
gpl-3.0
| 436,866,183,249,691,840 | 42.115385 | 180 | 0.597681 | false | 3.046196 | false | false | false |
scholer/cadnano2.5
|
cadnano/color.py
|
2
|
3229
|
# -*- coding: utf-8 -*-
"""This allows the model to have a :class:`Color` object class without
the need for :class:`PyQt5.QtGui.QColor`
When running the Qt Application, :class:`QColor` will be used, otherwise an
API compatible class is used and exported as a :class:`Color` object
Currently :class:`Color` objects are unused in the model and colors are stored as
QColor compatible hex string in format '#rrggbbaa', and therefore is not
exposed in the API documentation
"""
try:
from PyQt5.QtGui import QColor as Color
except Exception:
class Color(object):
"""Overloaded constructor using *args to be compatible with :class:`QColor`
usage::
Color(r, g, b)
or::
Color('#rrggbb') for hex
"""
def __init__(self, *args):
largs = len(args)
if largs == 1:
# clip the `#`
arg = args[0]
                if not isinstance(arg, str):
raise ValueError("color doesn't support ints")
color_number = int(arg[1:], 16)
r = (color_number >> 16) & 0xFF
g = (color_number >> 8) & 0xFF
b = color_number & 0xFF
self.setRgb(r, g, b, 255)
elif largs == 3:
r, g, b = args
self.setRgb(r, g, b, 255)
else:
r, g, b, a = args
self.setRgb(r, g, b, a)
# end def
def __repr__(self) -> str:
return self.hex()
def setRgb(self, r: int, g: int, b: int, a: int = 255):
"""Set the r, g, b and alpha 8 bit values
Args:
r: 0 - 255
g: 0 - 255
b: 0 - 255
a: 0 - 255
"""
self.r = r
self.g = g
self.b = b
self.a = a
# end def
def setAlpha(self, a: int):
"""Set the alpha 8 bit value
Args:
a (int): 0 - 255
"""
self.a = a
def name(self) -> str:
"""The hex string name. For :class:`QColor` compatibility
Returns:
:class:`QColor` compatible hex string in format '#rrggbbaa'
"""
return self.hex()
def hex(self) -> str:
"""The hex string name.
Returns:
:class:`QColor` compatible hex string in format '#rrggbbaa'
"""
return "#{:02X}{:02X}{:02X}{:02X}".format(self.r, self.g, self.b, self.a)
# end def
def _intToColor(color_number: int) -> Color:
""" legacy color support for converting integers to color objects based on the
cadnano 2 file format
Args:
color_number: integer value of a RGB color
Returns:
the :class:`Color` object
"""
return Color('#%0.6x' % (color_number))
def intToColorHex(color_number: int) -> str:
"""Convert an integer to a hexadecimal string compatible with :class:`QColor`
Args:
color_number: integer value of a RGB color
Returns:
:class:`QColor` compatible hex string in format '#rrggbb'
"""
return '#%0.6x' % (color_number)
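# Illustrative sketch (not part of the original file): basic usage of the
# pure-Python fallback `Color` class and the helpers above. The values are
# arbitrary examples; note that when PyQt5 is installed `Color` is `QColor`,
# which exposes `name()` but not `hex()`.
def _example_usage() -> str:
    c = Color(204, 0, 0)              # equivalent to Color('#cc0000')
    c.setAlpha(128)                   # alpha stays 255 until changed
    assert intToColorHex(0xCC0000) == '#cc0000'
    return c.hex()                    # '#CC000080'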
|
mit
| 8,278,896,522,655,440,000 | 27.324561 | 85 | 0.5048 | false | 3.961963 | false | false | false |
jessicalucci/TaskManagement
|
taskflow/engines/dist_engine/dtclient.py
|
1
|
7108
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 Rackspace Hosting All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Client for Distributed System"""
import collections
import kombu
from kombu import mixins
import logging
import threading
import time
from taskflow import states
LOG = logging.getLogger(__name__)
TASK_EXCHANGE = kombu.Exchange('tasks', type='direct')
class DTClient(object):
def __init__(self, broker_uri=None):
self._connection = kombu.Connection(broker_uri)
self._listeners = collections.defaultdict(list)
self.requires = collections.defaultdict(set)
self.provides = collections.defaultdict(set)
self._is_provided = {}
self.mapper = {}
def _check_requires(self, results, callback):
"""Make sure all requires for a task are satisfied before
kicking off callback, and return accumulated results
"""
requires = callback.requires
if requires is None:
return results
waiting = []
accum_results = {}
for requirement in requires:
if not (requirement in self._is_provided.keys()):
waiting.append(requirement)
else:
accum_results[requirement] = self._is_provided[requirement]
if len(waiting) == 0:
res = callback.celery_task.delay(provides=list(callback.provides),
**accum_results)
self.mapper[res.id] = callback.task_id
return True
else:
LOG.info("Task %s still waiting on %s" %
(callback.task_id, waiting))
return waiting
def register_listener(self, data, callback):
"""Register callback as a listener for task or data
:param data: Data/Task ID that callback is listening for
:callback: Task to be executed upon data provided
"""
listener = Listener(self._connection, data, callback,
self._check_requires)
listener_t = threading.Thread(target=listener.run)
listener_t.daemon = True
listener_t.start()
self._listeners[data].append((listener, callback))
def notify_listeners(self, provides, results):
"""notify listeners of certain data
:param provides: A set of what this task provides. The set
contains either data this task provides, the task id
(task provides itself) or both
:param results: A dict or other data structure of what this
task provides. If a dict is used, the client will attempt
to pass on provided data in a key/value manner
(result[results][provided] = provided_data)
"""
# persist all data
for provided in provides:
if results['status'] == states.SUCCESS:
# Is this data already provided?
if self._is_provided.get(provided):
res = self._is_provided[provided]
LOG.error("WARNING!! %s Data is already provided,"
" and has value %s. OVERWRITING to value"
" %s" % (provided, res, results))
self._is_provided[provided] = (results['results'])
elif results['status'] == states.ERROR:
LOG.error("Task has errored")
# Once we have preserved all new data, notify all listeners
for provided in provides:
if results['status'] == states.SUCCESS:
self._check_active(provided)
_send_task_results(self._connection, provided,
results['results'])
def _check_active(self, queuename):
# Make sure all consumers have had a chance to spin up
# TODO(Jessica): Won't be a problem in large flows.
# Maybe only activate loop for flows of certain length?
for listener in self._listeners[queuename]:
listener = listener[0]
try_interval = 1
while True:
try_interval *= 2
if try_interval >= 30:
raise Exception("Could not find Listener %s \
for data %s" % (listener, queuename))
if listener._consuming is False:
LOG.error("Listener %s for data %s is not active. \
Trying again in %s seconds"
% (listener, queuename, try_interval))
time.sleep(try_interval)
else:
break
return True
def get_listeners(self, data):
"""Return all listeners for given data"""
results = []
for (_listener, callback) in self._listeners[data]:
results.append(callback)
return results
def close_listeners(self):
for listeners in self._listeners.values():
for _listener, callback in listeners:
_listener.should_stop = True
class Listener(mixins.ConsumerMixin):
"""Created when a task is registered for notification"""
def __init__(self, connection, queuename, callback, check):
self._queue = kombu.Queue(queuename, exchange=TASK_EXCHANGE,
routing_key=queuename)
self.connection = connection
# TODO(Jessica): See if callback can be pulled from chain to
# prevent passing of callback around
self._callback = callback
self._check = check
self._consuming = False
def on_consume_ready(self, connection, channel, consumers, **kwargs):
self._consuming = True
def get_consumers(self, Consumer, channel):
return [Consumer(queues=[self._queue],
callbacks=[self._do_callback])]
def _do_callback(self, body, message):
self._check(body, self._callback)
message.ack()
def on_consume_end(self, connection, channel):
connection.release()
def _send_task_results(connection, queuename, results):
"""Send task results to task_id queue"""
payload = results
routing_key = queuename
with kombu.pools.producers[connection].acquire(block=True) as producer:
kombu.common.maybe_declare(TASK_EXCHANGE, producer.channel)
producer.publish(payload, serializer='json',
exchange=TASK_EXCHANGE,
routing_key=routing_key)
|
apache-2.0
| -4,631,248,882,896,480,000 | 37.215054 | 78 | 0.589617 | false | 4.58285 | false | false | false |
Dev-Cloud-Platform/Dev-Cloud
|
dev_cloud/cc1/src/clm/utils/decorators.py
|
1
|
10248
|
# -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
"""@package src.clm.utils.decorators
Here are placed decorators for CLM views functions targeted to specific CLM
role actors (and src.clm.utils.decorators.genericlog() called by all those).
@par Actor decorators
- src.clm.utils.decorators.guest_log
- src.clm.utils.decorators.user_log
- src.clm.utils.decorators.admin_clm_log
All those decorators call src.clm.utils.decorators.genericlog().
By default those decorators call src.clm.utils.decorators.genericlog
with logging disabled. You can enable it by giving kwarg \c log=True ,
when decorating, eg.:
@code
@admin_clm_log(log=True)
def get_by_id(cm_id, caller_id, id):
pass
@endcode
@author Tomasz Sośnicki <[email protected]>
"""
from clm.utils.cm import CM
from clm.utils import log
from clm.utils.exception import CLMException
from clm.models.user import User
from common.signature import Signature
from common import response
from common.states import user_active_states
from functools import wraps
import json
from django.http import HttpResponse
from django.db import transaction
# Set of functions decorated by actor decorators
# (clm.utils.decorators.guest_log(), src.clm.utils.decorators.user_log(),
# src.clm.utils.decorators.admin_clm_log())
from common.utils import json_convert
global decorated_functions
decorated_functions = set([])
def guest_log(*arg, **kw):
"""
    Decorator for functions requiring only \b guest's privileges.
src.clm.utils.decorators.genericlog() is called with parameters:
- \c is_user=False
- \c is_clm_superuser=False
- \c is_cm_superuser=False
@par Decorated function's declaration
@code
@guest_log[(log=<False|True>)]
function (**kw)
@endcode
@par Decorated function's call
@code
function (**kw)
@endcode
"""
def logwrapper(fun):
@wraps(fun)
def wrapper(*args, **kwargs):
return genericlog(kw.get('log', False), kw.get('pack', True), False, False, False, fun, args, kwargs)
decorated_functions.add(wrapper)
return wrapper
return logwrapper
def user_log(*arg, **kw):
"""
    Decorator for functions requiring logged in \b user's privileges.
src.clm.utils.decorators.genericlog() is called with parameters:
- \c is_user=True
- \c is_clm_superuser=False
- \c is_cm_superuser=False
@par Decorated function's declaration
@code
@user_log[(log=<False|True>)]
function (cm_id, caller_id, **kw)
@endcode
@par Decorated function's call
@code
function (cm_id=<cm_id>, login=<login>, password=<password>, **kw)
@endcode
"""
def logwrapper(fun):
@wraps(fun)
def wrapper(*args, **kwargs):
return genericlog(kw.get('log', False), kw.get('pack', True), True, False, False, fun, args, kwargs)
decorated_functions.add(wrapper)
return wrapper
return logwrapper
def admin_cm_log(*arg, **kw):
"""
    Decorator for functions requiring \b admin_cm's privileges.
src.clm.utils.decorators.genericlog is called with parameters:
- \c is_user=True
- \c is_clm_superuser=False
- \c is_cm_superuser=True
@par Decorated function's declaration
@code
@admin_clm_log[(log=<False|True>)]
function (cm_id, caller_id, **kw)
@endcode
@par Decorated function's call
@code
function (cm_id=<cm_id>, login=<login>, password=<password>, **kw)
@endcode
\c password argument is removed by \c src.cm.utils.decorators.genericlog(),
so it doesn't appear in formal parameters of the function.
"""
def logwrapper(fun):
@wraps(fun)
def wrapper(*args, **kwargs):
return genericlog(kw.get('log', False), kw.get('pack', True), True, False, True, fun, args, kwargs)
decorated_functions.add(wrapper)
return wrapper
return logwrapper
def admin_clm_log(*arg, **kw):
"""
    Decorator for functions requiring \b admin_clm's privileges.
src.clm.utils.decorators.genericlog is called with parameters:
- \c is_user=True
- \c is_clm_superuser=True
- \c is_cm_superuser=False
@par Decorated function's declaration
@code
@admin_clm_log[(log=<False|True>)]
function (cm_id, caller_id, *args, **kw)
@endcode
@par Decorated function's call
@code
function (cm_id, login, password, *arg, **kw)
@endcode
\c password argument is removed by \c src.cm.utils.decorators.genericlog(),
so it doesn't appear in formal parameters of the function.
"""
def logwrapper(fun):
@wraps(fun)
def wrapper(*args, **kwargs):
return genericlog(kw.get('log', False), kw.get('pack', True), True, True, False, fun, args, kwargs)
decorated_functions.add(wrapper)
return wrapper
return logwrapper
def auth(is_user, is_clm_superuser, data):
if is_user:
login = data.pop('login')
password = data.get('password')
if password:
del data['password']
try:
user = User.objects.get(login=login)
except User.DoesNotExist:
raise CLMException('user_get')
if 'Signature' in data.keys():
if not Signature.checkSignature(user.password, data.pop('Signature'), data['parameters']):
raise CLMException('user_get')
del data['parameters']
elif user.password != password:
raise CLMException('user_get')
data['caller_id'] = user.id
if user.is_active != user_active_states['ok']:
raise CLMException('user_inactive')
if is_clm_superuser and not user.is_superuser:
raise CLMException('user_permission')
data['cm_id'] = data.pop('cm_id', None)
if not data['cm_id']:
if user.default_cluster_id is not None:
data['cm_id'] = user.default_cluster_id
return user.id
else:
return 0
def genericlog(log_enabled, pack_resp, is_user, is_clm_superuser, is_cm_superuser, fun, args, kwargs):
"""
Generic log is called by actor decorators defined in src.clm.utils.decorators :
- src.clm.utils.decorators.guest_log
- src.clm.utils.decorators.user_log
- src.clm.utils.decorators.admin_cm_log
- src.clm.utils.decorators.admin_clm_log
It calls decorated functions, additionally performing several tasks.
    Genericlog performs:
    -# <i>if the decorated function requires user or admin privileges</i>: <b>authorization</b>;
-# <b>execution</b> of the decorated function;
-# <b>debug log</b> of the arguments <i>depending on \c log_enabled and function's success</i>;
-# <i>if exception is thrown</i>: <b>general exception log</b>.
@returns{dict} response; fields:
@dictkey{status,string} 'ok', if succeeded
@dictkey{data,dict} response data
"""
# ===========================================================================
    # AUTHORIZATION
# ===========================================================================
name = '%s.%s' % (fun.__module__.replace('clm.views.', ''), fun.__name__)
request = args[0]
data = json.loads(request.body)
# ===========================================================================
    # LOG ARGUMENTS
# ===========================================================================
gen_exception = False
with transaction.commit_manually():
try:
# Execute function
user_id = auth(is_user, is_clm_superuser, data)
resp = fun(**data)
            if pack_resp and not hasattr(fun, 'packed'):
                # if the function is decorated by cm_request, the 'packed' attribute
                # is set and the response has already been packed by the CM
                resp = response('ok', resp)
transaction.commit()
except CLMException, e:
transaction.rollback()
user_id = 0
resp = e.response
except Exception, e:
transaction.rollback()
gen_exception = True
user_id = 0
resp = response('clm_error', str(e))
if log_enabled or resp['status'] != 'ok':
log.debug(user_id, '=' * 100)
log.debug(user_id, 'Function: %s' % name)
log.debug(user_id, 'ARGS:\n%s' % json.dumps(data, indent=4))
if gen_exception:
log.exception(user_id, 'General exception')
        log.debug(user_id, 'Response: %s' % (resp or 'None'))
return HttpResponse(json.dumps(resp, default=json_convert))
def cm_request(fun):
"""
Decorator for CM views functions that:
- either are fully transparent and just return CM response,
- or propagate request to CM and further postprocess its response.
Decorated function ought to be defined like:
@par Decorated function's declaration
@code
@cm_request
def function (cm_response, <kwargs>):
# postprocess cm_response
return cm_response
@endcode
@par Decorated function's call
@code
function (cm_id, <kwargs>) # `cm_id` is keyword arg as well, but it's required
@endcode
"""
url = r"%s/%s/" % (fun.__module__.replace("clm.views.", "").replace(".", "/"), fun.__name__)
@wraps(fun)
def wrapper(**data):
log.debug(0, "Forward request to CM: %s" % url)
cm_response = CM(data.pop('cm_id')).send_request(url, **data)
fun.packed = True # mark function response to not be packed by genericlog
return fun(cm_response, **data)
return wrapper
|
apache-2.0
| 987,290,668,116,218,800 | 29.771772 | 160 | 0.620084 | false | 3.707308 | false | false | false |
opmuse/opmuse
|
opmuse/sizeof.py
|
1
|
1750
|
# stolen from http://code.activestate.com/recipes/577504/ with some minor changes
# for pep8 compatibility
from __future__ import print_function
from sys import getsizeof, stderr
from itertools import chain
from collections import deque
try:
from reprlib import repr
except ImportError:
pass
def total_size(o, handlers={}, verbose=False):
""" Returns the approximate memory footprint an object and all of its contents.
Automatically finds the contents of the following builtin containers and
their subclasses: tuple, list, deque, dict, set and frozenset.
To search other containers, add handlers to iterate over their contents:
handlers = {SomeContainerClass: iter,
OtherContainerClass: OtherContainerClass.get_elements}
"""
def dict_handler(d):
return chain.from_iterable(d.items())
all_handlers = {tuple: iter,
list: iter,
deque: iter,
dict: dict_handler,
set: iter,
frozenset: iter}
all_handlers.update(handlers) # user handlers take precedence
seen = set() # track which object id's have already been seen
default_size = getsizeof(0) # estimate sizeof object without __sizeof__
def sizeof(o):
if id(o) in seen: # do not double count the same object
return 0
seen.add(id(o))
s = getsizeof(o, default_size)
if verbose:
print(s, type(o), repr(o), file=stderr)
for typ, handler in all_handlers.items():
if isinstance(o, typ):
s += sum(map(sizeof, handler(o)))
break
return s
return sizeof(o)
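if __name__ == '__main__':
    # Editor's usage sketch (not part of the original recipe): measure a nested
    # container, then repeat with a handler for a custom container class that
    # total_size() would not otherwise descend into (mirrors the docstring above).
    sample = {'numbers': list(range(10)), 'words': ('a', 'b', 'c'), 'nested': {1: {2: 3}}}
    print(total_size(sample, verbose=False))
    class Bag(object):
        def __init__(self, *items):
            self.items = list(items)
        def get_elements(self):
            return iter(self.items)
    print(total_size(Bag('x' * 100, 'y' * 200), handlers={Bag: Bag.get_elements}))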
|
agpl-3.0
| -667,687,790,504,563,000 | 31.407407 | 86 | 0.609143 | false | 4.475703 | false | false | false |
sunlightlabs/read_FEC
|
fecreader/summary_data/management/commands/update_all_candidate_times.py
|
1
|
3767
|
from datetime import date
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Sum
from summary_data.utils.summary_utils import summarize_committee_periodic_webk
from summary_data.models import Candidate_Overlay, Authorized_Candidate_Committees, Committee_Time_Summary, Committee_Overlay
from shared_utils.cycle_utils import cycle_calendar
from django.conf import settings
try:
CURRENT_CYCLE = settings.CURRENT_CYCLE
except:
print "Missing current cycle list. Defaulting to 2016. "
CURRENT_CYCLE = '2016'
this_cycle_calendar = cycle_calendar[int(CURRENT_CYCLE)]
this_cycle_start = this_cycle_calendar['start']
this_cycle_end = this_cycle_calendar['end']
class Command(BaseCommand):
help = "Redo the summaries of *all candidates* - not just those that need it"
requires_model_validation = False
def handle(self, *args, **options):
candidates = Candidate_Overlay.objects.filter(cycle=CURRENT_CYCLE)
for candidate in candidates:
candidate_pcc = candidate.pcc
authorized_committee_list = Authorized_Candidate_Committees.objects.filter(candidate_id=candidate.fec_id, cycle=CURRENT_CYCLE).values('committee_id')
committee_list = [x.get('committee_id') for x in authorized_committee_list]
print "For candidate %s entering from list: %s" % (candidate.name, committee_list)
all_summaries = Committee_Time_Summary.objects.filter(com_id__in=committee_list, coverage_from_date__gte=this_cycle_start, coverage_through_date__lte=this_cycle_end).order_by('-coverage_through_date', '-coverage_from_date')
if all_summaries:
## Get most recent data from the time summary reports. But for totals that include recent stuff, use committee summaries.
most_recent_report = all_summaries[0]
recent_reports = all_summaries.filter(coverage_from_date=most_recent_report.coverage_from_date, coverage_through_date=most_recent_report.coverage_through_date)
# get data from the most recent report
recent_sums = recent_reports.aggregate( outstanding_loans=Sum('outstanding_loans'), cash_on_hand_end=Sum('cash_on_hand_end'))
for i in recent_sums:
if not recent_sums[i]:
recent_sums[i] = 0
# Independent expenditures are summarized separately.
candidate.cash_on_hand_date = most_recent_report.coverage_through_date
candidate.cash_on_hand = recent_sums['cash_on_hand_end']
candidate.outstanding_loans = recent_sums['outstanding_loans']
authorized_committees = Committee_Overlay.objects.filter(fec_id__in=committee_list,cycle=CURRENT_CYCLE)
sums = authorized_committees.aggregate(tot_contrib=Sum('total_contributions'), tot_disburse=Sum('total_disbursements'), tot_receipts=Sum('total_receipts'), tot_non_ite_contrib=Sum('total_unitemized'))
for i in sums:
if not sums[i]:
sums[i] = 0
candidate.total_contributions = sums['tot_contrib']
candidate.total_unitemized = sums['tot_non_ite_contrib']
candidate.total_disbursements = sums['tot_disburse']
candidate.total_receipts = sums['tot_receipts']
if not candidate.has_contributions and candidate.total_contributions > 0:
candidate.has_contributions = True
candidate.save()
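# Editor's note (not part of the original file): being a Django management command
# located under summary_data/management/commands/, this would normally be invoked as
#     python manage.py update_all_candidate_times
# with no arguments; handle() ignores *args and recomputes every Candidate_Overlay
# row for CURRENT_CYCLE.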
|
bsd-3-clause
| 9,153,019,869,231,300,000 | 46.696203 | 235 | 0.63605 | false | 4.090119 | false | false | false |
GedheFoundation/sidesa2.0
|
surat_masuk.py
|
1
|
6371
|
#Boa:Frame:surat_masuk
import wx
import wx.richtext
import wx.lib.buttons
import input_administrasi_surat
def create(parent):
return surat_masuk(parent)
[wxID_SURAT_MASUK, wxID_SURAT_MASUKDARI, wxID_SURAT_MASUKDATEPICKERCTRL1,
wxID_SURAT_MASUKINPUT_KETERANGAN, wxID_SURAT_MASUKINPUT_NOMOR_SURAT,
wxID_SURAT_MASUKKOTAK_SURAT_MASUK, wxID_SURAT_MASUKLABEL_ISI_SINGKAT_SURAT,
wxID_SURAT_MASUKLABEL_KETERANGAN, wxID_SURAT_MASUKLABEL_NOMOR_SURAT_MASUK,
wxID_SURAT_MASUKLABEL_TANGGAL_SURAT, wxID_SURAT_MASUKLABEL_TUJUAN_SURAT,
wxID_SURAT_MASUKPERIHAL, wxID_SURAT_MASUKRICHTEXTCTRL1,
wxID_SURAT_MASUKTEXTCTRL1, wxID_SURAT_MASUKTEXTCTRL2,
wxID_SURAT_MASUKTOMBOL_KE_MENU_SURAT, wxID_SURAT_MASUKTOMBOL_SIMPAN,
wxID_SURAT_MASUKTUJUAN,
] = [wx.NewId() for _init_ctrls in range(18)]
class surat_masuk(wx.Frame):
def _init_ctrls(self, prnt):
# generated method, don't edit
wx.Frame.__init__(self, id=wxID_SURAT_MASUK, name=u'surat_masuk',
parent=prnt, pos=wx.Point(438, 232), size=wx.Size(850, 370),
style=wx.DEFAULT_FRAME_STYLE, title=u'Surat Masuk')
self.SetClientSize(wx.Size(850, 370))
self.Center(wx.BOTH)
self.label_nomor_surat_masuk = wx.StaticText(id=wxID_SURAT_MASUKLABEL_NOMOR_SURAT_MASUK,
label=u'Nomor Surat Masuk', name=u'label_nomor_surat_masuk',
parent=self, pos=wx.Point(40, 48), size=wx.Size(122, 17),
style=0)
self.label_tanggal_surat = wx.StaticText(id=wxID_SURAT_MASUKLABEL_TANGGAL_SURAT,
label=u'Tanggal Surat', name=u'label_tanggal_surat', parent=self,
pos=wx.Point(40, 80), size=wx.Size(81, 17), style=0)
self.label_tujuan_surat = wx.StaticText(id=wxID_SURAT_MASUKLABEL_TUJUAN_SURAT,
label=u'Dari', name=u'label_tujuan_surat', parent=self,
pos=wx.Point(40, 112), size=wx.Size(76, 17), style=0)
self.label_isi_singkat_surat = wx.StaticText(id=wxID_SURAT_MASUKLABEL_ISI_SINGKAT_SURAT,
label=u'Isi Singkat Surat', name=u'label_isi_singkat_surat',
parent=self, pos=wx.Point(40, 144), size=wx.Size(97, 17),
style=0)
self.label_keterangan = wx.StaticText(id=wxID_SURAT_MASUKLABEL_KETERANGAN,
label=u'Disposisi', name=u'label_keterangan', parent=self,
pos=wx.Point(40, 280), size=wx.Size(88, 17), style=0)
self.input_nomor_surat = wx.TextCtrl(id=wxID_SURAT_MASUKINPUT_NOMOR_SURAT,
name=u'input_nomor_surat', parent=self, pos=wx.Point(168, 40),
size=wx.Size(312, 25), style=0, value=u'')
self.dari = wx.TextCtrl(id=wxID_SURAT_MASUKDARI, name=u'dari',
parent=self, pos=wx.Point(168, 104), size=wx.Size(312, 25),
style=0, value=u'')
self.input_keterangan = wx.TextCtrl(id=wxID_SURAT_MASUKINPUT_KETERANGAN,
name=u'input_keterangan', parent=self, pos=wx.Point(168, 280),
size=wx.Size(656, 25), style=0, value=u'')
self.kotak_surat_masuk = wx.StaticBox(id=wxID_SURAT_MASUKKOTAK_SURAT_MASUK,
label=u'Surat masuk Input', name=u'kotak_surat_masuk',
parent=self, pos=wx.Point(8, 16), size=wx.Size(832, 304),
style=0)
self.tombol_simpan = wx.lib.buttons.GenBitmapTextButton(bitmap=wx.NullBitmap,
id=wxID_SURAT_MASUKTOMBOL_SIMPAN, label=u'Simpan',
name=u'tombol_simpan', parent=self, pos=wx.Point(280, 328),
size=wx.Size(176, 31), style=0)
self.tombol_simpan.Bind(wx.EVT_BUTTON, self.OnTombol_simpanButton,
id=wxID_SURAT_MASUKTOMBOL_SIMPAN)
self.tombol_ke_menu_surat = wx.lib.buttons.GenBitmapTextButton(bitmap=wx.NullBitmap,
id=wxID_SURAT_MASUKTOMBOL_KE_MENU_SURAT,
label=u'Kembali Ke Menu Surat', name=u'tombol_ke_menu_surat',
parent=self, pos=wx.Point(464, 328), size=wx.Size(200, 31),
style=0)
self.tombol_ke_menu_surat.Bind(wx.EVT_BUTTON,
self.OnTombol_ke_menu_suratButton,
id=wxID_SURAT_MASUKTOMBOL_KE_MENU_SURAT)
self.datePickerCtrl1 = wx.DatePickerCtrl(id=wxID_SURAT_MASUKDATEPICKERCTRL1,
name='datePickerCtrl1', parent=self, pos=wx.Point(168, 72),
size=wx.Size(168, 26), style=wx.DP_SHOWCENTURY)
self.richTextCtrl1 = wx.richtext.RichTextCtrl(id=wxID_SURAT_MASUKRICHTEXTCTRL1,
parent=self, pos=wx.Point(168, 144), size=wx.Size(656, 128),
style=wx.richtext.RE_MULTILINE, value='')
self.tujuan = wx.StaticText(id=wxID_SURAT_MASUKTUJUAN, label=u'Kepada',
name=u'tujuan', parent=self, pos=wx.Point(488, 112),
size=wx.Size(48, 15), style=0)
self.textCtrl1 = wx.TextCtrl(id=wxID_SURAT_MASUKTEXTCTRL1,
name='textCtrl1', parent=self, pos=wx.Point(552, 104),
size=wx.Size(272, 25), style=0, value='')
self.perihal = wx.StaticText(id=wxID_SURAT_MASUKPERIHAL,
label=u'Perihal', name=u'perihal', parent=self, pos=wx.Point(488,
48), size=wx.Size(44, 15), style=0)
self.textCtrl2 = wx.TextCtrl(id=wxID_SURAT_MASUKTEXTCTRL2,
name='textCtrl2', parent=self, pos=wx.Point(552, 40),
size=wx.Size(272, 25), style=0, value='')
def __init__(self, parent):
self._init_ctrls(parent)
def OnTombol_ke_menu_suratButton(self, event):
self.main=input_administrasi_surat.create(None)
self.main.Show()
self.Close()
def OnTombol_simpanButton(self, event):
        # NOTE: the original handler was copied from a different form and referenced
        # widgets and variables that do not exist in this class (input_tambak,
        # input_jumlah, input_pemilik, inputtambak). This version reads the actual
        # surat-masuk widgets; the `surat_masuk` table/column names and the
        # module-level `db`/`cur` database handles are assumptions, not original.
        nomor_surat = str(self.input_nomor_surat.GetValue())
        tanggal_surat = self.datePickerCtrl1.GetValue().FormatISODate()
        dari = str(self.dari.GetValue())
        kepada = str(self.textCtrl1.GetValue())
        perihal = str(self.textCtrl2.GetValue())
        isi_singkat = str(self.richTextCtrl1.GetValue())
        disposisi = str(self.input_keterangan.GetValue())
        add_surat_masuk = "INSERT INTO surat_masuk (nomor, tanggal, dari, kepada, perihal, isi, disposisi) VALUES (%s, %s, %s, %s, %s, %s, %s)"
        cur.execute(add_surat_masuk, (nomor_surat, tanggal_surat, dari, kepada, perihal, isi_singkat, disposisi))
        db.commit()
        self.input_nomor_surat.Clear()
        self.dari.Clear()
        self.textCtrl1.Clear()
        self.textCtrl2.Clear()
        self.richTextCtrl1.Clear()
        self.input_keterangan.Clear()
        self.pesan = wx.MessageDialog(self, "Data Surat Masuk Disimpan", "Konfirmasi", wx.OK)
        self.pesan.ShowModal()
|
gpl-2.0
| -2,547,471,735,619,508,700 | 47.633588 | 189 | 0.638832 | false | 2.631557 | false | false | false |
calamares/calamares
|
src/modules/dummypythonqt/main.py
|
1
|
8246
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# === This file is part of Calamares - <https://calamares.io> ===
#
# SPDX-FileCopyrightText: 2016-2017 Teo Mrnjavac <[email protected]>
# SPDX-FileCopyrightText: 2017 Alf Gaida <[email protected]>
# SPDX-License-Identifier: GPL-3.0-or-later
#
# Calamares is Free Software: see the License-Identifier above.
#
import platform
from PythonQt.QtGui import *
import PythonQt.calamares as calamares
# WARNING: the Calamares PythonQt API is considered EXPERIMENTAL as of
# Calamares 2.5. It comes with no promise or commitment to API stability.
# Set up translations.
# You may skip this if your Calamares module has no user visible strings.
# DO NOT install _ into the builtin namespace because each module loads
# its own catalog.
# DO use the gettext class-based API and manually alias _ as described in:
# https://docs.python.org/3.5/library/gettext.html#localizing-your-module
import gettext
import inspect
import os
_filename = inspect.getframeinfo(inspect.currentframe()).filename
_path = os.path.dirname(os.path.abspath(_filename))
_ = gettext.gettext
# Example Python ViewModule.
# A Python ViewModule is a Python program which defines a ViewStep class.
# One UI module ==> one ViewStep.
# This class must be marked with the @calamares_module decorator. A
# ViewModule may define other classes, but only one may be decorated with
# @calamares_module. Such a class must conform to the Calamares ViewStep
# interface and functions as the entry point of the module.
# A ViewStep manages one or more "wizard pages" through methods like
# back/next, and reports its status through isNextEnabled/isBackEnabled/
# isAtBeginning/isAtEnd. The whole UI, including all the pages, must be
# exposed as a single QWidget, returned by the widget function.
#
# For convenience, both C++ and PythonQt ViewSteps are considered to be
# implementations of ViewStep.h. Additionally, the Calamares PythonQt API
# allows Python developers to keep their identifiers more Pythonic on the
# Python side. Thus, all of the following are considered valid method
# identifiers in a ViewStep implementation: isNextEnabled, isnextenabled,
# is_next_enabled.
@calamares_module
class DummyPythonQtViewStep:
def __init__(self):
# Importing PythonQt.QtGui provides access to most Qt widget classes.
self.main_widget = QFrame()
self.main_widget.setLayout(QVBoxLayout())
label = QLabel()
self.main_widget.layout().addWidget(label)
accumulator = "\nCalamares+PythonQt running embedded Python " +\
platform.python_version()
label.text = accumulator
btn = QPushButton()
# Python strings can be used wherever a method wants a QString. Python
# gettext translations can be used seamlessly as well.
btn.setText(_("Click me!"))
self.main_widget.layout().addWidget(btn)
# The syntax for signal-slot connections is very simple, though
# slightly different from the C++ equivalent. There are no SIGNAL and
# SLOT macros, and a signal can be connected to any Python method
# (without a special "slot" designation).
btn.connect("clicked(bool)", self.on_btn_clicked)
def on_btn_clicked(self):
self.main_widget.layout().addWidget(QLabel(_("A new QLabel.")))
def prettyName(self):
return _("Dummy PythonQt ViewStep")
def isNextEnabled(self):
return True # The "Next" button should be clickable
def isBackEnabled(self):
return True # The "Back" button should be clickable
def isAtBeginning(self):
# True means the currently shown UI page is the first page of this
# module, thus a "Back" button click will not be handled by this
# module and will cause a skip to the previous ViewStep instead
# (if any). False means that the present ViewStep provides other UI
# pages placed logically "before" the current one, thus a "Back" button
# click will be handled by this module instead of skipping to another
# ViewStep. A module (ViewStep) with only one page will always return
# True here.
return True
def isAtEnd(self):
# True means the currently shown UI page is the last page of this
# module, thus a "Next" button click will not be handled by this
# module and will cause a skip to the next ViewStep instead (if any).
# False means that the present ViewStep provides other UI pages placed
# logically "after" the current one, thus a "Next" button click will
# be handled by this module instead of skipping to another ViewStep.
# A module (ViewStep) with only one page will always return True here.
return True
def jobs(self):
# Returns a list of objects that implement Calamares::Job.
return [DummyPQJob("Dummy PythonQt job reporting for duty")]
def widget(self):
# Returns the base QWidget of this module's UI.
return self.main_widget
def retranslate(self, locale_name):
# This is where it gets slightly weird. In most desktop applications we
# shouldn't need this kind of mechanism, because we could assume that
# the operating environment is configured to use a certain language.
# Usually the user would change the system-wide language in a settings
# UI, restart the application, done.
# Alas, Calamares runs on an unconfigured live system, and one of the
# core features of Calamares is to allow the user to pick a language.
# Unfortunately, strings in the UI do not automatically react to a
# runtime language change. To get UI strings in a new language, all
# user-visible strings must be retranslated (by calling tr() in C++ or
# _() in Python) and reapplied on the relevant widgets.
# When the user picks a new UI translation language, Qt raises a QEvent
# of type LanguageChange, which propagates through the QObject
# hierarchy. By catching and reacting to this event, we can show
# user-visible strings in the new language at the right time.
# The C++ side of the Calamares PythonQt API catches the LanguageChange
# event and calls the present method. It is then up to the module
# developer to add here all the needed code to load the module's
# translation catalog for the new language (which is separate from the
# main Calamares strings catalog) and reapply any user-visible strings.
calamares.utils.debug("PythonQt retranslation event "
"for locale name: {}".format(locale_name))
# First we load the catalog file for the new language...
try:
global _
_t = gettext.translation('dummypythonqt',
localedir=os.path.join(_path, 'lang'),
languages=[locale_name])
_ = _t.gettext
except OSError as e:
calamares.utils.debug(e)
pass
# ... and then we can call setText(_("foo")) and similar methods on
# the relevant widgets here to reapply the strings.
# An example Job class. Implements Calamares::Job. For method identifiers, the
# same rules apply as for ViewStep. No decorators are necessary here, because
# only the ViewStep implementation is the unique entry point, and a module can
# have any number of jobs.
class DummyPQJob:
def __init__(self, my_msg):
self.my_msg = my_msg
def pretty_name(self):
return _("The Dummy PythonQt Job")
def pretty_description(self):
return _("This is the Dummy PythonQt Job. "
"The dummy job says: {}").format(self.my_msg)
def pretty_status_message(self):
return _("A status message for Dummy PythonQt Job.")
def exec(self):
# As an example, we touch a file in the target root filesystem.
rmp = calamares.global_storage['rootMountPoint']
os.system("touch {}/calamares_dpqt_was_here".format(rmp))
calamares.utils.debug("the dummy job says {}".format(self.my_msg))
return {'ok': True}
|
gpl-3.0
| -682,205,914,237,107,000 | 42.861702 | 79 | 0.680936 | false | 4.162544 | false | false | false |
ofavre/cellulart
|
utils.py
|
1
|
1311
|
# -*- coding: utf-8 -*-
# License: See LICENSE file.
import math
CLASSIC = 0
WRAP = 1
#MOBIUS = 2
def distance(world_shape, type=CLASSIC):
def distance_classic(a,b):
val = 0.0
for ad,bd in zip(a,b):
val += (ad-bd)**2
return math.sqrt(val)
def distance_wrap(a,b):
val = 0.0
i = 0
for ad,bd in zip(a,b):
di = world_shape[i]
ad %= di
bd %= di
dist = abs(ad-bd)
if ad < bd:
dist2 = abs(ad+di-bd)
if dist2 < dist:
dist = dist2
else:
dist2 = abs(ad-(bd+di))
if dist2 < dist:
dist = dist2
val += dist**2
i += 1
return math.sqrt(val)
if type == CLASSIC:
return distance_classic
elif type == WRAP:
return distance_wrap
else:
return None
def weighted_sum_wrap(value1, weight1, value2, weight2, wrap_length):
if value1 < value2 and (value1+wrap_length)-value2 < value2-value1:
value1 += wrap_length
elif value2 < value1 and (value2+wrap_length)-value1 < value1-value2:
value2 += wrap_length
return ( (weight1*value1 + weight2*value2) / (weight1+weight2) ) % wrap_length
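if __name__ == '__main__':
    # Editor's usage sketch (not part of the original module): on a 10x10 wrapping
    # (toroidal) world, opposite corners are near neighbours, so the WRAP metric
    # gives a much smaller distance than the CLASSIC Euclidean one.
    world = (10, 10)
    print(distance(world, CLASSIC)((0, 0), (9, 9)))   # ~12.73
    print(distance(world, WRAP)((0, 0), (9, 9)))      # ~1.41, wrapping on both axes
    print(weighted_sum_wrap(9.5, 1.0, 0.5, 1.0, 10))  # 0.0, the midpoint across the wrap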
|
bsd-3-clause
| -2,513,885,121,454,406,000 | 23.735849 | 84 | 0.500381 | false | 3.431937 | false | false | false |
nathanbjenx/cairis
|
cairis/core/colourcodes.py
|
1
|
3560
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
__author__ = 'Shamal Faily'
def threatColourCode(valueId):
if (valueId == 9): return '359 1 .5'
elif (valueId > 9): return '359 1 .5'
elif (valueId == 8): return '359 1 .7'
elif (valueId == 7): return '6 .86 .44'
elif (valueId == 6): return '10 .7 .94'
elif (valueId == 5): return '19 .65 .99'
elif (valueId == 4): return '27 .48 .99'
elif (valueId == 3): return '34 .38 .99'
elif (valueId == 2): return '36 .21 1'
elif (valueId == 1): return '37 .07 1'
elif (valueId < 1): return '37 .07 1'
def threatLikelihoodColourCode(tlValue):
# Based on orrd5 color scheme
if tlValue == 'Incredible': return "#fef0d9"
elif tlValue == 'Improbable': return "#fdcc8a"
elif tlValue == 'Remote': return "#fc8d59"
elif tlValue == 'Occasional': return "#e34a33"
else: return "#b30000"
def vulnerabilitySeverityColourCode(vsValue):
# Based on orrd4 color scheme
if vsValue == 'Negligible': return '1'
elif vsValue == 'Marginal': return '2'
elif vsValue == 'Critical': return '3'
else: return '4'
def vulnerabilitySeverityTextColourCode(vsValue):
if (vsValue == 'Catastrophic'): return 'white'
else: return 'black'
def usabilityColourCode(valueId):
if (valueId <= 1): return '#F7FBFF'
elif (valueId == 2): return '#DEEBF7'
elif (valueId == 3): return '#C6DBEF'
elif (valueId == 4): return '#9ECAE1'
elif (valueId == 5): return '#6BAED6'
elif (valueId == 6): return '#4292C6'
elif (valueId == 7): return '#2171B5'
elif (valueId == 8): return '#08519C'
elif (valueId == 9): return '#08306B'
elif (valueId > 9): return '#08306B'
def obstacleColourCode(valueId):
if (valueId <= 0.2): return '1'
elif (valueId <= 0.3): return '2'
elif (valueId <= 0.4): return '3'
elif (valueId <= 0.5): return '4'
elif (valueId <= 0.6): return '5'
elif (valueId <= 0.7): return '6'
elif (valueId <= 0.8): return '7'
elif (valueId <= 0.9): return '8'
else: return '9'
def riskTextColourCode(valueId):
if (valueId >= 7): return 'white'
else: return 'black'
def usabilityTextColourCode(valueId):
if (valueId >= 7): return 'white'
else: return 'black'
def probabilityTextColourCode(valueId):
if (valueId >= 0.5): return 'white'
else: return 'black'
def surfaceTypeColourCode(valueId):
if (valueId == 9): return '359 1 .5'
elif (valueId > 9): return '359 1 .5'
elif (valueId == 8): return '359 1 .7'
elif (valueId == 7): return '6 .86 .44'
elif (valueId == 6): return '10 .7 .94'
elif (valueId == 5): return '19 .65 .99'
elif (valueId == 4): return '27 .48 .99'
elif (valueId == 3): return '34 .38 .99'
elif (valueId == 2): return '36 .21 1'
elif (valueId <= 1): return '37 .07 1'
def surfaceTypeTextColourCode(valueId):
if (valueId >= 7): return 'white'
else: return 'black'
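# Editor's usage sketch (not part of the original module): the threat/surface helpers
# above return what look like Graphviz-style "H S V" colour strings, the likelihood
# helpers return hex codes, and the *TextColourCode helpers pick a readable font
# colour. A caller building graph-node attributes might combine them as below; the
# function and attribute names are illustrative, not the actual CAIRIS call sites.
def exampleRiskNodeAttributes(riskScore, likelihood):
  return {
    'fillcolor': threatColourCode(riskScore),
    'fontcolor': riskTextColourCode(riskScore),
    'color': threatLikelihoodColourCode(likelihood)
  }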
|
apache-2.0
| 3,056,037,936,750,514,000 | 33.901961 | 63 | 0.660674 | false | 3.032368 | false | false | false |
operasoftware/twisted-apns
|
apns/notification.py
|
1
|
5424
|
from datetime import datetime
import binascii
import json
import struct
from apns.commands import NOTIFICATION
from apns.utils import datetime_to_timestamp
class NotificationError(Exception):
"""To be thrown upon failures on notification processing."""
pass
class NotificationInvalidPriorityError(NotificationError):
"""
Thrown while packing a notification, if the notification priority field is
invalid.
"""
pass
class NotificationPayloadNotSerializableError(NotificationError):
"""
Thrown while packing a notification, if the notification payload field
could not be serialized to JSON.
"""
pass
class NotificationTokenUnhexlifyError(NotificationError):
"""
Thrown while packing a notification, if the notification token field could
not be converted to binary from its hex representation.
"""
def __init__(self, msg):
super(NotificationTokenUnhexlifyError, self).__init__(msg)
class NotificationInvalidCommandError(NotificationError):
"""
Thrown while unpacking a notification, if the notification command field
contains invalid value.
"""
pass
class NotificationInvalidIdError(NotificationError):
"""
Thrown while unpacking a notification, if the notification structure is
invalid.
"""
pass
class Notification(object):
"""
A representation of the structure of a notification request, as defined in
the iOS documentation.
"""
COMMAND = NOTIFICATION
PRIORITY_NORMAL = 5
PRIORITY_IMMEDIATELY = 10
PRIORITIES = (PRIORITY_NORMAL, PRIORITY_IMMEDIATELY)
PAYLOAD = 2
TOKEN = 1
PRIORITY = 5
NOTIFICATION_ID = 3
EXPIRE = 4
EXPIRE_IMMEDIATELY = 0
def __init__(self, payload=None, token=None, expire=None,
priority=PRIORITY_NORMAL, iden=0):
"""
Init an instance of Notification.
:param payload: object containing structure of payload to be sent to
remote device.
:param token: string containing target device token in hex
:param expire: notification expire time as UNIX timestamp, 0 means that
notification expires immediately.
:param priority: notification priority, as described in iOS
documentation
:param iden: notification ID, as described in iOS documentation
"""
self.payload = payload
self.token = token
self.expire = expire
self.priority = priority
self.iden = iden
def __str__(self):
return '<Notification: %s>' % self.token
def to_binary_string(self):
"""Pack the notification to binary form and return it as string."""
if self.priority not in self.PRIORITIES:
raise NotificationInvalidPriorityError()
try:
token = binascii.unhexlify(self.token)
except TypeError as error:
raise NotificationTokenUnhexlifyError(error)
try:
payload = json.dumps(self.payload)
except TypeError:
raise NotificationPayloadNotSerializableError()
fmt = ">BIBH{0}sBH{1}sBHIBHIBHB".format(len(token), len(payload))
expire = (0 if self.expire == self.EXPIRE_IMMEDIATELY else
datetime_to_timestamp(self.expire))
# |COMMAND|FRAME-LEN|{token}|{payload}|{id:4}|{expire:4}|{priority:1}
# 5 items, each 3 bytes prefix, then each item length
length = 3*5 + len(token) + len(payload) + 4 + 4 + 1
message = struct.pack(fmt, self.COMMAND, length,
self.TOKEN, len(token), token,
self.PAYLOAD, len(payload), payload,
self.NOTIFICATION_ID, 4, self.iden,
self.EXPIRE, 4, expire,
self.PRIORITY, 1, self.priority)
return message
def from_binary_string(self, notification):
"""Unpack the notification from binary string."""
command = struct.unpack('>B', notification[0])[0]
if command != self.COMMAND:
raise NotificationInvalidCommandError()
length = struct.unpack('>I', notification[1:5])[0]
notification = notification[5:]
offset = 0
def next_item(offset):
iden, length = struct.unpack('>BH', notification[offset:offset+3])
offset += 3
payload = notification[offset:offset+length]
offset += length
if iden == self.PAYLOAD:
payload = struct.unpack('>{0}s'.format(length), payload)[0]
self.payload = json.loads(payload)
elif iden == self.TOKEN:
payload = struct.unpack('>{0}s'.format(length), payload)[0]
self.token = binascii.hexlify(payload)
elif iden == self.PRIORITY:
self.priority = struct.unpack('>B', payload)[0]
elif iden == self.NOTIFICATION_ID:
self.iden = struct.unpack('>I', payload)[0]
elif iden == self.EXPIRE:
payload = struct.unpack('>I', payload)[0]
self.expire = (self.EXPIRE_IMMEDIATELY if payload == 0 else
datetime.fromtimestamp(payload))
else:
raise NotificationInvalidIdError()
return offset
while offset < length:
offset = next_item(offset)
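if __name__ == '__main__':
    # Editor's round-trip sketch (not part of the original module): pack a notification
    # into the binary frame and parse it back. The token is a dummy 32-byte value in hex
    # and the payload follows the usual APNs 'aps' shape; both are assumptions.
    original = Notification(payload={'aps': {'alert': 'Hello'}},
                            token='0123456789abcdef' * 4,
                            expire=Notification.EXPIRE_IMMEDIATELY,
                            priority=Notification.PRIORITY_IMMEDIATELY,
                            iden=42)
    frame = original.to_binary_string()
    parsed = Notification()
    parsed.from_binary_string(frame)
    assert parsed.token == original.token
    assert parsed.iden == original.iden
    assert parsed.payload == original.payload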
|
mit
| 4,067,661,437,472,473,600 | 31.872727 | 79 | 0.615413 | false | 4.604414 | false | false | false |
vessemer/concept-to-clinic
|
prediction/src/tests/test_segmentation.py
|
1
|
2579
|
import os
import time
import pylidc as pl
import pytest
from config import Config
from . import get_timeout
from ..algorithms.identify.prediction import load_patient_images
from ..algorithms.segment.trained_model import predict
from ..preprocess.lung_segmentation import save_lung_segments, get_z_range
def test_correct_paths(dicom_paths):
assert os.path.isdir(Config.SEGMENT_ASSETS_DIR)
for path in dicom_paths:
assert os.path.isdir(path)
def test_segment_predict_load(dicom_path):
predicted = predict(dicom_path, [])
assert predicted['volumes'] == []
def test_segment_dicom(dicom_path, nodule_locations):
predicted = predict(dicom_path, nodule_locations)
assert isinstance(predicted['binary_mask_path'], str)
assert predicted['volumes']
assert predicted['volumes'][0] > 0
def test_segment_luna(metaimage_path, luna_nodule):
predicted = predict(metaimage_path, [luna_nodule])
assert isinstance(predicted['binary_mask_path'], str)
assert predicted['volumes']
assert predicted['volumes'][0] > 0
@pytest.mark.stop_timeout
def test_nodule_segmentation(dicom_path, nodule_001):
predict(dicom_path, [nodule_001])
@pytest.mark.stop_timeout
def test_lung_segmentation(dicom_paths):
"""Test whether the annotations of the LIDC images are inside the segmented lung masks.
Iterate over all local LIDC images, fetch the annotations, compute their positions within the masks and check that
at this point the lung masks are set to 255."""
for path in dicom_paths:
min_z, max_z = get_z_range(path)
directories = path.split('/')
lidc_id = directories[2]
patient_id = directories[-1]
original, mask = save_lung_segments(path, patient_id)
original_shape, mask_shape = original.shape, mask.shape
scan = pl.query(pl.Scan).filter(pl.Scan.patient_id == lidc_id).first()
for annotation in scan.annotations:
centroid_x, centroid_y, centroid_z = annotation.centroid()
patient_mask = load_patient_images(patient_id, wildcard="*_m.png")
x_mask = int(mask_shape[1] / original_shape[1] * centroid_x)
y_mask = int(mask_shape[2] / original_shape[2] * centroid_y)
z_mask = int(abs(min_z) - abs(centroid_z))
mask_value = patient_mask[z_mask, x_mask, y_mask]
assert mask_value == 255
@pytest.mark.stop_timeout
def test_stop_timeout():
timeout = get_timeout()
if timeout > 0:
time.sleep(timeout + 1)
raise ValueError("This test should timeout")
|
mit
| -8,960,259,686,865,526,000 | 32.934211 | 118 | 0.68166 | false | 3.532877 | true | false | false |