repo_name (stringlengths 5-92) | path (stringlengths 4-232) | copies (stringclasses, 19 values) | size (stringlengths 4-7) | content (stringlengths 721-1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
andremiller/beets | beetsplug/echonest.py | 1 | 17229 | # This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Fetch a variety of acoustic metrics from The Echo Nest.
"""
import time
import socket
import os
import tempfile
from string import Template
import subprocess
from beets import util, config, plugins, ui
from beets.dbcore import types
import pyechonest
import pyechonest.song
import pyechonest.track
# If a request at the EchoNest fails, we want to retry the request RETRIES
# times and wait between retries for RETRY_INTERVAL seconds.
RETRIES = 10
RETRY_INTERVAL = 10
DEVNULL = open(os.devnull, 'wb')
ALLOWED_FORMATS = ('MP3', 'OGG', 'AAC')
UPLOAD_MAX_SIZE = 50 * 1024 * 1024
# FIXME: use avconv?
CONVERT_COMMAND = u'ffmpeg -i $source -y -acodec libvorbis -vn -aq 2 $dest'
TRUNCATE_COMMAND = u'ffmpeg -t 300 -i $source ' \
                   u'-y -acodec libvorbis -vn -aq 2 $dest'
# Maps attribute names from echonest to their field names in beets.
# The attributes are retrieved from a song's `audio_summary`. See:
# http://echonest.github.io/pyechonest/song.html#pyechonest.song.profile
ATTRIBUTES = {
'energy': 'energy',
'liveness': 'liveness',
'speechiness': 'speechiness',
'acousticness': 'acousticness',
'danceability': 'danceability',
'valence': 'valence',
'tempo': 'bpm',
}
# Types for the flexible fields added by `ATTRIBUTES`
FIELD_TYPES = {
'energy': types.FLOAT,
'liveness': types.FLOAT,
'speechiness': types.FLOAT,
'acousticness': types.FLOAT,
'danceability': types.FLOAT,
'valence': types.FLOAT,
}
MUSICAL_SCALE = ['C', 'C#', 'D', 'D#', 'E', 'F',
'F#', 'G', 'G#', 'A', 'A#', 'B']
# We also use echonest_id (song_id) and echonest_fingerprint to speed up
# lookups.
ID_KEY = 'echonest_id'
FINGERPRINT_KEY = 'echonest_fingerprint'
def _splitstrip(string, delim=u','):
"""Split string (at commas by default) and strip whitespace from the
pieces.
"""
return [s.strip() for s in string.split(delim)]
def diff(item1, item2):
"""Score two Item objects according to the Echo Nest numerical
fields.
"""
result = 0.0
for attr in ATTRIBUTES.values():
if attr == 'bpm':
# BPM (tempo) is handled specially to normalize.
continue
try:
result += abs(
float(item1.get(attr, None)) -
float(item2.get(attr, None))
)
except TypeError:
result += 1.0
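    # Tempo (BPM) is added separately, normalized by the larger of the two
    # values so fast tracks don't dominate the score; a missing BPM counts as
    # a full unit of distance, like the other attributes above.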
try:
bpm1 = float(item1.get('bpm', None))
bpm2 = float(item2.get('bpm', None))
result += abs(bpm1 - bpm2) / max(bpm1, bpm2, 1)
except TypeError:
result += 1.0
return result
def similar(lib, src_item, threshold=0.15, fmt='${difference}: ${path}'):
for item in lib.items():
if item.path != src_item.path:
d = diff(item, src_item)
if d < threshold:
s = fmt.replace('${difference}', '{:2.2f}'.format(d))
ui.print_obj(item, lib, s)
class EchonestMetadataPlugin(plugins.BeetsPlugin):
item_types = FIELD_TYPES
def __init__(self):
super(EchonestMetadataPlugin, self).__init__()
self.config.add({
'auto': True,
'apikey': u'NY2KTZHQ0QDSHBAP6',
'upload': True,
'convert': True,
'truncate': True,
})
self.config.add(ATTRIBUTES)
pyechonest.config.ECHO_NEST_API_KEY = \
self.config['apikey'].get(unicode)
if self.config['auto']:
self.import_stages = [self.imported]
def _echofun(self, func, **kwargs):
"""Wrapper for requests to the EchoNest API. Will retry up to
RETRIES times and wait between retries for RETRY_INTERVAL
seconds.
"""
for i in range(RETRIES):
try:
result = func(**kwargs)
except pyechonest.util.EchoNestAPIError as e:
if e.code == 3:
# reached access limit per minute
self._log.debug(u'rate-limited on try {0}; waiting {1} '
u'seconds', i + 1, RETRY_INTERVAL)
time.sleep(RETRY_INTERVAL)
elif e.code == 5:
# specified identifier does not exist
# no use in trying again.
self._log.debug(u'{0}', e)
return None
else:
self._log.error(u'{0}', e.args[0][0])
return None
except (pyechonest.util.EchoNestIOError, socket.error) as e:
self._log.warn(u'IO error: {0}', e)
time.sleep(RETRY_INTERVAL)
except Exception as e:
# there was an error analyzing the track, status: error
self._log.debug(u'{0}', e)
return None
else:
break
else:
# If we exited the loop without breaking, then we used up all
# our allotted retries.
self._log.error(u'request failed repeatedly')
return None
return result
def _pick_song(self, songs, item):
"""Helper method to pick the best matching song from a list of songs
returned by the EchoNest. Compares artist, title and duration. If
        the artist and title match and the duration difference is at most
        2.5 seconds, it's considered a match.
"""
if not songs:
self._log.debug(u'no songs found')
return
pick = None
min_dist = item.length
for song in songs:
if song.artist_name.lower() == item.artist.lower() \
and song.title.lower() == item.title.lower():
dist = abs(item.length - song.audio_summary['duration'])
if dist < min_dist:
min_dist = dist
pick = song
if min_dist > 2.5:
return None
return pick
def _flatten_song(self, song):
"""Given an Echo Nest song object, return a flat dict containing
attributes we care about. If song is None, return None.
"""
if not song:
return
values = dict(song.audio_summary)
values['id'] = song.id
return values
# "Profile" (ID-based) lookup.
def profile(self, item):
"""Do a lookup on the EchoNest by MusicBrainz ID.
"""
# Use an existing Echo Nest ID.
if ID_KEY in item:
enid = item[ID_KEY]
# Look up the Echo Nest ID based on the MBID.
else:
if not item.mb_trackid:
self._log.debug(u'no ID available')
return
mbid = 'musicbrainz:track:{0}'.format(item.mb_trackid)
track = self._echofun(pyechonest.track.track_from_id,
identifier=mbid)
if not track:
self._log.debug(u'lookup by MBID failed')
return
enid = track.song_id
# Use the Echo Nest ID to look up the song.
songs = self._echofun(pyechonest.song.profile, ids=enid,
buckets=['id:musicbrainz', 'audio_summary'])
return self._flatten_song(self._pick_song(songs, item))
# "Search" (metadata-based) lookup.
def search(self, item):
"""Search the item at the EchoNest by artist and title.
"""
songs = self._echofun(pyechonest.song.search, title=item.title,
results=100, artist=item.artist,
buckets=['id:musicbrainz', 'tracks',
'audio_summary'])
return self._flatten_song(self._pick_song(songs, item))
# "Analyze" (upload the audio itself) method.
def prepare_upload(self, item):
"""Truncate and convert an item's audio file so it can be
uploaded to echonest.
Return a ``(source, tmp)`` tuple where `source` is the path to
the file to be uploaded and `tmp` is a temporary file to be
deleted after the upload or `None`.
If conversion or truncation fails, return `None`.
"""
source = item.path
tmp = None
if item.format not in ALLOWED_FORMATS:
if self.config['convert']:
tmp = source = self.convert(source)
if not tmp:
return
if os.stat(source).st_size > UPLOAD_MAX_SIZE:
if self.config['truncate']:
source = self.truncate(source)
if tmp is not None:
util.remove(tmp)
tmp = source
else:
return
if source:
return source, tmp
def convert(self, source):
"""Converts an item in an unsupported media format to ogg. Config
pending.
        This is stolen from Jakob Schnitzer's convert plugin.
"""
fd, dest = tempfile.mkstemp(u'.ogg')
os.close(fd)
self._log.info(u'encoding {0} to {1}',
util.displayable_path(source),
util.displayable_path(dest))
opts = []
for arg in CONVERT_COMMAND.split():
arg = arg.encode('utf-8')
opts.append(Template(arg).substitute(source=source, dest=dest))
# Run the command.
try:
util.command_output(opts)
except (OSError, subprocess.CalledProcessError) as exc:
self._log.debug(u'encode failed: {0}', exc)
util.remove(dest)
return
self._log.info(u'finished encoding {0}', util.displayable_path(source))
return dest
def truncate(self, source):
"""Truncates an item to a size less than UPLOAD_MAX_SIZE."""
fd, dest = tempfile.mkstemp(u'.ogg')
os.close(fd)
self._log.info(u'truncating {0} to {1}',
util.displayable_path(source),
util.displayable_path(dest))
opts = []
for arg in TRUNCATE_COMMAND.split():
arg = arg.encode('utf-8')
opts.append(Template(arg).substitute(source=source, dest=dest))
# Run the command.
try:
util.command_output(opts)
except (OSError, subprocess.CalledProcessError) as exc:
self._log.debug(u'truncate failed: {0}', exc)
util.remove(dest)
return
self._log.info(u'truncate encoding {0}', util.displayable_path(source))
return dest
def analyze(self, item):
"""Upload the item to the EchoNest for analysis. May require to
convert the item to a supported media format.
"""
prepared = self.prepare_upload(item)
if not prepared:
self._log.debug(u'could not prepare file for upload')
return
source, tmp = prepared
self._log.info(u'uploading file, please be patient')
track = self._echofun(pyechonest.track.track_from_filename,
filename=source)
if tmp is not None:
util.remove(tmp)
if not track:
self._log.debug(u'failed to upload file')
return
# Sometimes we have a track but no song. I guess this happens for
# new / unverified songs. We need to "extract" the audio_summary
# from the track object manually. I don't know why the
# pyechonest API handles tracks (merge audio_summary to __dict__)
# and songs (keep audio_summary in an extra attribute)
# differently.
# Maybe a patch for pyechonest could help?
# First get the (limited) metadata from the track in case
# there's no associated song.
from_track = {}
for key in ATTRIBUTES:
try:
from_track[key] = getattr(track, key)
except AttributeError:
pass
from_track['duration'] = track.duration
# Try to look up a song for the full metadata.
try:
song_id = track.song_id
except AttributeError:
return from_track
songs = self._echofun(pyechonest.song.profile,
ids=[song_id], track_ids=[track.id],
buckets=['audio_summary'])
if songs:
pick = self._pick_song(songs, item)
if pick:
return self._flatten_song(pick)
return from_track # Fall back to track metadata.
# Shared top-level logic.
def fetch_song(self, item):
"""Try all methods to get a matching song object from the
EchoNest. If no method succeeds, return None.
"""
        # There are up to three different ways to get a song. Each method is a
        # callable that takes the Item as an argument.
methods = [self.profile, self.search]
if self.config['upload']:
methods.append(self.analyze)
# Try each method in turn.
for method in methods:
song = method(item)
if song:
self._log.debug(u'got song through {0}: {1} - {2} [{3}]',
method.__name__,
item.artist,
item.title,
song.get('duration'),
)
return song
def apply_metadata(self, item, values, write=False):
"""Copy the metadata from the dictionary of song information to
the item.
"""
# Update each field.
for k, v in values.iteritems():
if k in ATTRIBUTES:
field = ATTRIBUTES[k]
self._log.debug(u'metadata: {0} = {1}', field, v)
if field == 'bpm':
item[field] = int(v)
else:
item[field] = v
if 'key' in values and 'mode' in values:
key = MUSICAL_SCALE[values['key'] - 1]
if values['mode'] == 0: # Minor key
key += 'm'
item['initial_key'] = key
if 'id' in values:
enid = values['id']
self._log.debug(u'metadata: {0} = {1}', ID_KEY, enid)
item[ID_KEY] = enid
# Write and save.
if write:
item.try_write()
item.store()
# Automatic (on-import) metadata fetching.
def imported(self, session, task):
"""Import pipeline stage.
"""
for item in task.imported_items():
song = self.fetch_song(item)
if song:
self.apply_metadata(item, song)
# Explicit command invocation.
def requires_update(self, item):
"""Check if this item requires an update from the EchoNest (its
data is missing).
"""
for field in ATTRIBUTES.values():
if not item.get(field):
return True
self._log.info(u'no update required')
return False
def commands(self):
fetch_cmd = ui.Subcommand('echonest',
help='Fetch metadata from the EchoNest')
fetch_cmd.parser.add_option(
'-f', '--force', dest='force', action='store_true', default=False,
help='(re-)download information from the EchoNest'
)
def fetch_func(lib, opts, args):
self.config.set_args(opts)
write = config['import']['write'].get(bool)
for item in lib.items(ui.decargs(args)):
self._log.info(u'{0} - {1}', item.artist, item.title)
if self.config['force'] or self.requires_update(item):
song = self.fetch_song(item)
if song:
self.apply_metadata(item, song, write)
fetch_cmd.func = fetch_func
sim_cmd = ui.Subcommand('echosim', help='show related files')
sim_cmd.parser.add_option(
'-t', '--threshold', dest='threshold', action='store',
type='float', default=0.15, help='Set difference threshold'
)
sim_cmd.parser.add_option(
'-f', '--format', action='store', default='${difference}: ${path}',
help='print with custom format'
)
def sim_func(lib, opts, args):
self.config.set_args(opts)
for item in lib.items(ui.decargs(args)):
similar(lib, item, opts.threshold, opts.format)
sim_cmd.func = sim_func
return [fetch_cmd, sim_cmd]
| mit | -5,006,644,330,438,449,000 | 33.527054 | 79 | 0.546346 | false |
MicheleMaris/grasp_lib | stokesCubeMap.py | 1 | 16416 | VERSION='V 1.4 - 2014 Jun 4 - '
from grid2d import MapGrid
import numpy as np
class stokesCubeMap :
def __init__(self,*Arg) :
arm_alias={'x':'S','y':'M'}
self._nameRIMO=None
self._angularCut=None
self._Nsamples=-1
self.File = []
self.Component = []
self.Instrument = []
self.Channel = []
self.Horn = []
self.Arm = []
self.FreqCode = []
self.Version = []
self.FreqMHz = []
self.Polarization = []
self.Beamdata=[]
self.BeamdataQ=[]
self.BeamdataU=[]
self.BeamdataV=[]
self.header={}
if len(Arg) < 2 :
self._arm=None
self._LstFileName=None
return
self._arm=Arg[0]
self._LstFileName=Arg[1]
for k in open(self._LstFileName,'r') :
kk=k.split('/')[-1].split('.')[0].split('_')
if kk[4]==self._arm :
self.File.append(k.split('.')[0]+'.stokes')
self.Component.append(kk[0])
self.Instrument.append(kk[1])
self.Channel.append(kk[2])
self.Horn.append(kk[3])
self.Arm.append(kk[4])
self.FreqCode.append(kk[5])
self.Version.append(kk[6])
self.FreqMHz.append(float(kk[5][1:]))
self.Polarization.append(arm_alias[kk[4]])
for k in self.keys() : self.__dict__[k]=np.array(self.__dict__[k])
def fill_from_fits(self) :
import numpy as np
self.Beamdata=[]
self.BeamdataQ=[]
self.BeamdataU=[]
self.BeamdataV=[]
self.header={}
isFirst=True
for k in self.File :
print k
p,x,b,q,u,v = self.get_fits(k)
self.Beamdata.append(b)
self.BeamdataQ.append(q)
self.BeamdataU.append(u)
self.BeamdataV.append(v)
if isFirst :
self.header={'p':p,'x':x}
isFirst=False
for k in ['','Q','U','V'] :
nn='Beamdata'+k
self.__dict__[nn]=np.array(self.__dict__[nn]).transpose()
self._Nsamples=self.Beamdata.shape[0]
def fitsType(self) :
if self.header['x'].has_key('HIERARCH Nx') :
return 'grd'
else :
return 'cut'
def getGeometry(self) :
import numpy as np
if self.fitsType()=='grd' :
return
else :
geom={}
for k in ['objType','Ntheta','Nphi','Mintheta','Maxtheta'] :
geom[k]=self.header['x']['HIERARCH '+k]
geom['colat']=np.rad2deg(np.arange(geom['Ntheta'])*(geom['Maxtheta']-geom['Mintheta'])/float(geom['Ntheta']-1)+geom['Mintheta'])
geom['long']=np.arange(geom['Nphi'])/float(geom['Nphi']-1)*360.
return geom
def apply_angularCut(self,angularCut) :
import numpy as np
if self.fitsType()=='grd' : return
if angularCut == None : return
self._angularCut=angularCut
gg=self.getGeometry()
idx=np.where(gg['colat']<self._angularCut)[0].max()
imax=idx*gg['Nphi']+gg['Nphi']
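        # The Stokes columns are stored ring by ring (Nphi samples per
        # colatitude ring), so imax is the first sample index past the last
        # ring whose colatitude lies below the angular cut; zero all of them.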
self.Beamdata[:imax,:]=0
self.BeamdataQ[:imax,:]=0
self.BeamdataU[:imax,:]=0
self.BeamdataV[:imax,:]=0
def __len__(self) :
return len(self.FreqMHz)
def __getitem__(self,this) :
return self.__dict__[this]
def keys(self) :
l=[]
for k in self.__dict__.keys() :
if k[0]!='_' : l.append(k)
return l
def copy(self) :
import copy
return copy.deepcopy(self)
def argslice(self,idx) :
out=self.copy()
for k in self.keys() :
            out.__dict__[k]=self.__dict__[k][idx]
        return out
def get_fits(self,File,ihdu=1,fitsfile=None) :
import pyfits
if fitsfile == None :
t=pyfits.open(File)
else :
t=fitsfile
p=t[0].header
        x=t[ihdu].header
        b=t[ihdu].data.field('Beamdata')
        q=t[ihdu].data.field('BeamdataQ')
        u=t[ihdu].data.field('BeamdataU')
        v=t[ihdu].data.field('BeamdataV')
if fitsfile == None :
t.close()
return p,x,b,q,u,v
def _template(self) :
import numpy as np
return self.Beamdata.shape
def isGRD(self) :
"Returns True if the file is a GRD"
return self.header['x'].has_key('HIERARCH Nx')
def Nelements(self) :
return self.header['x']['NAXIS2']
def NrowsNcols(self) :
"returns the number of rows and cols"
if self.isGRD() :
return (self.header['x']['HIERARCH Ny'],self.header['x']['HIERARCH Nx'])
return (self.header['x']['HIERARCH Ntheta'],self.header['x']['HIERARCH Nphi'])
def reshape2d(self) :
"reshape to 2d the matrices"
for k in self.keys() :
self.__dict__[k].shape=self.NrowsNcols()
def reshape1d(self) :
"reshape to 1d the matrices"
for k in self.keys() :
self.__dict__[k].shape=self.__dict__[k].size
def rows_cols_idx(self) :
"returns the maps of rows and cols index"
import numpy as np
nr,nc=self.NrowsNcols()
row=np.zeros([nr,nc])
for k in range(nr) : row[k,:]=k
row.shape=self.Nelements()
col=np.zeros([nr,nc])
        for k in range(nc) : col[:,k]=k
col.shape=self.Nelements()
return row,col
def rows_cols_values(self) :
"returns the maps of rows and cols values"
import numpy as np
nr,nc=self.NrowsNcols()
row=np.zeros([nr,nc])
for k in range(nr) : row[k,:]=k
row.shape=self.Nelements()
col=np.zeros([nr,nc])
        for k in range(nc) : col[:,k]=k
col.shape=self.Nelements()
return row,col
def interp(self,idx,FreqMHz) :
import numpy as np
b=np.interp(FreqMHz,self.FreqMHz,self.Beamdata[idx])
q=np.interp(FreqMHz,self.FreqMHz,self.BeamdataQ[idx])
u=np.interp(FreqMHz,self.FreqMHz,self.BeamdataU[idx])
v=np.interp(FreqMHz,self.FreqMHz,self.BeamdataV[idx])
return b,q,u,v
def resample(self,FreqMHz) :
import copy
import numpy as np
out=stokesCubeMap()
out.header=self.header
out.FreqMHz=FreqMHz
out._Nsamples=self._Nsamples
out.Beamdata = np.zeros([self._Nsamples,len(out.FreqMHz)])
out.BeamdataQ = np.zeros([self._Nsamples,len(out.FreqMHz)])
out.BeamdataU = np.zeros([self._Nsamples,len(out.FreqMHz)])
out.BeamdataV = np.zeros([self._Nsamples,len(out.FreqMHz)])
for ii in range(self._Nsamples) :
b,q,u,v=self.interp(ii,out.FreqMHz)
out.Beamdata[ii]=b
out.BeamdataQ[ii]=q
out.BeamdataU[ii]=u
out.BeamdataV[ii]=v
return out
def average(self,FreqMHz,Weight,Method=None,nameRIMO=None) :
import numpy as np
import time
import copy
out=stokesCubeMap()
out.File=copy.deepcopy(self.File)
out._angularCut=self._angularCut
out.header=self.header
out.Beamdata = np.zeros([self._Nsamples])
out.BeamdataQ = np.zeros([self._Nsamples])
out.BeamdataU = np.zeros([self._Nsamples])
out.BeamdataV = np.zeros([self._Nsamples])
out._Nsamples=0
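        # Band-average with the trapezoidal rule: dw holds half of each
        # frequency step, _Norm is the integral of the weight function, and
        # every output sample becomes integral(data * Weight) / _Norm.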
dw=(FreqMHz[1:]-FreqMHz[:-1])*0.5
out._Norm=((Weight[1:]+Weight[:-1])*dw).sum()
out._Method=Method
tic=time.time()
for ii in range(self._Nsamples) :
b,q,u,v=self.interp(ii,FreqMHz)
xx=b*Weight ; out.Beamdata[ii] = ((xx[1:]+xx[:-1])*dw).sum()/out._Norm
xx=q*Weight ; out.BeamdataQ[ii] = ((xx[1:]+xx[:-1])*dw).sum()/out._Norm
xx=u*Weight ; out.BeamdataU[ii] = ((xx[1:]+xx[:-1])*dw).sum()/out._Norm
xx=v*Weight ; out.BeamdataV[ii] = ((xx[1:]+xx[:-1])*dw).sum()/out._Norm
out._elapsed_time=time.time()-tic
out._Method=Method
out._nameRIMO=nameRIMO
return out
def tofits(self,fitsname,Author='M.Maris',creator='',version='',doNotWrite=False,clobber=True) :
"saves the file in fits"
import numpy as np
from collections import OrderedDict
import pyfits
from SmartTable import dict2fits
import time
import copy
#o=pyfits.open(fitsname)
out = OrderedDict()
out['Beamdata']=np.array(self['Beamdata'],dtype='float32')
out['BeamdataQ']=np.array(self['BeamdataQ'],dtype='float32')
out['BeamdataU']=np.array(self['BeamdataU'],dtype='float32')
out['BeamdataV']=np.array(self['BeamdataV'],dtype='float32')
T=dict2fits.Table(out)
#T.header=copy.deepcopy(self.header['x'])
T.header.update('TUNIT1',self.header['x']['TUNIT1'])
T.header.update('TUNIT2',self.header['x']['TUNIT2'])
T.header.update('TUNIT3',self.header['x']['TUNIT3'])
T.header.update('TUNIT4',self.header['x']['TUNIT4'])
#
#test wether the original file was a GRD or a CUT file
print ' copying Header'
if self.header['x'].has_key('HIERARCH Nx') :
# a grd file
for k in ['objType','Nx','Ny','Xcentre','Ycentre','Xdelta','Ydelta'] :
T.header.update('HIERARCH '+k,self.header['x']['HIERARCH '+k])
print " %s : '%s' in '%s'"%(k,self.header['x']['HIERARCH '+k],T.header['HIERARCH '+k])
else :
# a CUT file
for k in ['objType','Ntheta','Nphi','Mintheta','Maxtheta'] :
T.header.update('HIERARCH '+k,self.header['x']['HIERARCH '+k])
print " %s : '%s' in '%s'"%(k,self.header['x']['HIERARCH '+k],T.header['HIERARCH '+k])
print
T.header.update('HIERARCH SUM BEAMDATA',self['Beamdata'].sum(),'sum of Beamdata')
T.header.update('HIERARCH SUM BEAMDATAQ',self['BeamdataQ'].sum(),'sum of BeamdataQ')
T.header.update('HIERARCH SUM BEAMDATAU',self['BeamdataU'].sum(),'sum of BeamdataU')
T.header.update('HIERARCH SUM BEAMDATAV',self['BeamdataV'].sum(),'sum of BeamdataV')
T.header.update('HIERARCH RIMO',self._nameRIMO if self._nameRIMO != None else '','')
T.header.update('HIERARCH ANGULARCUT',self._angularCut if self._angularCut != None else 'None','angular cut [deg]')
T.header.update('HIERARCH DATE',time.asctime(),'')
T.header.update('HIERARCH CREATOR',creator,'')
T.header.update('HIERARCH CONTACT',Author,'')
if self._Method != None :
for k in self._Method :
T.header.update('HIERARCH '+k[0],k[1],k[2])
T.header.add_comment('')
T.header.add_comment('Beam band averaged')
T.header.add_comment('Follows the list of input files used')
for i in range(len(self.File)) :
l='%s'%(self.File[i])
T.header.add_comment(l)
T.header.add_comment('')
if not doNotWrite :
print " Writing to:",fitsname
T.writeto(fitsname,clobber=clobber)
return T
class stokesMap(MapGrid) :
def __init__(self,StokesFileName,mode='readonly') :
from grid2d import GridAxis
self.__new_info__()
self.__info__['fits_readout_time_sec']=-1.
if StokesFileName==None : return
# connects to the fits file
self.fits_connect(StokesFileName,mode=mode)
# gets the header of the first hdu
self.fits_load_hdu(1,justHeader=True)
# formats the mapgrid according to the header content
self.__info__['geometry']=self.getGeometry()
if self.isGRD() :
MapGrid.__init__(self,GridAxis('y','',self.__info__['geometry']['y']),GridAxis('x','',self.__info__['geometry']['x']))
else :
MapGrid.__init__(self,GridAxis('long','deg',self.__info__['geometry']['long']),GridAxis('colat','deg',self.__info__['geometry']['colat']))
# gets the first hdu
self.fits_load_hdu(1)
def __new_info__(self) :
from collections import OrderedDict
self.__info__=OrderedDict()
def fits_info(self) :
"return infos on the fits file"
if self.__info__['fitsfile']==None :
return "File not connected"
return self.__info__['fitsfile'].info()
def fits_primary_header(self) :
"returns the current fits primary header content"
return self.__info__['header']['p']
def fits_extended_header(self) :
"returns the current fits extended header content"
return self.__info__['header']['x']
def fits_connect(self,FileName,mode='readonly') :
"connect (open and keep control) to a fits file"
import pyfits
self.__info__['StokesFileName']=FileName
self.__info__['fitsfile']=pyfits.open(FileName,mode)
self.__info__['ihdu']=-1
self.__info__['header']={'p':None,'x':None}
if self.__info__['fitsfile']==None :
print "Error: file %s not found"%StokesFileName
MapGrid.__init__(self)
return
self.__info__['header']['p']=self.__info__['fitsfile'][0].header
def fits_unconnect(self) :
"unconnect (close and left control) the fits file"
if self.__info__['fitsfile']==None :
print "File not connected"
self.__info__['fitsfile'].close()
self.__info__['fitsfile']=None
def fits_load_hdu(self,ihdu,justHeader=False) :
"""fits_load_hdu(ihdu)
Load a fits hdu ihdu.
        If successful, returns (ihdu, fits_readout_time_sec)"""
from grid2d import GridAxis
import time
self.__info__['fits_readout_time_sec']=-1.
if self.__info__['fitsfile']==None :
print "File not connected"
try :
x=self.__info__['fitsfile'][ihdu].header
except :
print "hdu : ",ihdu," does not exists"
return
self.__info__['header']['x']=x
self.__info__['ihdu']=ihdu
if justHeader : return
tic=time.time()
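        # Read the four Stokes beam columns from the HDU and reshape each one
        # onto the (rows, cols) grid defined by the file geometry.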
b=self.__info__['fitsfile'][ihdu].data.field('Beamdata') ; b.shape=(self.R['n'],self.C['n']) ; self.newmap('Beamdata',value=b)
q=self.__info__['fitsfile'][ihdu].data.field('BeamdataQ') ; q.shape=(self.R['n'],self.C['n']) ; self.newmap('BeamdataQ',value=q)
u=self.__info__['fitsfile'][ihdu].data.field('BeamdataU') ; u.shape=(self.R['n'],self.C['n']) ; self.newmap('BeamdataU',value=u)
v=self.__info__['fitsfile'][ihdu].data.field('BeamdataV') ; v.shape=(self.R['n'],self.C['n']) ; self.newmap('BeamdataV',value=v)
self.__info__['fits_readout_time_sec']=time.time()-tic
return self.__info__['ihdu'],self.__info__['fits_readout_time_sec']
def copy(self,skipFields=None,skipFits=True) :
"makes a copy, without fits informations"
import copy
out=stokesMap(None)
for k in self.__dict__.keys() :
if k !='__info__' :
out.__dict__[k]=copy.deepcopy(self.__dict__[k])
else :
out.__new_info__()
for k1 in self.__dict__[k].keys() :
if (k1+' ')[0:4].lower()!='fits' :
out.__dict__[k][k1]=copy.deepcopy(self.__dict__[k][k1])
return out
def fitsType(self) :
if self.__info__['header']['x'].has_key('HIERARCH Nx') :
return 'grd'
else :
return 'cut'
def isGRD(self) :
"Returns True if the file is a GRD"
return self.__info__['header']['x'].has_key('HIERARCH Nx')
def getGeometry(self) :
import numpy as np
geom={}
if self.fitsType()=='grd' :
for k in ['objType','Nx','Ny','Xcentre','Ycentre','Xdelta','Ydelta'] :
geom[k]=self.__info__['header']['x']['HIERARCH '+k]
geom['x']=(np.arange(geom['Nx'])-geom['Xcentre'])*geom['Xdelta']
geom['y']=(np.arange(geom['Ny'])-geom['Ycentre'])*geom['Ydelta']
else :
for k in ['objType','Ntheta','Nphi','Mintheta','Maxtheta'] :
geom[k]=self.__info__['header']['x']['HIERARCH '+k]
geom['colat']=np.rad2deg(np.arange(geom['Ntheta'])*(geom['Maxtheta']-geom['Mintheta'])/float(geom['Ntheta']-1)+geom['Mintheta'])
geom['long']=np.arange(geom['Nphi'])/float(geom['Nphi']-1)*360.
return geom
def coadd(self,that) :
for k in ['Beamdata','BeamdataQ','BeamdataU','BeamdataV'] :
self[k]+=that[k]
def scale(self,that) :
for k in ['Beamdata','BeamdataQ','BeamdataU','BeamdataV'] :
self[k]*=that
def __sub__(self,that) :
new=self.copy()
for k in ['Beamdata','BeamdataQ','BeamdataU','BeamdataV'] :
try :
new[k]-=that[k]
except :
new[k]-=that
return new
def __add__(self,that) :
new=self.copy()
for k in ['Beamdata','BeamdataQ','BeamdataU','BeamdataV'] :
try :
new[k]+=that[k]
except :
new[k]+=that
return new
def __pow__(self,that) :
new=self.copy()
for k in ['Beamdata','BeamdataQ','BeamdataU','BeamdataV'] :
new[k]=new[k]**that
return new
| gpl-2.0 | -5,566,125,991,815,063,000 | 38.181384 | 147 | 0.569262 | false |
hannorein/rebound | update_version.py | 1 | 1912 | #!/usr/bin/python
# This script automatically creates a list of examples by reading the header in all problem.c files.
import glob
import subprocess
ghash = subprocess.check_output(["git", "rev-parse", "HEAD"]).decode("ascii").strip()
with open("version.txt") as f:
reboundversion = f.readlines()[0].strip()
print("Updating version to "+reboundversion)
with open("README.md") as f:
readme = f.readlines()
keep_lines_after_header = 5
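# Rewrite the version badge line of the README in place; every other line is
# copied through unchanged.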
with open("README.md","w") as f:
start_delete = -1
for i in range(0,len(readme)):
# [](https://rebound.readthedocs.org)
if "![Version]" in readme[i]:
readme[i] = "[](https://rebound.readthedocs.org)\n"
f.write(readme[i])
with open("src/rebound.c") as f:
reboundlines = f.readlines()
for i,l in enumerate(reboundlines):
if "**VERSIONLINE**" in l:
reboundlines[i] = "const char* reb_version_str = \""+reboundversion+"\"; // **VERSIONLINE** This line gets updated automatically. Do not edit manually.\n"
with open("src/rebound.c", "w") as f:
f.writelines(reboundlines)
with open("setup.py") as f:
setuplines = f.readlines()
for i,l in enumerate(setuplines):
if "version='" in l:
setuplines[i] = " version='"+reboundversion+"',\n"
if "GITHASHAUTOUPDATE" in l:
setuplines[i] = " ghash_arg = \"-DGITHASH="+ghash+"\" #GITHASHAUTOUPDATE\n"
with open("setup.py", "w") as f:
f.writelines(setuplines)
shortversion = reboundversion
while shortversion[-1] != '.':
shortversion = shortversion[:-1]
shortversion = shortversion[:-1]
print("To commit, copy and paste:")
print("\ngit commit -a -m \"Updating version to "+reboundversion+"\"")
| gpl-3.0 | 159,939,837,923,048,500 | 36.490196 | 174 | 0.638598 | false |
neuRowsATL/animatLabSimulationAPI | class_chartViz.py | 1 | 5103 | """
Created by: Bryce Chung
Last modified: January 4, 2016
"""
import matplotlib.pyplot as plt
import numpy as np
plt.ion()
global verbose
verbose = 3
class chartViz(object):
"""
This class is used to visualize chartData objects.
"""
def __init__(self):
self.data = {}
self.fig = None
self.axes = {}
self.arrange = None
self.chartFormat = None
self.title = ''
self.titleFormat = {}
def add_data(self, name, objChartData):
if objChartData not in self.data:
self.data[name] = objChartData.data
def set_arrange(self, arrange):
self.arrange = arrange
def set_format(self, chartFormat):
self.chartFormat = chartFormat
def set_title(self, title, titleFormat = {}):
self.title = title
self.titleFormat = titleFormat
def make_chart(self, hide=['Time']):
self.fig = plt.figure(figsize=(24,18))
self.fig.suptitle(self.title, **self.titleFormat)
axShare = None
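        # Subplots share a common x axis: each new axes is created with
        # sharex pointing at the previously created one (axShare).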
if self.arrange is None:
axLen = 1
for dAxis in self.data:
                axLen += len(np.where(np.array(self.data[dAxis].keys()) != 'Time')[0])
i=1
for dAxis in self.data:
print "\n"
for d in dAxis.keys():
if d in hide:
continue
if verbose > 1:
print "Charting: %s" % d
print "Shared:"
print axShare
if len(self.axes) > 0:
ax = self.fig.add_subplot(axLen, 1, i, sharex=axShare)
else:
ax = self.fig.add_subplot(axLen, 1, i)
axShare = ax
if dAxis[d].datatype == 'analog':
ax.plot(dAxis['Time'].data, dAxis[d].data, 'b-')
elif dAxis[d].datatype == 'spike':
for spike in dAxis[d].data:
ax.axvline(spike, color='g')
ax.yaxis.set_ticklabels([])
if i < axLen:
ax.xaxis.set_ticklabels([])
self.axes[d] = ax
i += 1
else:
for ix, axis in enumerate(self.arrange):
print "\n"
if len(self.axes) > 0:
ax = self.fig.add_subplot(len(self.arrange), 1, ix+1, sharex=axShare)
print "Sharing axis: %s" % str(axShare)
else:
ax = self.fig.add_subplot(len(self.arrange), 1, ix+1)
print "No shared axis"
axShare = ax
for ix, chart in enumerate(self.arrange[axis]['charts']):
if chart.split('.')[1:] in hide:
continue
if verbose > 1:
print "Charting: %s" % chart
#print "Shared:"
#print axShare
color = 'k'
kwargs = {}
if chart in self.chartFormat.keys():
formatting = self.chartFormat[chart]
if 'color' in formatting.keys():
kwargs['color'] = self.chartFormat[chart]['color']
if verbose > 1:
print "Charting: %s" % chart
print kwargs
strDataObj = chart.split('.')[0]
strChart = ''.join(chart.split('.')[1:])
data = self.data[strDataObj]
if data[strChart]['datatype'] == 'analog':
ax.plot(data['Time']['data'], data[strChart]['data'], **kwargs)
elif data[strChart]['datatype'] == 'spike':
if len(self.arrange[axis]['charts']) > 1:
height = 1./len(self.arrange[axis]['charts'])
else:
height = 1
for spike in data[strChart]['data']:
ax.axvline(spike, ymin=ix*height, ymax=(ix+1)*height-height*0.1, **kwargs)
ax.yaxis.set_ticklabels([])
ax.set_ylabel(self.arrange[axis]['name'])
if ix+1 < len(self.arrange):
ax.xaxis.set_ticklabels([])
self.axes[axis] = ax
| gpl-2.0 | 2,067,055,089,075,999,700 | 35.45 | 102 | 0.386047 | false |
cloughrm/Flask-Angular-Template | backend/pastry/models.py | 1 | 2070 | import random
import hashlib
from flask import current_app as app
from pastry.db import mongo
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer, SignatureExpired, BadSignature
class User(object):
def __init__(self, username, password):
self.set_args(
username=username,
password=generate_password_hash(password)
)
def set_args(self, **kwargs):
self.username = kwargs.get('username')
self.password = kwargs.get('password')
def create(self):
object_id = mongo.db.users.insert({
'username': self.username,
'password': self.password,
'api_key': self.generate_api_key(),
'admin': False,
'groups': ['user'],
'verified': False,
})
return object_id
def exists(self):
user = mongo.db.users.find_one({'username': self.username})
if user:
self.set_args(**user)
return True
def generate_auth_token(self, expires_in=86400):
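        # Issue a signed, time-limited token (expires_in seconds, default 24
        # hours) that embeds the username; verify_auth_token below decodes
        # and validates it.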
s = TimedJSONWebSignatureSerializer(app.config.get('SECRET_KEY'), expires_in=expires_in)
token = s.dumps({'username': self.username})
return token
def generate_api_key(self):
return hashlib.md5(str(random.getrandbits(256))).hexdigest()
def verify_password(self, password):
return check_password_hash(self.password, password)
@staticmethod
def verify_api_key(api_key):
return mongo.db.users.find_one({'api_key': api_key})
@staticmethod
def verify_auth_token(token):
s = TimedJSONWebSignatureSerializer(app.config.get('SECRET_KEY'))
try:
data = s.loads(token)
except SignatureExpired:
app.logger.info('Expired Token')
return False
except BadSignature:
app.logger.warning('Invalid Token')
return False
user = mongo.db.users.find_one({'username': data['username']})
return user
| mit | -8,695,304,718,168,323,000 | 30.363636 | 96 | 0.617391 | false |
dmitryfizteh/MachineLearning | Week_1/Task_1/titanic.py | 1 | 2465 | import pandas
from scipy.stats.stats import pearsonr
print("Неделя №1. Задание №1")
# Функция вывода ответа
def write_result(result, index):
print("Ответ: " + str(result))
file = open("./Answers/" + str(index) + ".txt", "w")
file.write(result)
file.close()
data = pandas.read_csv('./Data/titanic.csv', index_col='PassengerId')
print("\nРешение задачи №1")
a1 = data['Sex'].value_counts()
result = str("%d %d" % (a1['male'],a1['female']))
write_result(result, 1)
print("\nРешение задачи №2")
a2 = data['Survived'].value_counts()
#print("%d погибло, %d выжило" % (a2[0],a2[1]))
result = str("%.2f" % (round(a2[1]/(a2[0]+a2[1])*100,2)))
write_result(result, 2)
print("\nРешение задачи №3")
a3 = data['Pclass'].value_counts()
result = str("%.2f" % (round(a3[1]/(a3[1]+a3[2]+a3[3])*100,2)))
write_result(result, 3)
print("\nРешение задачи №4")
a4_1 = (data['Age'].dropna()).mean()
a4_2 = (data['Age'].dropna()).median()
result = str("%0.2f %0.2f" % (a4_1, a4_2))
write_result(result, 4)
print("\nРешение задачи №5")
a5 = pearsonr(data['SibSp'], data['Parch'])
#print('Correlation coefficient r = %0.2f, significance level p = %0.3f.' % a5)
result = str("%0.2f" % a5[0])
write_result(result, 5)
print("\nРешение задачи №6")
a6 = data[data['Sex'] == "female"]
a6 = a6['Name']
names = list()
for (key,value) in enumerate(a6):
value = value.replace("Mrs. ","")
value = value.replace("Miss. ","")
value = value.replace("(","")
value = value.replace(")","")
value = value.replace('"','')
value = value.split(", ")
names_i = value[0]
names.append(value[0])
for name in value[1].split(" "):
names.append(name)
# Function that finds the most frequent element in an array
def Freq2(b):
d = {}
    m, i = 0, 0 # Maximum frequency and the corresponding element
    for x in b: # Loop over the source array
        d[x] = d[x] + 1 if x in d else 1 # If the key is already present, add 1; otherwise set it to 1
        if d[x] > m:
            m, i = d[x], x # Remember the maximum and its element
#return {i:m}
return i
result = str("%s" % (Freq2(names)))
write_result(result, 6)
| gpl-3.0 | -2,582,688,218,459,481,000 | 26.376623 | 95 | 0.610057 | false |
eladkarakuli/anyway | process.py | 1 | 10121 | # -*- coding: utf-8 -*-
from __future__ import print_function
import glob
import os
import argparse
import json
from flask.ext.sqlalchemy import SQLAlchemy
import field_names
from models import Marker
import models
from utilities import ProgressSpinner, ItmToWGS84, init_flask, CsvReader
import itertools
import localization
import re
from datetime import datetime
directories_not_processes = {}
progress_wheel = ProgressSpinner()
content_encoding = 'cp1255'
accident_type_regex = re.compile("Accidents Type (?P<type>\d)")
ACCIDENTS = 'accidents'
CITIES = 'cities'
STREETS = 'streets'
ROADS = "roads"
URBAN_INTERSECTION = 'urban_intersection'
NON_URBAN_INTERSECTION = 'non_urban_intersection'
DICTIONARY = "dictionary"
lms_files = {ACCIDENTS: "AccData.csv",
URBAN_INTERSECTION: "IntersectUrban.csv",
NON_URBAN_INTERSECTION: "IntersectNonUrban.csv",
STREETS: "DicStreets.csv",
DICTIONARY: "Dictionary.csv",
}
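# Converts the accident x/y coordinates from the Israel Transverse Mercator
# (ITM) grid to WGS84 longitude/latitude.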
coordinates_converter = ItmToWGS84()
app = init_flask(__name__)
db = SQLAlchemy(app)
def get_street(settlement_sign, street_sign, streets):
"""
extracts the street name using the settlement id and street id
"""
if settlement_sign not in streets:
return None
street_name = [x[field_names.street_name].decode(content_encoding) for x in streets[settlement_sign] if
x[field_names.street_sign] == street_sign]
# there should be only one street name, or none if it wasn't found.
return street_name[0] if len(street_name) == 1 else None
def get_address(accident, streets):
"""
extracts the address of the main street.
tries to build the full address: <street_name> <street_number>, <settlement>,
but might return a partial one if unsuccessful.
"""
street = get_street(accident[field_names.settlement_sign], accident[field_names.street1], streets)
if not street:
return u""
# the home field is invalid if it's empty or if it contains 9999
home = accident[field_names.home] if accident[field_names.home] != 9999 else None
settlement = localization.get_city_name(accident[field_names.settlement_sign])
if not home and not settlement:
return street
if not home and settlement:
return u"{}, {}".format(street, settlement)
if home and not settlement:
return u"{} {}".format(street, home)
return u"{} {}, {}".format(street, home, settlement)
def get_streets(accident, streets):
"""
extracts the streets the accident occurred in.
every accident has a main street and a secondary street.
:return: a tuple containing both streets.
"""
main_street = get_address(accident, streets)
secondary_street = get_street(accident[field_names.settlement_sign], accident[field_names.street2], streets)
return main_street, secondary_street
def get_junction(accident, roads):
"""
extracts the junction from an accident
:return: returns the junction or None if it wasn't found
"""
key = accident[field_names.road1], accident[field_names.road2]
junction = roads.get(key, None)
return junction.decode(content_encoding) if junction else None
def parse_date(accident):
"""
parses an accident's date
"""
year = accident[field_names.accident_year]
month = accident[field_names.accident_month]
day = accident[field_names.accident_day]
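    # Wrap the hour into the 0-23 range accepted by datetime.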
hour = accident[field_names.accident_hour] % 24
accident_date = datetime(year, month, day, hour, 0, 0)
return accident_date
def load_extra_data(accident, streets, roads):
"""
loads more data about the accident
:return: a dictionary containing all the extra fields and their values
:rtype: dict
"""
extra_fields = {}
# if the accident occurred in an urban setting
if bool(accident[field_names.urban_intersection]):
main_street, secondary_street = get_streets(accident, streets)
if main_street:
extra_fields[field_names.street1] = main_street
if secondary_street:
extra_fields[field_names.street2] = secondary_street
# if the accident occurred in a non urban setting (highway, etc')
if bool(accident[field_names.non_urban_intersection]):
junction = get_junction(accident, roads)
if junction:
extra_fields[field_names.junction_name] = junction
# localize static accident values
for field in localization.get_supported_tables():
if accident[field]:
# if we have a localized field for that particular field, save the field value
# it will be fetched we deserialized
if localization.get_field(field, accident[field]):
extra_fields[field] = accident[field]
return extra_fields
def import_accidents(provider_code, accidents, streets, roads):
print("reading accidents from file %s" % (accidents.name(),))
for accident in accidents:
if field_names.x_coordinate not in accident or field_names.y_coordinate not in accident:
raise ValueError("x and y coordinates are missing from the accidents file!")
if not accident[field_names.x_coordinate] or not accident[field_names.y_coordinate]:
continue
lng, lat = coordinates_converter.convert(accident[field_names.x_coordinate], accident[field_names.y_coordinate])
marker = {
"id":int("{0}{1}".format(provider_code, accident[field_names.id])),
"title":"Accident",
"description":json.dumps(load_extra_data(accident, streets, roads), encoding=models.db_encoding),
"address":get_address(accident, streets),
"latitude":lat,
"longitude":lng,
"type":Marker.MARKER_TYPE_ACCIDENT,
"subtype":int(accident[field_names.accident_type]),
"severity":int(accident[field_names.accident_severity]),
"created":parse_date(accident),
"locationAccuracy":int(accident[field_names.igun])
}
yield marker
accidents.close()
def get_files(directory):
for name, filename in lms_files.iteritems():
if name not in [STREETS, NON_URBAN_INTERSECTION, ACCIDENTS]:
continue
files = filter(lambda path: filename.lower() in path.lower(), os.listdir(directory))
amount = len(files)
if amount == 0:
raise ValueError(
"file doesn't exist directory, cannot parse it; directory: {0};filename: {1}".format(directory,
filename))
if amount > 1:
raise ValueError("there are too many files in the directory, cannot parse!;directory: {0};filename: {1}"
.format(directory, filename))
csv = CsvReader(os.path.join(directory, files[0]))
if name == STREETS:
streets_map = {}
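            # Group street records by settlement code so that street names
            # can later be looked up per settlement (see get_street above).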
for settlement in itertools.groupby(csv, lambda street: street.get(field_names.settlement, "OTHER")):
key, val = tuple(settlement)
streets_map[key] = [{field_names.street_sign: x[field_names.street_sign],
field_names.street_name: x[field_names.street_name]} for x in val if
field_names.street_name in x and field_names.street_sign in x]
csv.close()
yield name, streets_map
elif name == NON_URBAN_INTERSECTION:
roads = {(x[field_names.road1], x[field_names.road2]): x[field_names.junction_name] for x in csv if
field_names.road1 in x and field_names.road2 in x}
csv.close()
yield ROADS, roads
elif name == ACCIDENTS:
yield name, csv
def import_to_datastore(directory, provider_code, batch_size):
"""
goes through all the files in a given directory, parses and commits them
"""
try:
files_from_lms = dict(get_files(directory))
if len(files_from_lms) == 0:
return
print("importing data from directory: {}".format(directory))
now = datetime.now()
accidents = list(import_accidents(provider_code=provider_code, **files_from_lms))
db.session.execute(Marker.__table__.insert(), accidents)
db.session.commit()
took = int((datetime.now() - now).total_seconds())
print("imported {0} items from directory: {1} in {2} seconds".format(len(accidents), directory, took))
except Exception as e:
directories_not_processes[directory] = e.message
def get_provider_code(directory_name=None):
if directory_name:
match = accident_type_regex.match(directory_name)
if match:
return int(match.groupdict()['type'])
ans = ""
while not ans.isdigit():
ans = raw_input("directory provider code is invalid, please enter a valid code: ")
if ans.isdigit():
return int(ans)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, default="static/data/lms")
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--delete_all', dest='delete_all', action='store_true', default=True)
parser.add_argument('--provider_code', type=int)
args = parser.parse_args()
# wipe all the Markers first
if args.delete_all:
print("deleting the entire db!")
db.session.query(Marker).delete()
db.session.commit()
for directory in glob.glob("{0}/*/*".format(args.path)):
parent_directory = os.path.basename(os.path.dirname(os.path.join(os.pardir, directory)))
provider_code = args.provider_code if args.provider_code else get_provider_code(parent_directory)
import_to_datastore(directory, provider_code, args.batch_size)
failed = ["{0}: {1}".format(directory, fail_reason) for directory, fail_reason in
directories_not_processes.iteritems()]
print("finished processing all directories, except: %s" % "\n".join(failed))
if __name__ == "__main__":
main() | bsd-3-clause | -1,322,978,184,679,433,700 | 37.196226 | 120 | 0.644106 | false |
kylbarnes/blox | deploy/demo-cli/blox-create-environment.py | 1 | 2812 | #!/usr/bin/env python
import json, os, sys
import common
def main(argv):
# Command Line Arguments
args = [{'arg':'--apigateway', 'dest':'apigateway', 'default':None, 'type':'boolean', 'help':'Call API Gateway endpoint'}]
if '--apigateway' in argv:
args.extend([{'arg':'--stack', 'dest':'stack', 'default':None, 'help':'CloudFormation stack name'}])
else:
args.extend([{'arg':'--host', 'dest':'host', 'default':'localhost:2000', 'help':'Blox Scheduler <Host>:<Port>'}])
args.extend([{'arg':'--environment', 'dest':'environment', 'default':None, 'help':'Blox environment name'}])
args.extend([{'arg':'--cluster', 'dest':'cluster', 'default':None, 'help':'ECS cluster name'}])
args.extend([{'arg':'--task-definition', 'dest':'taskDef', 'default':None, 'help':'ECS task definition arn'}])
# Parse Command Line Arguments
params = common.parse_cli_args('Create Blox Environment', args)
if params.apigateway:
run_apigateway(params)
else:
run_local(params)
# Call Blox Scheduler API Gateway Endpoint
def run_apigateway(params):
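  # Resolve the deployed REST API and resource IDs from the CloudFormation
  # stack, then exercise the endpoint via apigateway test-invoke-method.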
command = ["cloudformation", "describe-stack-resource", "--stack-name", params.stack, "--logical-resource-id", "RestApi"]
restApi = common.run_shell_command(params.region, command)
command = ["cloudformation", "describe-stack-resource", "--stack-name", params.stack, "--logical-resource-id", "ApiResource"]
restResource = common.run_shell_command(params.region, command)
body = {'name': params.environment, 'instanceGroup': {'cluster': params.cluster}, 'taskDefinition': params.taskDef}
command = ["apigateway", "test-invoke-method", "--rest-api-id", restApi['StackResourceDetail']['PhysicalResourceId'], "--resource-id", restResource['StackResourceDetail']['PhysicalResourceId'], "--http-method", "POST", "--headers", "{}", "--path-with-query-string", "/v1/environments", "--body", json.dumps(body)]
response = common.run_shell_command(params.region, command)
print "HTTP Response Code: %d" % response['status']
try:
obj = json.loads(response['body'])
print json.dumps(obj, indent=2)
except Exception as e:
print "Error: Could not parse response - %s" % e
print json.dumps(response, indent=2)
sys.exit(1)
# Call Blox Scheduler Local Endpoint
def run_local(params):
api = common.Object()
api.method = 'POST'
api.headers = {}
api.host = params.host
api.uri = '/v1/environments'
api.queryParams = {}
api.data = {'name': params.environment, 'instanceGroup': {'cluster': params.cluster}, 'taskDefinition': params.taskDef}
response = common.call_api(api)
print "HTTP Response Code: %d" % response.status
try:
obj = json.loads(response.body)
print json.dumps(obj, indent=2)
except Exception as e:
print "Error: Could not parse response - %s" % e
print response.body
sys.exit(1)
if __name__ == "__main__":
main(sys.argv[1:])
| apache-2.0 | 5,035,756,915,522,746,000 | 39.753623 | 314 | 0.687411 | false |
lhfei/spark-in-action | spark-2.x/src/main/python/mllib/correlations.py | 1 | 2149 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Correlations using MLlib.
"""
from __future__ import print_function
import sys
from pyspark import SparkContext
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.stat import Statistics
from pyspark.mllib.util import MLUtils
if __name__ == "__main__":
if len(sys.argv) not in [1, 2]:
print("Usage: correlations (<file>)", file=sys.stderr)
sys.exit(-1)
sc = SparkContext(appName="PythonCorrelations")
if len(sys.argv) == 2:
filepath = sys.argv[1]
else:
filepath = 'data/mllib/sample_linear_regression_data.txt'
corrType = 'pearson'
points = MLUtils.loadLibSVMFile(sc, filepath)\
.map(lambda lp: LabeledPoint(lp.label, lp.features.toArray()))
print()
print('Summary of data file: ' + filepath)
print('%d data points' % points.count())
# Statistics (correlations)
print()
print('Correlation (%s) between label and each feature' % corrType)
print('Feature\tCorrelation')
numFeatures = points.take(1)[0].features.size
labelRDD = points.map(lambda lp: lp.label)
for i in range(numFeatures):
featureRDD = points.map(lambda lp: lp.features[i])
corr = Statistics.corr(labelRDD, featureRDD, corrType)
print('%d\t%g' % (i, corr))
print()
sc.stop()
| apache-2.0 | 2,444,104,559,011,618,300 | 33.229508 | 74 | 0.679851 | false |
Cito/sqlalchemy | test/orm/test_unitofwork.py | 1 | 83902 | # coding: utf-8
"""Tests unitofwork operations."""
from sqlalchemy.testing import eq_, assert_raises, assert_raises_message
import datetime
from sqlalchemy.orm import mapper as orm_mapper
import sqlalchemy as sa
from sqlalchemy.util import u, ue, b
from sqlalchemy import Integer, String, ForeignKey, literal_column, event
from sqlalchemy.testing import engines
from sqlalchemy import testing
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.schema import Column
from sqlalchemy.orm import mapper, relationship, create_session, \
column_property, Session, exc as orm_exc
from sqlalchemy.testing import fixtures
from test.orm import _fixtures
from sqlalchemy.testing.assertsql import AllOf, CompiledSQL
class UnitOfWorkTest(object):
pass
class HistoryTest(_fixtures.FixtureTest):
run_inserts = None
@classmethod
def setup_classes(cls):
class User(cls.Comparable):
pass
class Address(cls.Comparable):
pass
def test_backref(self):
Address, addresses, users, User = (self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User)
am = mapper(Address, addresses)
m = mapper(User, users, properties=dict(
addresses = relationship(am, backref='user', lazy='joined')))
session = create_session(autocommit=False)
u = User(name='u1')
a = Address(email_address='u1@e')
a.user = u
session.add(u)
eq_(u.addresses, [a])
session.commit()
session.expunge_all()
u = session.query(m).one()
assert u.addresses[0].user == u
session.close()
class UnicodeTest(fixtures.MappedTest):
__requires__ = ('unicode_connections',)
@classmethod
def define_tables(cls, metadata):
if testing.against('mysql+oursql'):
from sqlalchemy.dialects.mysql import VARCHAR
uni_type = VARCHAR(50, collation='utf8_unicode_ci')
else:
uni_type = sa.Unicode(50)
Table('uni_t1', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('txt', uni_type, unique=True))
Table('uni_t2', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('txt', uni_type, ForeignKey('uni_t1')))
@classmethod
def setup_classes(cls):
class Test(cls.Basic):
pass
class Test2(cls.Basic):
pass
def test_basic(self):
Test, uni_t1 = self.classes.Test, self.tables.uni_t1
mapper(Test, uni_t1)
txt = ue("\u0160\u0110\u0106\u010c\u017d")
t1 = Test(id=1, txt=txt)
self.assert_(t1.txt == txt)
session = create_session(autocommit=False)
session.add(t1)
session.commit()
self.assert_(t1.txt == txt)
def test_relationship(self):
Test, uni_t2, uni_t1, Test2 = (self.classes.Test,
self.tables.uni_t2,
self.tables.uni_t1,
self.classes.Test2)
mapper(Test, uni_t1, properties={
't2s': relationship(Test2)})
mapper(Test2, uni_t2)
txt = ue("\u0160\u0110\u0106\u010c\u017d")
t1 = Test(txt=txt)
t1.t2s.append(Test2())
t1.t2s.append(Test2())
session = create_session(autocommit=False)
session.add(t1)
session.commit()
session.close()
session = create_session()
t1 = session.query(Test).filter_by(id=t1.id).one()
assert len(t1.t2s) == 2
class UnicodeSchemaTest(fixtures.MappedTest):
__requires__ = ('unicode_connections', 'unicode_ddl',)
run_dispose_bind = 'once'
@classmethod
def create_engine(cls):
return engines.utf8_engine()
@classmethod
def define_tables(cls, metadata):
t1 = Table('unitable1', metadata,
Column(u('méil'), Integer, primary_key=True, key='a', test_needs_autoincrement=True),
Column(ue('\u6e2c\u8a66'), Integer, key='b'),
Column('type', String(20)),
test_needs_fk=True,
test_needs_autoincrement=True)
t2 = Table(u('Unitéble2'), metadata,
Column(u('méil'), Integer, primary_key=True, key="cc", test_needs_autoincrement=True),
Column(ue('\u6e2c\u8a66'), Integer,
ForeignKey('unitable1.a'), key="d"),
Column(ue('\u6e2c\u8a66_2'), Integer, key="e"),
test_needs_fk=True,
test_needs_autoincrement=True)
cls.tables['t1'] = t1
cls.tables['t2'] = t2
@classmethod
def setup_class(cls):
super(UnicodeSchemaTest, cls).setup_class()
@classmethod
def teardown_class(cls):
super(UnicodeSchemaTest, cls).teardown_class()
@testing.fails_on('mssql+pyodbc',
'pyodbc returns a non unicode encoding of the results description.')
def test_mapping(self):
t2, t1 = self.tables.t2, self.tables.t1
class A(fixtures.ComparableEntity):
pass
class B(fixtures.ComparableEntity):
pass
mapper(A, t1, properties={
't2s':relationship(B)})
mapper(B, t2)
a1 = A()
b1 = B()
a1.t2s.append(b1)
session = create_session()
session.add(a1)
session.flush()
session.expunge_all()
new_a1 = session.query(A).filter(t1.c.a == a1.a).one()
assert new_a1.a == a1.a
assert new_a1.t2s[0].d == b1.d
session.expunge_all()
new_a1 = (session.query(A).options(sa.orm.joinedload('t2s')).
filter(t1.c.a == a1.a)).one()
assert new_a1.a == a1.a
assert new_a1.t2s[0].d == b1.d
session.expunge_all()
new_a1 = session.query(A).filter(A.a == a1.a).one()
assert new_a1.a == a1.a
assert new_a1.t2s[0].d == b1.d
session.expunge_all()
@testing.fails_on('mssql+pyodbc',
'pyodbc returns a non unicode encoding of the results description.')
def test_inheritance_mapping(self):
t2, t1 = self.tables.t2, self.tables.t1
class A(fixtures.ComparableEntity):
pass
class B(A):
pass
mapper(A, t1,
polymorphic_on=t1.c.type,
polymorphic_identity='a')
mapper(B, t2,
inherits=A,
polymorphic_identity='b')
a1 = A(b=5)
b1 = B(e=7)
session = create_session()
session.add_all((a1, b1))
session.flush()
session.expunge_all()
eq_([A(b=5), B(e=7)], session.query(A).all())
class BinaryHistTest(fixtures.MappedTest, testing.AssertsExecutionResults):
@classmethod
def define_tables(cls, metadata):
Table('t1', metadata,
Column('id', sa.Integer, primary_key=True, test_needs_autoincrement=True),
Column('data', sa.LargeBinary),
)
@classmethod
def setup_classes(cls):
class Foo(cls.Basic):
pass
def test_binary_equality(self):
Foo, t1 = self.classes.Foo, self.tables.t1
data = b("this is some data")
mapper(Foo, t1)
s = create_session()
f1 = Foo(data=data)
s.add(f1)
s.flush()
s.expire_all()
f1 = s.query(Foo).first()
assert f1.data == data
f1.data = data
eq_(
sa.orm.attributes.get_history(f1, "data"),
((), [data], ())
)
def go():
s.flush()
self.assert_sql_count(testing.db, go, 0)
class PKTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('multipk1', metadata,
Column('multi_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('multi_rev', Integer, primary_key=True),
Column('name', String(50), nullable=False),
Column('value', String(100)))
Table('multipk2', metadata,
Column('pk_col_1', String(30), primary_key=True),
Column('pk_col_2', String(30), primary_key=True),
Column('data', String(30)))
Table('multipk3', metadata,
Column('pri_code', String(30), key='primary', primary_key=True),
Column('sec_code', String(30), key='secondary', primary_key=True),
Column('date_assigned', sa.Date, key='assigned', primary_key=True),
Column('data', String(30)))
@classmethod
def setup_classes(cls):
class Entry(cls.Basic):
pass
# not supported on sqlite since sqlite's auto-pk generation only works with
# single column primary keys
@testing.fails_on('sqlite', 'FIXME: unknown')
def test_primary_key(self):
Entry, multipk1 = self.classes.Entry, self.tables.multipk1
mapper(Entry, multipk1)
e = Entry(name='entry1', value='this is entry 1', multi_rev=2)
session = create_session()
session.add(e)
session.flush()
session.expunge_all()
e2 = session.query(Entry).get((e.multi_id, 2))
self.assert_(e is not e2)
state = sa.orm.attributes.instance_state(e)
state2 = sa.orm.attributes.instance_state(e2)
eq_(state.key, state2.key)
# this one works with sqlite since we are manually setting up pk values
def test_manual_pk(self):
Entry, multipk2 = self.classes.Entry, self.tables.multipk2
mapper(Entry, multipk2)
e = Entry(pk_col_1='pk1', pk_col_2='pk1_related', data='im the data')
session = create_session()
session.add(e)
session.flush()
def test_key_pks(self):
Entry, multipk3 = self.classes.Entry, self.tables.multipk3
mapper(Entry, multipk3)
e = Entry(primary= 'pk1', secondary='pk2',
assigned=datetime.date.today(), data='some more data')
session = create_session()
session.add(e)
session.flush()
class ForeignPKTest(fixtures.MappedTest):
"""Detection of the relationship direction on PK joins."""
@classmethod
def define_tables(cls, metadata):
Table("people", metadata,
Column('person', String(10), primary_key=True),
Column('firstname', String(10)),
Column('lastname', String(10)))
Table("peoplesites", metadata,
Column('person', String(10), ForeignKey("people.person"),
primary_key=True),
Column('site', String(10)))
@classmethod
def setup_classes(cls):
class Person(cls.Basic):
pass
class PersonSite(cls.Basic):
pass
def test_basic(self):
peoplesites, PersonSite, Person, people = (self.tables.peoplesites,
self.classes.PersonSite,
self.classes.Person,
self.tables.people)
m1 = mapper(PersonSite, peoplesites)
m2 = mapper(Person, people, properties={
'sites' : relationship(PersonSite)})
sa.orm.configure_mappers()
eq_(list(m2.get_property('sites').synchronize_pairs),
[(people.c.person, peoplesites.c.person)])
p = Person(person='im the key', firstname='asdf')
ps = PersonSite(site='asdf')
p.sites.append(ps)
session = create_session()
session.add(p)
session.flush()
p_count = people.count(people.c.person=='im the key').scalar()
eq_(p_count, 1)
eq_(peoplesites.count(peoplesites.c.person=='im the key').scalar(), 1)
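# The tests below exercise assignment of SQL expressions to mapped attributes:
# when an instance attribute is set to a ClauseElement such as User.counter + 1,
# the flush renders that expression directly in the UPDATE statement and the
# attribute is expired, so the next access re-fetches the database-computed value.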
class ClauseAttributesTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('users_t', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('name', String(30)),
Column('counter', Integer, default=1))
@classmethod
def setup_classes(cls):
class User(cls.Comparable):
pass
@classmethod
def setup_mappers(cls):
User, users_t = cls.classes.User, cls.tables.users_t
mapper(User, users_t)
def test_update(self):
User = self.classes.User
u = User(name='test')
session = create_session()
session.add(u)
session.flush()
eq_(u.counter, 1)
u.counter = User.counter + 1
session.flush()
def go():
assert (u.counter == 2) is True # ensure its not a ClauseElement
self.sql_count_(1, go)
def test_multi_update(self):
User = self.classes.User
u = User(name='test')
session = create_session()
session.add(u)
session.flush()
eq_(u.counter, 1)
u.name = 'test2'
u.counter = User.counter + 1
session.flush()
def go():
eq_(u.name, 'test2')
assert (u.counter == 2) is True
self.sql_count_(1, go)
session.expunge_all()
u = session.query(User).get(u.id)
eq_(u.name, 'test2')
eq_(u.counter, 2)
def test_insert(self):
User = self.classes.User
u = User(name='test', counter=sa.select([5]))
session = create_session()
session.add(u)
session.flush()
assert (u.counter == 5) is True
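# passive_deletes=True tells the ORM not to load the child collection when the
# parent is deleted; the ON DELETE CASCADE on the foreign key declared below is
# relied upon to remove the child rows at the database level.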
class PassiveDeletesTest(fixtures.MappedTest):
__requires__ = ('foreign_keys',)
@classmethod
def define_tables(cls, metadata):
Table('mytable', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('data', String(30)),
test_needs_fk=True)
Table('myothertable', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('parent_id', Integer),
Column('data', String(30)),
sa.ForeignKeyConstraint(['parent_id'],
['mytable.id'],
ondelete="CASCADE"),
test_needs_fk=True)
@classmethod
def setup_classes(cls):
class MyClass(cls.Basic):
pass
class MyOtherClass(cls.Basic):
pass
def test_basic(self):
myothertable, MyClass, MyOtherClass, mytable = (self.tables.myothertable,
self.classes.MyClass,
self.classes.MyOtherClass,
self.tables.mytable)
mapper(MyOtherClass, myothertable)
mapper(MyClass, mytable, properties={
'children':relationship(MyOtherClass,
passive_deletes=True,
cascade="all")})
session = create_session()
mc = MyClass()
mc.children.append(MyOtherClass())
mc.children.append(MyOtherClass())
mc.children.append(MyOtherClass())
mc.children.append(MyOtherClass())
session.add(mc)
session.flush()
session.expunge_all()
assert myothertable.count().scalar() == 4
mc = session.query(MyClass).get(mc.id)
session.delete(mc)
session.flush()
assert mytable.count().scalar() == 0
assert myothertable.count().scalar() == 0
@testing.emits_warning(r".*'passive_deletes' is normally configured on one-to-many")
def test_backwards_pd(self):
"""Test that passive_deletes=True disables a delete from an m2o.
This is not the usual usage and it now raises a warning, but test
that it works nonetheless.
"""
myothertable, MyClass, MyOtherClass, mytable = (self.tables.myothertable,
self.classes.MyClass,
self.classes.MyOtherClass,
self.tables.mytable)
mapper(MyOtherClass, myothertable, properties={
'myclass':relationship(MyClass, cascade="all, delete", passive_deletes=True)
})
mapper(MyClass, mytable)
session = create_session()
mc = MyClass()
mco = MyOtherClass()
mco.myclass = mc
session.add(mco)
session.flush()
assert mytable.count().scalar() == 1
assert myothertable.count().scalar() == 1
session.expire(mco, ['myclass'])
session.delete(mco)
session.flush()
# mytable wasn't deleted, is the point.
assert mytable.count().scalar() == 1
assert myothertable.count().scalar() == 0
def test_aaa_m2o_emits_warning(self):
myothertable, MyClass, MyOtherClass, mytable = (self.tables.myothertable,
self.classes.MyClass,
self.classes.MyOtherClass,
self.tables.mytable)
mapper(MyOtherClass, myothertable, properties={
'myclass':relationship(MyClass, cascade="all, delete", passive_deletes=True)
})
mapper(MyClass, mytable)
assert_raises(sa.exc.SAWarning, sa.orm.configure_mappers)
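# Here the parent and child rows live in one table with ON DELETE CASCADE, so
# deleting the parent also removes the child at the database level. The test
# verifies that the ORM does not complain about the "missing" matched-row count
# when both deletes are emitted together in a single batched DELETE.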
class BatchDeleteIgnoresRowcountTest(fixtures.DeclarativeMappedTest):
__requires__ = ('foreign_keys',)
@classmethod
def setup_classes(cls):
class A(cls.DeclarativeBasic):
__tablename__ = 'A'
__table_args__ = dict(test_needs_fk=True)
id = Column(Integer, primary_key=True)
parent_id = Column(Integer, ForeignKey('A.id', ondelete='CASCADE'))
def test_delete_both(self):
A = self.classes.A
session = Session(testing.db)
a1, a2 = A(id=1),A(id=2, parent_id=1)
session.add_all([a1, a2])
session.flush()
session.delete(a1)
session.delete(a2)
# no issue with multi-row count here
session.flush()
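# passive_deletes='all' goes further than True: the ORM never loads the child
# rows and never nulls out or cascades their foreign keys on parent delete.
# Without a database-level cascade (note: no ON DELETE clause below), deleting
# a parent that still has children is expected to fail with an integrity error.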
class ExtraPassiveDeletesTest(fixtures.MappedTest):
__requires__ = ('foreign_keys',)
@classmethod
def define_tables(cls, metadata):
Table('mytable', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('data', String(30)),
test_needs_fk=True)
Table('myothertable', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('parent_id', Integer),
Column('data', String(30)),
# no CASCADE, the same as ON DELETE RESTRICT
sa.ForeignKeyConstraint(['parent_id'],
['mytable.id']),
test_needs_fk=True)
@classmethod
def setup_classes(cls):
class MyClass(cls.Basic):
pass
class MyOtherClass(cls.Basic):
pass
def test_assertions(self):
myothertable, MyOtherClass = self.tables.myothertable, self.classes.MyOtherClass
mytable, MyClass = self.tables.mytable, self.classes.MyClass
mapper(MyClass, mytable, properties={
'foo': relationship(MyOtherClass,
passive_deletes='all',
cascade="all")
})
mapper(MyOtherClass, myothertable)
assert_raises_message(
sa.exc.ArgumentError,
"On MyClass.foo, can't set passive_deletes='all' in conjunction with 'delete' "
"or 'delete-orphan' cascade",
sa.orm.configure_mappers
)
def test_extra_passive(self):
myothertable, MyClass, MyOtherClass, mytable = (
self.tables.myothertable,
self.classes.MyClass,
self.classes.MyOtherClass,
self.tables.mytable)
mapper(MyOtherClass, myothertable)
mapper(MyClass, mytable, properties={
'children': relationship(MyOtherClass,
passive_deletes='all',
cascade="save-update")})
session = create_session()
mc = MyClass()
mc.children.append(MyOtherClass())
mc.children.append(MyOtherClass())
mc.children.append(MyOtherClass())
mc.children.append(MyOtherClass())
session.add(mc)
session.flush()
session.expunge_all()
assert myothertable.count().scalar() == 4
mc = session.query(MyClass).get(mc.id)
session.delete(mc)
assert_raises(sa.exc.DBAPIError, session.flush)
def test_extra_passive_2(self):
myothertable, MyClass, MyOtherClass, mytable = (self.tables.myothertable,
self.classes.MyClass,
self.classes.MyOtherClass,
self.tables.mytable)
mapper(MyOtherClass, myothertable)
mapper(MyClass, mytable, properties={
'children': relationship(MyOtherClass,
passive_deletes='all',
cascade="save-update")})
session = create_session()
mc = MyClass()
mc.children.append(MyOtherClass())
session.add(mc)
session.flush()
session.expunge_all()
assert myothertable.count().scalar() == 1
mc = session.query(MyClass).get(mc.id)
session.delete(mc)
mc.children[0].data = 'some new data'
assert_raises(sa.exc.DBAPIError, session.flush)
def test_dont_emit(self):
myothertable, MyClass, MyOtherClass, mytable = (self.tables.myothertable,
self.classes.MyClass,
self.classes.MyOtherClass,
self.tables.mytable)
mapper(MyOtherClass, myothertable)
mapper(MyClass, mytable, properties={
'children': relationship(MyOtherClass,
passive_deletes='all',
cascade="save-update")})
session = Session()
mc = MyClass()
session.add(mc)
session.commit()
mc.id
session.delete(mc)
# no load for "children" should occur
self.assert_sql_count(testing.db, session.flush, 1)
class ColumnCollisionTest(fixtures.MappedTest):
"""Ensure the mapper doesn't break bind param naming rules on flush."""
@classmethod
def define_tables(cls, metadata):
Table('book', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('book_id', String(50)),
Column('title', String(50))
)
def test_naming(self):
book = self.tables.book
class Book(fixtures.ComparableEntity):
pass
mapper(Book, book)
sess = create_session()
b1 = Book(book_id='abc', title='def')
sess.add(b1)
sess.flush()
b1.title = 'ghi'
sess.flush()
sess.close()
eq_(
sess.query(Book).first(),
Book(book_id='abc', title='ghi')
)
class DefaultTest(fixtures.MappedTest):
"""Exercise mappings on columns with DefaultGenerators.
Tests that when saving objects whose table contains DefaultGenerators,
either Python-side, pre-exec or database-side, the newly saved instances
receive all the default values, either through a post-fetch or by getting the
pre-exec'ed defaults back from the engine.
"""
@classmethod
def define_tables(cls, metadata):
use_string_defaults = testing.against('postgresql', 'oracle', 'sqlite', 'mssql')
if use_string_defaults:
hohotype = String(30)
hohoval = "im hoho"
althohoval = "im different hoho"
else:
hohotype = Integer
hohoval = 9
althohoval = 15
cls.other['hohoval'] = hohoval
cls.other['althohoval'] = althohoval
dt = Table('default_t', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('hoho', hohotype, server_default=str(hohoval)),
Column('counter', Integer, default=sa.func.char_length("1234567", type_=Integer)),
Column('foober', String(30), default="im foober", onupdate="im the update"),
mysql_engine='MyISAM')
st = Table('secondary_table', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('data', String(50)),
mysql_engine='MyISAM')
if testing.against('postgresql', 'oracle'):
dt.append_column(
Column('secondary_id', Integer, sa.Sequence('sec_id_seq'),
unique=True))
st.append_column(
Column('fk_val', Integer,
ForeignKey('default_t.secondary_id')))
elif testing.against('mssql'):
st.append_column(
Column('fk_val', Integer,
ForeignKey('default_t.id')))
else:
st.append_column(
Column('hoho', hohotype, ForeignKey('default_t.hoho')))
@classmethod
def setup_classes(cls):
class Hoho(cls.Comparable):
pass
class Secondary(cls.Comparable):
pass
@testing.fails_on('firebird', 'Data type unknown on the parameter')
def test_insert(self):
althohoval, hohoval, default_t, Hoho = (self.other.althohoval,
self.other.hohoval,
self.tables.default_t,
self.classes.Hoho)
mapper(Hoho, default_t)
h1 = Hoho(hoho=althohoval)
h2 = Hoho(counter=12)
h3 = Hoho(hoho=althohoval, counter=12)
h4 = Hoho()
h5 = Hoho(foober='im the new foober')
session = create_session(autocommit=False)
session.add_all((h1, h2, h3, h4, h5))
session.commit()
eq_(h1.hoho, althohoval)
eq_(h3.hoho, althohoval)
def go():
# test deferred load of attributes, one select per instance
self.assert_(h2.hoho == h4.hoho == h5.hoho == hohoval)
self.sql_count_(3, go)
def go():
self.assert_(h1.counter == h4.counter == h5.counter == 7)
self.sql_count_(1, go)
def go():
self.assert_(h3.counter == h2.counter == 12)
self.assert_(h2.foober == h3.foober == h4.foober == 'im foober')
self.assert_(h5.foober == 'im the new foober')
self.sql_count_(0, go)
session.expunge_all()
(h1, h2, h3, h4, h5) = session.query(Hoho).order_by(Hoho.id).all()
eq_(h1.hoho, althohoval)
eq_(h3.hoho, althohoval)
self.assert_(h2.hoho == h4.hoho == h5.hoho == hohoval)
self.assert_(h3.counter == h2.counter == 12)
self.assert_(h1.counter == h4.counter == h5.counter == 7)
self.assert_(h2.foober == h3.foober == h4.foober == 'im foober')
eq_(h5.foober, 'im the new foober')
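# eager_defaults=True makes the mapper fetch server-generated defaults at flush
# time, using inline RETURNING when the dialect supports implicit returning and
# an immediate extra SELECT otherwise, instead of leaving the attributes expired
# until first access.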
@testing.fails_on('firebird', 'Data type unknown on the parameter')
@testing.fails_on("oracle+cx_oracle", "seems like a cx_oracle bug")
def test_eager_defaults(self):
hohoval, default_t, Hoho = (self.other.hohoval,
self.tables.default_t,
self.classes.Hoho)
Secondary = self.classes.Secondary
mapper(Hoho, default_t, eager_defaults=True, properties={
"sec": relationship(Secondary),
"syn": sa.orm.synonym(default_t.c.counter)
})
mapper(Secondary, self.tables.secondary_table)
h1 = Hoho()
session = create_session()
session.add(h1)
if testing.db.dialect.implicit_returning:
self.sql_count_(1, session.flush)
else:
self.sql_count_(2, session.flush)
self.sql_count_(0, lambda: eq_(h1.hoho, hohoval))
# no actual eager defaults, make sure error isn't raised
h2 = Hoho(hoho=hohoval, counter=5)
session.add(h2)
session.flush()
eq_(h2.hoho, hohoval)
eq_(h2.counter, 5)
def test_insert_nopostfetch(self):
default_t, Hoho = self.tables.default_t, self.classes.Hoho
# populates the columns with defaults explicitly so there is no
# "post-update"
mapper(Hoho, default_t)
h1 = Hoho(hoho="15", counter=15)
session = create_session()
session.add(h1)
session.flush()
def go():
eq_(h1.hoho, "15")
eq_(h1.counter, 15)
eq_(h1.foober, "im foober")
self.sql_count_(0, go)
@testing.fails_on('firebird', 'Data type unknown on the parameter')
def test_update(self):
default_t, Hoho = self.tables.default_t, self.classes.Hoho
mapper(Hoho, default_t)
h1 = Hoho()
session = create_session()
session.add(h1)
session.flush()
eq_(h1.foober, 'im foober')
h1.counter = 19
session.flush()
eq_(h1.foober, 'im the update')
@testing.fails_on('firebird', 'Data type unknown on the parameter')
def test_used_in_relationship(self):
"""A server-side default can be used as the target of a foreign key"""
Hoho, hohoval, default_t, secondary_table, Secondary = (self.classes.Hoho,
self.other.hohoval,
self.tables.default_t,
self.tables.secondary_table,
self.classes.Secondary)
mapper(Hoho, default_t, properties={
'secondaries':relationship(Secondary, order_by=secondary_table.c.id)})
mapper(Secondary, secondary_table)
h1 = Hoho()
s1 = Secondary(data='s1')
h1.secondaries.append(s1)
session = create_session()
session.add(h1)
session.flush()
session.expunge_all()
eq_(session.query(Hoho).get(h1.id),
Hoho(hoho=hohoval,
secondaries=[
Secondary(data='s1')]))
h1 = session.query(Hoho).get(h1.id)
h1.secondaries.append(Secondary(data='s2'))
session.flush()
session.expunge_all()
eq_(session.query(Hoho).get(h1.id),
Hoho(hoho=hohoval,
secondaries=[
Secondary(data='s1'),
Secondary(data='s2')]))
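# A SQL-expression column_property (here data.c.a + ' ' + data.c.b) is expired
# on each flush by default so that it reflects the newly persisted values;
# passing expire_on_flush=False keeps the already-loaded value instead.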
class ColumnPropertyTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('data', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('a', String(50)),
Column('b', String(50))
)
Table('subdata', metadata,
Column('id', Integer, ForeignKey('data.id'), primary_key=True),
Column('c', String(50)),
)
@classmethod
def setup_mappers(cls):
class Data(cls.Basic):
pass
def test_refreshes(self):
Data, data = self.classes.Data, self.tables.data
mapper(Data, data, properties={
'aplusb':column_property(data.c.a + literal_column("' '") + data.c.b)
})
self._test(True)
def test_no_refresh(self):
Data, data = self.classes.Data, self.tables.data
mapper(Data, data, properties={
'aplusb':column_property(data.c.a + literal_column("' '") + data.c.b,
expire_on_flush=False)
})
self._test(False)
def test_refreshes_post_init(self):
Data, data = self.classes.Data, self.tables.data
m = mapper(Data, data)
m.add_property('aplusb', column_property(data.c.a + literal_column("' '") + data.c.b))
self._test(True)
def test_with_inheritance(self):
subdata, data, Data = (self.tables.subdata,
self.tables.data,
self.classes.Data)
class SubData(Data):
pass
mapper(Data, data, properties={
'aplusb':column_property(data.c.a + literal_column("' '") + data.c.b)
})
mapper(SubData, subdata, inherits=Data)
sess = create_session()
sd1 = SubData(a="hello", b="there", c="hi")
sess.add(sd1)
sess.flush()
eq_(sd1.aplusb, "hello there")
def _test(self, expect_expiry):
Data = self.classes.Data
sess = create_session()
d1 = Data(a="hello", b="there")
sess.add(d1)
sess.flush()
eq_(d1.aplusb, "hello there")
d1.b = "bye"
sess.flush()
if expect_expiry:
eq_(d1.aplusb, "hello bye")
else:
eq_(d1.aplusb, "hello there")
d1.b = 'foobar'
d1.aplusb = 'im setting this explicitly'
sess.flush()
eq_(d1.aplusb, "im setting this explicitly")
class OneToManyTest(_fixtures.FixtureTest):
run_inserts = None
def test_one_to_many_1(self):
"""Basic save of one to many."""
Address, addresses, users, User = (self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User)
m = mapper(User, users, properties=dict(
addresses = relationship(mapper(Address, addresses), lazy='select')
))
u = User(name= 'one2manytester')
a = Address(email_address='[email protected]')
u.addresses.append(a)
a2 = Address(email_address='[email protected]')
u.addresses.append(a2)
session = create_session()
session.add(u)
session.flush()
user_rows = users.select(users.c.id.in_([u.id])).execute().fetchall()
eq_(list(user_rows[0].values()), [u.id, 'one2manytester'])
address_rows = addresses.select(
addresses.c.id.in_([a.id, a2.id]),
order_by=[addresses.c.email_address]).execute().fetchall()
eq_(list(address_rows[0].values()), [a2.id, u.id, '[email protected]'])
eq_(list(address_rows[1].values()), [a.id, u.id, '[email protected]'])
userid = u.id
addressid = a2.id
a2.email_address = '[email protected]'
session.flush()
address_rows = addresses.select(
addresses.c.id == addressid).execute().fetchall()
eq_(list(address_rows[0].values()),
[addressid, userid, '[email protected]'])
self.assert_(u.id == userid and a2.id == addressid)
def test_one_to_many_2(self):
"""Modifying the child items of an object."""
Address, addresses, users, User = (self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User)
m = mapper(User, users, properties=dict(
addresses = relationship(mapper(Address, addresses), lazy='select')))
u1 = User(name='user1')
u1.addresses = []
a1 = Address(email_address='emailaddress1')
u1.addresses.append(a1)
u2 = User(name='user2')
u2.addresses = []
a2 = Address(email_address='emailaddress2')
u2.addresses.append(a2)
a3 = Address(email_address='emailaddress3')
session = create_session()
session.add_all((u1, u2, a3))
session.flush()
# modify user2 directly, append an address to user1.
# upon flush, user2 should be updated and user1 should not;
# both address1 and address3 should be updated
u2.name = 'user2modified'
u1.addresses.append(a3)
del u1.addresses[0]
self.assert_sql(testing.db, session.flush, [
("UPDATE users SET name=:name "
"WHERE users.id = :users_id",
{'users_id': u2.id, 'name': 'user2modified'}),
("UPDATE addresses SET user_id=:user_id "
"WHERE addresses.id = :addresses_id",
{'user_id': None, 'addresses_id': a1.id}),
("UPDATE addresses SET user_id=:user_id "
"WHERE addresses.id = :addresses_id",
{'user_id': u1.id, 'addresses_id': a3.id})])
def test_child_move(self):
"""Moving a child from one parent to another, with a delete.
Tests that deleting the first parent properly updates the child with
the new parent. This tests the 'trackparent' option in the attributes
module.
"""
Address, addresses, users, User = (self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User)
m = mapper(User, users, properties=dict(
addresses = relationship(mapper(Address, addresses), lazy='select')))
u1 = User(name='user1')
u2 = User(name='user2')
a = Address(email_address='address1')
u1.addresses.append(a)
session = create_session()
session.add_all((u1, u2))
session.flush()
del u1.addresses[0]
u2.addresses.append(a)
session.delete(u1)
session.flush()
session.expunge_all()
u2 = session.query(User).get(u2.id)
eq_(len(u2.addresses), 1)
def test_child_move_2(self):
Address, addresses, users, User = (self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User)
m = mapper(User, users, properties=dict(
addresses = relationship(mapper(Address, addresses), lazy='select')))
u1 = User(name='user1')
u2 = User(name='user2')
a = Address(email_address='address1')
u1.addresses.append(a)
session = create_session()
session.add_all((u1, u2))
session.flush()
del u1.addresses[0]
u2.addresses.append(a)
session.flush()
session.expunge_all()
u2 = session.query(User).get(u2.id)
eq_(len(u2.addresses), 1)
def test_o2m_delete_parent(self):
Address, addresses, users, User = (self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User)
m = mapper(User, users, properties=dict(
address = relationship(mapper(Address, addresses),
lazy='select',
uselist=False)))
u = User(name='one2onetester')
a = Address(email_address='[email protected]')
u.address = a
session = create_session()
session.add(u)
session.flush()
session.delete(u)
session.flush()
assert a.id is not None
assert a.user_id is None
assert sa.orm.attributes.instance_state(a).key in session.identity_map
assert sa.orm.attributes.instance_state(u).key not in session.identity_map
def test_one_to_one(self):
Address, addresses, users, User = (self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User)
m = mapper(User, users, properties=dict(
address = relationship(mapper(Address, addresses),
lazy='select',
uselist=False)))
u = User(name='one2onetester')
u.address = Address(email_address='[email protected]')
session = create_session()
session.add(u)
session.flush()
u.name = 'imnew'
session.flush()
u.address.email_address = '[email protected]'
session.flush()
def test_bidirectional(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
m1 = mapper(User, users)
m2 = mapper(Address, addresses, properties=dict(
user = relationship(m1, lazy='joined', backref='addresses')))
u = User(name='test')
a = Address(email_address='testaddress', user=u)
session = create_session()
session.add(u)
session.flush()
session.delete(u)
session.flush()
def test_double_relationship(self):
Address, addresses, users, User = (self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User)
m2 = mapper(Address, addresses)
m = mapper(User, users, properties={
'boston_addresses' : relationship(m2, primaryjoin=
sa.and_(users.c.id==addresses.c.user_id,
addresses.c.email_address.like('%boston%'))),
'newyork_addresses' : relationship(m2, primaryjoin=
sa.and_(users.c.id==addresses.c.user_id,
addresses.c.email_address.like('%newyork%')))})
u = User(name='u1')
a = Address(email_address='[email protected]')
b = Address(email_address='[email protected]')
u.boston_addresses.append(a)
u.newyork_addresses.append(b)
session = create_session()
session.add(u)
session.flush()
class SaveTest(_fixtures.FixtureTest):
run_inserts = None
def test_basic(self):
User, users = self.classes.User, self.tables.users
m = mapper(User, users)
# save two users
u = User(name='savetester')
u2 = User(name='savetester2')
session = create_session()
session.add_all((u, u2))
session.flush()
# assert the first one retrieves the same object from the identity map
nu = session.query(m).get(u.id)
assert u is nu
# clear out the identity map, so next get forces a SELECT
session.expunge_all()
# check it again, identity should be different but ids the same
nu = session.query(m).get(u.id)
assert u is not nu and u.id == nu.id and nu.name == 'savetester'
# change the first user's name and save
session = create_session()
session.add(u)
u.name = 'modifiedname'
assert u in session.dirty
session.flush()
# select both
userlist = session.query(User).filter(
users.c.id.in_([u.id, u2.id])).order_by(users.c.name).all()
eq_(u.id, userlist[0].id)
eq_(userlist[0].name, 'modifiedname')
eq_(u2.id, userlist[1].id)
eq_(userlist[1].name, 'savetester2')
def test_synonym(self):
users = self.tables.users
class SUser(fixtures.BasicEntity):
def _get_name(self):
return "User:" + self.name
def _set_name(self, name):
self.name = name + ":User"
syn_name = property(_get_name, _set_name)
mapper(SUser, users, properties={
'syn_name': sa.orm.synonym('name')
})
u = SUser(syn_name="some name")
eq_(u.syn_name, 'User:some name:User')
session = create_session()
session.add(u)
session.flush()
session.expunge_all()
u = session.query(SUser).first()
eq_(u.syn_name, 'User:some name:User')
def test_lazyattr_commit(self):
"""Lazily loaded relationships.
Test that when a lazy-loaded list is unloaded and a commit occurs, the
'passive' call on that list does not blow away its value.
"""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties = {
'addresses': relationship(mapper(Address, addresses))})
u = User(name='u1')
u.addresses.append(Address(email_address='u1@e1'))
u.addresses.append(Address(email_address='u1@e2'))
u.addresses.append(Address(email_address='u1@e3'))
u.addresses.append(Address(email_address='u1@e4'))
session = create_session()
session.add(u)
session.flush()
session.expunge_all()
u = session.query(User).one()
u.name = 'newname'
session.flush()
eq_(len(u.addresses), 4)
def test_inherits(self):
"""a user object that also has the users mailing address."""
users, addresses, User = (self.tables.users,
self.tables.addresses,
self.classes.User)
m1 = mapper(User, users)
class AddressUser(User):
pass
# define a mapper for AddressUser that inherits the User.mapper, and
# joins on the id column
mapper(AddressUser, addresses, inherits=m1)
au = AddressUser(name='u', email_address='u@e')
session = create_session()
session.add(au)
session.flush()
session.expunge_all()
rt = session.query(AddressUser).one()
eq_(au.user_id, rt.user_id)
eq_(rt.id, rt.id)
def test_deferred(self):
"""Deferred column operations"""
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, properties={
'description': sa.orm.deferred(orders.c.description)})
# don't set the deferred attribute; commit the session
o = Order(id=42)
session = create_session(autocommit=False)
session.add(o)
session.commit()
# assert that changes get picked up
o.description = 'foo'
session.commit()
eq_(list(session.execute(orders.select(), mapper=Order)),
[(42, None, None, 'foo', None)])
session.expunge_all()
# assert that a set operation doesn't trigger a load operation
o = session.query(Order).filter(Order.description == 'foo').one()
def go():
o.description = 'hoho'
self.sql_count_(0, go)
session.flush()
eq_(list(session.execute(orders.select(), mapper=Order)),
[(42, None, None, 'hoho', None)])
session.expunge_all()
# test assigning None to an unloaded deferred also works
o = session.query(Order).filter(Order.description == 'hoho').one()
o.description = None
session.flush()
eq_(list(session.execute(orders.select(), mapper=Order)),
[(42, None, None, None, None)])
session.close()
# why no support on oracle? because oracle doesn't save
# "blank" strings; it saves a single space character.
@testing.fails_on('oracle', 'FIXME: unknown')
def test_dont_update_blanks(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
u = User(name='')
session = create_session()
session.add(u)
session.flush()
session.expunge_all()
u = session.query(User).get(u.id)
u.name = ''
self.sql_count_(0, session.flush)
def test_multi_table_selectable(self):
"""Mapped selectables that span tables.
Also tests redefinition of the keynames for the column properties.
"""
addresses, users, User = (self.tables.addresses,
self.tables.users,
self.classes.User)
usersaddresses = sa.join(users, addresses,
users.c.id == addresses.c.user_id)
m = mapper(User, usersaddresses,
properties=dict(
email = addresses.c.email_address,
foo_id = [users.c.id, addresses.c.user_id]))
u = User(name='multitester', email='[email protected]')
session = create_session()
session.add(u)
session.flush()
session.expunge_all()
id = m.primary_key_from_instance(u)
u = session.query(User).get(id)
assert u.name == 'multitester'
user_rows = users.select(users.c.id.in_([u.foo_id])).execute().fetchall()
eq_(list(user_rows[0].values()), [u.foo_id, 'multitester'])
address_rows = addresses.select(addresses.c.id.in_([u.id])).execute().fetchall()
eq_(list(address_rows[0].values()), [u.id, u.foo_id, '[email protected]'])
u.email = '[email protected]'
u.name = 'imnew'
session.flush()
user_rows = users.select(users.c.id.in_([u.foo_id])).execute().fetchall()
eq_(list(user_rows[0].values()), [u.foo_id, 'imnew'])
address_rows = addresses.select(addresses.c.id.in_([u.id])).execute().fetchall()
eq_(list(address_rows[0].values()), [u.id, u.foo_id, '[email protected]'])
session.expunge_all()
u = session.query(User).get(id)
assert u.name == 'imnew'
def test_history_get(self):
"""The history lazy-fetches data when it wasn't otherwise loaded."""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(Address, cascade="all, delete-orphan")})
mapper(Address, addresses)
u = User(name='u1')
u.addresses.append(Address(email_address='u1@e1'))
u.addresses.append(Address(email_address='u1@e2'))
session = create_session()
session.add(u)
session.flush()
session.expunge_all()
u = session.query(User).get(u.id)
session.delete(u)
session.flush()
assert users.count().scalar() == 0
assert addresses.count().scalar() == 0
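# With batch=False the mapper saves each instance completely (before_insert,
# INSERT, after_insert) before moving on to the next one, preserving per-object
# event ordering; the default batching emits all before_insert events for a
# group of objects first, which is what the final assert_raises relies on.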
def test_batch_mode(self):
"""The 'batch=False' flag on mapper()"""
users, User = self.tables.users, self.classes.User
names = []
class Events(object):
def before_insert(self, mapper, connection, instance):
self.current_instance = instance
names.append(instance.name)
def after_insert(self, mapper, connection, instance):
assert instance is self.current_instance
mapper(User, users, batch=False)
evt = Events()
event.listen(User, "before_insert", evt.before_insert)
event.listen(User, "after_insert", evt.after_insert)
u1 = User(name='user1')
u2 = User(name='user2')
session = create_session()
session.add_all((u1, u2))
session.flush()
u3 = User(name='user3')
u4 = User(name='user4')
u5 = User(name='user5')
session.add_all([u4, u5, u3])
session.flush()
# test insert ordering is maintained
assert names == ['user1', 'user2', 'user4', 'user5', 'user3']
session.expunge_all()
sa.orm.clear_mappers()
m = mapper(User, users)
evt = Events()
event.listen(User, "before_insert", evt.before_insert)
event.listen(User, "after_insert", evt.after_insert)
u1 = User(name='user1')
u2 = User(name='user2')
session.add_all((u1, u2))
assert_raises(AssertionError, session.flush)
class ManyToOneTest(_fixtures.FixtureTest):
run_inserts = None
def test_m2o_one_to_one(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
# TODO: put assertion in here !!!
m = mapper(Address, addresses, properties=dict(
user = relationship(mapper(User, users), lazy='select', uselist=False)))
session = create_session()
data = [
{'name': 'thesub' , 'email_address': '[email protected]'},
{'name': 'assdkfj' , 'email_address': '[email protected]'},
{'name': 'n4knd' , 'email_address': '[email protected]'},
{'name': 'v88f4' , 'email_address': '[email protected]'},
{'name': 'asdf8d' , 'email_address': '[email protected]'}
]
objects = []
for elem in data:
a = Address()
a.email_address = elem['email_address']
a.user = User()
a.user.name = elem['name']
objects.append(a)
session.add(a)
session.flush()
objects[2].email_address = '[email protected]'
objects[3].user = User()
objects[3].user.name = 'imnewlyadded'
self.assert_sql_execution(testing.db,
session.flush,
CompiledSQL("INSERT INTO users (name) VALUES (:name)",
{'name': 'imnewlyadded'} ),
AllOf(
CompiledSQL("UPDATE addresses SET email_address=:email_address "
"WHERE addresses.id = :addresses_id",
lambda ctx: {'email_address': '[email protected]',
'addresses_id': objects[2].id}),
CompiledSQL("UPDATE addresses SET user_id=:user_id "
"WHERE addresses.id = :addresses_id",
lambda ctx: {'user_id': objects[3].user.id,
'addresses_id': objects[3].id})
)
)
l = sa.select([users, addresses],
sa.and_(users.c.id==addresses.c.user_id,
addresses.c.id==a.id)).execute()
eq_(list(l.first().values()),
[a.user.id, 'asdf8d', a.id, a.user_id, '[email protected]'])
def test_many_to_one_1(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
m = mapper(Address, addresses, properties=dict(
user = relationship(mapper(User, users), lazy='select')))
a1 = Address(email_address='emailaddress1')
u1 = User(name='user1')
a1.user = u1
session = create_session()
session.add(a1)
session.flush()
session.expunge_all()
a1 = session.query(Address).get(a1.id)
u1 = session.query(User).get(u1.id)
assert a1.user is u1
a1.user = None
session.flush()
session.expunge_all()
a1 = session.query(Address).get(a1.id)
u1 = session.query(User).get(u1.id)
assert a1.user is None
def test_many_to_one_2(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
m = mapper(Address, addresses, properties=dict(
user = relationship(mapper(User, users), lazy='select')))
a1 = Address(email_address='emailaddress1')
a2 = Address(email_address='emailaddress2')
u1 = User(name='user1')
a1.user = u1
session = create_session()
session.add_all((a1, a2))
session.flush()
session.expunge_all()
a1 = session.query(Address).get(a1.id)
a2 = session.query(Address).get(a2.id)
u1 = session.query(User).get(u1.id)
assert a1.user is u1
a1.user = None
a2.user = u1
session.flush()
session.expunge_all()
a1 = session.query(Address).get(a1.id)
a2 = session.query(Address).get(a2.id)
u1 = session.query(User).get(u1.id)
assert a1.user is None
assert a2.user is u1
def test_many_to_one_3(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
m = mapper(Address, addresses, properties=dict(
user = relationship(mapper(User, users), lazy='select')))
a1 = Address(email_address='emailaddress1')
u1 = User(name='user1')
u2 = User(name='user2')
a1.user = u1
session = create_session()
session.add_all((a1, u1, u2))
session.flush()
session.expunge_all()
a1 = session.query(Address).get(a1.id)
u1 = session.query(User).get(u1.id)
u2 = session.query(User).get(u2.id)
assert a1.user is u1
a1.user = u2
session.flush()
session.expunge_all()
a1 = session.query(Address).get(a1.id)
u1 = session.query(User).get(u1.id)
u2 = session.query(User).get(u2.id)
assert a1.user is u2
def test_bidirectional_no_load(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(Address, backref='user', lazy='noload')})
mapper(Address, addresses)
# try it on unsaved objects
u1 = User(name='u1')
a1 = Address(email_address='e1')
a1.user = u1
session = create_session()
session.add(u1)
session.flush()
session.expunge_all()
a1 = session.query(Address).get(a1.id)
a1.user = None
session.flush()
session.expunge_all()
assert session.query(Address).get(a1.id).user is None
assert session.query(User).get(u1.id).addresses == []
class ManyToManyTest(_fixtures.FixtureTest):
run_inserts = None
def test_many_to_many(self):
keywords, items, item_keywords, Keyword, Item = (self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item)
mapper(Keyword, keywords)
m = mapper(Item, items, properties=dict(
keywords=relationship(Keyword,
item_keywords,
lazy='joined',
order_by=keywords.c.name)))
data = [Item,
{'description': 'mm_item1',
'keywords' : (Keyword, [{'name': 'big'},
{'name': 'green'},
{'name': 'purple'},
{'name': 'round'}])},
{'description': 'mm_item2',
'keywords' : (Keyword, [{'name':'blue'},
{'name':'imnew'},
{'name':'round'},
{'name':'small'}])},
{'description': 'mm_item3',
'keywords' : (Keyword, [])},
{'description': 'mm_item4',
'keywords' : (Keyword, [{'name':'big'},
{'name':'blue'},])},
{'description': 'mm_item5',
'keywords' : (Keyword, [{'name':'big'},
{'name':'exacting'},
{'name':'green'}])},
{'description': 'mm_item6',
'keywords' : (Keyword, [{'name':'red'},
{'name':'round'},
{'name':'small'}])}]
session = create_session()
objects = []
_keywords = dict([(k.name, k) for k in session.query(Keyword)])
for elem in data[1:]:
item = Item(description=elem['description'])
objects.append(item)
for spec in elem['keywords'][1]:
keyword_name = spec['name']
try:
kw = _keywords[keyword_name]
except KeyError:
_keywords[keyword_name] = kw = Keyword(name=keyword_name)
item.keywords.append(kw)
session.add_all(objects)
session.flush()
l = (session.query(Item).
filter(Item.description.in_([e['description']
for e in data[1:]])).
order_by(Item.description).all())
self.assert_result(l, *data)
objects[4].description = 'item4updated'
k = Keyword()
k.name = 'yellow'
objects[5].keywords.append(k)
self.assert_sql_execution(
testing.db,
session.flush,
AllOf(
CompiledSQL("UPDATE items SET description=:description "
"WHERE items.id = :items_id",
{'description': 'item4updated',
'items_id': objects[4].id},
),
CompiledSQL("INSERT INTO keywords (name) "
"VALUES (:name)",
{'name': 'yellow'},
)
),
CompiledSQL("INSERT INTO item_keywords (item_id, keyword_id) "
"VALUES (:item_id, :keyword_id)",
lambda ctx: [{'item_id': objects[5].id,
'keyword_id': k.id}])
)
objects[2].keywords.append(k)
dkid = objects[5].keywords[1].id
del objects[5].keywords[1]
self.assert_sql_execution(
testing.db,
session.flush,
CompiledSQL("DELETE FROM item_keywords "
"WHERE item_keywords.item_id = :item_id AND "
"item_keywords.keyword_id = :keyword_id",
[{'item_id': objects[5].id, 'keyword_id': dkid}]),
CompiledSQL("INSERT INTO item_keywords (item_id, keyword_id) "
"VALUES (:item_id, :keyword_id)",
lambda ctx: [{'item_id': objects[2].id, 'keyword_id': k.id}]
))
session.delete(objects[3])
session.flush()
def test_many_to_many_remove(self):
"""Setting a collection to empty deletes many-to-many rows.
Tests that setting a list-based attribute to '[]' properly affects the
history and allows the many-to-many rows to be deleted
"""
keywords, items, item_keywords, Keyword, Item = (self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item)
mapper(Keyword, keywords)
mapper(Item, items, properties=dict(
keywords = relationship(Keyword, item_keywords, lazy='joined'),
))
i = Item(description='i1')
k1 = Keyword(name='k1')
k2 = Keyword(name='k2')
i.keywords.append(k1)
i.keywords.append(k2)
session = create_session()
session.add(i)
session.flush()
assert item_keywords.count().scalar() == 2
i.keywords = []
session.flush()
assert item_keywords.count().scalar() == 0
def test_scalar(self):
"""sa.dependency won't delete an m2m relationship referencing None."""
keywords, items, item_keywords, Keyword, Item = (self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item)
mapper(Keyword, keywords)
mapper(Item, items, properties=dict(
keyword=relationship(Keyword, secondary=item_keywords, uselist=False)))
i = Item(description='x')
session = create_session()
session.add(i)
session.flush()
session.delete(i)
session.flush()
def test_many_to_many_update(self):
"""Assorted history operations on a many to many"""
keywords, items, item_keywords, Keyword, Item = (self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item)
mapper(Keyword, keywords)
mapper(Item, items, properties=dict(
keywords=relationship(Keyword,
secondary=item_keywords,
lazy='joined',
order_by=keywords.c.name)))
k1 = Keyword(name='keyword 1')
k2 = Keyword(name='keyword 2')
k3 = Keyword(name='keyword 3')
item = Item(description='item 1')
item.keywords.extend([k1, k2, k3])
session = create_session()
session.add(item)
session.flush()
item.keywords = []
item.keywords.append(k1)
item.keywords.append(k2)
session.flush()
session.expunge_all()
item = session.query(Item).get(item.id)
assert item.keywords == [k1, k2]
def test_association(self):
"""Basic test of an association object"""
keywords, items, item_keywords, Keyword, Item = (self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item)
class IKAssociation(fixtures.ComparableEntity):
pass
mapper(Keyword, keywords)
# note that we are breaking a rule here, making a second
# mapper(Keyword, keywords); the reorganization of mapper construction
# affected this, but was fixed again
mapper(IKAssociation, item_keywords,
primary_key=[item_keywords.c.item_id, item_keywords.c.keyword_id],
properties=dict(
keyword=relationship(mapper(Keyword, keywords, non_primary=True),
lazy='joined',
uselist=False,
order_by=keywords.c.name # note here is a valid place where order_by can be used
))) # on a scalar relationship(); to determine eager ordering of
# the parent object within its collection.
mapper(Item, items, properties=dict(
keywords=relationship(IKAssociation, lazy='joined')))
session = create_session()
def fixture():
_kw = dict([(k.name, k) for k in session.query(Keyword)])
for n in ('big', 'green', 'purple', 'round', 'huge',
'violet', 'yellow', 'blue'):
if n not in _kw:
_kw[n] = Keyword(name=n)
def assocs(*names):
return [IKAssociation(keyword=kw)
for kw in [_kw[n] for n in names]]
return [
Item(description='a_item1',
keywords=assocs('big', 'green', 'purple', 'round')),
Item(description='a_item2',
keywords=assocs('huge', 'violet', 'yellow')),
Item(description='a_item3',
keywords=assocs('big', 'blue'))]
session.add_all(fixture())
session.flush()
eq_(fixture(), session.query(Item).order_by(Item.description).all())
class SaveTest2(_fixtures.FixtureTest):
run_inserts = None
def test_m2o_nonmatch(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users)
mapper(Address, addresses, properties=dict(
user = relationship(User, lazy='select', uselist=False)))
session = create_session()
def fixture():
return [
Address(email_address='a1', user=User(name='u1')),
Address(email_address='a2', user=User(name='u2'))]
session.add_all(fixture())
self.assert_sql_execution(
testing.db,
session.flush,
CompiledSQL("INSERT INTO users (name) VALUES (:name)",
{'name': 'u1'}),
CompiledSQL("INSERT INTO users (name) VALUES (:name)",
{'name': 'u2'}),
CompiledSQL("INSERT INTO addresses (user_id, email_address) "
"VALUES (:user_id, :email_address)",
{'user_id': 1, 'email_address': 'a1'}),
CompiledSQL("INSERT INTO addresses (user_id, email_address) "
"VALUES (:user_id, :email_address)",
{'user_id': 2, 'email_address': 'a2'}),
)
class SaveTest3(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('items', metadata,
Column('item_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('item_name', String(50)))
Table('keywords', metadata,
Column('keyword_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('name', String(50)))
Table('assoc', metadata,
Column('item_id', Integer, ForeignKey("items")),
Column('keyword_id', Integer, ForeignKey("keywords")),
Column('foo', sa.Boolean, default=True))
@classmethod
def setup_classes(cls):
class Keyword(cls.Basic):
pass
class Item(cls.Basic):
pass
def test_manytomany_xtracol_delete(self):
"""A many-to-many on a table that has an extra column can properly delete rows from the table without referencing the extra column"""
keywords, items, assoc, Keyword, Item = (self.tables.keywords,
self.tables.items,
self.tables.assoc,
self.classes.Keyword,
self.classes.Item)
mapper(Keyword, keywords)
mapper(Item, items, properties=dict(
keywords = relationship(Keyword, secondary=assoc, lazy='joined'),))
i = Item()
k1 = Keyword()
k2 = Keyword()
i.keywords.append(k1)
i.keywords.append(k2)
session = create_session()
session.add(i)
session.flush()
assert assoc.count().scalar() == 2
i.keywords = []
session.flush()
assert assoc.count().scalar() == 0
class BooleanColTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('t1_t', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('name', String(30)),
Column('value', sa.Boolean))
def test_boolean(self):
t1_t = self.tables.t1_t
# use the regular mapper
class T(fixtures.ComparableEntity):
pass
orm_mapper(T, t1_t, order_by=t1_t.c.id)
sess = create_session()
t1 = T(value=True, name="t1")
t2 = T(value=False, name="t2")
t3 = T(value=True, name="t3")
sess.add_all((t1, t2, t3))
sess.flush()
for clear in (False, True):
if clear:
sess.expunge_all()
eq_(sess.query(T).all(), [T(value=True, name="t1"), T(value=False, name="t2"), T(value=True, name="t3")])
if clear:
sess.expunge_all()
eq_(sess.query(T).filter(T.value==True).all(), [T(value=True, name="t1"),T(value=True, name="t3")])
if clear:
sess.expunge_all()
eq_(sess.query(T).filter(T.value==False).all(), [T(value=False, name="t2")])
t2 = sess.query(T).get(t2.id)
t2.value = True
sess.flush()
eq_(sess.query(T).filter(T.value==True).all(), [T(value=True, name="t1"), T(value=True, name="t2"), T(value=True, name="t3")])
t2.value = False
sess.flush()
eq_(sess.query(T).filter(T.value==True).all(), [T(value=True, name="t1"),T(value=True, name="t3")])
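# A "row switch" occurs when one object is deleted and another object with the
# same primary key is added within the same flush; the unit of work detects the
# pairing and emits a single UPDATE of the existing row rather than a DELETE
# followed by an INSERT.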
class RowSwitchTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
# parent
Table('t5', metadata,
Column('id', Integer, primary_key=True),
Column('data', String(30), nullable=False))
# onetomany
Table('t6', metadata,
Column('id', Integer, primary_key=True),
Column('data', String(30), nullable=False),
Column('t5id', Integer, ForeignKey('t5.id'),nullable=False))
# associated
Table('t7', metadata,
Column('id', Integer, primary_key=True),
Column('data', String(30), nullable=False))
# manytomany
Table('t5t7', metadata,
Column('t5id', Integer, ForeignKey('t5.id'),nullable=False),
Column('t7id', Integer, ForeignKey('t7.id'),nullable=False))
@classmethod
def setup_classes(cls):
class T5(cls.Comparable):
pass
class T6(cls.Comparable):
pass
class T7(cls.Comparable):
pass
def test_onetomany(self):
t6, T6, t5, T5 = (self.tables.t6,
self.classes.T6,
self.tables.t5,
self.classes.T5)
mapper(T5, t5, properties={
't6s':relationship(T6, cascade="all, delete-orphan")
})
mapper(T6, t6)
sess = create_session()
o5 = T5(data='some t5', id=1)
o5.t6s.append(T6(data='some t6', id=1))
o5.t6s.append(T6(data='some other t6', id=2))
sess.add(o5)
sess.flush()
eq_(
list(sess.execute(t5.select(), mapper=T5)),
[(1, 'some t5')]
)
eq_(
list(sess.execute(t6.select().order_by(t6.c.id), mapper=T5)),
[(1, 'some t6', 1), (2, 'some other t6', 1)]
)
o6 = T5(data='some other t5', id=o5.id, t6s=[
T6(data='third t6', id=3),
T6(data='fourth t6', id=4),
])
sess.delete(o5)
sess.add(o6)
sess.flush()
eq_(
list(sess.execute(t5.select(), mapper=T5)),
[(1, 'some other t5')]
)
eq_(
list(sess.execute(t6.select().order_by(t6.c.id), mapper=T5)),
[(3, 'third t6', 1), (4, 'fourth t6', 1)]
)
def test_manytomany(self):
t7, t5, t5t7, T5, T7 = (self.tables.t7,
self.tables.t5,
self.tables.t5t7,
self.classes.T5,
self.classes.T7)
mapper(T5, t5, properties={
't7s':relationship(T7, secondary=t5t7, cascade="all")
})
mapper(T7, t7)
sess = create_session()
o5 = T5(data='some t5', id=1)
o5.t7s.append(T7(data='some t7', id=1))
o5.t7s.append(T7(data='some other t7', id=2))
sess.add(o5)
sess.flush()
assert list(sess.execute(t5.select(), mapper=T5)) == [(1, 'some t5')]
assert testing.rowset(sess.execute(t5t7.select(), mapper=T5)) == set([(1,1), (1, 2)])
assert list(sess.execute(t7.select(), mapper=T5)) == [(1, 'some t7'), (2, 'some other t7')]
o6 = T5(data='some other t5', id=1, t7s=[
T7(data='third t7', id=3),
T7(data='fourth t7', id=4),
])
sess.delete(o5)
assert o5 in sess.deleted
assert o5.t7s[0] in sess.deleted
assert o5.t7s[1] in sess.deleted
sess.add(o6)
sess.flush()
assert list(sess.execute(t5.select(), mapper=T5)) == [(1, 'some other t5')]
assert list(sess.execute(t7.select(), mapper=T5)) == [(3, 'third t7'), (4, 'fourth t7')]
def test_manytoone(self):
t6, T6, t5, T5 = (self.tables.t6,
self.classes.T6,
self.tables.t5,
self.classes.T5)
mapper(T6, t6, properties={
't5':relationship(T5)
})
mapper(T5, t5)
sess = create_session()
o5 = T6(data='some t6', id=1)
o5.t5 = T5(data='some t5', id=1)
sess.add(o5)
sess.flush()
assert list(sess.execute(t5.select(), mapper=T5)) == [(1, 'some t5')]
assert list(sess.execute(t6.select(), mapper=T5)) == [(1, 'some t6', 1)]
o6 = T6(data='some other t6', id=1, t5=T5(data='some other t5', id=2))
sess.delete(o5)
sess.delete(o5.t5)
sess.add(o6)
sess.flush()
assert list(sess.execute(t5.select(), mapper=T5)) == [(2, 'some other t5')]
assert list(sess.execute(t6.select(), mapper=T5)) == [(1, 'some other t6', 2)]
class InheritingRowSwitchTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('parent', metadata,
Column('id', Integer, primary_key=True),
Column('pdata', String(30))
)
Table('child', metadata,
Column('id', Integer, primary_key=True),
Column('pid', Integer, ForeignKey('parent.id')),
Column('cdata', String(30))
)
@classmethod
def setup_classes(cls):
class P(cls.Comparable):
pass
class C(P):
pass
def test_row_switch_no_child_table(self):
P, C, parent, child = (self.classes.P,
self.classes.C,
self.tables.parent,
self.tables.child)
mapper(P, parent)
mapper(C, child, inherits=P)
sess = create_session()
c1 = C(id=1, pdata='c1', cdata='c1')
sess.add(c1)
sess.flush()
# establish a row switch between c1 and c2.
# c2 has no value for the "child" table
c2 = C(id=1, pdata='c2')
sess.add(c2)
sess.delete(c1)
self.assert_sql_execution(testing.db, sess.flush,
CompiledSQL("UPDATE parent SET pdata=:pdata WHERE parent.id = :parent_id",
{'pdata':'c2', 'parent_id':1}
),
# this fires as of [ticket:1362], since we synchronize
# PK/FKs on UPDATES. c2 is new so the history shows up as
# pure added, update occurs. If a future change limits the
# sync operation during _save_obj().update, this is safe to remove again.
CompiledSQL("UPDATE child SET pid=:pid WHERE child.id = :child_id",
{'pid':1, 'child_id':1}
)
)
class TransactionTest(fixtures.MappedTest):
__requires__ = ('deferrable_or_no_constraints',)
@classmethod
def define_tables(cls, metadata):
t1 = Table('t1', metadata,
Column('id', Integer, primary_key=True))
t2 = Table('t2', metadata,
Column('id', Integer, primary_key=True),
Column('t1_id', Integer,
ForeignKey('t1.id', deferrable=True, initially='deferred')
))
@classmethod
def setup_classes(cls):
class T1(cls.Comparable):
pass
class T2(cls.Comparable):
pass
@classmethod
def setup_mappers(cls):
T2, T1, t2, t1 = (cls.classes.T2,
cls.classes.T1,
cls.tables.t2,
cls.tables.t1)
orm_mapper(T1, t1)
orm_mapper(T2, t2)
def test_close_transaction_on_commit_fail(self):
T2, t1 = self.classes.T2, self.tables.t1
session = create_session(autocommit=True)
# with a deferred constraint, this fails at COMMIT time instead
# of at INSERT time.
session.add(T2(t1_id=123))
try:
session.flush()
assert False
except:
# Flush needs to rollback also when commit fails
assert session.transaction is None
# todo: on 8.3 at least, the failed commit seems to close the cursor?
# needs investigation. leaving in the DDL above now to help verify
# that the new deferrable support on FK isn't involved in this issue.
if testing.against('postgresql'):
t1.bind.engine.dispose()
class PartialNullPKTest(fixtures.MappedTest):
# sqlite totally fine with NULLs in pk columns.
# no other DB is like this.
__only_on__ = ('sqlite',)
@classmethod
def define_tables(cls, metadata):
Table('t1', metadata,
Column('col1', String(10), primary_key=True, nullable=True),
Column('col2', String(10), primary_key=True, nullable=True),
Column('col3', String(50))
)
@classmethod
def setup_classes(cls):
class T1(cls.Basic):
pass
@classmethod
def setup_mappers(cls):
orm_mapper(cls.classes.T1, cls.tables.t1)
def test_key_switch(self):
T1 = self.classes.T1
s = Session()
s.add(T1(col1="1", col2=None))
t1 = s.query(T1).first()
t1.col2 = 5
assert_raises_message(
orm_exc.FlushError,
"Can't update table using NULL for primary key value",
s.commit
)
def test_plain_update(self):
T1 = self.classes.T1
s = Session()
s.add(T1(col1="1", col2=None))
t1 = s.query(T1).first()
t1.col3 = 'hi'
assert_raises_message(
orm_exc.FlushError,
"Can't update table using NULL for primary key value",
s.commit
)
def test_delete(self):
T1 = self.classes.T1
s = Session()
s.add(T1(col1="1", col2=None))
t1 = s.query(T1).first()
s.delete(t1)
assert_raises_message(
orm_exc.FlushError,
"Can't delete from table using NULL for primary key value",
s.commit
)
def test_total_null(self):
T1 = self.classes.T1
s = Session()
s.add(T1(col1=None, col2=None))
assert_raises_message(
orm_exc.FlushError,
r"Instance \<T1 at .+?\> has a NULL "
"identity key. If this is an auto-generated value, "
"check that the database table allows generation ",
s.commit
)
def test_dont_complain_if_no_update(self):
T1 = self.classes.T1
s = Session()
t = T1(col1="1", col2=None)
s.add(t)
s.commit()
t.col1 = "1"
s.commit() | mit | 4,476,068,721,460,982,000 | 32.096647 | 141 | 0.530388 | false |
130s/bloom | bloom/commands/update.py | 1 | 4643 | # Software License Agreement (BSD License)
#
# Copyright (c) 2013, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import argparse
import atexit
import bloom
import json
import os
import sys
import xmlrpclib
from bloom.logging import warning
from bloom.util import add_global_arguments
from bloom.util import handle_global_arguments
from pkg_resources import parse_version
from threading import Lock
_updater_running = False
_updater_lock = Lock()
UPDATE_MSG = """\
This version of bloom is '{current}', but the newest available version is '{newest}'. Please update.\
"""
def start_updater():
global _updater_running, _updater_lock
with _updater_lock:
if _updater_running:
return
_updater_running = True
import subprocess
subprocess.Popen('bloom-update --quiet', shell=True)
@atexit.register
def check_for_updates():
if sys.argv[0].endswith('bloom-update'):
return
user_bloom = os.path.join(os.path.expanduser('~'), '.bloom')
if os.path.exists(user_bloom):
with open(user_bloom, 'r') as f:
raw = f.read()
if not raw:
return
version_dict = json.loads(raw)
os.remove(user_bloom) # Remove only on successful parse
if type(version_dict) == dict and len(version_dict) == 2 and version_dict['current'] == bloom.__version__:
warning(UPDATE_MSG.format(**version_dict))
def get_argument_parser():
parser = argparse.ArgumentParser(description="Checks for updates")
add_global_arguments(parser)
return parser
_quiet = False
def info(msg):
global _quiet
if not _quiet:
print(msg)
def fetch_update(user_bloom):
if os.path.exists(user_bloom):
return
open(user_bloom, 'w').close() # Touch the file
pypi = xmlrpclib.ServerProxy('http://pypi.python.org/pypi')
newest_version = pypi.package_releases('bloom')
newest_version = newest_version[0] if newest_version else None
current_version = bloom.__version__
if newest_version and bloom.__version__ != 'unset':
if parse_version(bloom.__version__) < parse_version(newest_version):
version_dict = {
'current': str(current_version),
'newest': str(newest_version)
}
with open(user_bloom, 'w') as f:
f.write(json.dumps(version_dict))
info(UPDATE_MSG.format(**version_dict))
if _quiet:
return
else:
info("Bloom is up-to-date!")
else:
info("Cannot determine newest version of bloom.")
os.remove(user_bloom)
def main(sysargs=None):
global _quiet
parser = get_argument_parser()
args = parser.parse_args(sysargs)
handle_global_arguments(args)
_quiet = args.quiet
user_bloom = os.path.join(os.path.expanduser('~'), '.bloom')
try:
fetch_update(user_bloom)
except Exception as e:
if not _quiet:
print('Error fetching latest version: ' + str(e), file=sys.stderr)
if os.path.exists(user_bloom):
os.remove(user_bloom)
| bsd-3-clause | -7,645,146,209,222,274,000 | 32.402878 | 114 | 0.677795 | false |
jakesyl/BitTornado | BitTornado/Network/selectpoll.py | 2 | 1284 | import select
import time
import bisect
POLLIN = 1
POLLOUT = 2
POLLERR = 8
POLLHUP = 16
class poll(object):
    """Minimal emulation of select.poll() implemented on top of select.select()."""
def __init__(self):
self.rlist = []
self.wlist = []
def register(self, f, t):
if not isinstance(f, int):
f = f.fileno()
if t & POLLIN:
insert(self.rlist, f)
else:
remove(self.rlist, f)
if t & POLLOUT:
insert(self.wlist, f)
else:
remove(self.wlist, f)
def unregister(self, f):
if not isinstance(f, int):
f = f.fileno()
remove(self.rlist, f)
remove(self.wlist, f)
def poll(self, timeout=None):
if self.rlist or self.wlist:
try:
r, w, _ = select.select(self.rlist, self.wlist, [], timeout)
except ValueError:
return None
else:
if timeout:
time.sleep(timeout / 1000)
return []
return [(s, POLLIN) for s in r] + [(s, POLLOUT) for s in w]
def remove(list, item):
i = bisect.bisect(list, item)
if i > 0 and list[i - 1] == item:
del list[i - 1]
def insert(list, item):
i = bisect.bisect(list, item)
if i == 0 or list[i - 1] != item:
list.insert(i, item)
| mit | -7,630,042,393,807,928,000 | 21.928571 | 76 | 0.499221 | false |
xorpaul/check_mk | web/plugins/wato/builtin_modules.py | 1 | 4363 | #!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 [email protected] |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# ails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
# List of modules for main menu and WATO snapin. These modules are
# defined in a plugin because they contain i18n strings.
# fields: mode, title, icon, permission, help
modules += [
( "folder", _("Hosts"), "folder", "hosts",
_("Manage monitored hosts and services and the hosts' folder structure.")),
( "hosttags", _("Host Tags"), "hosttag", "hosttags",
_("Tags classify hosts and are the "
"fundament of configuration of hosts and services.")),
( "globalvars", _("Global Settings"), "configuration", "global",
_("Global settings for Check_MK, Multisite and the monitoring core.")),
( "ruleeditor", _("Host & Service Parameters"), "rulesets", "rulesets",
_("Check parameters and other configuration variables on "
"hosts and services") ),
( "static_checks", _("Manual Checks"), "static_checks", "rulesets",
_("Configure fixed checks without using service discovery")),
( "check_plugins", _("Check Plugins"), "check_plugins", None,
_("Browse the catalog of all check plugins, create static checks")),
( "host_groups", _("Host & Service Groups"), "hostgroups", "groups",
_("Organize your hosts and services in groups independent of the tree structure.") ),
( "users", _("Users"), "users", "users",
_("Manage users of the monitoring system.") ),
( "roles", _("Roles & Permissions"), "roles", "users",
_("User roles are configurable sets of permissions." ) ),
( "contact_groups", _("Contact Groups"), "contactgroups", "users",
_("Contact groups are used to assign persons to hosts and services") ),
( "notifications", _("Notifications"), "notifications", "notifications",
_("Rules for the notification of contacts about host and service problems")),
( "timeperiods", _("Time Periods"), "timeperiods", "timeperiods",
_("Timeperiods restrict notifications and other things to certain periods of "
"the day.") ),
( "pattern_editor", _("Logfile Pattern Analyzer"), "analyze", "pattern_editor",
_("Analyze logfile pattern rules and validate logfile patterns against custom text.")),
( "bi_rules", _("BI - Business Intelligence"), "aggr", "bi_rules",
_("Configuration of Check_MK's Business Intelligence component.")),
( "sites", _("Distributed Monitoring"), "sites", "sites",
_("Distributed monitoring via Multsite, distributed configuration via WATO")),
( "snapshot", _("Backup & Restore"), "backup", "snapshots",
_("Make snapshots of your configuration, download, upload and restore snapshots.")),
( "icons", _("Custom Icons"), "icons", "icons",
_("Upload your own icons that can be used in views or custom actions")),
]
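# Hedged illustration (comments only, not part of the shipped list above): another WATO
# plugin could register an additional entry by appending a tuple with the same
# (mode, title, icon, permission, help) layout described in the comment before the list.
# All names below are made-up placeholders.
#
#   modules.append(
#       ( "example_mode", _("Example Module"), "example_icon", "example_permission",
#         _("Short help text shown in the WATO menu.")),
#   )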
| gpl-2.0 | 777,438,350,486,081,900 | 50.329412 | 95 | 0.552143 | false |
OpenMined/PySyft | packages/syft/examples/duet/word_language_model/original/generate.py | 1 | 3193 | ###############################################################################
# Language Modeling on Wikitext-2
#
# This file generates new sentences sampled from the language model
#
###############################################################################
# stdlib
import argparse
# third party
import torch
import data # isort:skip
parser = argparse.ArgumentParser(description="PyTorch Wikitext-2 Language Model")
# Model parameters.
parser.add_argument(
"--data",
type=str,
default="./data/wikitext-2",
help='location of the data corpus; default: "./data/wikitext-2"',
)
parser.add_argument(
"--checkpoint",
type=str,
default="./model.pt",
help='model checkpoint to use; default: "./model.pt"',
)
parser.add_argument(
"--outf",
type=str,
default="generated.txt",
help='output file for generated text; default: "generated.txt"',
)
parser.add_argument(
"--words",
type=int,
default="1000",
help="number of words to generate; default: 1000",
)
parser.add_argument("--seed", type=int, default=1111, help="random seed; default: 1111")
parser.add_argument("--cuda", action="store_true", help="use CUDA")
parser.add_argument(
"--temperature",
type=float,
default=1.0,
help="temperature - higher will increase diversity; default: 1.0",
)
parser.add_argument(
"--log-interval", type=int, default=100, help="reporting interval; default: 100"
)
args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
device = torch.device("cuda" if args.cuda else "cpu")
if args.temperature < 1e-3:
parser.error("--temperature has to be greater or equal 1e-3")
with open(args.checkpoint, "rb") as f:
model = torch.load(f).to(device)
model.eval()
corpus = data.Corpus(args.data)
ntokens = len(corpus.dictionary)
is_transformer_model = (
hasattr(model, "model_type") and model.model_type == "Transformer"
)
if not is_transformer_model:
hidden = model.init_hidden(1)
input = torch.randint(ntokens, (1, 1), dtype=torch.long).to(device)
with open(args.outf, "w") as outf:
with torch.no_grad(): # no tracking history
for i in range(args.words):
if is_transformer_model:
output = model(input, False)
word_weights = output[-1].squeeze().div(args.temperature).exp().cpu()
word_idx = torch.multinomial(word_weights, 1)[0]
word_tensor = torch.Tensor([[word_idx]]).long().to(device)
input = torch.cat([input, word_tensor], 0)
else:
output, hidden = model(input, hidden)
word_weights = output.squeeze().div(args.temperature).exp().cpu()
word_idx = torch.multinomial(word_weights, 1)[0]
input.fill_(word_idx)
word = corpus.dictionary.idx2word[word_idx]
outf.write(word + ("\n" if i % 20 == 19 else " "))
if i % args.log_interval == 0:
print(f"| Generated {i}/{args.words} words")
| apache-2.0 | 7,456,513,447,870,492,000 | 30.613861 | 88 | 0.603821 | false |
pymanopt/pymanopt | tests/test_problem_backend_interface.py | 1 | 2532 | import autograd.numpy as np
import numpy.testing as np_testing
import pymanopt
from pymanopt.manifolds import Euclidean, FixedRankEmbedded, Product
from ._test import TestCase
class TestProblemBackendInterface(TestCase):
def setUp(self):
self.m = m = 20
self.n = n = 10
self.rank = rank = 3
A = np.random.randn(m, n)
@pymanopt.function.Autograd
def cost(u, s, vt, x):
return np.linalg.norm(((u * s) @ vt - A) @ x) ** 2
self.cost = cost
self.gradient = self.cost.compute_gradient()
self.hvp = self.cost.compute_hessian_vector_product()
self.manifold = Product([FixedRankEmbedded(m, n, rank), Euclidean(n)])
self.problem = pymanopt.Problem(self.manifold, self.cost)
def test_cost_function(self):
(u, s, vt), x = self.manifold.rand()
self.cost(u, s, vt, x)
def test_gradient(self):
(u, s, vt), x = self.manifold.rand()
gu, gs, gvt, gx = self.gradient(u, s, vt, x)
self.assertEqual(gu.shape, (self.m, self.rank))
self.assertEqual(gs.shape, (self.rank,))
self.assertEqual(gvt.shape, (self.rank, self.n))
self.assertEqual(gx.shape, (self.n,))
def test_hessian_vector_product(self):
(u, s, vt), x = self.manifold.rand()
(a, b, c), d = self.manifold.rand()
hu, hs, hvt, hx = self.hvp(u, s, vt, x, a, b, c, d)
self.assertEqual(hu.shape, (self.m, self.rank))
self.assertEqual(hs.shape, (self.rank,))
self.assertEqual(hvt.shape, (self.rank, self.n))
self.assertEqual(hx.shape, (self.n,))
def test_problem_cost(self):
cost = self.problem.cost
X = self.manifold.rand()
(u, s, vt), x = X
np_testing.assert_allclose(cost(X), self.cost(u, s, vt, x))
def test_problem_egrad(self):
egrad = self.problem.egrad
X = self.manifold.rand()
(u, s, vt), x = X
G = egrad(X)
(gu, gs, gvt), gx = G
for ga, gb in zip((gu, gs, gvt, gx), self.gradient(u, s, vt, x)):
np_testing.assert_allclose(ga, gb)
def test_problem_hessian_vector_product(self):
ehess = self.problem.ehess
X = self.manifold.rand()
U = self.manifold.rand()
H = ehess(X, U)
(u, s, vt), x = X
(a, b, c), d = U
(hu, hs, hvt), hx = H
for ha, hb in zip((hu, hs, hvt, hx),
self.hvp(u, s, vt, x, a, b, c, d)):
np_testing.assert_allclose(ha, hb)
| bsd-3-clause | 588,114,702,667,529,700 | 32.315789 | 78 | 0.556477 | false |
robsonfs/HackerRankChallenge_30DaysOfCode | day_12/tests.py | 1 | 1507 | from unittest import TestCase, mock
from day12 import Person, Student
class TestDay12(TestCase):
def test_student_a_person_subclass(self):
self.assertTrue(issubclass(Student, Person))
def test_student_has_att_scores(self):
student = Student("Sophia", "Fernandes", 201302, [90, 100, 100, 80])
self.assertTrue(hasattr(student, 'scores'))
def test_student_calculate_testcase0(self):
student = Student("Sophia", "Fernandes", 201302, [90, 100, 100, 80])
grade = student.calculate()
self.assertEqual('O', grade)
def test_student_calculate_testcase1(self):
student = Student("Sophia", "Fernandes", 201302, [90, 80, 99, 80])
grade = student.calculate()
self.assertEqual('E', grade)
def test_student_calculate_testcase2(self):
student = Student("Sophia", "Fernandes", 201302, [76])
grade = student.calculate()
self.assertEqual('A', grade)
def test_student_calculate_testcase3(self):
student = Student("Sophia", "Fernandes", 201302, [66])
grade = student.calculate()
self.assertEqual('P', grade)
def test_student_calculate_testcase4(self):
student = Student("Sophia", "Fernandes", 201302, [54])
grade = student.calculate()
self.assertEqual('D', grade)
def test_student_calculate_testcase5(self):
student = Student("Sophia", "Fernandes", 201302, [39])
grade = student.calculate()
self.assertEqual('T', grade)
| gpl-3.0 | -552,155,690,027,600,500 | 35.756098 | 76 | 0.639018 | false |
dts-ait/qgis-edge-bundling | qgis3_plugin/processing_edgebundling/edgebundling.py | 1 | 8883 | # -*- coding: utf-8 -*-
"""
***************************************************************************
edgebundlingProviderPlugin.py
---------------------
Date : January 2018
Copyright : (C) 2018 by Anita Graser
Email : [email protected]
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Anita Graser'
__date__ = 'January 2018'
__copyright__ = '(C) 2018, Anita Graser'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtCore import QCoreApplication, QVariant
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsField,
QgsFeature,
QgsFeatureSink,
QgsFeatureRequest,
QgsProcessing,
QgsProcessingAlgorithm,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterField,
QgsProcessingParameterNumber,
QgsProcessingParameterBoolean,
QgsProcessingParameterFeatureSink
)
from processing_edgebundling.edgebundlingUtils import EdgeCluster
pluginPath = os.path.dirname(__file__)
class Edgebundling(QgsProcessingAlgorithm):
INPUT = 'INPUT'
CLUSTER_FIELD = 'CLUSTER_FIELD'
USE_CLUSTERING = 'USE_CLUSTERING'
INITIAL_STEP_SIZE = 'INITIAL_STEP_SIZE'
COMPATIBILITY = 'COMPATIBILITY'
CYCLES = 'CYCLES'
ITERATIONS = 'ITERATIONS'
OUTPUT = 'OUTPUT'
def __init__(self):
super().__init__()
def createInstance(self):
return type(self)()
def icon(self):
return QIcon(os.path.join(pluginPath, "icons", "icon.png"))
def tr(self, text):
return QCoreApplication.translate("edgebundling", text)
def name(self):
return "edgebundling"
def displayName(self):
return self.tr("Force-directed edge bundling")
def group(self):
return self.tr("Edge Bundling")
def groupId(self):
return "edgebundling"
def tags(self):
return self.tr("edgebundling,flows").split(",")
def shortHelpString(self):
return self.tr("""
Implementation of force-directed edge bundling for the QGIS Processing toolbox as described in
https://anitagraser.com/2017/10/08/movement-data-in-gis-8-edge-bundling-for-flow-maps/
Usage:
Pre-process your data first!
- Use only Linestrings (no Multilinestrings)
- Your data should only contain lines with exactly 2 nodes: an origin node and a destination node.
- Your data should also only contain lines with a length greater than 0 ("lines" with equal origin and destination node coordinates will cause an error).
        Once your data is sufficiently pre-processed and fulfils all of the above mentioned requirements, you can either first use one of the clustering algorithms and then bundle the lines, or you can bundle the lines directly (which, on the downside, will take significantly longer). Please double check that the input parameters fit your data (e.g. the "initial step size" of the edge bundling algorithm, which depends on the coordinate reference system of your data).
""")
def helpUrl(self):
return "https://github.com/dts-ait/qgis-edge-bundling"
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterFeatureSource(
self.INPUT,
self.tr("Input layer"),
[QgsProcessing.TypeVectorLine]))
self.addParameter(QgsProcessingParameterField(
self.CLUSTER_FIELD,
self.tr("Cluster field"),
None,
self.INPUT))
self.addParameter(QgsProcessingParameterBoolean(
self.USE_CLUSTERING,
self.tr("Use cluster field"),
defaultValue=False))
self.addParameter(QgsProcessingParameterNumber(
self.INITIAL_STEP_SIZE,
self.tr("Initial step size"),
QgsProcessingParameterNumber.Double,
100))
self.addParameter(QgsProcessingParameterNumber(
self.COMPATIBILITY,
self.tr("Compatibility"),
QgsProcessingParameterNumber.Double,
0.6))
self.addParameter(QgsProcessingParameterNumber(
self.CYCLES,
self.tr("Cycles"),
QgsProcessingParameterNumber.Integer,
6))
self.addParameter(QgsProcessingParameterNumber(
self.ITERATIONS,
self.tr("Iterations"),
QgsProcessingParameterNumber.Integer,
90))
self.addParameter(QgsProcessingParameterFeatureSink(
self.OUTPUT,
self.tr("Bundled edges"),
QgsProcessing.TypeVectorLine))
def processAlgorithm(self, parameters, context, feedback):
cluster_field = self.parameterAsFields(parameters, self.CLUSTER_FIELD, context)[0]
use_clustering = self.parameterAsBool(parameters, self.USE_CLUSTERING, context)
initial_step_size = self.parameterAsDouble(parameters, self.INITIAL_STEP_SIZE, context)
compatibility = self.parameterAsDouble(parameters, self.COMPATIBILITY, context)
cycles = self.parameterAsInt(parameters, self.CYCLES, context)
iterations = self.parameterAsInt(parameters, self.ITERATIONS, context)
source = self.parameterAsSource(parameters, self.INPUT, context)
(sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context,
source.fields(), source.wkbType(), source.sourceCrs())
features = source.getFeatures(QgsFeatureRequest())
total = 100.0 / source.featureCount() if source.featureCount() else 0
# Parameter
vlayer = source
fields = vlayer.fields()
# Create edge list
edges = []
for current, feat in enumerate(features):
if feedback.isCanceled():
break
edges.append(feat)
# Create clusters
clusters = []
if use_clustering == True:
# Arrange edges in clusters according to cluster-id
labels = []
for edge in edges:
labels.append(edge[cluster_field])
feedback.pushDebugInfo(cluster_field)
for l in range(0, max(labels) + 1):
clusters.append(list())
for i, label in enumerate(labels):
if label >= 0:
clusters[label].append(edges[i])
else:
clusters.append([edges[i]])
for i, cluster in enumerate(clusters):
clusters[i] = EdgeCluster(cluster, initial_step_size, iterations,
cycles, compatibility)
else:
# If clustering should not be used, create only one big cluster containing all edges
cluster_field = QgsField('CLUSTER', QVariant.Int)
cluster_n_field = QgsField('CLUSTER_N', QVariant.Int)
fields.append(cluster_field)
fields.append(cluster_n_field)
clusters = [EdgeCluster(edges, initial_step_size, iterations,
cycles, compatibility)]
# Do edge-bundling (separately for all clusters)
for c, cl in enumerate(clusters):
feedback.setProgress(80 * ( 1.0 * c / len(clusters)))
if feedback.isCanceled(): break
if cl.E > 1:
cl.force_directed_eb(feedback)
feedback.setProgress(90)
for cl in clusters:
if feedback.isCanceled(): break
for e, edge in enumerate(cl.edges):
feat = QgsFeature()
feat.setGeometry(edge.geometry())
if not use_clustering:
attr = edge.attributes()
attr.append(1)
attr.append(len(edges))
feat.setAttributes(attr)
else:
feat.setAttributes(edge.attributes())
sink.addFeature(feat, QgsFeatureSink.FastInsert)
return {self.OUTPUT: dest_id}
| gpl-2.0 | -8,249,758,353,570,889,000 | 38.834081 | 459 | 0.573793 | false |
longde123/MultiversePlatform | client/Scripts/PropertyKeyFrame.py | 1 | 2788 | #
# The Multiverse Platform is made available under the MIT License.
#
# Copyright (c) 2012 The Multiverse Foundation
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
#
import ClientAPI
class PropertyKeyFrame:
#
# Constructor
#
def __init__(self):
assert False
#
# Property Getters
#
def _get_Time(self):
return self._keyFrame.Time
def _get_PropertyValue(self):
return self._keyFrame.NumericValue
def __getattr__(self, attrname):
if attrname in self._getters:
return self._getters[attrname](self)
else:
raise AttributeError, attrname
#
# Property Setters
#
def _set_PropertyValue(self, value):
self._keyFrame.NumericValue = value
def __setattr__(self, attrname, value):
if attrname in self._setters:
self._setters[attrname](self, value)
else:
raise AttributeError, attrname
_getters = { 'Time': _get_Time, 'PropertyValue': _get_PropertyValue }
_setters = { 'PropertyValue': _set_PropertyValue }
#
# Methods
#
#
# This class is just another way of making a PropertyKeyFrame, with a different constructor,
# since we don't have constructor overloading within a single class. This should only
# be used internally by the API.
#
# The way to get a PropertyKeyFrame is to call animationTrack.CreateKeyFrame()
#
class _ExistingPropertyKeyFrame(PropertyKeyFrame):
#
# Constructor
#
def __init__(self, keyFrame):
self.__dict__['_keyFrame'] = keyFrame
def __setattr__(self, attrname, value):
PropertyKeyFrame.__setattr__(self, attrname, value)
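# Hedged usage sketch (added comments, not part of the original file): per the note above,
# scripts obtain key frames from an animation track instead of constructing them directly.
# The "animationTrack" object and its CreateKeyFrame() call are assumed from that note;
# only the Time/PropertyValue accessors are defined in this module.
#
#     keyFrame = animationTrack.CreateKeyFrame()
#     keyFrame.PropertyValue = 1.0    # writable numeric value
#     currentTime = keyFrame.Time     # read-only key frame time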
| mit | 6,668,025,682,749,727,000 | 30.325843 | 92 | 0.66858 | false |
hsk81/protobuf-rpc-js | example/protocol/reflector_pb2.py | 1 | 3620 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: reflector.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='reflector.proto',
package='Reflector',
syntax='proto3',
serialized_pb=_b('\n\x0freflector.proto\x12\tReflector\"\x1f\n\nAckRequest\x12\x11\n\ttimestamp\x18\x01 \x01(\t\"\x1e\n\tAckResult\x12\x11\n\ttimestamp\x18\x01 \x01(\t2=\n\x07Service\x12\x32\n\x03\x61\x63k\x12\x15.Reflector.AckRequest\x1a\x14.Reflector.AckResultb\x06proto3')
)
_ACKREQUEST = _descriptor.Descriptor(
name='AckRequest',
full_name='Reflector.AckRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='timestamp', full_name='Reflector.AckRequest.timestamp', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=30,
serialized_end=61,
)
_ACKRESULT = _descriptor.Descriptor(
name='AckResult',
full_name='Reflector.AckResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='timestamp', full_name='Reflector.AckResult.timestamp', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=63,
serialized_end=93,
)
DESCRIPTOR.message_types_by_name['AckRequest'] = _ACKREQUEST
DESCRIPTOR.message_types_by_name['AckResult'] = _ACKRESULT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AckRequest = _reflection.GeneratedProtocolMessageType('AckRequest', (_message.Message,), dict(
DESCRIPTOR = _ACKREQUEST,
__module__ = 'reflector_pb2'
# @@protoc_insertion_point(class_scope:Reflector.AckRequest)
))
_sym_db.RegisterMessage(AckRequest)
AckResult = _reflection.GeneratedProtocolMessageType('AckResult', (_message.Message,), dict(
DESCRIPTOR = _ACKRESULT,
__module__ = 'reflector_pb2'
# @@protoc_insertion_point(class_scope:Reflector.AckResult)
))
_sym_db.RegisterMessage(AckResult)
_SERVICE = _descriptor.ServiceDescriptor(
name='Service',
full_name='Reflector.Service',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=95,
serialized_end=156,
methods=[
_descriptor.MethodDescriptor(
name='ack',
full_name='Reflector.Service.ack',
index=0,
containing_service=None,
input_type=_ACKREQUEST,
output_type=_ACKRESULT,
options=None,
),
])
_sym_db.RegisterServiceDescriptor(_SERVICE)
DESCRIPTOR.services_by_name['Service'] = _SERVICE
# @@protoc_insertion_point(module_scope)
| bsd-3-clause | 7,428,923,567,136,072,000 | 26.424242 | 277 | 0.712431 | false |
locked/4stability | motor_test.py | 1 | 1460 | #!/usr/bin/python
from optparse import OptionParser
import os
import sys
import time
import termios
import fcntl
import motor
parser = OptionParser()
parser.add_option("-a", "--action", dest="action", help="reset/manual")
(options, args) = parser.parse_args()
m = motor.Motor(0)
if options.action == "reset":
m.reset()
elif options.action == "cycle":
m.init()
    # Ramp the motor up to 30% and back down to 0%, one percent step at a time.
    speed_percent = 0
    while speed_percent < 30:
        speed_percent += 1
        m.set_speed(speed_percent / 100.0)
        time.sleep(.1)
    while speed_percent > 0:
        speed_percent -= 1
        m.set_speed(speed_percent / 100.0)
        time.sleep(.1)
    m.reset()
elif options.action == "manual":
m.init()
# Terminal init stuff found on stackoverflow (SlashV)
fd = sys.stdin.fileno()
oldterm = termios.tcgetattr(fd)
newattr = termios.tcgetattr(fd)
newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, newattr)
oldflags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)
try:
speed_percent = 0
while (True):
try:
c = sys.stdin.read(1)
except IOError:
c = ''
if c == "-":
speed_percent = speed_percent - 1 if speed_percent > 1 else 0
elif c == "+":
speed_percent = speed_percent + 1 if speed_percent < 100 else 0
pos = m.set_speed(speed_percent/100.0)
sys.stdout.write("\r%d%% (%d)" % (speed_percent, pos))
sys.stdout.flush()
#time.sleep(.1)
except: pass
finally:
# Reset terminal
termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
fcntl.fcntl(fd, fcntl.F_SETFL, oldflags)
m.reset()
| bsd-3-clause | 6,159,371,416,633,251,000 | 22.174603 | 71 | 0.670548 | false |
beav/pulp | server/pulp/plugins/util/importer_config.py | 2 | 15079 | # -*- coding: utf-8 -*-
# Copyright (c) 2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
"""
Contains utilities for validating a Pulp standard importer config.
"""
from gettext import gettext as _
from pulp.common.plugins import importer_constants
class InvalidConfig(Exception):
"""
Raised if the importer config fails validation for one or more properties. All
values will be checked regardless of whether or not one is found to be invalid.
The raised exception will contain all of the properties that failed along with
the given value for each.
"""
def __init__(self):
super(InvalidConfig, self).__init__()
self.failure_messages = [] # running list of all i18n'd failure messages encountered
def add_failure_message(self, msg):
self.failure_messages.append(msg)
def has_errors(self):
return len(self.failure_messages) > 0
def validate_config(config):
"""
Validates all standard importer configuration options in the given configuration. All
validations are performed regardless of whether or not an error is encountered. If a failure
occurs, an exception is raised containing a list of all failure messages.
:param config: the configuration object being validated
:type config: pulp.plugins.config.PluginCallConfiguration
:raises InvalidConfig: if one or more validation tests fails
"""
potential_exception = InvalidConfig()
for v in VALIDATIONS:
try:
v(config)
except ValueError, e:
potential_exception.add_failure_message(e[0])
if potential_exception.has_errors():
raise potential_exception
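# Hedged usage sketch (added comments, not part of the original module): a caller passes
# the importer's PluginCallConfiguration and reports every accumulated failure message.
# The "config" and "logger" names below are illustrative only.
#
#     try:
#         validate_config(config)
#     except InvalidConfig, e:
#         for message in e.failure_messages:
#             logger.error(message)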
def validate_feed_requirement(config):
"""
Ensures the feed URL is a string if specified.
This validation does not check the integrity of the feed URL.
"""
feed_url = config.get(importer_constants.KEY_FEED)
if feed_url and not isinstance(feed_url, basestring):
msg = _('<%(feed_url)s> must be a string.')
msg = msg % {'feed_url': importer_constants.KEY_FEED}
raise ValueError(msg)
def validate_ssl_validation_flag(config):
"""
Make sure the SSL validation enabled flag is a boolean.
:param config: The configuration object that we are validating.
:type config: pulp.plugins.config.PluginCallConfiguration
"""
_run_validate_is_non_required_bool(config, importer_constants.KEY_SSL_VALIDATION)
def validate_ssl_ca_cert(config):
"""
Make sure the ssl_ca_cert is a string if it is set.
:param config: The configuration object that we are validating.
:type config: pulp.plugins.config.PluginCallConfiguration
"""
ssl_ca_cert = config.get(importer_constants.KEY_SSL_CA_CERT)
if ssl_ca_cert is None:
return # optional
if not isinstance(ssl_ca_cert, basestring):
msg = _('The configuration parameter <%(name)s> should be a string, but it was %(type)s.')
msg = msg % {'name': importer_constants.KEY_SSL_CA_CERT, 'type': type(ssl_ca_cert)}
raise ValueError(msg)
def validate_ssl_client_cert(config):
"""
    Make sure the client certificate is a string if it is set, and that it is provided whenever the client key is set.
"""
ssl_client_cert = config.get(importer_constants.KEY_SSL_CLIENT_CERT)
if ssl_client_cert is None and config.get(importer_constants.KEY_SSL_CLIENT_KEY) is None:
return # optional
elif ssl_client_cert is None:
# If the key is set, we should also have a cert
msg = _('The configuration parameter <%(key_name)s> requires the <%(cert_name)s> parameter to also '
'be set.')
msg = msg % {'key_name': importer_constants.KEY_SSL_CLIENT_KEY, 'cert_name': importer_constants.KEY_SSL_CLIENT_CERT}
raise ValueError(msg)
if not isinstance(ssl_client_cert, basestring):
msg = _('The configuration parameter <%(name)s> should be a string, but it was %(type)s.')
msg = msg % {'name': importer_constants.KEY_SSL_CLIENT_CERT, 'type': type(ssl_client_cert)}
raise ValueError(msg)
def validate_ssl_client_key(config):
"""
    Make sure the ssl_client_key is a string if it is set (the matching certificate requirement is checked by validate_ssl_client_cert).
"""
ssl_client_key = config.get(importer_constants.KEY_SSL_CLIENT_KEY)
if ssl_client_key is None:
return # optional
if not isinstance(ssl_client_key, basestring):
msg = _('The configuration parameter <%(name)s> should be a string, but it was %(type)s.')
msg = msg % {'name': importer_constants.KEY_SSL_CLIENT_KEY, 'type': type(ssl_client_key)}
raise ValueError(msg)
def validate_max_speed(config):
"""
Make sure the max speed can be cast to a number, if it is defined.
"""
max_speed = config.get(importer_constants.KEY_MAX_SPEED)
if max_speed is None:
return # optional
try:
max_speed = float(max_speed)
if max_speed <= 0:
raise ValueError()
except ValueError:
msg = _('The configuration parameter <%(max_speed_name)s> must be set to a positive numerical value, '
'but is currently set to <%(max_speed_value)s>.')
msg = msg % {'max_speed_name': importer_constants.KEY_MAX_SPEED, 'max_speed_value': max_speed}
raise ValueError(msg)
def validate_max_downloads(config):
"""
Make sure the maximum downloads value is a positive integer if it is set.
"""
max_downloads = config.get(importer_constants.KEY_MAX_DOWNLOADS)
if max_downloads is None:
return # optional
try:
max_downloads = _cast_to_int_without_allowing_floats(max_downloads)
if max_downloads < 1:
raise ValueError()
except ValueError:
msg = _('The configuration parameter <%(num_threads_name)s> must be set to a positive integer, but '
'is currently set to <%(num_threads)s>.')
msg = msg % {'num_threads_name': importer_constants.KEY_MAX_DOWNLOADS, 'num_threads': max_downloads}
raise ValueError(msg)
def validate_proxy_host(config):
"""
Make sure the proxy host is a string if it is set.
:param config: The configuration object that we are validating.
:type config: pulp.plugins.config.PluginCallConfiguration
"""
dependencies = [importer_constants.KEY_PROXY_PASS, importer_constants.KEY_PROXY_PORT,
importer_constants.KEY_PROXY_USER]
proxy_url = config.get(importer_constants.KEY_PROXY_HOST)
if proxy_url is None and all([config.get(parameter) is None for parameter in dependencies]):
return # optional
elif proxy_url is None:
msg = _('The configuration parameter <%(name)s> is required when any of the following other '
'parameters are defined: ' + ', '.join(dependencies) + '.')
msg = msg % {'name': importer_constants.KEY_PROXY_HOST}
raise ValueError(msg)
if not isinstance(proxy_url, basestring):
msg = _('The configuration parameter <%(name)s> should be a string, but it was %(type)s.')
msg = msg % {'name': importer_constants.KEY_PROXY_HOST, 'type': type(proxy_url)}
raise ValueError(msg)
def validate_proxy_port(config):
"""
    The proxy_port is optional. If it is set, this will make sure that it is a positive integer
    (the requirement that the proxy host is also set is enforced by validate_proxy_host).
:param config: The configuration object that we are validating.
:type config: pulp.plugins.config.PluginCallConfiguration
"""
proxy_port = config.get(importer_constants.KEY_PROXY_PORT)
if proxy_port is None:
return # optional
try:
proxy_port = _cast_to_int_without_allowing_floats(proxy_port)
if proxy_port < 1:
raise ValueError()
except ValueError:
msg = _('The configuration parameter <%(name)s> must be set to a positive integer, but is currently '
'set to <%(value)s>.')
msg = msg % {'name': importer_constants.KEY_PROXY_PORT, 'value': proxy_port}
raise ValueError(msg)
def validate_proxy_username(config):
"""
The proxy_username is optional. If it is set, this method will ensure that it is a string, and it will
also ensure that the proxy_password and proxy_url settings are set.
:param config: The configuration object that we are validating.
:type config: pulp.plugins.config.PluginCallConfiguration
"""
proxy_username = config.get(importer_constants.KEY_PROXY_USER)
# Proxy username is not required unless the password is set
if proxy_username is None and config.get(importer_constants.KEY_PROXY_PASS) is None:
return
elif proxy_username is None:
# If proxy_password is set, proxy_username must also be set
msg = _('The configuration parameter <%(password_name)s> requires the <%(username_name)s> parameter '
'to also be set.')
msg = msg % {'password_name': importer_constants.KEY_PROXY_PASS,
'username_name': importer_constants.KEY_PROXY_USER}
raise ValueError(msg)
if not isinstance(proxy_username, basestring):
msg = _('The configuration parameter <%(name)s> should be a string, but it was %(type)s.')
msg = msg % {'name': importer_constants.KEY_PROXY_USER, 'type': type(proxy_username)}
raise ValueError(msg)
def validate_proxy_password(config):
"""
The proxy password setting is optional. However, if it is set, it must be a string. Also, if it
    is set, the proxy username must also be set.
:param config: The configuration object that we are validating.
:type config: pulp.plugins.config.PluginCallConfiguration
"""
proxy_password = config.get(importer_constants.KEY_PROXY_PASS)
if proxy_password is None and config.get(importer_constants.KEY_PROXY_USER) is None:
return # optional
elif proxy_password is None:
# If proxy_password is set, proxy_username must also be set
msg = _('The configuration parameter <%(username_name)s> requires the <%(password_name)s> '
'parameter to also be set.')
msg = msg % {'password_name': importer_constants.KEY_PROXY_PASS,
'username_name': importer_constants.KEY_PROXY_USER}
raise ValueError(msg)
if not isinstance(proxy_password, basestring):
msg = _('The configuration parameter <%(proxy_password_name)s> should be a string, but it was '
'%(type)s.')
msg = msg % {'proxy_password_name': importer_constants.KEY_PROXY_PASS,
'type': type(proxy_password)}
raise ValueError(msg)
def validate_validate_downloads(config):
"""
This (humorously named) method will validate the optional config option called
"validate_downloads". If it is set, it must be a boolean, otherwise it may be None.
:param config: the config to be validated
:type config: pulp.plugins.config.PluginCallConfiguration
"""
_run_validate_is_non_required_bool(config, importer_constants.KEY_VALIDATE)
def validate_remove_missing(config):
"""
This method will validate the optional config setting called "remove_missing_units". If it is set, it must
be a boolean, otherwise it may be None.
:param config: the config to be validated
:type config: pulp.plugins.config.PluginCallConfiguration
"""
_run_validate_is_non_required_bool(config, importer_constants.KEY_UNITS_REMOVE_MISSING)
def validate_retain_old_count(config):
"""
Makes sure the number of old units to retain is a number greater than or equal to 0.
:param config: the config to be validated
:type config: pulp.plugins.config.PluginCallConfiguration
"""
retain_old_count = config.get(importer_constants.KEY_UNITS_RETAIN_OLD_COUNT)
if retain_old_count is None:
return # optional
try:
retain_old_count = _cast_to_int_without_allowing_floats(retain_old_count)
if retain_old_count < 0:
raise ValueError()
except ValueError:
msg = _('The configuration parameter <%(old_count_name)s> must be set to an integer greater '
'than or equal to zero, but is currently set to <%(old_count)s>.')
msg = msg % {'old_count_name': importer_constants.KEY_UNITS_RETAIN_OLD_COUNT,
'old_count': retain_old_count}
raise ValueError(msg)
# -- utilities ----------------------------------------------------------------
def _cast_to_int_without_allowing_floats(value):
"""
Attempt to return an int of the value, without allowing any floating point values. This is useful to
ensure that you get an int type out of value, while allowing a string representation of the value. If
there are any non numerical characters in value, this will raise ValueError.
:param value: The value you want to validate
:type value: int or basestring
:return: The integer representation of value
:rtype: int
"""
if isinstance(value, basestring):
# We don't want to allow floating point values
if not value.isdigit():
raise ValueError()
# Interpret num_threads as an integer
value = int(value)
if not isinstance(value, int):
raise ValueError()
return value
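# Illustrative behaviour of the helper above (added comments, not original code):
#
#     _cast_to_int_without_allowing_floats("42")   # -> 42
#     _cast_to_int_without_allowing_floats(7)      # -> 7
#     _cast_to_int_without_allowing_floats("7.5")  # raises ValueError (float strings rejected)
#     _cast_to_int_without_allowing_floats(7.5)    # raises ValueError (only ints and digit strings pass)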
def _run_validate_is_non_required_bool(config, setting_name):
"""
Validate that the bool represented in the config by setting_name is either not set, or if it is set that
it is a boolean value.
:param config: the config to be validated
:type config: pulp.plugins.config.PluginCallConfiguration
:param setting_name: The name of the setting we wish to validate in the config
:type setting_name: str
"""
original_setting = setting = config.get(setting_name)
if setting is None:
# We don't require any settings
return
if isinstance(setting, basestring):
setting = config.get_boolean(setting_name)
if isinstance(setting, bool):
return
msg = _('The configuration parameter <%(name)s> must be set to a boolean value, but is '
'currently set to <%(value)s>.')
msg = msg % {'name': setting_name, 'value': original_setting}
raise ValueError(msg)
VALIDATIONS = (
validate_feed_requirement,
validate_ssl_validation_flag,
validate_ssl_ca_cert,
validate_ssl_client_cert,
validate_ssl_client_key,
validate_max_speed,
validate_max_downloads,
validate_proxy_host,
validate_proxy_port,
validate_proxy_username,
validate_proxy_password,
validate_validate_downloads,
validate_remove_missing,
validate_retain_old_count,
)
| gpl-2.0 | 2,447,617,668,581,289,500 | 38.268229 | 124 | 0.667087 | false |
catiabandeiras/StemFactory | expansion_tech.py | 1 | 7506 | #Imports the generic Python packages
import simpy
import random
import math
#IMPORT THE ANCILLARY METHODS MODULES
from get_places import *
def expansion_tech_run(env, et, donor, lab, gui, int_db):
    """SimPy process that runs a single expansion technology (flask or bioreactor
    system) through seeding, repeated incubation/feeding cycles and final harvesting,
    competing with the other running processes for the lab's workers."""
#First, try to find an available worker
while True:
if lab.occupied_workers < gui.TOTAL_WORKERS:
worker_index = 0
while worker_index < gui.TOTAL_WORKERS:
#Select a worker
worker = lab.list_of_workers[worker_index]
if worker.count < worker.capacity:
with worker.request() as request:
yield request | env.timeout(0.0001)
donor.worker_queue = worker
lab.occupied_workers = sum([lab.list_of_workers[worker_index].count for worker_index in range(gui.TOTAL_WORKERS)])
#yield env.timeout(0.0001)
#print('Lab workers at %.5f seen by %s in the beginning of seeding are %d' % (env.now,et.full_name,lab.occupied_workers))
#2) If worker is available, calls the seeding block
procedure = 'seeding'
bsc_procedure = bsc_et(env,et,donor,lab,procedure,gui,int_db)
env.process(bsc_procedure)
yield env.timeout((donor.worker_queue.count/donor.worker_queue.capacity)*(lab.seeding_time))
#print('Seeding of %s finished at %.5f' % (et.full_name,env.now))
break
else:
worker_index += 1
break
else:
yield env.timeout(0.0001)
continue
#If harvesting before the density is favorable, do it
while et.no_cells < min(et.harvest_density*et.area,gui.CELL_NUMBER_PER_DOSE*(gui.ANNUAL_DEMAND-lab.total_doses)):
#Chooses if the process goes to a bioreactor system or is in the incubator only
if et.base_name[0] == 'b':
incubation = get_place_bioreactor(env,et,donor,lab,gui,int_db)
else:
incubation = get_place_incubator(env,et,donor,lab,gui,int_db)
env.process(incubation)
#print('Incubation of %s started at %.5f' % (et.full_name,env.now))
yield env.timeout(lab.incubation_time)
#print('Incubation of %s finished at %.5f' % (et.full_name,env.now))
if et.no_cells >= et.harvest_density*et.area:
'''Sent for harvesting when the number of cells in the flask to harvest is reached'''
print('%s is sent for harvesting at %.4f' % (et.full_name,env.now))
break
else:
'''Undergoes feeding when the period of incubation is reached'''
while True:
if lab.occupied_workers < gui.TOTAL_WORKERS:
worker_index = 0
while worker_index < gui.TOTAL_WORKERS:
#Select a worker
worker = lab.list_of_workers[worker_index]
if worker.count < worker.capacity:
with worker.request() as request:
yield request | env.timeout(0.0001)
donor.worker_queue = worker
lab.occupied_workers = sum([lab.list_of_workers[worker_index].count for worker_index in range(gui.TOTAL_WORKERS)])
#print('Feeding block initialized for %s at %.5f' % (et.full_name,env.now))
procedure = 'feeding'
#print('Feeding of %s started at %.5f' % (et.full_name,env.now))
bsc_procedure = bsc_et(env,et,donor,lab,procedure,gui,int_db)
env.process(bsc_procedure)
yield env.timeout((donor.worker_queue.count/donor.worker_queue.capacity)*(lab.feeding_time))
#print('Feeding of %s finished at %.5f' % (et.full_name,env.now))
#print('Feeding block terminated for %s at %.5f' % (et.full_name,env.now))
break
else:
worker_index += 1
break
else:
yield env.timeout(0.0001)
continue
# print(lab.reagent_volumes)
#4) Check that the bsc and worker are not busy before going to harvesting
while True:
'''Launches the harvesting steps'''
worker_index = 0
harvested = 0
while worker_index < gui.TOTAL_WORKERS:
#Select a worker
worker = lab.list_of_workers[worker_index]
#print(lab.list_of_workers)
if worker.count < worker.capacity:
# print('Stats before harvesting queue request of %s' % et.full_name)
# print(donor.donor_index)
# print(lab.occupied_workers)
# print(env.now)
with worker.request() as request:
yield request | env.timeout(0.0001)
donor.worker_queue = worker
lab.occupied_workers = sum([lab.list_of_workers[worker_index].count for worker_index in range(gui.TOTAL_WORKERS)])
#yield env.timeout(0.0001)
#print('Lab workers at %.5f seen by %s in the beginning of harvesting are %d' % (env.now,et.full_name,lab.occupied_workers))
#print('Harvesting block initialized for %s at %.5f' % (et.full_name,env.now))
procedure = 'harvesting'
# print('Harvested flasks per passage at %.5f' % env.now)
# print(donor.harvested_per_passage[donor.passage_no-1])
bsc_procedure = bsc_et(env,et,donor,lab,procedure,gui,int_db)
env.process(bsc_procedure)
#print('Harvesting of %s started at %.5f' % (et.full_name,env.now))
yield env.timeout((donor.worker_queue.count/donor.worker_queue.capacity)*(lab.harvesting_time)+int_db.FIXED_HARVESTING_TIME)
#print('Harvesting of %s finished at %.5f' % (et.full_name,env.now))
#print('Harvesting block terminated for %s at %.5f' % (et.full_name,env.now))
harvested = 1
break
else:
worker_index += 1
if harvested == 1:
break
else:
yield env.timeout(0.0001)
continue
# else:
# yield env.timeout(0.0001)
# continue
# print('Worker queue right before finishing the processing')
# print(et.full_name)
# worker_counts = [lab.list_of_workers[worker_index].count for worker_index in range(TOTAL_WORKERS)]
# print(worker_counts)
# print(env.now)
# print('Harvested flasks per passage at %.5f' % env.now)
# print(donor.harvested_per_passage[donor.passage_no-1])
env.exit()
| mit | -4,032,397,442,333,823,000 | 28.762295 | 154 | 0.504396 | false |
saifuddin778/LDA | test.py | 1 | 1104 | from __future__ import division
import sys
import copy
sys.dont_write_bytecode = True
"""
Testing LDA
"""
def test_LDA():
from LDA import LDA
x = [
[2.95, 6.63],
[2.53, 7.79],
[3.57, 5.65],
[3.16, 5.47],
[2.58, 4.46],
[2.16, 6.22],
[3.27, 3.52]
]
e = copy.deepcopy(x)
y = [1,1,1,1,2,2,2]
t = LDA(x, y)
for a in e:
r = t.predict(a)
print max(r, key=r.get)
"""
Testing multiclass LDA
"""
def test_multiclass_LDA():
from LDA import multiclass_LDA
from sklearn import datasets
print 'data loaded..'
iris = datasets.load_iris()
x = iris['data']
y = iris['target']
l = copy.deepcopy(x)
m = copy.deepcopy(y)
t = multiclass_LDA(x, y)
for a,b in zip(l, m):
print t.predict(a), b
#t = test_LDA()
#t = test_multiclass_LDA()
if __name__ == '__main__' and len(sys.argv) == 2:
print sys.argv
method_to_test = sys.argv[1]
if method_to_test == 'LDA':
test_LDA()
elif method_to_test == 'multiclass_LDA':
test_multiclass_LDA()
| mit | -6,399,753,658,631,220,000 | 19.444444 | 49 | 0.521739 | false |
michelesr/network-monitor-server | src/hardware.py | 1 | 2977 | #! /usr/bin/env python
from threading import Thread
from sys import exit
from time import sleep
from psutil import cpu_percent, virtual_memory, swap_memory, \
net_io_counters, disk_io_counters
"""
Network monitoring framework
Hardware resources module
This module is in charge of obtaining the state of the hardware
resources from the operating system, in particular:
 - CPU usage;
 - main memory usage;
 - swap usage;
 - disk usage for reads and writes;
 - inbound and outbound network bandwidth.
This module uses the external psutil library, which can be installed
through a Python package manager such as PIP (Alternative Python
Package Installer).
"""
class Hardware(Thread):
def __init__(self):
"""
        Thread constructor: initializes the thread, sets it as a
        daemon and initializes the object's attributes
"""
Thread.__init__(self)
self.setDaemon(True)
self.cpu=0
self.cores = len(cpu_percent(percpu=True))
self.ram=0
self.total_ram = virtual_memory().total
self.swap=0
self.total_swap = swap_memory().total
self.read=0
self.write=0
self.net_in=0
self.net_out=0
def run(self):
"""
        Run method of the thread: collects the hardware information
        in real time through psutil.
"""
try:
while True:
# disk, net (temp)
self.read_tmp = disk_io_counters().read_bytes
self.write_tmp = disk_io_counters().write_bytes
self.net_in_tmp = net_io_counters().bytes_recv
self.net_out_tmp = net_io_counters().bytes_sent
# cpu
self.cpu = cpu_percent(interval=1)
# disk
self.read = \
disk_io_counters().read_bytes - self.read_tmp
self.write = \
disk_io_counters().write_bytes - self.write_tmp
# net
self.net_in = \
net_io_counters().bytes_recv - self.net_in_tmp
self.net_out = \
net_io_counters().bytes_sent - self.net_out_tmp
# memories
self.ram = virtual_memory().percent
self.swap = swap_memory().percent
sleep(1)
except:
exit()
def get_results(self):
"""
        Returns the hardware information in the form
        of a dictionary
"""
return {
'cpu': self.cpu,
'cores': self.cores,
'ram': self.ram,
'total_ram': self.total_ram,
'swap': self.swap,
'total_swap': self.total_swap,
'disk_r': self.read,
'disk_w': self.write,
'net_in': self.net_in,
'net_out': self.net_out,
}
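# Hedged usage sketch (added comments, not part of the original module): the monitor runs
# as a daemon thread, so a caller starts it and then polls get_results() periodically.
#
#     hw = Hardware()
#     hw.start()
#     time.sleep(2)            # let the thread collect at least one sample
#     print(hw.get_results())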
| gpl-3.0 | -2,589,567,619,368,150,500 | 25.81982 | 72 | 0.543164 | false |
sjmh/cobbler | koan/register.py | 1 | 5415 | """
registration tool for cobbler.
Copyright 2009 Red Hat, Inc and Others.
Michael DeHaan <michael.dehaan AT gmail>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from __future__ import print_function
import os
import traceback
from optparse import OptionParser
import time
import sys
import socket
from . import utils
from cexceptions import InfoException
import string
# usage: cobbler-register [--server=server] [--fqdn=hostname] --profile=foo
def main():
"""
Command line stuff...
"""
p = OptionParser()
p.add_option(
"-s",
"--server",
dest="server",
default=os.environ.get("COBBLER_SERVER", ""),
help="attach to this cobbler server"
)
p.add_option(
"-f",
"--fqdn",
dest="hostname",
default="",
help="override the discovered hostname"
)
p.add_option(
"-p",
"--port",
dest="port",
default="80",
help="cobbler port (default 80)"
)
p.add_option(
"-P",
"--profile",
dest="profile",
default="",
help="assign this profile to this system"
)
p.add_option(
"-b",
"--batch",
dest="batch",
action="store_true",
help="indicates this is being run from a script"
)
(options, args) = p.parse_args()
# if not os.getuid() == 0:
# print("koan requires root access")
# return 3
try:
k = Register()
k.server = options.server
k.port = options.port
k.profile = options.profile
k.hostname = options.hostname
k.batch = options.batch
k.run()
except Exception as e:
(xa, xb, tb) = sys.exc_info()
try:
getattr(e, "from_koan")
print(str(e)[1:-1]) # nice exception, no traceback needed
except:
print(xa)
print(xb)
print(string.join(traceback.format_list(traceback.extract_tb(tb))))
return 1
return 0
class Register:
def __init__(self):
"""
Constructor. Arguments will be filled in by optparse...
"""
self.server = ""
self.port = ""
self.profile = ""
self.hostname = ""
self.batch = ""
def run(self):
"""
Commence with the registration already.
"""
# not really required, but probably best that ordinary users don't try
# to run this not knowing what it does.
if os.getuid() != 0:
raise InfoException("root access is required to register")
print("- preparing to koan home")
self.conn = utils.connect_to_server(self.server, self.port)
reg_info = {}
print("- gathering network info")
netinfo = utils.get_network_info()
reg_info["interfaces"] = netinfo
print("- checking hostname")
sysname = ""
if self.hostname != "" and self.hostname != "*AUTO*":
hostname = self.hostname
sysname = self.hostname
else:
hostname = socket.getfqdn()
if hostname == "localhost.localdomain":
if self.hostname == '*AUTO*':
hostname = ""
sysname = str(time.time())
else:
raise InfoException(
"must specify --fqdn, could not discover")
if sysname == "":
sysname = hostname
if self.profile == "":
raise InfoException("must specify --profile")
# we'll do a profile check here just to avoid some log noise on the remote end.
# network duplication checks and profile checks also happen on the
# remote end.
avail_profiles = self.conn.get_profiles()
matched_profile = False
for x in avail_profiles:
if x.get("name", "") == self.profile:
matched_profile = True
break
reg_info['name'] = sysname
reg_info['profile'] = self.profile
reg_info['hostname'] = hostname
if not matched_profile:
raise InfoException(
"no such remote profile, see 'koan --list-profiles'")
if not self.batch:
self.conn.register_new_system(reg_info)
print("- registration successful, new system name: %s" % sysname)
else:
try:
self.conn.register_new_system(reg_info)
print("- registration successful, new system name: %s"
% sysname)
except:
traceback.print_exc()
print("- registration failed, ignoring because of --batch")
return
if __name__ == "__main__":
main()
| gpl-2.0 | -5,991,250,616,883,523,000 | 27.650794 | 87 | 0.563804 | false |
google-research/understanding-curricula | utils/__init__.py | 1 | 1052 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .utils import get_model, get_optimizer, get_scheduler, LossTracker, AverageMeter, ProgressMeter, accuracy, balance_order_val,balance_order,get_pacing_function,run_cmd
from .get_data import get_dataset
from .cifar_label import CIFAR100N
__all__ = [ "get_dataset", "ImageMemFolder", "AverageMeter", "ProgressMeter", "accuracy", "get_optimizer", "get_scheduler", "get_model", "LossTracker","cifar_label","balance_order_val","balance_order","get_pacing_function","run_cmd"]
| apache-2.0 | -3,094,138,163,254,975,500 | 54.421053 | 233 | 0.759506 | false |
Azure/azure-sdk-for-python | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2018_02_01/aio/operations/_storage_accounts_operations.py | 1 | 44288 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class StorageAccountsOperations:
"""StorageAccountsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2018_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def check_name_availability(
self,
account_name: "_models.StorageAccountCheckNameAvailabilityParameters",
**kwargs
) -> "_models.CheckNameAvailabilityResult":
"""Checks that the storage account name is valid and is not already in use.
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: ~azure.mgmt.storage.v2018_02_01.models.StorageAccountCheckNameAvailabilityParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CheckNameAvailabilityResult, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2018_02_01.models.CheckNameAvailabilityResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CheckNameAvailabilityResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.check_name_availability.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(account_name, 'StorageAccountCheckNameAvailabilityParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CheckNameAvailabilityResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_name_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/checkNameAvailability'} # type: ignore
async def _create_initial(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.StorageAccountCreateParameters",
**kwargs
) -> Optional["_models.StorageAccount"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.StorageAccount"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'StorageAccountCreateParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccount', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignore
async def begin_create(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.StorageAccountCreateParameters",
**kwargs
) -> AsyncLROPoller["_models.StorageAccount"]:
"""Asynchronously creates a new storage account with the specified parameters. If an account is
already created and a subsequent create request is issued with different properties, the
account properties will be updated. If an account is already created and a subsequent create or
update request is issued with the exact same set of properties, the request will succeed.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide for the created account.
:type parameters: ~azure.mgmt.storage.v2018_02_01.models.StorageAccountCreateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either StorageAccount or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.storage.v2018_02_01.models.StorageAccount]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccount"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
account_name=account_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('StorageAccount', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignore
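# Illustrative sketch (not part of the generated SDK code; the resource names
# below are placeholders): a typical call site awaits the LRO poller returned
# by begin_create, e.g.
#
#   poller = await client.storage_accounts.begin_create(
#       "my-resource-group",          # assumed resource group name
#       "mystorageaccount",           # assumed storage account name
#       parameters,                   # a StorageAccountCreateParameters instance
#   )
#   account = await poller.result()   # waits until provisioning has finished
#
# where `client` is an async StorageManagementClient configured for this API
# version.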
async def delete(
self,
resource_group_name: str,
account_name: str,
**kwargs
) -> None:
"""Deletes a storage account in Microsoft Azure.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignore
async def get_properties(
self,
resource_group_name: str,
account_name: str,
**kwargs
) -> "_models.StorageAccount":
"""Returns the properties for the specified storage account including but not limited to name, SKU
name, location, and account status. The ListKeys operation should be used to retrieve storage
keys.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageAccount, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2018_02_01.models.StorageAccount
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccount"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json"
# Construct URL
url = self.get_properties.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageAccount', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_properties.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignore
async def update(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.StorageAccountUpdateParameters",
**kwargs
) -> "_models.StorageAccount":
"""The update operation can be used to update the SKU, encryption, access tier, or tags for a
storage account. It can also be used to map the account to a custom domain. Only one custom
domain is supported per storage account; the replacement/change of custom domain is not
supported. In order to replace an old custom domain, the old value must be cleared/unregistered
before a new value can be set. The update of multiple properties is supported. This call does
not change the storage keys for the account. If you want to change the storage account keys,
use the regenerate keys operation. The location and name of the storage account cannot be
changed after creation.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide for the updated account.
:type parameters: ~azure.mgmt.storage.v2018_02_01.models.StorageAccountUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageAccount, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2018_02_01.models.StorageAccount
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccount"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'StorageAccountUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageAccount', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignore
def list(
self,
**kwargs
) -> AsyncIterable["_models.StorageAccountListResult"]:
"""Lists all the storage accounts available under the subscription. Note that storage keys are not
returned; use the ListKeys operation for this.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either StorageAccountListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.storage.v2018_02_01.models.StorageAccountListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('StorageAccountListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/storageAccounts'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.StorageAccountListResult"]:
"""Lists all the storage accounts available under the given resource group. Note that storage keys
are not returned; use the ListKeys operation for this.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either StorageAccountListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.storage.v2018_02_01.models.StorageAccountListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('StorageAccountListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts'} # type: ignore
async def list_keys(
self,
resource_group_name: str,
account_name: str,
**kwargs
) -> "_models.StorageAccountListKeysResult":
"""Lists the access keys for the specified storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageAccountListKeysResult, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2018_02_01.models.StorageAccountListKeysResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountListKeysResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json"
# Construct URL
url = self.list_keys.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageAccountListKeysResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/listKeys'} # type: ignore
async def regenerate_key(
self,
resource_group_name: str,
account_name: str,
regenerate_key: "_models.StorageAccountRegenerateKeyParameters",
**kwargs
) -> "_models.StorageAccountListKeysResult":
"""Regenerates one of the access keys for the specified storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param regenerate_key: Specifies name of the key which should be regenerated -- key1 or key2.
:type regenerate_key: ~azure.mgmt.storage.v2018_02_01.models.StorageAccountRegenerateKeyParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageAccountListKeysResult, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2018_02_01.models.StorageAccountListKeysResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountListKeysResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.regenerate_key.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(regenerate_key, 'StorageAccountRegenerateKeyParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageAccountListKeysResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
regenerate_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/regenerateKey'} # type: ignore
async def list_account_sas(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.AccountSasParameters",
**kwargs
) -> "_models.ListAccountSasResponse":
"""List SAS credentials of a storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide to list SAS credentials for the storage account.
:type parameters: ~azure.mgmt.storage.v2018_02_01.models.AccountSasParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ListAccountSasResponse, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2018_02_01.models.ListAccountSasResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListAccountSasResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.list_account_sas.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'AccountSasParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ListAccountSasResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_account_sas.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListAccountSas'} # type: ignore
async def list_service_sas(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.ServiceSasParameters",
**kwargs
) -> "_models.ListServiceSasResponse":
"""List service SAS credentials of a specific resource.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide to list service SAS credentials.
:type parameters: ~azure.mgmt.storage.v2018_02_01.models.ServiceSasParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ListServiceSasResponse, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2018_02_01.models.ListServiceSasResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListServiceSasResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.list_service_sas.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ServiceSasParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ListServiceSasResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_service_sas.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListServiceSas'} # type: ignore
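# Illustrative usage sketch (added; not part of the generated SDK code, and the
# variable names are assumptions): non-LRO operations on this class are awaited
# directly, while the list operations return async pagers, e.g.
#
#   keys = await client.storage_accounts.list_keys("my-rg", "mystorageaccount")
#   first_key = keys.keys[0].value
#
#   async for account in client.storage_accounts.list_by_resource_group("my-rg"):
#       print(account.name, account.location)
#
# `list` and `list_by_resource_group` return AsyncItemPaged objects, so they are
# iterated with `async for` rather than awaited.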
| mit | 5,031,194,970,118,649,000 | 51.849642 | 198 | 0.658395 | false |
toros-astro/corral | tests/tests.py | 1 | 2690 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Cabral, Juan; Sanchez, Bruno & Berois, Martín
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
# DOCS
# =============================================================================
"""This are the tests for de tests pipeline"""
# =============================================================================
# CLASSES
# =============================================================================
# =============================================================================
# IMPORTS
# =============================================================================
from corral import qa
from . import steps, commands
# =============================================================================
# EXAMPLES
# =============================================================================
class ExampleTestStep1(qa.TestCase):
subject = steps.Step1
def setup(self):
pass
def validate(self):
pass
class ExampleTestCommand(qa.TestCase):
subject = commands.TestAPICommand
def setup(self):
pass
def validate(self):
pass
| bsd-3-clause | -3,456,926,474,809,630,700 | 33.922078 | 79 | 0.573075 | false |
hmenke/espresso | testsuite/python/coulomb_cloud_wall_duplicated.py | 1 | 4487 |
#
# Copyright (C) 2013-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Tests particle property setters/getters
from __future__ import print_function
import unittest as ut
import numpy as np
import espressomd
import espressomd.cuda_init
import espressomd.electrostatics
from espressomd import scafacos
from tests_common import abspath
class CoulombCloudWall(ut.TestCase):
if "ELECTROSTATICS" in espressomd.features():
"""This compares p3m, p3m_gpu, scafacos_p3m and scafacos_p2nfft
electrostatic forces and energy against stored data."""
S = espressomd.System(box_l=[1.0, 1.0, 1.0])
S.seed = S.cell_system.get_state()['n_nodes'] * [1234]
np.random.seed(S.seed)
forces = {}
tolerance = 1E-3
# Reference energy from p3m in the tcl test case
reference_energy = 2. * 148.94229549
def setUp(self):
self.S.box_l = (10, 10, 20)
self.S.time_step = 0.01
self.S.cell_system.skin = 0.4
# Clear actors that might be left from prev tests
if self.S.actors:
del self.S.actors[0]
self.S.part.clear()
data = np.genfromtxt(
abspath("data/coulomb_cloud_wall_duplicated_system.data"))
# Add particles to the system and store reference forces in a dict
# Input format: id pos q f
for particle in data:
id = particle[0]
pos = particle[1:4]
q = particle[4]
f = particle[5:]
self.S.part.add(id=int(id), pos=pos, q=q)
self.forces[id] = f
def compare(self, method_name, energy=True):
# Compare forces and energy now in the system to stored ones
# Force
force_abs_diff = 0.
for p in self.S.part:
force_abs_diff += abs(
np.sqrt(sum((p.f - self.forces[p.id])**2)))
force_abs_diff /= len(self.S.part)
# Energy
if energy:
energy_abs_diff = abs(
self.S.analysis.energy()["total"] - self.reference_energy)
self.assertTrue(energy_abs_diff <= self.tolerance, "Absolute energy difference " +
str(energy_abs_diff) + " too large for " + method_name)
self.assertTrue(force_abs_diff <= self.tolerance, "Absolute force difference " +
str(force_abs_diff) + " too large for method " + method_name)
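# Clarifying note (added; not in the original test): the quantity checked above
# is the mean absolute deviation of the per-particle force vectors,
#   force_abs_diff = (1/N) * sum_i |f_i - f_i_ref|,
# and, when energy=True, the absolute deviation of the total electrostatic
# energy from reference_energy; both must stay below self.tolerance for a
# method to pass.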
# Tests for individual methods
if "P3M" in espressomd.features():
def test_p3m(self):
self.S.actors.add(
espressomd.electrostatics.P3M(
prefactor=1, r_cut=1.001, accuracy=1e-3,
mesh=[64, 64, 128], cao=7, alpha=2.70746, tune=False))
self.S.integrator.run(0)
self.compare("p3m", energy=True)
if espressomd.has_features(["ELECTROSTATICS", "CUDA"]) and not \
str(espressomd.cuda_init.CudaInitHandle().device_list[0]) == "Device 687f":
def test_p3m_gpu(self):
self.S.actors.add(
espressomd.electrostatics.P3MGPU(
prefactor=1,
r_cut=1.001,
accuracy=1e-3,
mesh=[64, 64, 128],
cao=7,
alpha=2.70746,
tune=False))
self.S.integrator.run(0)
self.compare("p3m_gpu", energy=False)
def test_zz_deactivation(self):
# Is the energy 0, if no methods active
self.assertTrue(self.S.analysis.energy()["total"] == 0.0)
if __name__ == "__main__":
ut.main()
| gpl-3.0 | 4,426,372,993,255,538,700 | 37.350427 | 98 | 0.560508 | false |
mwoc/pydna | dna/components/heatex.py | 1 | 14848 | import scipy
import scipy.optimize
import warnings
# Some short-hands:
from dna.states import state
from dna.iterate import IterateParamHelper
from dna.component import Component
from dna.vendor import refprop as rp
class ConvergenceError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class PinchCalc:
def __init__ (self, n1, n2, n3, n4, Nseg, dTmin):
self.n1 = n1
self.n2 = n2
self.n3 = n3
self.n4 = n4
self.Nseg = Nseg
self.dTmin = dTmin
def check(self, n1, n2, n3, n4):
dH_H = (n1['h']-n2['h'])/self.Nseg
dH_C = (n4['h']-n3['h'])/self.Nseg
dT_left = n1['t'] - n4['t']
dT_right = n2['t'] - n3['t']
dT_pinch = min(dT_left, dT_right)
pinch_pos = 0
Th = []
Tc = []
n1_2 = {
'media': n1['media'],
'y': n1['y'],
'cp': n1['cp'],
'p': n1['p'],
'h': n1['h']
}
n3_4 = {
'media': n3['media'],
'y': n3['y'],
'cp': n3['cp'],
'p': n3['p'],
'h': n4['h'] # Note n4 usage
}
for i in range(self.Nseg+1):
# Be explicit about the copying
n2_ = n1_2.copy()
n3_ = n3_4.copy()
n2_['h'] = n1['h'] - dH_H*i
n3_['h'] = n4['h'] - dH_C*i
T2_ = state(n2_)['t']
Th.append(T2_)
T3_ = state(n3_)['t']
Tc.append(T3_)
if T2_ - T3_ < dT_pinch:
pinch_pos = i
dT_pinch = T2_ - T3_
# Get effectiveness from NTU method
Q_max_cold = n3['mdot'] * (n1['h'] - n3['h'])
Q_max_hot = n1['mdot'] * (n1['h'] - n3['h'])
Q_max = min(abs(Q_max_cold), abs(Q_max_hot))
Q = n1['mdot'] * (n1['h'] - n2['h'])
if Q > 0 and Q_max > 0:
# Guard against division by zero
eff = Q / Q_max
else:
eff = 0
return {'dTmin': dT_pinch, 'Th': Th, 'Tc': Tc, 'percent': pinch_pos / self.Nseg, 'eff': eff, 'Q': Q}
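# Clarifying note (added): 'eff' above is the heat exchanger effectiveness,
#   eff = Q / Q_max,
# with Q the transferred heat flow and Q_max the smaller of the two ideal heat
# flows computed from the inlet states. 'percent' locates the pinch point along
# the exchanger (0 = hot-inlet end, 1 = hot-outlet end) and 'Th'/'Tc' are the
# segment-wise hot and cold temperature profiles.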
def iterate(self, side=1):
'''
Try to find the heat exchanger configuration that satisfies the pinch
point constraint while keeping the exergy loss as low as possible.
Ideally the pinch point lies close to the hot side, so that the cold
flow is heated up as far as possible. The `side` argument selects where
the temperature difference is first imposed: side 1 pins it at the hot
end (nodes 1 and 4), side 2 at the cold end (nodes 2 and 3).
'''
dTmin = self.dTmin
# Try pinch at cold side (cold in, hot out)
# Iteration params
tol = 0.1
delta = 1
convergence = 1
currIter = IterateParamHelper()
i = 0
dT_left = dTmin
result = {}
find_mdot = False
find_mdot1 = False
find_mdot3 = False
# If enough info is known about the heat transfer, we can deduce an mdot
if not 'mdot' in self.n1:
find_mdot = find_mdot1 = True
#
elif not 'mdot' in self.n3:
find_mdot = find_mdot3 = True
#
print('n1 = ', self.n1['t'])
print('n3 = ', self.n3['t'])
# Tolerance of 0.1 K is close enough
# do NOT alter the convergence rate parameter; too high a value breaks the design
while abs(delta) > tol and i < 20:
# Make local copies of input
_n1 = self.n1.copy()
_n2 = self.n2.copy()
_n3 = self.n3.copy()
_n4 = self.n4.copy()
if not find_mdot and (_n1['mdot'] <= 0 or _n3['mdot'] <= 0):
# No iteration possible, early return
result['pinch'] = self.check(_n1, _n2, _n3, _n4)
return result['pinch']
if len(currIter.x) > 0:
dT_left = currIter.optimize(dT_left, manual = True)
else:
if side == 1:
dT_left = - 0.25 * (_n1['t'] - _n3['t'])
else:
dT_left = dTmin
if side == 1:
# Side 1 is hot side, 1 and 4
_n4['t'] = _n1['t'] + dT_left
if _n4['t'] > _n1['t']:
_n4['t'] = _n1['t'] - 2*dTmin
dT_left = -2*dTmin
state(_n4)
print('n4 = ', _n4['t'])
_n2['h'] = (_n1['h'] * _n1['mdot'] - (_n3['mdot'] * (_n4['h'] - _n3['h']))) / _n1['mdot']
state(_n2)
if _n2['t'] < _n3['t']:
print('Pretty sure this should be analysed from side 2')
print('n2 = ', _n2['t'])
# Update looping parameters
delta = _n2['t'] - (_n3['t'] + dTmin)
elif side == 2:
# Side 2 is cold side, 2 and 3
_n2['t'] = _n3['t'] - dT_left
if _n2['t'] < _n3['t']:
_n2['t'] = _n3['t'] - dTmin
dT_left = dTmin
state(_n2)
print('n2 = ', _n2['t'])
_n4['h'] = (_n3['h'] * _n3['mdot'] + (_n1['mdot'] * (_n1['h'] - _n2['h']))) / _n3['mdot']
state(_n4)
print('n4 = ', _n4['t'])
if _n4['t'] > _n1['t']:
print('Pretty sure this should be analysed from side 1')
# Update looping parameters
delta = _n1['t'] - (_n4['t'] + dTmin)
else:
# Assume one side is fixed, depending on if find_mdot1 or find_mdot3 is set
if find_mdot1:
# t2 and m1 unknown
_n2['t'] = _n3['t'] - dT_left
if _n2['t'] < _n3['t']:
_n2['t'] = _n3['t'] - dTmin
dT_left = dTmin
if 'tmin' in _n1 and _n2['t'] < _n1['tmin']:
_n2['t'] = _n1['tmin']
dT_left = _n3['t'] - _n2['t']
state(_n2)
_n1['mdot'] = ((_n4['h'] - _n3['h']) * _n3['mdot']) / (_n1['h'] - _n2['h'])
delta = _n1['t'] - (_n4['t'] + dTmin)
elif find_mdot3:
# t4 and m3 unknown
raise Exception('Not implemented')
#n3['mdot'] = ((n1['h'] - n2['h']) * n1['mdot']) / (n4['h'] - n3['h'])
else:
print(_n1)
print(_n2)
print(_n3)
print(_n4)
raise Exception('Wrong unknowns')
# Only accept positive delta for internal pinch calculation
if delta >= 0 - tol:
# At least the pinch at in/outlets is ok. Now check
# it internally
try:
# Check internal pinch too
result['pinch'] = self.check(_n1, _n2, _n3, _n4)
except rp.RefpropError as e:
# Ignore me
print(e)
print('Next')
else:
# Calculation succeeded
delta = result['pinch']['dTmin'] - dTmin
currIter.delta = delta # commented out to prevent IterateParamHelper from guessing
currIter.append(dT_left, delta)
i = i + 1
print('Iteration: ', i, '. Residual: ', currIter.y[-1])
if abs(delta) > tol:
print(delta, convergence, i)
raise ConvergenceError('No convergence reached')
if not 'pinch' in result:
warnings.warn('No pinch solution found', RuntimeWarning)
return False
else:
self.n1.update(_n1)
self.n2.update(_n2)
self.n3.update(_n3)
self.n4.update(_n4)
return result['pinch']
class PinchHex(Component):
def nodes(self, in1, out1, in2, out2):
self.addInlet(in1)
self.addInlet(in2)
self.addOutlet(out1)
self.addOutlet(out2)
return self
def calc(self, Nseg = 11, dTmin = 5, Q = False):
n = self.getNodes()
n1 = n['i'][0]
n2 = n['o'][0]
n3 = n['i'][1]
n4 = n['o'][1]
# Find states for all known inputs:
state(n1) # Hot inlet
state(n3) # Cold inlet
n2['p'] = n1['p']
n2['y'] = n1['y']
if 'media' in n1:
n2['media'] = n1['media']
if 'cp' in n2:
n2['cp'] = n1['cp']
n4['p'] = n3['p']
n4['y'] = n3['y']
if 'media' in n3:
n4['media'] = n3['media']
if 'cp' in n3:
n4['cp'] = n3['cp']
if 'mdot' in n1:
n2['mdot'] = n1['mdot']
if 'mdot' in n3:
n4['mdot'] = n3['mdot']
if n1['t'] < n3['t']:
# Act as if this component is bypassed
n2['t'] = n1['t']
state(n2)
n4['t'] = n3['t']
state(n4)
warnings.warn(self.name + " - cold inlet has a higher temperature than the hot inlet; this is not possible, so heat exchange is set to 0", RuntimeWarning)
return self
calc = False
if 'q' in n2 or 't' in n2:
n2h = state(n2.copy())['h']
# Enthalpy in hot fluid cannot increase
if n2h >= n1['h']:
n2['h'] = n1['h']
state(n2)
if 't' in n4 or 'q' in n4:
n4h = state(n4.copy())['h']
# Enthalpy in cold fluid cannot decrease
if n4h <= n3['h']:
n4['h'] = n3['h']
state(n4) # Cold outlet
# Initiate pincher for later use
pincher = PinchCalc(n1, n2, n3, n4, Nseg, dTmin)
if 'h' in n1 and 'h' in n2 and 'mdot' in n1:
Q = n1['mdot'] * (n1['h'] - n2['h'])
if 'h' in n3 and 'h' in n4 and 'mdot' in n3:
Q = n3['mdot'] * (n4['h'] - n3['h'])
# Find any unknown inputs:
if not 't' in n2 and not 't' in n4:
# Find pinch by iteration, for given mass flow rates and inlet temperatures
calc = True
if n1['mdot'] <= 0 or n3['mdot'] <= 0:
# No heat exchange at all
n2['t'] = n1['t']
state(n2)
n4['t'] = n3['t']
state(n4)
else:
# First try one side of the HEX
try:
pinch = pincher.iterate(side = 1)
except RuntimeError as e:
print('First side failed, trying second. Reason:')
print(e)
# If that failed, try from the other
try:
pinch = pincher.iterate(side = 2)
except rp.RefpropError as e:
print('Second side iteration also failed.')
raise Exception(e)
except rp.RefpropError as e:
print('First side failed, trying second. Reason:')
print(e)
# If that failed, try from the other
try:
pinch = pincher.iterate(side = 2)
except rp.RefpropError as e:
print('Second side iteration also failed.')
raise Exception(e)
except ConvergenceError as e:
print('Convergence failed, trying other side', e)
try:
pinch = pincher.iterate(side = 2)
except rp.RefpropError as e:
print('Second side iteration also failed.')
raise Exception(e)
except Exception as e:
print('Unexpected exception: ', e)
raise(e)
finally:
print('Pinch - {} - following outlet temperatures found:'.format(self.name))
print('T2: ', n2['t'], ' T4: ', n4['t'])
elif not 'h' in n4:
# Calculate T4 for given mass flow rates and other temperatures
calc = True
if 'mdot' in n1 and 'mdot' in n3:
n4['h'] = (n3['h'] * n3['mdot'] + (n1['mdot'] * (n1['h'] - n2['h']))) / n3['mdot']
state(n4)
else:
n1['mdot'] = Q / (n1['h'] - n2['h'])
try:
pinch = pincher.iterate(side = False)
except Exception as e:
raise(e)
elif not 'h' in n2:
# Calculate T2 for given mass flow rates and other temperatures
calc = True
if 'mdot' in n1 and 'mdot' in n3:
n2['h'] = (n1['h'] * n1['mdot'] - (n3['mdot'] * (n4['h'] - n3['h']))) / n1['mdot']
state(n2)
else:
n3['mdot'] = Q / (n4['h'] - n3['h'])
try:
pinch = pincher.iterate(side = False)
except Exception as e:
raise(e)
if not 'mdot' in n3:
# Calculate m3 for given m1 or Q, and given temperatures
calc = True
if not 'mdot' in n1:
n1['mdot'] = Q / (n1['h'] - n2['h'])
n3['mdot'] = ((n1['h'] - n2['h']) * n1['mdot']) / (n4['h'] - n3['h'])
elif not 'mdot' in n1:
# Calculate m1 for given m3 or Q, and given temperatures
calc = True
if not 'mdot' in n3:
n3['mdot'] = Q / (n4['h'] - n3['h'])
n1['mdot'] = ((n4['h'] - n3['h']) * n3['mdot']) / (n1['h'] - n2['h'])
if calc == False:
print('Model overly specified for heatex `{}`'.format(self.name))
n2['mdot'] = n1['mdot']
n4['mdot'] = n3['mdot']
# Find the pinch point
pinch = pincher.check(n1, n2, n3, n4)
self.storeResult(pinch)
if abs(pinch['dTmin'] - dTmin) > 0.1:
print('Pinch - {} - value {:.2f} not enforced, found {:.2f} from conditions'.format(self.name, dTmin, pinch['dTmin']))
return self
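# Illustrative sketch (an assumption about typical usage, not taken from this
# file; the node keys and the constructor signature are guessed from how they
# are used above):
#
#   n1 = {'media': 'water', 'y': [1], 'p': 2, 't': 120, 'mdot': 1.0}   # hot inlet
#   n2 = {}                                                            # hot outlet
#   n3 = {'media': 'water', 'y': [1], 'p': 2, 't': 20, 'mdot': 0.8}    # cold inlet
#   n4 = {}                                                            # cold outlet
#
#   recup = PinchHex('recup').nodes(n1, n2, n3, n4)
#   recup.calc(Nseg=11, dTmin=5)
#
# The component then fills in the unknown outlet states (and, if required, a
# missing mass flow) such that the internal pinch is at least dTmin.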
class Condenser(Component):
def nodes(self, in1, out1):
self.addInlet(in1)
self.addOutlet(out1)
return self
def calc(self):
n = self.getNodes()
n1 = n['i'][0]
n2 = n['o'][0]
if 'media' in n1:
n2['media'] = n1['media']
n2['p'] = n1['p']
n2['y'] = n1['y']
n2['mdot'] = n1['mdot']
# If subcooled liquid enters the condenser, pass it through unchanged
Tsat = state({'p': n1['p'], 'y': n1['y'], 'q': 0})['t']
if Tsat > n1['t']:
n2['t'] = n1['t']
else:
n2['t'] = Tsat
state(n2)
return self
| bsd-3-clause | 5,614,497,316,062,480,000 | 29.240326 | 160 | 0.420999 | false |
Nic30/hwtLib | hwtLib/cesnet/mi32/intf.py | 1 | 5517 | from hwt.hdl.constants import READ, WRITE, READ_WRITE
from hwt.interfaces.agents.handshaked import HandshakedAgent
from hwt.interfaces.agents.vldSynced import VldSyncedAgent
from hwt.interfaces.std import VectSignal, Signal
from hwt.simulator.agentBase import SyncAgentBase
from hwt.synthesizer.interface import Interface
from hwt.synthesizer.param import Param
from hwtLib.avalon.mm import AvalonMmAddrAgent
from ipCorePackager.constants import DIRECTION
from pyMathBitPrecise.bit_utils import mask
from hwtSimApi.hdlSimulator import HdlSimulator
class Mi32(Interface):
"""
Simple memory interface similar to AvalonMM
:ivar ~.addr: r/w address
:ivar ~.rd: read enable
:ivar ~.wr: write enable
:ivar ~.ardy: slave address channel ready
:ivar ~.be: data byte mask for write
:ivar ~.dwr: write data
:ivar ~.drd: read data
:ivar ~.drdy: read data valid
.. hwt-autodoc::
"""
def _config(self):
self.DATA_WIDTH = Param(32)
self.ADDR_WIDTH = Param(32)
def _declr(self):
self.addr = VectSignal(self.ADDR_WIDTH)
self.rd = Signal()
self.wr = Signal()
self.ardy = Signal(masterDir=DIRECTION.IN)
self.be = VectSignal(self.DATA_WIDTH // 8)
self.dwr = VectSignal(self.DATA_WIDTH)
self.drd = VectSignal(self.DATA_WIDTH, masterDir=DIRECTION.IN)
self.drdy = Signal(masterDir=DIRECTION.IN)
def _getWordAddrStep(self):
"""
:return: size of one word in unit of address
"""
return int(self.DATA_WIDTH) // self._getAddrStep()
def _getAddrStep(self):
"""
:return: how many bits is one unit of address
(e.g. 8 bits for char * pointer, 36 for 36 bit bram)
"""
return 8
def _initSimAgent(self, sim: HdlSimulator):
self._ag = Mi32Agent(sim, self)
class Mi32Agent(SyncAgentBase):
"""
Simulation agent for Mi32 bus interface
:ivar ~.requests: request data, items are tuples (READ, address)
or (WRITE, address, data, be_mask)
:ivar ~.rData: data read from interface
"""
def __init__(self, sim: HdlSimulator, intf: Mi32, allowNoReset=False):
SyncAgentBase.__init__(self, sim, intf, allowNoReset=allowNoReset)
self.addrAg = Mi32AddrAgent(sim, intf, allowNoReset=allowNoReset)
self.dataAg = Mi32DataAgent(sim, intf, allowNoReset=allowNoReset)
def requests_get(self):
return self.addrAg.data
def requests_set(self, v):
self.addrAg.data = v
requests = property(requests_get, requests_set)
def r_data_get(self):
return self.dataAg.data
def r_data_set(self, v):
self.dataAg.data = v
r_data = property(r_data_get, r_data_set)
def getDrivers(self):
self.setEnable = self.setEnable_asDriver
return (self.dataAg.getMonitors()
+self.addrAg.getDrivers())
def getMonitors(self):
self.setEnable = self.setEnable_asMonitor
return (self.dataAg.getDrivers()
+self.addrAg.getMonitors())
class Mi32AddrAgent(HandshakedAgent):
"""
:ivar ~.requests: request data, items are tuples (READ, address)
or (WRITE, address, data, be_mask)
:note: two valid signals "rd", "wr"
:note: one ready signal "ardy"
:note: on write, set data and byteenable as well
"""
@classmethod
def get_ready_signal(cls, intf):
return intf.ardy
@classmethod
def get_valid_signal(cls, intf):
return (intf.rd, intf.wr)
def get_valid(self):
r = self._vld[0].read()
w = self._vld[1].read()
r.val = r.val | w.val
r.vld_mask = r.vld_mask & w.vld_mask
return r
def set_valid(self, val):
AvalonMmAddrAgent.set_valid(self, val)
def get_data(self):
intf = self.intf
address = intf.addr.read()
byteEnable = intf.be.read()
read = bool(intf.rd.read())
write = bool(intf.wr.read())
wdata = intf.dwr.read()
if read and write:
rw = READ_WRITE
elif read:
rw = READ
elif write:
rw = WRITE
else:
raise AssertionError("This funtion should not be called when data"
"is not ready on interface")
return (rw, address, wdata, byteEnable)
def set_data(self, data):
intf = self.intf
if data is None:
intf.addr.write(None)
intf.be.write(None)
intf.rd.write(0)
intf.wr.write(0)
else:
rw = data[0]
if rw is READ:
_, address = data
rd, wr = 1, 0
be = mask(intf.DATA_WIDTH // 8)
wdata = None
elif rw is WRITE:
rd, wr = 0, 1
_, address, wdata, be = data
elif rw is READ_WRITE:
rd, wr = 1, 1
_, address, wdata, be = data
else:
raise TypeError(f"rw is in invalid format {rw}")
intf.addr.write(address)
intf.rd.write(rd)
intf.wr.write(wr)
intf.be.write(be)
intf.dwr.write(wdata)
class Mi32DataAgent(VldSyncedAgent):
@classmethod
def get_valid_signal(cls, intf: Mi32):
return intf.drdy
def get_data(self):
return self.intf.drd.read()
def set_data(self, data):
self.intf.drd.write(data)
| mit | 5,637,182,407,054,393,000 | 27.734375 | 78 | 0.590357 | false |
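A minimal usage sketch for the Mi32 agent above, assuming a simulated unit `u` that exposes an Mi32 interface named `bus` (that name, and the hwtSimApi module path for the READ/WRITE constants, are assumptions made for illustration). It shows the request-tuple layout documented on Mi32Agent: (READ, address) or (WRITE, address, data, byte_enable_mask).
from hwtSimApi.constants import READ, WRITE  # assumed module path
from pyMathBitPrecise.bit_utils import mask

def queue_example_transfers(u, data_width=32):
    # `u.bus` is a hypothetical Mi32 interface on a simulated unit.
    word_bytes = data_width // 8
    all_bytes = mask(word_bytes)                 # 0xF for a 32-bit bus
    u.bus._ag.requests.extend([
        (WRITE, 0x0000, 0x11223344, all_bytes),  # full-word write
        (WRITE, 0x0004, 0x000000AA, mask(1)),    # write enabling only the lowest byte
        (READ, 0x0000),                          # read the first word back
    ])
    # Read data later appears in u.bus._ag.r_data.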
closeio/nylas | inbox/models/contact.py | 1 | 5371 | from sqlalchemy import Column, Integer, String, Enum, Text, Index, BigInteger, \
ForeignKey
from sqlalchemy.orm import relationship, backref, validates
from sqlalchemy.schema import UniqueConstraint
from inbox.sqlalchemy_ext.util import MAX_TEXT_CHARS
from inbox.models.mixins import (HasPublicID, HasEmailAddress, HasRevisions,
UpdatedAtMixin, DeletedAtMixin)
from inbox.models.base import MailSyncBase
from inbox.models.event import Event
from inbox.models.message import Message
from inbox.models.namespace import Namespace
from inbox.util.encoding import unicode_safe_truncate
class Contact(MailSyncBase, HasRevisions, HasPublicID, HasEmailAddress,
UpdatedAtMixin, DeletedAtMixin):
"""Data for a user's contact."""
API_OBJECT_NAME = 'contact'
namespace_id = Column(BigInteger, nullable=False, index=True)
namespace = relationship(
Namespace,
primaryjoin='foreign(Contact.namespace_id) == remote(Namespace.id)',
load_on_pending=True)
# A server-provided unique ID.
# NB: We specify the collation here so that the test DB gets setup correctly.
uid = Column(String(64, collation='utf8mb4_bin'), nullable=False)
# A constant, unique identifier for the remote backend this contact came
# from. E.g., 'google', 'eas', 'inbox'
provider_name = Column(String(64))
name = Column(Text)
raw_data = Column(Text)
# A score to use for ranking contact search results. This should be
# precomputed to facilitate performant search.
score = Column(Integer)
# Flag to set if the contact is deleted in a remote backend.
# (This is an unmapped attribute, i.e., it does not correspond to a
# database column.)
deleted = False
__table_args__ = (UniqueConstraint('uid', 'namespace_id',
'provider_name'),
Index('idx_namespace_created', 'namespace_id',
'created_at'),
Index('ix_contact_ns_uid_provider_name',
'namespace_id', 'uid', 'provider_name'))
@validates('raw_data')
def validate_text_column_length(self, key, value):
if value is None:
return None
return unicode_safe_truncate(value, MAX_TEXT_CHARS)
@property
def versioned_relationships(self):
return ['phone_numbers']
def merge_from(self, new_contact):
# This must be updated when new fields are added to the class.
merge_attrs = ['name', 'email_address', 'raw_data']
for attr in merge_attrs:
if getattr(self, attr) != getattr(new_contact, attr):
setattr(self, attr, getattr(new_contact, attr))
class PhoneNumber(MailSyncBase, UpdatedAtMixin, DeletedAtMixin):
STRING_LENGTH = 64
contact_id = Column(BigInteger, index=True)
contact = relationship(
Contact,
primaryjoin='foreign(PhoneNumber.contact_id) == remote(Contact.id)',
backref=backref('phone_numbers', cascade='all, delete-orphan'))
type = Column(String(STRING_LENGTH), nullable=True)
number = Column(String(STRING_LENGTH), nullable=False)
class MessageContactAssociation(MailSyncBase):
"""Association table between messages and contacts.
Examples
--------
If m is a message, get the contacts in the to: field with
[assoc.contact for assoc in m.contacts if assoc.field == 'to_addr']
If c is a contact, get messages sent to contact c with
[assoc.message for assoc in c.message_associations if assoc.field ==
... 'to_addr']
"""
contact_id = Column(BigInteger, primary_key=True, index=True)
message_id = Column(ForeignKey(Message.id, ondelete='CASCADE'),
primary_key=True)
field = Column(Enum('from_addr', 'to_addr',
'cc_addr', 'bcc_addr', 'reply_to'))
# Note: The `cascade` properties need to be a parameter of the backref
# here, and not of the relationship. Otherwise a sqlalchemy error is thrown
# when you try to delete a message or a contact.
contact = relationship(
Contact,
primaryjoin='foreign(MessageContactAssociation.contact_id) == '
'remote(Contact.id)',
backref=backref('message_associations', cascade='all, delete-orphan'))
message = relationship(
Message,
backref=backref('contacts', cascade='all, delete-orphan'))
class EventContactAssociation(MailSyncBase):
"""Association table between event participants and contacts."""
contact_id = Column(BigInteger, primary_key=True, index=True)
event_id = Column(ForeignKey(Event.id, ondelete='CASCADE'),
primary_key=True)
field = Column(Enum('participant', 'title', 'description', 'owner'))
# Note: The `cascade` properties need to be a parameter of the backref
# here, and not of the relationship. Otherwise a sqlalchemy error is thrown
# when you try to delete an event or a contact.
contact = relationship(
Contact,
primaryjoin='foreign(EventContactAssociation.contact_id) == '
'remote(Contact.id)',
backref=backref('event_associations', cascade='all, delete-orphan'))
event = relationship(
Event,
backref=backref('contacts', cascade='all, delete-orphan'))
| agpl-3.0 | 1,267,026,911,800,225,000 | 40 | 81 | 0.656675 | false |
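A short, hypothetical usage sketch for the models above: attaching a PhoneNumber to a Contact and linking the contact to a message's to: field. The `db_session`, `namespace` and `message` objects are assumed to already exist; only the model fields and backrefs come from the definitions above.
def add_contact_with_phone(db_session, namespace, message):
    # namespace and message are assumed, pre-existing ORM objects.
    contact = Contact(
        namespace_id=namespace.id,
        provider_name='inbox',
        uid='example-uid-1',
        name='Ada Lovelace',
    )
    contact.email_address = 'ada@example.com'  # setter provided by the HasEmailAddress mixin
    contact.phone_numbers.append(PhoneNumber(type='mobile', number='+1 555 0100'))
    contact.message_associations.append(
        MessageContactAssociation(message=message, field='to_addr'))
    db_session.add(contact)
    db_session.commit()
    return contact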
samsu/api_client | api_client/eventlet_client.py | 1 | 7266 | # Copyright 2015 Fortinet, Inc.
#
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import eventlet
eventlet.monkey_patch(thread=False, socket=False)
import atexit
import time
try:
import Queue
except Exception:
import queue as Queue
from oslo_log import log as logging
from . import base
from . import eventlet_request
from ._i18n import _LE
LOG = logging.getLogger(__name__)
class EventletApiClient(base.ApiClientBase):
"""Eventlet-based implementation of FortiOS ApiClient ABC."""
def __init__(self, api_providers, user, password,
key_file=None, cert_file=None, ca_file=None, ssl_sni=None,
concurrent_connections=base.DEFAULT_CONCURRENT_CONNECTIONS,
gen_timeout=base.GENERATION_ID_TIMEOUT,
use_https=True,
connect_timeout=base.DEFAULT_CONNECT_TIMEOUT,
singlethread=False):
'''Constructor
:param api_providers: a list of tuples of the form: (host, port,
is_ssl).
:param user: login username.
:param password: login password.
:param concurrent_connections: total number of concurrent connections.
:param use_https: whether or not to use https for requests.
:param connect_timeout: connection timeout in seconds.
:param gen_timeout controls how long the generation id is kept
if set to -1 the generation id is never timed out
'''
if not api_providers:
api_providers = []
self._singlethread = singlethread
self._api_providers = set([tuple(p) for p in api_providers])
self._api_provider_data = {} # tuple(semaphore, session_cookie|auth)
for p in self._api_providers:
self._set_provider_data(p, self.get_default_data())
self._user = user
self._password = password
self._key_file = key_file
self._cert_file = cert_file
self._ca_file = ca_file
# SSL server_name_indication
self._ssl_sni = ssl_sni
self._concurrent_connections = concurrent_connections
self._use_https = use_https
self._connect_timeout = connect_timeout
self._config_gen = None
self._config_gen_ts = None
self._gen_timeout = gen_timeout
# Connection pool is a list of queues.
if self._singlethread:
_queue = Queue.PriorityQueue
else:
_queue = eventlet.queue.PriorityQueue
self._conn_pool = _queue()
self._next_conn_priority = 1
for host, port, is_ssl in api_providers:
for __ in range(concurrent_connections):
conn = self._create_connection(host, port, is_ssl)
self._conn_pool.put((self._next_conn_priority, conn))
self._next_conn_priority += 1
atexit.register(self.close_connection)
def get_default_data(self):
if self._singlethread:
return None, None
else:
return eventlet.semaphore.Semaphore(1), None
def acquire_redirect_connection(self, conn_params, auto_login=True,
headers=None):
""" Check out or create connection to redirected API server.
Args:
conn_params: tuple specifying target of redirect, see
self._conn_params()
auto_login: returned connection should have valid session cookie
headers: headers to pass on if auto_login
Returns: An available HTTPConnection instance corresponding to the
specified conn_params. If a connection did not previously
                    exist, new connections are created with the highest priority
in the connection pool and one of these new connections
returned.
"""
result_conn = None
data = self._get_provider_data(conn_params)
if data:
# redirect target already exists in provider data and connections
# to the provider have been added to the connection pool. Try to
# obtain a connection from the pool, note that it's possible that
# all connection to the provider are currently in use.
conns = []
while not self._conn_pool.empty():
priority, conn = self._conn_pool.get_nowait()
if not result_conn and self._conn_params(conn) == conn_params:
conn.priority = priority
result_conn = conn
else:
conns.append((priority, conn))
for priority, conn in conns:
self._conn_pool.put((priority, conn))
# hack: if no free connections available, create new connection
# and stash "no_release" attribute (so that we only exceed
# self._concurrent_connections temporarily)
if not result_conn:
conn = self._create_connection(*conn_params)
conn.priority = 0 # redirect connections have highest priority
conn.no_release = True
result_conn = conn
else:
# redirect target not already known, setup provider lists
self._api_providers.update([conn_params])
self._set_provider_data(conn_params, self.get_default_data())
# redirects occur during cluster upgrades, i.e. results to old
# redirects to new, so give redirect targets highest priority
priority = 0
for i in range(self._concurrent_connections):
conn = self._create_connection(*conn_params)
conn.priority = priority
if i == self._concurrent_connections - 1:
break
self._conn_pool.put((priority, conn))
result_conn = conn
if result_conn:
result_conn.last_used = time.time()
if auto_login and self.auth_data(conn) is None:
self._wait_for_login(result_conn, headers)
return result_conn
def _login(self, conn=None, headers=None):
'''Issue login request and update authentication cookie.'''
cookie = None
g = eventlet_request.LoginRequestEventlet(
self, self._user, self._password, conn, headers)
g.start()
ret = g.join()
if ret:
if isinstance(ret, Exception):
LOG.error(_LE('Login error "%s"'), ret)
raise ret
cookie = ret.getheader("Set-Cookie")
if cookie:
LOG.debug("Saving new authentication cookie '%s'", cookie)
return cookie
# Register as subclass.
base.ApiClientBase.register(EventletApiClient)
| apache-2.0 | -4,340,461,156,337,409,500 | 38.923077 | 79 | 0.604872 | false |
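A brief construction sketch based on the EventletApiClient constructor above; the host names and credentials are placeholders, and a real deployment would normally use a concrete subclass that supplies the connection/request plumbing inherited from base.ApiClientBase.
providers = [
    ('fortigate-1.example.com', 443, True),   # (host, port, is_ssl)
    ('fortigate-2.example.com', 443, True),
]
client = EventletApiClient(
    providers,
    'admin', 's3cret',
    concurrent_connections=4,
    connect_timeout=10,
    singlethread=False,
)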
naototty/vagrant-lxc-ironic | ironic/conductor/task_manager.py | 1 | 14091 | # coding=utf-8
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A context manager to perform a series of tasks on a set of resources.
:class:`TaskManager` is a context manager, created on-demand to allow
synchronized access to a node and its resources.
The :class:`TaskManager` will, by default, acquire an exclusive lock on
a node for the duration that the TaskManager instance exists. You may
create a TaskManager instance without locking by passing "shared=True"
when creating it, but certain operations on the resources held by such
an instance of TaskManager will not be possible. Requiring this exclusive
lock guards against parallel operations interfering with each other.
A shared lock is useful when performing non-interfering operations,
such as validating the driver interfaces.
An exclusive lock is stored in the database to coordinate between
:class:`ironic.conductor.manager` instances, that are typically deployed on
different hosts.
:class:`TaskManager` methods, as well as driver methods, may be decorated to
determine whether their invocation requires an exclusive lock.
The TaskManager instance exposes certain node resources and properties as
attributes that you may access:
task.context
The context passed to TaskManager()
task.shared
False if Node is locked, True if it is not locked. (The
'shared' kwarg arg of TaskManager())
task.node
The Node object
task.ports
Ports belonging to the Node
task.driver
The Driver for the Node, or the Driver based on the
'driver_name' kwarg of TaskManager().
Example usage:
::
with task_manager.acquire(context, node_id) as task:
task.driver.power.power_on(task.node)
If you need to execute task-requiring code in a background thread, the
TaskManager instance provides an interface to handle this for you, making
sure to release resources when the thread finishes (successfully or if
an exception occurs). Common use of this is within the Manager like so:
::
with task_manager.acquire(context, node_id) as task:
<do some work>
task.spawn_after(self._spawn_worker,
utils.node_power_action, task, new_state)
All exceptions that occur in the current GreenThread as part of the
spawn handling are re-raised. You can specify a hook to execute custom
code when such exceptions occur. For example, the hook is a more elegant
solution than wrapping the "with task_manager.acquire()" with a
try..exception block. (Note that this hook does not handle exceptions
raised in the background thread.):
::
def on_error(e):
if isinstance(e, Exception):
...
with task_manager.acquire(context, node_id) as task:
<do some work>
task.set_spawn_error_hook(on_error)
task.spawn_after(self._spawn_worker,
utils.node_power_action, task, new_state)
"""
import functools
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import retrying
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common.i18n import _LW
from ironic.common import states
from ironic import objects
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
def require_exclusive_lock(f):
"""Decorator to require an exclusive lock.
Decorated functions must take a :class:`TaskManager` as the first
parameter. Decorated class methods should take a :class:`TaskManager`
as the first parameter after "self".
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
task = args[0] if isinstance(args[0], TaskManager) else args[1]
if task.shared:
raise exception.ExclusiveLockRequired()
return f(*args, **kwargs)
return wrapper
def acquire(context, node_id, shared=False, driver_name=None):
"""Shortcut for acquiring a lock on a Node.
:param context: Request context.
:param node_id: ID or UUID of node to lock.
:param shared: Boolean indicating whether to take a shared or exclusive
lock. Default: False.
:param driver_name: Name of Driver. Default: None.
:returns: An instance of :class:`TaskManager`.
"""
return TaskManager(context, node_id, shared=shared,
driver_name=driver_name)
class TaskManager(object):
"""Context manager for tasks.
This class wraps the locking, driver loading, and acquisition
of related resources (eg, Node and Ports) when beginning a unit of work.
"""
def __init__(self, context, node_id, shared=False, driver_name=None):
"""Create a new TaskManager.
Acquire a lock on a node. The lock can be either shared or
exclusive. Shared locks may be used for read-only or
non-disruptive actions only, and must be considerate to what
other threads may be doing on the same node at the same time.
:param context: request context
:param node_id: ID or UUID of node to lock.
:param shared: Boolean indicating whether to take a shared or exclusive
lock. Default: False.
:param driver_name: The name of the driver to load, if different
from the Node's current driver.
:raises: DriverNotFound
:raises: NodeNotFound
:raises: NodeLocked
"""
self._spawn_method = None
self._on_error_method = None
self.context = context
self.node = None
self.shared = shared
self.fsm = states.machine.copy()
# NodeLocked exceptions can be annoying. Let's try to alleviate
# some of that pain by retrying our lock attempts. The retrying
# module expects a wait_fixed value in milliseconds.
@retrying.retry(
retry_on_exception=lambda e: isinstance(e, exception.NodeLocked),
stop_max_attempt_number=CONF.conductor.node_locked_retry_attempts,
wait_fixed=CONF.conductor.node_locked_retry_interval * 1000)
def reserve_node():
LOG.debug("Attempting to reserve node %(node)s",
{'node': node_id})
self.node = objects.Node.reserve(context, CONF.host, node_id)
try:
if not self.shared:
reserve_node()
else:
self.node = objects.Node.get(context, node_id)
self.ports = objects.Port.list_by_node_id(context, self.node.id)
self.driver = driver_factory.get_driver(driver_name or
self.node.driver)
# NOTE(deva): this handles the Juno-era NOSTATE state
# and should be deleted after Kilo is released
if self.node.provision_state is states.NOSTATE:
self.node.provision_state = states.AVAILABLE
self.node.save()
self.fsm.initialize(self.node.provision_state)
except Exception:
with excutils.save_and_reraise_exception():
self.release_resources()
def spawn_after(self, _spawn_method, *args, **kwargs):
"""Call this to spawn a thread to complete the task.
The specified method will be called when the TaskManager instance
exits.
:param _spawn_method: a method that returns a GreenThread object
:param args: args passed to the method.
:param kwargs: additional kwargs passed to the method.
"""
self._spawn_method = _spawn_method
self._spawn_args = args
self._spawn_kwargs = kwargs
def set_spawn_error_hook(self, _on_error_method, *args, **kwargs):
"""Create a hook to handle exceptions when spawning a task.
Create a hook that gets called upon an exception being raised
from spawning a background thread to do a task.
:param _on_error_method: a callable object, it's first parameter
should accept the Exception object that was raised.
:param args: additional args passed to the callable object.
:param kwargs: additional kwargs passed to the callable object.
"""
self._on_error_method = _on_error_method
self._on_error_args = args
self._on_error_kwargs = kwargs
def release_resources(self):
"""Unlock a node and release resources.
If an exclusive lock is held, unlock the node. Reset attributes
to make it clear that this instance of TaskManager should no
longer be accessed.
"""
if not self.shared:
try:
if self.node:
objects.Node.release(self.context, CONF.host, self.node.id)
except exception.NodeNotFound:
# squelch the exception if the node was deleted
# within the task's context.
pass
self.node = None
self.driver = None
self.ports = None
self.fsm = None
def _thread_release_resources(self, t):
"""Thread.link() callback to release resources."""
self.release_resources()
def process_event(self, event, callback=None, call_args=None,
call_kwargs=None, err_handler=None):
"""Process the given event for the task's current state.
:param event: the name of the event to process
:param callback: optional callback to invoke upon event transition
:param call_args: optional \*args to pass to the callback method
:param call_kwargs: optional \**kwargs to pass to the callback method
:param err_handler: optional error handler to invoke if the
callback fails, eg. because there are no workers available
(err_handler should accept arguments node, prev_prov_state, and
prev_target_state)
:raises: InvalidState if the event is not allowed by the associated
state machine
"""
# Advance the state model for the given event. Note that this doesn't
# alter the node in any way. This may raise InvalidState, if this event
# is not allowed in the current state.
self.fsm.process_event(event)
# stash current states in the error handler if callback is set,
# in case we fail to get a worker from the pool
if err_handler and callback:
self.set_spawn_error_hook(err_handler, self.node,
self.node.provision_state,
self.node.target_provision_state)
self.node.provision_state = self.fsm.current_state
self.node.target_provision_state = self.fsm.target_state
# set up the async worker
if callback:
# clear the error if we're going to start work in a callback
self.node.last_error = None
if call_args is None:
call_args = ()
if call_kwargs is None:
call_kwargs = {}
self.spawn_after(callback, *call_args, **call_kwargs)
# publish the state transition by saving the Node
self.node.save()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None and self._spawn_method is not None:
# Spawn a worker to complete the task
# The linked callback below will be called whenever:
# - background task finished with no errors.
# - background task has crashed with exception.
# - callback was added after the background task has
# finished or crashed. While eventlet currently doesn't
# schedule the new thread until the current thread blocks
# for some reason, this is true.
# All of the above are asserted in tests such that we'll
# catch if eventlet ever changes this behavior.
thread = None
try:
thread = self._spawn_method(*self._spawn_args,
**self._spawn_kwargs)
# NOTE(comstud): Trying to use a lambda here causes
# the callback to not occur for some reason. This
# also makes it easier to test.
thread.link(self._thread_release_resources)
# Don't unlock! The unlock will occur when the
                # thread finishes.
return
except Exception as e:
with excutils.save_and_reraise_exception():
try:
# Execute the on_error hook if set
if self._on_error_method:
self._on_error_method(e, *self._on_error_args,
**self._on_error_kwargs)
except Exception:
LOG.warning(_LW("Task's on_error hook failed to "
"call %(method)s on node %(node)s"),
{'method': self._on_error_method.__name__,
'node': self.node.uuid})
if thread is not None:
# This means the link() failed for some
# reason. Nuke the thread.
thread.cancel()
self.release_resources()
self.release_resources()
| apache-2.0 | 1,835,137,189,771,582,200 | 38.033241 | 79 | 0.623944 | false |
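A small sketch of how a conductor-style helper combines acquire() with @require_exclusive_lock, mirroring the module docstring above. `spawn_worker`, `do_power_on`, and the maintenance_reason update are placeholders chosen for illustration.
@require_exclusive_lock
def _set_maintenance_reason(task, reason):
    # Requires the exclusive lock because it mutates the node.
    task.node.maintenance_reason = reason
    task.node.save()

def example_usage(context, node_id, spawn_worker, do_power_on):
    # Shared lock: enough for read-only validation.
    with acquire(context, node_id, shared=True) as task:
        task.driver.power.validate(task)
    # Exclusive lock: mutate the node, then hand long-running work to a
    # background thread once the context manager exits.
    with acquire(context, node_id) as task:
        _set_maintenance_reason(task, 'scheduled power-on')
        task.spawn_after(spawn_worker, do_power_on, task)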
bxm156/yelpy | yelpy/yelpy_signer.py | 1 | 1078 | import oauth2
import os
class YelpySigner(object):
def __init__(self, consumer_key=None, consumer_secret=None, token=None, token_secret=None):
super(YelpySigner, self).__init__()
self.consumer_key = consumer_key or os.environ['YELPY_CONSUMER_KEY']
self.consumer_secret = consumer_secret or os.environ['YELPY_CONSUMER_SECRET']
self.token = token or os.environ['YELPY_TOKEN']
self.token_secret = token_secret or os.environ['YELPY_TOKEN_SECRET']
def sign(self, url):
consumer = oauth2.Consumer(self.consumer_key, self.consumer_secret)
oauth_request = oauth2.Request('GET', url, {})
oauth_request.update({
'oauth_nonce': oauth2.generate_nonce(),
'oauth_timestamp': oauth2.generate_timestamp(),
'oauth_token': self.token,
'oauth_consumer_key': self.consumer_key,
})
token = oauth2.Token(self.token, self.token_secret)
oauth_request.sign_request(oauth2.SignatureMethod_HMAC_SHA1(), consumer, token)
return oauth_request.to_url()
| gpl-2.0 | 6,199,489,522,802,149,000 | 43.916667 | 95 | 0.648423 | false |
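A usage sketch for YelpySigner; the credentials here are placeholders (they may also come from the YELPY_* environment variables referenced above).
signer = YelpySigner(
    consumer_key='key', consumer_secret='secret',
    token='token', token_secret='token_secret')
signed_url = signer.sign('http://api.yelp.com/v2/search?term=coffee&location=Chicago')
# signed_url now carries the oauth_* query parameters expected by the Yelp v2 API.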
thomper/ausabavalidator | testdetail.py | 1 | 8026 | import string
from rulesdetail import TRANSACTION_CODES
from rulesdetail import record_type, bsb_number, account_number, indicator, transaction_code, amount, title
from rulesdetail import lodgement_reference, trace_record_bsb, trace_record_account_number, remitter_name
from rulesdetail import withholding_tax
def test_record_type_valid():
all_lines = ('1 (remainder of string should not matter)', )
assert record_type(all_lines, 0) is None
def test_record_type_invalid():
all_lines = tuple((ch for ch in string.printable if ch != '1')) # '1' is the right character
for i, _ in enumerate(all_lines):
assert record_type(all_lines, i) is not None
def test_bsb_number_valid():
all_lines = (' 123-456 ', )
assert bsb_number(all_lines, 0) is None
def test_bsb_number_invalid_hyphen_missing():
all_lines = (' 123456 ', )
assert bsb_number(all_lines, 0) is not None
def test_bsb_number_invalid_hyphen_replaced():
all_lines = (' 123 456 ', )
assert bsb_number(all_lines, 0) is not None
def test_bsb_number_invalid_non_digit_in_first_triplet():
all_lines = (' 1a3-456 ', )
assert bsb_number(all_lines, 0) is not None
def test_bsb_number_invalid_non_digit_in_second_triplet():
all_lines = (' 123-45x ', )
assert bsb_number(all_lines, 0) is not None
def test_account_number_valid_eight_digits_no_hyphen():
all_lines = (' ' * 8 + ' 12345678', )
assert account_number(all_lines, 0) is None
def test_account_number_valid_eight_digits_with_hyphen():
all_lines = (' ' * 8 + '1234-5678', )
assert account_number(all_lines, 0) is None
def test_account_number_valid_nine_digits():
all_lines = (' ' * 8 + '123456789', )
assert account_number(all_lines, 0) is None
def test_account_number_valid_blank():
all_lines = (' ' * 8 + ' ' * 9, ) # for credit card transactions the account number can be blank
assert account_number(all_lines, 0) is None
def test_account_number_valid_employee_benefits_card():
all_lines = (' ' * 8 + ' 999999', ) # for employee benefits card transactions, account number must be '999999'
assert account_number(all_lines, 0) is None
def test_account_number_invalid_left_justified():
all_lines = (' ' * 8 + '123456 ', )
assert account_number(all_lines, 0) is not None
def test_account_number_invalid_bad_character():
all_lines = (' ' * 8 + ' x23456', )
assert account_number(all_lines, 0) is not None
def test_account_number_invalid_all_zeroes():
all_lines = (' ' * 8 + '0' * 9, )
assert account_number(all_lines, 0) is not None
def test_indicator_valid():
good_chars = ' NWXY'
all_lines = tuple((' ' * 17 + ch for ch in good_chars))
for i, _ in enumerate(all_lines):
assert indicator(all_lines, i) is None
def test_indicator_invalid():
good_chars = ' NWXY'
all_lines = tuple((' ' * 17 + ch for ch in string.printable if ch not in good_chars))
for i, _ in enumerate(all_lines):
assert indicator(all_lines, i) is not None
def test_transaction_code_valid():
all_lines = tuple((' ' * 18 + code for code in TRANSACTION_CODES))
for i, _ in enumerate(all_lines):
assert transaction_code(all_lines, i) is None
def test_transaction_code_invalid():
all_lines = tuple((' ' * 18 + code for code in (' ', 'ab', '12', ')(')))
for i, _ in enumerate(all_lines):
assert transaction_code(all_lines, i) is not None
def test_amount_valid():
all_lines = tuple((' ' * 20 + '{:010d}'.format(i) for i in (1, 100, 10000, 1000000000, 9999999999)))
for i, _ in enumerate(all_lines):
assert amount(all_lines, i) is None
def test_amount_invalid():
all_lines = tuple((' ' * 20 + amount_ for amount_ in ('not an amount', 'blah blah ' ' ')))
for i, _ in enumerate(all_lines):
assert amount(all_lines, i) is not None
def test_title_valid_full():
all_lines = (' ' * 30 + 'x' * 32, )
assert title(all_lines, 0) is None
def test_title_valid_left_justified():
all_lines = (' ' * 30 + 'x' * 27 + ' ' * 5, )
assert title(all_lines, 0) is None
def test_title_invalid_right_justified():
all_lines = (' ' * 30 + ' ' * 5 + 'x' * 27, )
assert title(all_lines, 0) is not None
def test_title_invalid_blank():
all_lines = (' ' * 62, )
assert title(all_lines, 0) is not None
def test_lodgement_reference_valid_full():
all_lines = (' ' * 62 + 'x' * 18, )
assert lodgement_reference(all_lines, 0) is None
def test_lodgement_reference_valid_left_justified():
all_lines = (' ' * 60 + 'x' * 14 + ' ' * 4, )
assert lodgement_reference(all_lines, 0) is None
def test_lodgement_reference_invalid_right_justified():
all_lines = (' ' * 60 + ' ' * 4 + 'x' * 14, )
assert lodgement_reference(all_lines, 0) is not None
def test_lodgement_reference_invalid_blank():
all_lines = (' ' * 80, )
assert lodgement_reference(all_lines, 0) is not None
def test_trace_record_bsb_valid():
all_lines = (' ' * 80 + '123-456', )
assert trace_record_bsb(all_lines, 0) is None
def test_trace_record_bsb_invalid_hyphen_missing():
all_lines = (' ' * 80 + '123456', )
assert trace_record_bsb(all_lines, 0) is not None
def test_trace_record_bsb_invalid_hyphen_replaced():
all_lines = (' ' * 80 + '123 456', )
assert trace_record_bsb(all_lines, 0) is not None
def test_trace_record_bsb_invalid_non_digit_in_first_triplet():
all_lines = (' ' * 80 + '1a3-456', )
assert trace_record_bsb(all_lines, 0) is not None
def test_trace_record_bsb_invalid_non_digit_in_second_triplet():
all_lines = (' 123-45x ', )
assert trace_record_bsb(all_lines, 0) is not None
def test_trace_record_account_number_valid_eight_digits_no_hyphen():
all_lines = (' ' * 87 + ' 12345678', )
assert trace_record_account_number(all_lines, 0) is None
def test_trace_record_account_number_valid_eight_digits_with_hyphen():
all_lines = (' ' * 87 + '1234-5678', )
assert trace_record_account_number(all_lines, 0) is None
def test_trace_record_account_number_valid_nine_digits():
all_lines = (' ' * 87 + '123456789', )
assert trace_record_account_number(all_lines, 0) is None
def test_trace_record_account_number_valid_blank():
all_lines = (' ' * 87 + ' ' * 9, ) # for credit card transactions the account number can be blank
assert trace_record_account_number(all_lines, 0) is None
def test_trace_record_account_number_valid_employee_benefits_card():
all_lines = (' ' * 87 + ' 999999', ) # for employee benefits card transactions, account number must be '999999'
assert trace_record_account_number(all_lines, 0) is None
def test_trace_record_account_number_invalid_left_justified():
all_lines = (' ' * 87 + '123456 ', )
assert trace_record_account_number(all_lines, 0) is not None
def test_trace_record_account_number_invalid_bad_character():
all_lines = (' ' * 87 + ' x23456', )
assert trace_record_account_number(all_lines, 0) is not None
def test_trace_record_account_number_invalid_all_zeroes():
all_lines = (' ' * 87 + '0' * 9, )
assert trace_record_account_number(all_lines, 0) is not None
def test_remitter_name_valid():
all_lines = (' ' * 96 + 'X' * 16, )
assert remitter_name(all_lines, 0) is None
def test_remitter_name_invalid_blank():
all_lines = (' ' * 96 + ' ' * 16, )
assert remitter_name(all_lines, 0) is not None
def test_remitter_name_invalid_right_justified():
all_lines = (' ' * 96 + ' ' * 15 + 'X', )
assert remitter_name(all_lines, 0) is not None
def test_withholding_tax_valid_zero():
all_lines = (' ' * 112 + '0' * 8, )
assert withholding_tax(all_lines, 0) is None
def test_withholding_tax_valid_non_zero():
all_lines = (' ' * 112 + '12345678', )
assert withholding_tax(all_lines, 0) is None
def test_withholding_tax_invalid():
all_lines = (' ' * 112 + '1234567X', )
assert withholding_tax(all_lines, 0) is not None
| gpl-3.0 | 6,060,362,650,671,938,000 | 30.598425 | 118 | 0.639048 | false |
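A worked example of assembling one 120-character detail record line using the column offsets exercised by the tests above (record type at 0, BSB at 1-7, account at 8-16, indicator at 17, transaction code at 18-19, amount at 20-29, title at 30-61, lodgement reference at 62-79, trace BSB at 80-86, trace account at 87-95, remitter at 96-111, withholding tax at 112-119). The field values, and the assumption that '53' appears in TRANSACTION_CODES, are illustrative only.
def build_example_detail_line():
    line = (
        '1'                          # 0: record type
        + '123-456'                  # 1-7: BSB
        + ' 12345678'                # 8-16: account number, right-justified
        + ' '                        # 17: indicator
        + '53'                       # 18-19: transaction code (assumed valid)
        + '0000012345'               # 20-29: amount in cents
        + 'J SMITH'.ljust(32)        # 30-61: title of account, left-justified
        + 'INV 1234'.ljust(18)       # 62-79: lodgement reference, left-justified
        + '654-321'                  # 80-86: trace record BSB
        + ' 87654321'                # 87-95: trace record account number
        + 'ACME PTY LTD'.ljust(16)   # 96-111: remitter name, left-justified
        + '00000000'                 # 112-119: withholding tax
    )
    assert len(line) == 120
    return line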
Pulgama/supriya | supriya/examples/grey_wash/sessions/chants.py | 1 | 4782 | import supriya
from .. import project_settings, synthdefs
class SessionFactory(supriya.nonrealtime.SessionFactory):
### CLASS VARIABLES ###
release_time = 15
### SESSION ###
def __session__(self, initial_seed=0, layer_count=10, minutes=2, **kwargs):
self.buffers = []
session = supriya.Session(
input_bus_channel_count=self.input_bus_channel_count,
output_bus_channel_count=self.output_bus_channel_count,
)
with session.at(0):
for say in self.libretto:
buffer_ = session.add_buffer(channel_count=1, file_path=say)
self.buffers.append(buffer_)
for i in range(layer_count):
session.inscribe(
self.global_pattern, duration=60 * minutes, seed=initial_seed + i
)
with session.at(0):
session.add_synth(
synthdef=synthdefs.compressor_synthdef,
add_action="ADD_TO_TAIL",
duration=session.duration + self.release_time,
pregain=0,
)
session.set_rand_seed(initial_seed)
return session
@property
def libretto(self):
libretto = []
text = "videoconferencing"
for voice in ["Daniel", "Tessa", "Karen", "Thomas"]:
libretto.append(supriya.Say(text, voice=voice))
return libretto
### GLOBAL PATTERN ###
@property
def global_pattern(self):
global_pattern = supriya.patterns.Pgpar(
[self.source_pattern, self.effect_pattern], release_time=self.release_time
)
global_pattern = global_pattern.with_bus(release_time=self.release_time)
return global_pattern
### SOURCE PATTERNS ###
@property
def source_pattern(self):
source_pattern = self.one_shot_player_pattern
source_pattern = source_pattern.with_group(release_time=self.release_time)
source_pattern = source_pattern.with_effect(
synthdef=synthdefs.compressor_synthdef,
release_time=self.release_time,
pregain=3,
)
return source_pattern
@property
def one_shot_player_pattern(self):
return supriya.patterns.Pbind(
synthdef=synthdefs.one_shot_player_synthdef,
add_action=supriya.AddAction.ADD_TO_HEAD,
buffer_id=supriya.patterns.Prand(self.buffers, repetitions=None),
delta=5,
duration=0,
gain=supriya.patterns.Pwhite(-12, 12),
pan=supriya.patterns.Pwhite(-1, 1.0),
rate=2 ** supriya.patterns.Pwhite(-1, 0.25),
)
### EFFECT PATTERNS ###
@property
def chorus_pattern(self):
return supriya.patterns.Pbindf(
self.fx_pattern,
synthdef=synthdefs.windowed_chorus_factory.build(
name="chorus8", iterations=8
),
frequency=supriya.patterns.Pwhite() * 2,
gain=3,
)
@property
def effect_pattern(self):
effect_pattern = supriya.patterns.Ppar(
[
self.chorus_pattern,
self.freeverb_pattern,
self.chorus_pattern,
self.pitchshift_pattern,
]
)
effect_pattern = effect_pattern.with_group(release_time=self.release_time)
effect_pattern = effect_pattern.with_effect(
synthdef=synthdefs.compressor_synthdef,
release_time=self.release_time,
pregain=3,
)
return effect_pattern
@property
def freeverb_pattern(self):
return supriya.patterns.Pbindf(
self.fx_pattern,
synthdef=synthdefs.windowed_freeverb_synthdef,
damping=supriya.patterns.Pwhite() ** 0.25,
gain=3,
level=supriya.patterns.Pwhite(0.0, 0.25),
room_size=supriya.patterns.Pwhite() ** 0.25,
)
@property
def fx_pattern(self):
return supriya.patterns.Pbind(
add_action=supriya.AddAction.ADD_TO_TAIL,
delta=supriya.patterns.Pwhite(0, 10),
duration=supriya.patterns.Pwhite(5, 30),
level=supriya.patterns.Pwhite(0.25, 1.0),
)
@property
def pitchshift_pattern(self):
return supriya.patterns.Pbindf(
self.fx_pattern,
synthdef=synthdefs.windowed_pitchshift_synthdef,
gain=3,
pitch_dispersion=supriya.patterns.Pwhite(0.0, 0.02),
pitch_shift=supriya.patterns.Pwhite(-12.0, 12.0),
time_dispersion=supriya.patterns.Pwhite(),
window_size=supriya.patterns.Pwhite(0.1, 2.0),
)
chants = SessionFactory.from_project_settings(project_settings)
| mit | -1,790,003,712,891,903,000 | 31.753425 | 86 | 0.580301 | false |
Lencerf/BiliDan | bilidan.py | 1 | 24251 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Biligrab-Danmaku2ASS
#
# Author: Beining@ACICFG https://github.com/cnbeining
# Author: StarBrilliant https://github.com/m13253
#
# Biligrab is licensed under MIT licence
# Permission has been granted for the use of Danmaku2ASS in Biligrab
#
# Copyright (c) 2014
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the “Software”), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
if sys.version_info < (3, 0):
sys.stderr.write('ERROR: Python 3.0 or newer version is required.\n')
sys.exit(1)
import argparse
import gzip
import json
import hashlib
import io
import logging
import math
import os
import re
import subprocess
import tempfile
import urllib.parse
import urllib.request
import xml.dom.minidom
import zlib
USER_AGENT_PLAYER = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0.2) Gecko/20100101 Firefox/6.0.2 Fengfan/1.0'
USER_AGENT_API = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0.2) Gecko/20100101 Firefox/6.0.2 Fengfan/1.0'
APPKEY = '452d3958f048c02a' # From some source
APPSEC = '' # We shall not release this from now
BILIGRAB_HEADER = {'User-Agent': USER_AGENT_API, 'Cache-Control': 'no-cache', 'Pragma': 'no-cache'}
def biligrab(url, *, debug=False, verbose=False, media=None, comment=None, cookie=None, quality=None, source=None, keep_fps=False, mpvflags=[], d2aflags={}, fakeip=None):
url_get_metadata = 'http://api.bilibili.com/view?'
url_get_comment = 'http://comment.bilibili.com/%(cid)s.xml'
if source == 'overseas':
url_get_media = 'http://interface.bilibili.com/v_cdn_play?'
else:
url_get_media = 'http://interface.bilibili.com/playurl?'
def parse_url(url):
'''Parse a bilibili.com URL
Return value: (aid, pid)
'''
regex = re.compile('(http:/*[^/]+/video/)?av(\\d+)(/|/index.html|/index_(\\d+).html)?(\\?|#|$)')
regex_match = regex.match(url)
if not regex_match:
raise ValueError('Invalid URL: %s' % url)
aid = regex_match.group(2)
pid = regex_match.group(4) or '1'
return aid, pid
def fetch_video_metadata(aid, pid):
'''Fetch video metadata
Arguments: aid, pid
Return value: {'cid': cid, 'title': title}
'''
req_args = {'type': 'json', 'appkey': APPKEY, 'id': aid, 'page': pid}
#req_args['sign'] = bilibili_hash(req_args)
req_args['sign'] = ''
_, response = fetch_url(url_get_metadata+urllib.parse.urlencode(req_args), user_agent=USER_AGENT_API, cookie=cookie)
try:
response = dict(json.loads(response.decode('utf-8', 'replace')))
except (TypeError, ValueError):
raise ValueError('Can not get \'cid\' from %s' % url)
if 'error' in response:
logging.error('Error message: %s' % response.get('error'))
if 'cid' not in response:
raise ValueError('Can not get \'cid\' from %s' % url)
return response
def get_media_urls(cid, *, fuck_you_bishi_mode=False):
'''Request the URLs of the video
Arguments: cid
Return value: [media_urls]
'''
if source in {None, 'overseas'}:
user_agent = USER_AGENT_API if not fuck_you_bishi_mode else USER_AGENT_PLAYER
req_args = {'appkey': APPKEY, 'cid': cid}
if quality is not None:
req_args['quality'] = quality
#req_args['sign'] = bilibili_hash(req_args)
req_args['sign'] = ''
_, response = fetch_url(url_get_media+urllib.parse.urlencode(req_args), user_agent=user_agent, cookie=cookie, fakeip=fakeip)
media_urls = [str(k.wholeText).strip() for i in xml.dom.minidom.parseString(response.decode('utf-8', 'replace')).getElementsByTagName('durl') for j in i.getElementsByTagName('url')[:1] for k in j.childNodes if k.nodeType == 4]
if not fuck_you_bishi_mode and media_urls == ['http://static.hdslb.com/error.mp4']:
logging.error('Detected User-Agent block. Switching to fuck-you-bishi mode.')
return get_media_urls(cid, fuck_you_bishi_mode=True)
elif source == 'html5':
req_args = {'aid': aid, 'page': pid}
logging.warning('HTML5 video source is experimental and may not always work.')
_, response = fetch_url('http://www.bilibili.com/m/html5?'+urllib.parse.urlencode(req_args), user_agent=USER_AGENT_PLAYER)
response = json.loads(response.decode('utf-8', 'replace'))
media_urls = [dict.get(response, 'src')]
if not media_urls[0]:
media_urls = []
if not fuck_you_bishi_mode and media_urls == ['http://static.hdslb.com/error.mp4']:
logging.error('Failed to request HTML5 video source. Retrying.')
return get_media_urls(cid, fuck_you_bishi_mode=True)
elif source == 'flvcd':
req_args = {'kw': url}
if quality is not None:
if quality == 3:
req_args['quality'] = 'high'
elif quality >= 4:
req_args['quality'] = 'super'
_, response = fetch_url('http://www.flvcd.com/parse.php?'+urllib.parse.urlencode(req_args), user_agent=USER_AGENT_PLAYER)
resp_match = re.search('<input type="hidden" name="inf" value="([^"]+)"', response.decode('gbk', 'replace'))
if resp_match:
media_urls = resp_match.group(1).rstrip('|').split('|')
else:
media_urls = []
elif source == 'bilipr':
req_args = {'cid': cid}
quality_arg = '1080' if quality is not None and quality >= 4 else '720'
logging.warning('BilibiliPr video source is experimental and may not always work.')
resp_obj, response = fetch_url('http://pr.lolly.cc/P%s?%s' % (quality_arg, urllib.parse.urlencode(req_args)), user_agent=USER_AGENT_PLAYER)
if resp_obj.getheader('Content-Type', '').startswith('text/xml'):
media_urls = [str(k.wholeText).strip() for i in xml.dom.minidom.parseString(response.decode('utf-8', 'replace')).getElementsByTagName('durl') for j in i.getElementsByTagName('url')[:1] for k in j.childNodes if k.nodeType == 4]
else:
media_urls = []
else:
assert source in {None, 'overseas', 'html5', 'flvcd', 'bilipr'}
if len(media_urls) == 0 or media_urls == ['http://static.hdslb.com/error.mp4']:
raise ValueError('Can not get valid media URLs.')
return media_urls
def get_video_size(media_urls):
'''Determine the resolution of the video
Arguments: [media_urls]
Return value: (width, height)
'''
try:
if media_urls[0].startswith('http:') or media_urls[0].startswith('https:'):
ffprobe_command = ['ffprobe', '-icy', '0', '-loglevel', 'repeat+warning' if verbose else 'repeat+error', '-print_format', 'json', '-select_streams', 'v', '-show_streams', '-timeout', '60000000', '-user-agent', USER_AGENT_PLAYER, '--', media_urls[0]]
else:
ffprobe_command = ['ffprobe', '-loglevel', 'repeat+warning' if verbose else 'repeat+error', '-print_format', 'json', '-select_streams', 'v', '-show_streams', '--', media_urls[0]]
log_command(ffprobe_command)
ffprobe_process = subprocess.Popen(ffprobe_command, stdout=subprocess.PIPE)
try:
ffprobe_output = json.loads(ffprobe_process.communicate()[0].decode('utf-8', 'replace'))
except KeyboardInterrupt:
logging.warning('Cancelling getting video size, press Ctrl-C again to terminate.')
ffprobe_process.terminate()
return 0, 0
width, height, widthxheight = 0, 0, 0
for stream in dict.get(ffprobe_output, 'streams') or []:
if dict.get(stream, 'width')*dict.get(stream, 'height') > widthxheight:
width, height = dict.get(stream, 'width'), dict.get(stream, 'height')
return width, height
except Exception as e:
log_or_raise(e, debug=debug)
return 0, 0
def convert_comments(cid, video_size):
'''Convert comments to ASS subtitle format
Arguments: cid
Return value: comment_out -> file
'''
_, resp_comment = fetch_url(url_get_comment % {'cid': cid}, cookie=cookie)
comment_in = io.StringIO(resp_comment.decode('utf-8', 'replace'))
comment_out = tempfile.NamedTemporaryFile(mode='w', encoding='utf-8-sig', newline='\r\n', prefix='tmp-danmaku2ass-', suffix='.ass', delete=False)
logging.info('Invoking Danmaku2ASS, converting to %s' % comment_out.name)
d2a_args = dict({'stage_width': video_size[0], 'stage_height': video_size[1], 'font_face': 'PingFangSC-Regular', 'font_size': math.ceil(video_size[1]/23), 'text_opacity': 0.8, 'duration_marquee': min(max(6.75*video_size[0]/video_size[1]-4, 3.0), 8.0), 'duration_still': 5.0}, **d2aflags)
for i, j in ((('stage_width', 'stage_height', 'reserve_blank'), int), (('font_size', 'text_opacity', 'comment_duration', 'duration_still', 'duration_marquee'), float)):
for k in i:
if k in d2aflags:
d2a_args[k] = j(d2aflags[k])
try:
danmaku2ass.Danmaku2ASS([comment_in], comment_out, **d2a_args)
except Exception as e:
log_or_raise(e, debug=debug)
logging.error('Danmaku2ASS failed, comments are disabled.')
comment_out.flush()
comment_out.close() # Close the temporary file early to fix an issue related to Windows NT file sharing
return comment_out
def launch_player(video_metadata, media_urls, comment_out, is_playlist=False, increase_fps=True):
'''Launch MPV media player
Arguments: video_metadata, media_urls, comment_out
Return value: player_exit_code -> int
'''
mpv_version_master = tuple(check_env.mpv_version.split('-', 1)[0].split('.'))
mpv_version_gte_0_10 = mpv_version_master >= ('0', '10') or (len(mpv_version_master) >= 2 and len(mpv_version_master[1]) >= 3) or mpv_version_master[0] == 'git'
mpv_version_gte_0_6 = mpv_version_gte_0_10 or mpv_version_master >= ('0', '6') or (len(mpv_version_master) >= 2 and len(mpv_version_master[1]) >= 2) or mpv_version_master[0] == 'git'
mpv_version_gte_0_4 = mpv_version_gte_0_6 or mpv_version_master >= ('0', '4') or (len(mpv_version_master) >= 2 and len(mpv_version_master[1]) >= 2) or mpv_version_master[0] == 'git'
logging.debug('Compare mpv version: %s %s 0.10' % (check_env.mpv_version, '>=' if mpv_version_gte_0_10 else '<'))
logging.debug('Compare mpv version: %s %s 0.6' % (check_env.mpv_version, '>=' if mpv_version_gte_0_6 else '<'))
logging.debug('Compare mpv version: %s %s 0.4' % (check_env.mpv_version, '>=' if mpv_version_gte_0_4 else '<'))
if increase_fps: # If hardware decoding (without -copy suffix) is used, do not increase fps
for i in mpvflags:
i = i.split('=', 1)
if 'vdpau' in i or 'vaapi' in i or 'vda' in i:
increase_fps = False
break
command_line = ['mpv']
if video_resolution[0] >= 1280 or video_resolution[1] >= 720:
command_line += ['--fs', '--autofit', '950x540']
if mpv_version_gte_0_6:
command_line += ['--cache-file', 'TMP']
if increase_fps and mpv_version_gte_0_6: # Drop frames at vo side but not at decoder side to prevent A/V sync issues
command_line += ['--framedrop', 'vo']
command_line += ['--http-header-fields', 'User-Agent: '+USER_AGENT_PLAYER.replace(',', '\\,')]
if mpv_version_gte_0_6:
if mpv_version_gte_0_10:
command_line += ['--force-media-title', video_metadata.get('title', url)]
else:
command_line += ['--media-title', video_metadata.get('title', url)]
if is_playlist or len(media_urls) > 1:
command_line += ['--merge-files']
if mpv_version_gte_0_4:
command_line += ['--no-video-aspect', '--sub-ass', '--sub-file', comment_out.name]
else:
command_line += ['--no-aspect', '--ass', '--sub', comment_out.name]
if increase_fps:
if mpv_version_gte_0_6:
command_line += ['--vf', 'lavfi="fps=fps=60:round=down"']
else: # Versions < 0.6 have an A/V sync related issue
command_line += ['--vf', 'lavfi="fps=fps=50:round=down"']
command_line += mpvflags
if is_playlist:
command_line += ['--playlist']
else:
command_line += ['--']
command_line += media_urls
log_command(command_line)
player_process = subprocess.Popen(command_line)
try:
player_process.wait()
except KeyboardInterrupt:
logging.info('Terminating media player...')
try:
player_process.terminate()
try:
player_process.wait(timeout=2)
except subprocess.TimeoutExpired:
logging.info('Killing media player by force...')
player_process.kill()
except Exception:
pass
raise
return player_process.returncode
aid, pid = parse_url(url)
logging.info('Loading video info...')
video_metadata = fetch_video_metadata(aid, pid)
logging.info('Got video cid: %s' % video_metadata['cid'])
logging.info('Loading video content...')
if media is None:
media_urls = get_media_urls(video_metadata['cid'])
else:
media_urls = [media]
logging.info('Got media URLs:'+''.join(('\n %d: %s' % (i+1, j) for i, j in enumerate(media_urls))))
logging.info('Determining video resolution...')
video_size = get_video_size(media_urls)
video_resolution = video_size # backup original resolution
logging.info('Video resolution: %sx%s' % video_size)
if video_size[0] > 0 and video_size[1] > 0:
video_size = (video_size[0]*1080/video_size[1], 1080) # Simply fix ASS resolution to 1080p
else:
log_or_raise(ValueError('Can not get video size. Comments may be wrongly positioned.'), debug=debug)
video_size = (1920, 1080)
logging.info('Loading comments...')
if comment is None:
comment_out = convert_comments(video_metadata['cid'], video_size)
else:
comment_out = open(comment, 'r')
comment_out.close()
logging.info('Launching media player...')
player_exit_code = launch_player(video_metadata, media_urls, comment_out, increase_fps=not keep_fps)
if comment is None and player_exit_code == 0:
os.remove(comment_out.name)
return player_exit_code
def fetch_url(url, *, user_agent=USER_AGENT_PLAYER, cookie=None, fakeip=None):
'''Fetch HTTP URL
Arguments: url, user_agent, cookie
Return value: (response_object, response_data) -> (http.client.HTTPResponse, bytes)
'''
logging.debug('Fetch: %s' % url)
req_headers = {'User-Agent': user_agent, 'Accept-Encoding': 'gzip, deflate'}
if cookie:
req_headers['Cookie'] = cookie
if fakeip:
req_headers['X-Forwarded-For'] = fakeip
req_headers['Client-IP'] = fakeip
req = urllib.request.Request(url=url, headers=req_headers)
response = urllib.request.urlopen(req, timeout=120)
content_encoding = response.getheader('Content-Encoding')
if content_encoding == 'gzip':
data = gzip.GzipFile(fileobj=response).read()
elif content_encoding == 'deflate':
decompressobj = zlib.decompressobj(-zlib.MAX_WBITS)
data = decompressobj.decompress(response.read())+decompressobj.flush()
else:
data = response.read()
return response, data
def bilibili_hash(args):
'''Calculate API signature hash
Arguments: {request_paramter: value}
Return value: hash_value -> str
'''
return hashlib.md5((urllib.parse.urlencode(sorted(args.items()))+APPSEC).encode('utf-8')).hexdigest() # Fuck you bishi
def check_env(debug=False):
'''Check the system environment to make sure dependencies are set up correctly
Return value: is_successful -> bool
'''
global danmaku2ass, requests
retval = True
try:
import danmaku2ass
except ImportError as e:
danmaku2ass_filename = os.path.abspath(os.path.join(__file__, '..', 'danmaku2ass.py'))
logging.error('Automatically downloading \'danmaku2ass.py\'\n from https://github.com/m13253/danmaku2ass\n to %s' % danmaku2ass_filename)
try:
danmaku2ass_downloaded = fetch_url('https://github.com/m13253/danmaku2ass/raw/master/danmaku2ass.py')
with open(danmaku2ass_filename, 'wb') as f:
f.write(danmaku2ass_downloaded[1])
del danmaku2ass_downloaded
except Exception as e:
logging.error('Can not download Danmaku2ASS module automatically (%s), please get it yourself.' % e)
retval = False
if retval:
try:
import danmaku2ass
danmaku2ass.Danmaku2ASS
except (AttributeError, ImportError) as e:
logging.error('Danmaku2ASS module is not working (%s), please update it at https://github.com/m13253/danmaku2ass' % e)
retval = False
try:
mpv_process = subprocess.Popen(('mpv', '--version'), stdout=subprocess.PIPE, env=dict(os.environ, MPV_VERBOSE='-1'))
mpv_output = mpv_process.communicate()[0].decode('utf-8', 'replace').splitlines()
for line in mpv_output:
if line.startswith('[cplayer] mpv '):
check_env.mpv_version = line.split(' ', 3)[2]
logging.debug('Detected mpv version: %s' % check_env.mpv_version)
break
else:
log_or_raise(RuntimeError('Can not detect mpv version.'), debug=debug)
check_env.mpv_version = 'git-'
except OSError as e:
logging.error('Please install \'mpv\' as the media player.')
retval = False
try:
mpv_process = subprocess.Popen(('mpv', '--vf', 'lavfi=help'), stdout=subprocess.DEVNULL)
mpv_process.wait()
if mpv_process.returncode != 0:
logging.error('mpv is not configured to enable \'lavfi\' filter. (mpv or ffmpeg may be too old)')
retval = False
except OSError as e:
logging.error('mpv is not configured to enable \'lavfi\' filter. (mpv or ffmpeg may be too old)')
retval = False
try:
subprocess.Popen(('ffprobe', '-version'), stdout=subprocess.DEVNULL)
except OSError as e:
logging.error('Please install \'ffprobe\' from FFmpeg ultilities.')
retval = False
return retval
def log_command(command_line):
'''Log the command line to be executed, escaping correctly
'''
logging.debug('Executing: '+' '.join('\''+i+'\'' if ' ' in i or '?' in i or '&' in i or '"' in i else i for i in command_line))
def log_or_raise(exception, debug=False):
'''Log exception if debug == False, or raise it if debug == True
'''
if debug:
raise exception
else:
logging.error(str(exception))
class MyArgumentFormatter(argparse.HelpFormatter):
def _split_lines(self, text, width):
'''Patch the default argparse.HelpFormatter so that '\\n' is correctly handled
'''
return [i for line in text.splitlines() for i in argparse.HelpFormatter._split_lines(self, line, width)]
def main():
if len(sys.argv) == 1:
sys.argv.append('--help')
parser = argparse.ArgumentParser(formatter_class=MyArgumentFormatter)
parser.add_argument('-c', '--cookie', help='Import Cookie at bilibili.com, type document.cookie at JavaScript console to acquire it')
parser.add_argument('-d', '--debug', action='store_true', help='Stop execution immediately when an error occures')
parser.add_argument('-m', '--media', help='Specify local media file to play with remote comments')
parser.add_argument('--comment', help='Specify local ASS comment file to play with remote media')
parser.add_argument('-q', '--quality', type=int, help='Specify video quality, -q 1 for the lowest, -q 4 for HD')
parser.add_argument('-s', '--source', help='Specify the source of video provider.\n' +
'Available values:\n' +
'default: Default source\n' +
'overseas: CDN acceleration for users outside china\n' +
'flvcd: Video parsing service provided by FLVCD.com\n' +
'html5: Low quality video provided by m.acg.tv for mobile users')
parser.add_argument('-f', '--fakeip', help='Fake ip for bypassing restrictions.')
parser.add_argument('-v', '--verbose', action='store_true', help='Print more debugging information')
parser.add_argument('--hd', action='store_true', help='Shorthand for -q 4')
parser.add_argument('--keep-fps', action='store_true', help='Use the same framerate as the video to animate comments, instead of increasing to 60 fps')
parser.add_argument('--mpvflags', metavar='FLAGS', default='', help='Parameters passed to mpv, formed as \'--option1=value1 --option2=value2\'')
parser.add_argument('--d2aflags', '--danmaku2assflags', metavar='FLAGS', default='', help='Parameters passed to Danmaku2ASS, formed as \'option1=value1,option2=value2\'')
parser.add_argument('url', metavar='URL', nargs='+', help='Bilibili video page URL (http://www.bilibili.com/video/av*/)')
args = parser.parse_args()
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG if args.verbose else logging.INFO)
if not check_env(debug=args.debug):
return 2
quality = args.quality if args.quality is not None else 4 if args.hd else None
source = args.source if args.source != 'default' else None
if source not in {None, 'overseas', 'html5', 'flvcd', 'bilipr'}:
raise ValueError('invalid value specified for --source, see --help for more information')
mpvflags = args.mpvflags.split()
d2aflags = dict((i.split('=', 1) if '=' in i else [i, ''] for i in args.d2aflags.split(','))) if args.d2aflags else {}
fakeip = args.fakeip if args.fakeip else None
retval = 0
for url in args.url:
try:
retval = retval or biligrab(url, debug=args.debug, verbose=args.verbose, media=args.media, comment=args.comment, cookie=args.cookie, quality=quality, source=source, keep_fps=args.keep_fps, mpvflags=mpvflags, d2aflags=d2aflags, fakeip=args.fakeip)
except OSError as e:
logging.error(e)
retval = retval or e.errno
if args.debug:
raise
except Exception as e:
logging.error(e)
retval = retval or 1
if args.debug:
raise
return retval
if __name__ == '__main__':
sys.exit(main())
| mit | 6,329,866,537,232,969,000 | 48.27439 | 295 | 0.610857 | false |
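A hypothetical way to drive the module programmatically instead of via the command line, based on the biligrab() signature above; the av number is a placeholder, and check_env() should be run first, as main() does.
if check_env():
    biligrab(
        'http://www.bilibili.com/video/av123456/',  # placeholder URL
        quality=4,                   # same effect as --hd
        source='overseas',
        mpvflags=['--volume=50'],
        d2aflags={'font_size': '36', 'text_opacity': '0.6'},
    )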
DayGitH/Python-Challenges | DailyProgrammer/20120405A.py | 1 | 1079 | """
1000 Lockers Problem.
In an imaginary high school there exist 1000 lockers labelled 1, 2, ..., 1000. All of them are closed. 1000 students
are to "toggle" a locker's state. * The first student toggles all of them * The second one toggles every other one
(i.e, 2, 4, 6, ...) * The third one toggles the multiples of 3 (3, 6, 9, ...) and so on until all students have
finished.
To toggle means to close the locker if it is open, and to open it if it's closed.
How many and which lockers are open in the end?
Thanks to ladaghini for submitting this challenge to /r/dailyprogrammer_ideas!
"""
import math
N = 1000
working_list = [False] * N
for i in range(1,1000+1):
for n, j in enumerate(working_list):
if n%i == 0:
working_list[n] = not working_list[n]
for n, j in enumerate(working_list):
if j:
print(n)
print(working_list.count(True))
"""
/u/prophile's solution
requires dev to already know that the solution is all squares between 0 and N
"""
opens = [n*n for n in range(1, int(math.sqrt(N)) + 1) if n*n <= N]
print(len(opens), opens)
| mit | 1,479,764,557,927,629,600 | 30.735294 | 116 | 0.68304 | false |
jeroanan/GameCollection | Tests/Interactors/Genre/TestDeleteGenreInteractor.py | 1 | 1656 | # copyright (c) David Wilson 2015
# This file is part of Icarus.
# Icarus is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Icarus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Icarus. If not, see <http://www.gnu.org/licenses/>.
from Genre import Genre
from Interactors.GenreInteractors import DeleteGenreInteractor
from Interactors.Interactor import Interactor
from Tests.Interactors.InteractorTestBase import InteractorTestBase
class TestDeleteGenreInteractor(InteractorTestBase):
"""Unit tests for the DeleteGenreInteractor class"""
def setUp(self):
"""setUp function for all unit tests in this class"""
super().setUp()
self.__target = DeleteGenreInteractor()
self.__target.persistence = self.persistence
def test_is_interactor(self):
"""Test that DeleteGenreInteractor is derived from Interactor"""
self.assertIsInstance(self.__target, Interactor)
def test_execute_calls_persistence(self):
"""Test that calling DeleteGenreInteractor.execute calls persistence.delete_genre"""
g = Genre.from_dict({"id": "id"})
self.__target.execute(genre=g)
self.persistence.delete_genre.assert_called_with(g.id)
| gpl-3.0 | -2,025,991,190,891,741,200 | 40.4 | 92 | 0.737319 | false |
coddingtonbear/django-location | location/tests/test_icloud.py | 1 | 5932 | import calendar
import datetime
from django.contrib.gis.geos import Point
from django.utils.timezone import utc
from mock import MagicMock, patch
import pyicloud
from location import models
from location.tests.base import BaseTestCase
from location.consumers import icloud
class iCloudTest(BaseTestCase):
def setUp(self):
super(iCloudTest, self).setUp()
self.arbitrary_username = 'arbitrary_username'
self.arbitrary_password = 'arbitrary_password'
self.arbitrary_device_id = 'arbitrary_device_id'
self.user_settings = models.LocationConsumerSettings.objects.create(
user=self.user,
icloud_enabled=True,
icloud_username=self.arbitrary_username,
icloud_password=self.arbitrary_password,
icloud_device_id=self.arbitrary_device_id,
icloud_timezone='UTC'
)
icloud.SETTINGS['icloud']['max_wait_seconds'] = 0.5
icloud.SETTINGS['icloud']['request_interval_seconds'] = 0.1
self.icloud_consumer = icloud.iCloudConsumer(self.user_settings)
def test_get_location_data_unknown_device_id(self):
with patch('pyicloud.PyiCloudService.__init__') as init_mock:
init_mock.return_value = None
pyicloud.PyiCloudService.devices = {}
with self.assertRaises(icloud.UnknownDeviceException):
self.icloud_consumer.get_location_data()
def test_get_location_data(self):
arbitrary_location_data = {
'somewhere': 'around',
'here': True
}
with patch('pyicloud.PyiCloudService.__init__') as init_mock:
init_mock.return_value = None
mock_device = MagicMock()
mock_device.location.return_value = arbitrary_location_data
pyicloud.PyiCloudService.devices = {}
pyicloud.PyiCloudService.devices[self.arbitrary_device_id] = (
mock_device
)
self.icloud_consumer.data_is_accurate = MagicMock()
self.icloud_consumer.data_is_accurate.return_value = True
actual_data = self.icloud_consumer.get_location_data()
mock_device.location.assert_called_with()
self.assertEqual(
actual_data,
arbitrary_location_data
)
def test_get_location_data_inaccurate(self):
arbitrary_location_data = {
'somewhere': 'around',
'here': True
}
with patch('pyicloud.PyiCloudService.__init__') as init_mock:
init_mock.return_value = None
mock_device = MagicMock()
mock_device.location.return_value = arbitrary_location_data
pyicloud.PyiCloudService.devices = {}
pyicloud.PyiCloudService.devices[self.arbitrary_device_id] = (
mock_device
)
self.icloud_consumer.data_is_accurate = MagicMock()
self.icloud_consumer.data_is_accurate.return_value = False
with self.assertRaises(icloud.LocationUnavailableException):
self.icloud_consumer.get_location_data()
def test_data_is_accurate(self):
accurate_data = {
'locationFinished': True,
'isInaccurate': False,
'isOld': False,
'horizontalAccuracy': 1,
}
actual_result = self.icloud_consumer.data_is_accurate(accurate_data)
self.assertTrue(
actual_result,
)
def test_location_not_finished(self):
failure_data = {
'locationFinished': False,
'isInaccurate': False,
'isOld': False,
'horizontalAccuracy': 1,
}
actual_result = self.icloud_consumer.data_is_accurate(failure_data)
self.assertFalse(
actual_result,
)
def test_location_inaccurate(self):
failure_data = {
'locationFinished': True,
'isInaccurate': True,
'isOld': False,
'horizontalAccuracy': 1,
}
actual_result = self.icloud_consumer.data_is_accurate(failure_data)
self.assertFalse(
actual_result,
)
def test_location_is_old(self):
failure_data = {
'locationFinished': True,
'isInaccurate': False,
'isOld': True,
'horizontalAccuracy': 1,
}
actual_result = self.icloud_consumer.data_is_accurate(failure_data)
self.assertFalse(
actual_result,
)
def test_insufficient_horizontal_accuracy(self):
failure_data = {
'locationFinished': True,
'isInaccurate': False,
            'isOld': False,
'horizontalAccuracy': (
icloud.SETTINGS['icloud']['min_horizontal_accuracy'] * 2
),
}
actual_result = self.icloud_consumer.data_is_accurate(failure_data)
self.assertFalse(
actual_result,
)
def test_update_location(self):
arbitrary_time = datetime.datetime(2013, 3, 2).replace(tzinfo=utc)
arbitrary_latitude = 50
arbitrary_longitude = 75
mock_location_data = {
'timeStamp': calendar.timegm(arbitrary_time.timetuple()) * 1000,
'longitude': arbitrary_longitude,
'latitude': arbitrary_latitude,
}
self.icloud_consumer.get_location_data = MagicMock()
self.icloud_consumer.get_location_data.return_value = (
mock_location_data
)
self.icloud_consumer.update_location()
snapshot = models.LocationSnapshot.objects.get()
self.assertIsNotNone(snapshot.source)
self.assertEqual(
snapshot.location,
Point(arbitrary_longitude, arbitrary_latitude)
)
self.assertEqual(
snapshot.date,
arbitrary_time,
)
| mit | 575,589,324,565,348,400 | 31.415301 | 76 | 0.590695 | false |
callowayproject/django-objectpermissions | example/simpleapp/models.py | 1 | 1253 | from django.db import models
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User
class SimpleText(models.Model):
"""A Testing app"""
firstname = models.CharField(blank=True, max_length=255)
lastname = models.CharField(blank=True, max_length=255)
favorite_color = models.CharField(blank=True, max_length=255)
def __unicode__(self):
return self.firstname
class SimpleTaggedItem(models.Model):
tag = models.SlugField()
simple_text = models.ForeignKey(SimpleText)
def __unicode__(self):
return self.tag
import objectpermissions
permissions = ['perm1', 'perm2', 'perm3', 'perm4']
objectpermissions.register(SimpleText, permissions)
objectpermissions.register(SimpleTaggedItem, permissions)
from django.contrib import admin
from objectpermissions.admin import TabularUserPermInline, StackedUserPermInline
class SimpleTaggedItemInline(admin.TabularInline):
model = SimpleTaggedItem
class SimpleTextAdmin(admin.ModelAdmin):
list_display = ('firstname','lastname','favorite_color')
inlines = [SimpleTaggedItemInline, TabularUserPermInline, ]
admin.site.register(SimpleText, SimpleTextAdmin) | apache-2.0 | 3,383,614,336,839,751,000 | 32.891892 | 80 | 0.769354 | false |
thorwhalen/ut | parse/web/parsing_templates.py | 1 | 2359 | __author__ = 'thor'
import ut as ms
import re
import requests
from bs4 import BeautifulSoup
import os
import ut.pfile.to
def get_multiple_template_dicts(source):
templates = dict()
if isinstance(source, str):
        if not re.compile('\n|\t').search(source) and len(source) < 150: # assume it's a filepath or url...
if os.path.exists(source):
source = ms.pfile.to.string(source)
else:
source = requests.get(source).text # ... and get the html
soup = BeautifulSoup(source)
table_soup_list = soup.find_all('table')
print("Found %d tables..." % len(table_soup_list))
for table_soup in table_soup_list:
try:
tt = mk_simple_template_dict(table_soup)
templates[tt['table']['id']] = tt
except Exception:
raise
print("... could extract a template from %d of these" % len(templates))
return templates
def mk_simple_template_dict(table_soup):
'''
Tries to create a template dict from html containing a table (should feed it with soup.find('table') for example)
    This function assumes that all thead cells are formatted the same, and all tbody rows are formatted the same
'''
# global table attributes
bb = table_soup
glob = dict()
glob['id'] = bb.attrs.get('id')
glob['summary'] = ''
glob['style'] = parse_style(bb.attrs.get('style'))
# thead attributes
bb = table_soup.find('thead').find('th')
thead = dict()
thead['scope'] = bb.attrs.get('scope')
thead['style'] = parse_style(bb.attrs.get('style'))
# tbody attributes
bb = table_soup.find('tbody').find('tr').find('td')
tbody = dict()
tbody['style'] = parse_style(bb.attrs.get('style'))
return {'table': glob, 'thead': thead, 'tbody': tbody}
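# Illustrative usage (hypothetical file name, not from the original module):
#   templates = get_multiple_template_dicts('tables.html')  # filepath, URL or raw HTML
#   for table_id, tpl in templates.items():
#       print(table_id, tpl['thead']['style'], tpl['tbody']['style'])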
def parse_style(style_string):
if style_string:
t = re.compile('[^:]+:[^;]+;').findall(style_string.replace('\n','').replace('\t',''))
t = [x.replace(';','') for x in t]
t = [x.split(':') for x in t]
for i in range(len(t)):
for ii in range(len(t[i])):
t[i][ii] = t[i][ii].strip()
style_dict = dict()
for ti in t:
style_dict[ti[0]] = ti[1]
return style_dict
else:
return None | mit | -3,465,566,294,807,742,500 | 30.891892 | 117 | 0.577363 | false |
jyr/japos-client | views/login.py | 1 | 4357 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import wx
from platform import Platform
from openings import Opening_view
from controllers.auth import Auth_controller
# begin wxGlade: extracode
# end wxGlade
class Login_view(wx.Frame):
def __init__(self, parent, id):
# begin wxGlade: Login.__init__
img = Platform("/img/logo.png")
wx.Frame.__init__(self, parent, id, style=wx.DEFAULT_FRAME_STYLE ^(wx.MAXIMIZE_BOX))
self.controller = Auth_controller()
self.p_data = wx.Panel(self, -1)
self.s_username_staticbox = wx.StaticBox(self.p_data, -1, "")
self.s_password_staticbox = wx.StaticBox(self.p_data, -1, "")
self.s_connect_staticbox = wx.StaticBox(self.p_data, -1, "")
self.s_data_staticbox = wx.StaticBox(self.p_data, -1, "")
self.p_header = wx.Panel(self, -1)
self.img_logo = wx.StaticBitmap(self.p_header, -1, wx.Bitmap(img.string, wx.BITMAP_TYPE_ANY))
self.l_japos = wx.StaticText(self.p_header, -1, "JAPOS", style=wx.ALIGN_CENTRE)
self.static_line_1 = wx.StaticLine(self.p_header, -1, style=wx.LI_VERTICAL)
self.l_username = wx.StaticText(self.p_data, -1, "Username: ")
self.cb_username = wx.ComboBox(self.p_data, -1, choices=[], style=wx.CB_DROPDOWN)
self.l_password = wx.StaticText(self.p_data, -1, "Password: ")
self.tc_password = wx.TextCtrl(self.p_data, -1, "", style=wx.TE_PASSWORD)
self.b_login = wx.Button(self.p_data, -1, "Login")
self.Bind(wx.EVT_BUTTON, self.OnAuth, id = self.b_login.GetId())
self.__set_properties()
self.__do_layout()
# end wxGlade
def __set_properties(self):
# begin wxGlade: Login.__set_properties
self.SetTitle("Login")
self.l_japos.SetForegroundColour(wx.Colour(255, 255, 255))
self.l_japos.SetFont(wx.Font(20, wx.DEFAULT, wx.NORMAL, wx.BOLD, 0, ""))
self.static_line_1.SetMinSize((251, 1))
self.static_line_1.SetBackgroundColour(wx.Colour(255, 255, 255))
self.p_header.SetBackgroundColour(wx.Colour(47, 47, 47))
self.l_username.SetFont(wx.Font(15, wx.DEFAULT, wx.NORMAL, wx.BOLD, 0, ""))
self.l_password.SetFont(wx.Font(15, wx.DEFAULT, wx.NORMAL, wx.BOLD, 0, ""))
# end wxGlade
def __do_layout(self):
# begin wxGlade: Login.__do_layout
self.s_login = s_login = wx.BoxSizer(wx.VERTICAL)
s_data = wx.StaticBoxSizer(self.s_data_staticbox, wx.VERTICAL)
s_connect = wx.StaticBoxSizer(self.s_connect_staticbox, wx.HORIZONTAL)
s_password = wx.StaticBoxSizer(self.s_password_staticbox, wx.HORIZONTAL)
s_username = wx.StaticBoxSizer(self.s_username_staticbox, wx.HORIZONTAL)
s_header = wx.BoxSizer(wx.VERTICAL)
s_header.Add(self.img_logo, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL, 10)
s_header.Add(self.l_japos, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL, 0)
s_header.Add(self.static_line_1, 0, wx.ALL|wx.EXPAND, 5)
self.p_header.SetSizer(s_header)
s_login.Add(self.p_header, 0, wx.EXPAND, 0)
s_username.Add(self.l_username, 0, 0, 0)
s_username.Add(self.cb_username, 1, 0, 0)
s_data.Add(s_username, 1, wx.EXPAND, 0)
s_password.Add(self.l_password, 0, 0, 0)
s_password.Add(self.tc_password, 1, 0, 0)
s_data.Add(s_password, 1, wx.EXPAND, 0)
s_connect.Add(self.b_login, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 1)
s_data.Add(s_connect, 1, wx.EXPAND, 0)
self.p_data.SetSizer(s_data)
s_login.Add(self.p_data, 1, wx.EXPAND, 0)
self.SetSizer(s_login)
s_login.Fit(self)
self.Layout()
self.Centre()
# end wxGlade
def OnAuth(self, evt):
username = self.cb_username.GetValue().encode('utf-8')
password = self.tc_password.GetValue().encode('utf-8')
print password
try:
self.valid = self.controller.auth(username, password)
if self.valid:
self.p_data.Destroy()
self.p_header.Destroy()
opening = Opening_view(self, -1)
else:
self.controller.error()
except: #japos.crews.models.DoesNotExist:
self.controller.error()
def main():
app = wx.PySimpleApp(0)
f_login = Login_view(None, -1)
f_login.Show()
app.MainLoop()
# end of class Login | gpl-2.0 | -9,159,458,240,173,351,000 | 40.903846 | 102 | 0.626348 | false |
ethereum/dapp-bin | scrypt/scrypt.se.py | 1 | 6861 | data smix_intermediates[2**160](pos, stored[1024][4], state[8])
event TestLog6(h:bytes32)
macro blockmix($_inp):
with inp = $_inp:
with X = string(64):
mcopy(X, inp + 64, 64)
X[0] = ~xor(X[0], inp[0])
X[1] = ~xor(X[1], inp[1])
log(type=TestLog, 1, msg.gas)
X = salsa20(X)
log(type=TestLog, 2, msg.gas)
inp[4] = X[0]
inp[5] = X[1]
X[0] = ~xor(X[0], inp[2])
X[1] = ~xor(X[1], inp[3])
X = salsa20(X)
inp[6] = X[0]
inp[7] = X[1]
inp[0] = inp[4]
inp[1] = inp[5]
inp[2] = inp[6]
inp[3] = inp[7]
inp
macro endianflip($x):
with $y = string(len($x)):
with $i = 0:
with $L = len($y):
while $i < $L:
with $d = mload($x - 28 + $i):
mcopylast4($y + $i - 28, byte(31, $d) * 2**24 + byte(30, $d) * 2**16 + byte(29, $d) * 2**8 + byte(28, $d))
$i += 4
$y
macro mcopylast4($to, $frm):
~mstore($to, (~mload($to) & sub(0, 2**32)) + ($frm & 0xffffffff))
roundz = text("\x04\x00\x0c\x07\x08\x04\x00\x09\x0c\x08\x04\x0d\x00\x0c\x08\x12\x09\x05\x01\x07\x0d\x09\x05\x09\x01\x0d\x09\x0d\x05\x01\x0d\x12\x0e\x0a\x06\x07\x02\x0e\x0a\x09\x06\x02\x0e\x0d\x0a\x06\x02\x12\x03\x0f\x0b\x07\x07\x03\x0f\x09\x0b\x07\x03\x0d\x0f\x0b\x07\x12\x01\x00\x03\x07\x02\x01\x00\x09\x03\x02\x01\x0d\x00\x03\x02\x12\x06\x05\x04\x07\x07\x06\x05\x09\x04\x07\x06\x0d\x05\x04\x07\x12\x0b\x0a\x09\x07\x08\x0b\x0a\x09\x09\x08\x0b\x0d\x0a\x09\x08\x12\x0c\x0f\x0e\x07\x0d\x0c\x0f\x09\x0e\x0d\x0c\x0d\x0f\x0e\x0d\x12")
macro salsa20($x):
with b = string(64):
b[0] = $x[0]
b[1] = $x[1]
b = endianflip(b)
with x = string(64):
x[0] = b[0]
x[1] = b[1]
with i = 0:
with refpos = roundz:
while i < 4:
with destination = x + (~mload(refpos - 31) & 255) * 4 - 28:
with bb = ~mload(refpos - 28) & 255:
with a = (mload(x + (~mload(refpos-30) & 255) * 4 - 28) + mload(x + (~mload(refpos-29) & 255) * 4 - 28)) & 0xffffffff:
with oldval = mload(destination):
mcopylast4(destination, ~xor(oldval, ~or(a * 2**bb, a / 2**(32 - bb))))
refpos += 4
if refpos == roundz + 128:
i += 1
refpos = roundz
i = 0
while i < 64:
oldval = mload(b + i - 28) & 0xffffffff
newval = (oldval + mload(x + i - 28)) & 0xffffffff
mcopylast4(b + i - 28, newval)
i += 4
endianflip(b)
event TestLog(a:uint256, b:uint256)
event TestLog2(a:str)
event TestLog3(a:str, x:uint256)
event TestLog4(a:str, x:uint256, y:uint256)
event TestLog5(x:uint256, v1:bytes32, v2:bytes32, v3:bytes32, v4:bytes32)
def smix(b:str):
with h = mod(sha3(b:str), 2**160):
with x = string(256):
mcopy(x, b, 128)
with i = self.smix_intermediates[h].pos:
k = 0
while k < if(i > 0, 8, 0):
x[k] = self.smix_intermediates[h].state[k]
k += 1
while i < 2048 and msg.gas > 450000:
if i < 1024:
self.smix_intermediates[h].stored[i][0] = x[0]
self.smix_intermediates[h].stored[i][1] = x[1]
self.smix_intermediates[h].stored[i][2] = x[2]
self.smix_intermediates[h].stored[i][3] = x[3]
x = blockmix(x)
# if i == 1023:
# log(type=TestLog2, x)
else:
j = div(x[2], 256**31) + (div(x[2], 256**30) & 3) * 256
x[0] = ~xor(x[0], self.smix_intermediates[h].stored[j][0])
x[1] = ~xor(x[1], self.smix_intermediates[h].stored[j][1])
x[2] = ~xor(x[2], self.smix_intermediates[h].stored[j][2])
x[3] = ~xor(x[3], self.smix_intermediates[h].stored[j][3])
x = blockmix(x)
i += 1
k = 0
while k < 8:
self.smix_intermediates[h].state[k] = x[k]
k += 1
self.smix_intermediates[h].pos = i
# log(type=TestLog2, x)
if i == 2048:
with b = string(128):
mcopy(b, x, 128)
return(b:str)
else:
return(text(""):str)
event BlockMixInput(data:str)
def scrypt(pass:str): #implied: pass=salt, n=1024, r=1, p=1, dklen=32
b = self.pbkdf2(pass, pass, 128, outchars=128)
b = self.smix(b, outchars=128)
if not len(b):
return(0:bytes32)
o = self.pbkdf2(pass, b, 32, outchars=32)
return(o[0]:bytes32)
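# Reference note (added commentary, not part of the original contract): the pipeline
# above is scrypt with salt = pass, N=1024, r=1, p=1, dkLen=32, i.e.
# PBKDF2(HMAC-SHA256, c=1) -> smix (sequential memory-hard mixing) -> PBKDF2.
# For an off-chain cross-check, assuming Python 3.6+ with bytes inputs, the same
# parameter choice is:
#   hashlib.scrypt(password, salt=password, n=1024, r=1, p=1, dklen=32)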
macro hmac_sha256($_key, $message): #implied: c=1, hash=sha256
with key = $_key:
if len(key) > 64:
key = [sha256(key:str)]
key[-1] = 32
if len(key) < 64:
with _o = string(64):
mcopy(_o, key, len(key))
key = _o
with o_key_pad_left = ~xor(0x5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c, key[0]):
with o_key_pad_right = ~xor(0x5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c, key[1]):
with padded_msg = string(len($message) + 64):
padded_msg[0] = ~xor(0x3636363636363636363636363636363636363636363636363636363636363636, key[0])
padded_msg[1] = ~xor(0x3636363636363636363636363636363636363636363636363636363636363636, key[1])
mcopy(padded_msg + 64, $message, len($message))
sha256([o_key_pad_left, o_key_pad_right, sha256(padded_msg:str)]:arr)
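# Note (added commentary): 0x36...36 and 0x5c...5c above are the standard HMAC
# ipad/opad bytes repeated across the 64-byte SHA-256 block, so this macro computes
# HMAC-SHA256(key, message) = H((key xor opad) || H((key xor ipad) || message)).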
def hmac_sha256(key:str, msg:str):
return(hmac_sha256(key, msg):bytes32)
def pbkdf2(pass:str, salt:str, dklen): #implied: c=1, hash=sha256
o = string(dklen)
i = 0
while i * 32 < len(o):
o[i] = chain_prf(pass, salt, i + 1)
i += 1
return(o:str)
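# Note (added commentary): with iteration count c=1, PBKDF2 block i reduces to a
# single HMAC(pass, salt || INT_32_BE(i)), which is what chain_prf computes below.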
macro chain_prf($pass, $salt, $i):
with ext_salt = string(len($salt) + 4):
$j = $i
mcopy(ext_salt, $salt, len($salt))
mcopy(ext_salt + len($salt), ref($j) + 28, 4)
hmac_sha256($pass, ext_salt)
| mit | 6,643,061,788,805,022,000 | 41.351852 | 529 | 0.457805 | false |
Migwi-Ndungu/bc-9-Pomodoro-Timer | dbase/db_script.py | 1 | 2781 | import sqlite3 as lite
import sys
#statusuuid
# active = 37806757-4391-4c40-8cae-6bbfd71e893e
# pending = 0eaec4f3-c524-40ab-b295-2db5cb7a0770
# finished = f82db8cc-a969-4495-bffd-bb0ce0ba877a
# running = 6c25b6d2-75cc-42c3-9c8c-ccf7b54ba585
#sounduuid
# on = 510b9503-7899-4d69-83c0-690342daf271
# off = 05797a63-51f5-4c1d-9068-215c593bba8d
def initialize_n_create_db():
'''
This is a script that creates a database called pomodoro with a table
called timer_details.The timer_details table is populated with dummy
data that will be used for testing
'''
try:
print 'Initialize database Creation'
con = lite.connect(r'pomodoro.db')
cur = con.cursor()
cur.executescript("""
DROP TABLE IF EXISTS timer_details;
CREATE TABLE timer_details(uuid TEXT PRIMARY KEY, title TEXT,
start_time INTEGER, duration INTEGER,
shortbreak INTEGER, longbreak INTEGER,
cycle INTEGER, statusuuid TEXT, sounduuid TEXT);
INSERT INTO timer_details VALUES('12f63828-e21a-40c1-ab43-5f4dd5a5dd8a',
'presentn1', 1472004636, -9300, -10600, -10200, 1,
'0eaec4f3-c524-40ab-b295-2db5cb7a0770',
'510b9503-7899-4d69-83c0-690342daf271');
INSERT INTO timer_details VALUES('d57037fe-df12-4ca5-abff-1dd626cba2b5',
'presentn2', 1472015436, -9000, -10500, -9960, 2,
'37806757-4391-4c40-8cae-6bbfd71e893e',
'510b9503-7899-4d69-83c0-690342daf271');
INSERT INTO timer_details VALUES('8cb1795f-a50b-40a6-b2b7-6843602ad95c',
'exercise', 1472015536, -10200, -10560, -9600, 0,
'0eaec4f3-c524-40ab-b295-2db5cb7a0770',
'05797a63-51f5-4c1d-9068-215c593bba8d');
INSERT INTO timer_details VALUES('78d9d2bc-6fd3-4fad-94cc-b706aa91f57e',
'learning', 1472015636, -9900, -10500, -9900, 2,
'37806757-4391-4c40-8cae-6bbfd71e893e',
'510b9503-7899-4d69-83c0-690342daf271');
INSERT INTO timer_details VALUES('9bffb77d-569f-491e-8713-7bad9adfefa6',
'revision', 1472015736, -10500, -10440, -9900, 1,
'f82db8cc-a969-4495-bffd-bb0ce0ba877a',
'05797a63-51f5-4c1d-9068-215c593bba8d');
""")
con.commit()
        print 'Database timer_details creation finished successfully!!'
except lite.Error, e:
        print 'Error %s occurred: Database Creation failed!!!' % e.args[0]
if __name__ == '__main__':
initialize_n_create_db()
| mit | -8,329,316,937,531,565,000 | 40.507463 | 85 | 0.588637 | false |
Nuevosmedios/ADL_LRS | lrs/util/Authorization.py | 1 | 4915 | import base64
from functools import wraps
from django.conf import settings
from django.contrib.auth import authenticate
from vendor.xapi.lrs.exceptions import Unauthorized, OauthUnauthorized, BadRequest
from vendor.xapi.lrs.models import Token, Agent
from vendor.xapi.oauth_provider.utils import send_oauth_error
from vendor.xapi.oauth_provider.consts import ACCEPTED
from django.contrib.auth.models import User
# A decorator, that can be used to authenticate some requests at the site.
def auth(func):
@wraps(func)
def inner(request, *args, **kwargs):
# Note: The cases involving OAUTH_ENABLED are here if OAUTH_ENABLED is switched from true to false
# after a client has performed the handshake. (Not likely to happen, but could)
auth_type = request['auth']['type']
# There is an http auth_type request and http auth is enabled
if auth_type == 'http' and settings.HTTP_AUTH_ENABLED:
http_auth_helper(request)
# There is an http auth_type request and http auth is not enabled
elif auth_type == 'http' and not settings.HTTP_AUTH_ENABLED:
raise BadRequest("HTTP authorization is not enabled. To enable, set the HTTP_AUTH_ENABLED flag to true in settings")
# There is an oauth auth_type request and oauth is enabled
elif auth_type == 'oauth' and settings.OAUTH_ENABLED:
oauth_helper(request)
# There is an oauth auth_type request and oauth is not enabled
elif auth_type == 'oauth' and not settings.OAUTH_ENABLED:
raise BadRequest("OAuth is not enabled. To enable, set the OAUTH_ENABLED flag to true in settings")
# There is no auth_type request and there is some sort of auth enabled
elif auth_type == 'none' and (settings.HTTP_AUTH_ENABLED or settings.OAUTH_ENABLED):
raise Unauthorized("Auth is enabled but no authentication was sent with the request.")
# There is no auth_type request and no auth is enabled
elif auth_type == 'none' and not (settings.HTTP_AUTH_ENABLED or settings.OAUTH_ENABLED):
request['auth'] = None
return func(request, *args, **kwargs)
return inner
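# Usage sketch (illustrative only; the handler name is hypothetical): the wrapped
# callable receives the parsed request dict after authentication has run, e.g.
#
#   @auth
#   def statements_post(request, *args, **kwargs):
#       authority = request['auth']['id'] if request['auth'] else None
#       ...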
def http_auth_helper(request):
if request['headers'].has_key('Authorization'):
auth = request['headers']['Authorization'].split()
if not request['is_authenticated']:
if len(auth) == 2:
if auth[0].lower() == 'basic':
# Currently, only basic http auth is used.
uname, passwd = base64.b64decode(auth[1]).split(':')
user = authenticate(username=uname, password=passwd)
if user:
# If the user successfully logged in, then add/overwrite
# the user object of this request.
request['auth']['id'] = user
else:
raise Unauthorized("Authorization failed, please verify your username and password")
else:
user = User.objects.get(username = request['logged_user'])
request['auth']['id'] = user
else:
        # No Authorization header was provided.
raise Unauthorized("Authorization header missing")
def oauth_helper(request):
consumer = request['auth']['oauth_consumer']
token = request['auth']['oauth_token']
# Make sure consumer has been accepted by system
if consumer.status != ACCEPTED:
raise OauthUnauthorized(send_oauth_error("%s has not been authorized" % str(consumer.name)))
# make sure the token is an approved access token
if token.token_type != Token.ACCESS or not token.is_approved:
raise OauthUnauthorized(send_oauth_error("The access token is not valid"))
user = token.user
user_name = user.username
if user.email.startswith('mailto:'):
user_email = user.email
else:
user_email = 'mailto:%s' % user.email
consumer = token.consumer
members = [
{
"account":{
"name":consumer.key,
"homePage":"lrs://XAPI/OAuth/token/"
},
"objectType": "Agent",
"oauth_identifier": "anonoauth:%s" % (consumer.key)
},
{
"name":user_name,
"mbox":user_email,
"objectType": "Agent"
}
]
kwargs = {"objectType":"Group", "member":members,"oauth_identifier": "anongroup:%s-%s" % (consumer.key, user_email)}
# create/get oauth group and set in dictionary
oauth_group, created = Agent.objects.oauth_group(**kwargs)
request['auth']['id'] = oauth_group
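# Added commentary: the group built above pairs the OAuth consumer application
# with the authorizing user, which is how the xAPI OAuth workflow expects the
# statement authority to be recorded.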
| apache-2.0 | -1,585,675,415,629,788,400 | 48.153061 | 128 | 0.59939 | false |
nibrahim/PlasTeX | setup.py | 1 | 2773 | #!/usr/bin/env python
from distutils.core import setup
templates = ['*.html','*.htm','*.xml','*.zpt','*.zpts']
images = ['*.gif','*.png','*.jpg','*.jpeg','*.js','*.htc']
styles = ['*.css']
setup(name="plasTeX",
description="LaTeX document processing framework",
version="0.9.3",
author="Kevin D. Smith",
author_email="[email protected]",
#url="",
packages = [
'plasTeX',
'plasTeX.Base',
'plasTeX.Base.LaTeX',
'plasTeX.Base.TeX',
'plasTeX.ConfigManager',
'plasTeX.DOM',
'plasTeX.Imagers',
'plasTeX.Packages',
'plasTeX.Renderers',
'plasTeX.Renderers.XHTML',
'plasTeX.Renderers.XHTML.Themes',
'plasTeX.Renderers.XHTML.Themes.default',
'plasTeX.Renderers.XHTML.Themes.default.icons',
'plasTeX.Renderers.XHTML.Themes.default.styles',
'plasTeX.Renderers.XHTML.Themes.python',
'plasTeX.Renderers.XHTML.Themes.python.icons',
'plasTeX.Renderers.XHTML.Themes.plain',
'plasTeX.Renderers.XHTML.Themes.minimal',
'plasTeX.Renderers.DocBook',
'plasTeX.Renderers.DocBook.Themes.default',
'plasTeX.Renderers.DocBook.Themes.book',
'plasTeX.Renderers.DocBook.Themes.article',
'plasTeX.Renderers.ManPage',
'plasTeX.Renderers.Text',
'plasTeX.Renderers.ZPT',
'plasTeX.Renderers.PageTemplate',
'plasTeX.Renderers.PageTemplate.simpletal',
'plasTeX.Renderers.S5',
'plasTeX.Renderers.S5.Themes',
'plasTeX.Renderers.S5.Themes.default',
'plasTeX.Renderers.S5.Themes.default.ui',
'plasTeX.Renderers.S5.Themes.default.ui.default',
],
package_data = {
'plasTeX': ['*.xml'],
'plasTeX.Base.LaTeX': ['*.xml','*.txt'],
'plasTeX.Renderers.DocBook': templates,
'plasTeX.Renderers.DocBook.Themes.default': templates,
'plasTeX.Renderers.DocBook.Themes.book': templates,
'plasTeX.Renderers.DocBook.Themes.article': templates,
'plasTeX.Renderers.XHTML': templates,
'plasTeX.Renderers.XHTML.Themes.default': templates,
'plasTeX.Renderers.XHTML.Themes.default.icons': images,
'plasTeX.Renderers.XHTML.Themes.default.styles': styles,
'plasTeX.Renderers.XHTML.Themes.python': templates+styles,
'plasTeX.Renderers.XHTML.Themes.python.icons': images,
'plasTeX.Renderers.XHTML.Themes.plain': templates,
'plasTeX.Renderers.S5': templates,
'plasTeX.Renderers.S5.Themes.default': templates,
'plasTeX.Renderers.S5.Themes.default.ui.default': templates+styles+images,
},
scripts=['plasTeX/plastex','plasTeX/Imagers/cgpdfpng'],
)
| mit | -4,225,879,163,444,563,500 | 39.779412 | 83 | 0.621349 | false |
ResearchSoftwareInstitute/greendatatranslator | src/greentranslator/python-client/test/test_default_api.py | 1 | 2301 | # coding: utf-8
"""
Environmental Exposures API
Environmental Exposures API
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.apis.default_api import DefaultApi
class TestDefaultApi(unittest.TestCase):
""" DefaultApi unit test stubs """
def setUp(self):
self.api = swagger_client.apis.default_api.DefaultApi()
def tearDown(self):
pass
def test_exposures_exposure_type_coordinates_get(self):
"""
Test case for exposures_exposure_type_coordinates_get
Get exposure location(s) as latitude, longitude coordinates
"""
pass
def test_exposures_exposure_type_dates_get(self):
"""
Test case for exposures_exposure_type_dates_get
Get exposure start date and end date range for exposure type
"""
pass
def test_exposures_exposure_type_scores_get(self):
"""
Test case for exposures_exposure_type_scores_get
Get exposure score for a given environmental factor at exposure location(s)
"""
pass
def test_exposures_exposure_type_values_get(self):
"""
Test case for exposures_exposure_type_values_get
Get exposure value for a given environmental factor at exposure location(s)
"""
pass
def test_exposures_get(self):
"""
Test case for exposures_get
Get list of exposure types
"""
pass
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 2,802,689,798,953,937,000 | 25.448276 | 83 | 0.670143 | false |
dgraziotin/dycapo | server/models/__init__.py | 1 | 1083 | """
Copyright 2010 Daniel Graziotin <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from server.models.location import Location
from server.models.person import Person
from server.models.modality import Modality
from server.models.preferences import Preferences
from server.models.trip import Trip
from server.models.participation import Participation
from server.models.response import Response
from server.models.search import Search
__all__ = ['Location', 'Person', 'Preferences', 'Trip', 'Participation', 'Response', 'Modality', 'Search']
| apache-2.0 | 4,621,754,905,166,811,000 | 40.653846 | 106 | 0.77193 | false |
artemrizhov/django-mail-templated | docs/conf.py | 1 | 9652 | # -*- coding: utf-8 -*-
#
# Django Mail Templated documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 17 21:51:15 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('_ext'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'sphinxcontrib.napoleon',
'djangodocs',
]
intersphinx_mapping = {
'django': ('http://django.readthedocs.org/en/stable', None),
'python': ('https://docs.python.org/3', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django Mail Templated'
copyright = u'2016, Artem Rizhov'
author = u'Artem Rizhov'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'2.4'
# The full version, including alpha/beta/rc tags.
release = u'2.4.7'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoMailTemplateddoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'DjangoMailTemplated.tex', u'Django Mail Templated Documentation',
u'Artem Rizhov', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'djangomailtemplated', u'Django Mail Templated Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'DjangoMailTemplated', u'Django Mail Templated Documentation',
author, 'DjangoMailTemplated', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit | -4,400,840,093,290,225,700 | 31.718644 | 83 | 0.706486 | false |
kaushik94/sympy | sympy/functions/__init__.py | 2 | 4965 | """A functions module, includes all the standard functions.
Combinatorial - factorial, fibonacci, harmonic, bernoulli...
Elementary - hyperbolic, trigonometric, exponential, floor and ceiling, sqrt...
Special - gamma, zeta,spherical harmonics...
"""
from sympy.functions.combinatorial.factorials import (factorial, factorial2,
rf, ff, binomial, RisingFactorial, FallingFactorial, subfactorial)
from sympy.functions.combinatorial.numbers import (carmichael, fibonacci, lucas, tribonacci,
harmonic, bernoulli, bell, euler, catalan, genocchi, partition)
from sympy.functions.elementary.miscellaneous import (sqrt, root, Min, Max,
Id, real_root, cbrt)
from sympy.functions.elementary.complexes import (re, im, sign, Abs,
conjugate, arg, polar_lift, periodic_argument, unbranched_argument,
principal_branch, transpose, adjoint, polarify, unpolarify)
from sympy.functions.elementary.trigonometric import (sin, cos, tan,
sec, csc, cot, sinc, asin, acos, atan, asec, acsc, acot, atan2)
from sympy.functions.elementary.exponential import (exp_polar, exp, log,
LambertW)
from sympy.functions.elementary.hyperbolic import (sinh, cosh, tanh, coth,
sech, csch, asinh, acosh, atanh, acoth, asech, acsch)
from sympy.functions.elementary.integers import floor, ceiling, frac
from sympy.functions.elementary.piecewise import Piecewise, piecewise_fold
from sympy.functions.special.error_functions import (erf, erfc, erfi, erf2,
erfinv, erfcinv, erf2inv, Ei, expint, E1, li, Li, Si, Ci, Shi, Chi,
fresnels, fresnelc)
from sympy.functions.special.gamma_functions import (gamma, lowergamma,
uppergamma, polygamma, loggamma, digamma, trigamma, multigamma)
from sympy.functions.special.zeta_functions import (dirichlet_eta, zeta,
lerchphi, polylog, stieltjes)
from sympy.functions.special.tensor_functions import (Eijk, LeviCivita,
KroneckerDelta)
from sympy.functions.special.singularity_functions import SingularityFunction
from sympy.functions.special.delta_functions import DiracDelta, Heaviside
from sympy.functions.special.bsplines import bspline_basis, bspline_basis_set, interpolating_spline
from sympy.functions.special.bessel import (besselj, bessely, besseli, besselk,
hankel1, hankel2, jn, yn, jn_zeros, hn1, hn2, airyai, airybi, airyaiprime, airybiprime, marcumq)
from sympy.functions.special.hyper import hyper, meijerg, appellf1
from sympy.functions.special.polynomials import (legendre, assoc_legendre,
hermite, chebyshevt, chebyshevu, chebyshevu_root, chebyshevt_root,
laguerre, assoc_laguerre, gegenbauer, jacobi, jacobi_normalized)
from sympy.functions.special.spherical_harmonics import Ynm, Ynm_c, Znm
from sympy.functions.special.elliptic_integrals import (elliptic_k,
elliptic_f, elliptic_e, elliptic_pi)
from sympy.functions.special.beta_functions import beta
from sympy.functions.special.mathieu_functions import (mathieus, mathieuc,
mathieusprime, mathieucprime)
ln = log
__all__ = [
'factorial', 'factorial2', 'rf', 'ff', 'binomial', 'RisingFactorial',
'FallingFactorial', 'subfactorial',
'carmichael', 'fibonacci', 'lucas', 'tribonacci', 'harmonic', 'bernoulli',
'bell', 'euler', 'catalan', 'genocchi', 'partition',
'sqrt', 'root', 'Min', 'Max', 'Id', 'real_root', 'cbrt',
're', 'im', 'sign', 'Abs', 'conjugate', 'arg', 'polar_lift',
'periodic_argument', 'unbranched_argument', 'principal_branch',
'transpose', 'adjoint', 'polarify', 'unpolarify',
'sin', 'cos', 'tan', 'sec', 'csc', 'cot', 'sinc', 'asin', 'acos', 'atan',
'asec', 'acsc', 'acot', 'atan2',
'exp_polar', 'exp', 'ln', 'log', 'LambertW',
'sinh', 'cosh', 'tanh', 'coth', 'sech', 'csch', 'asinh', 'acosh', 'atanh',
'acoth', 'asech', 'acsch',
'floor', 'ceiling', 'frac',
'Piecewise', 'piecewise_fold',
'erf', 'erfc', 'erfi', 'erf2', 'erfinv', 'erfcinv', 'erf2inv', 'Ei',
'expint', 'E1', 'li', 'Li', 'Si', 'Ci', 'Shi', 'Chi', 'fresnels',
'fresnelc',
'gamma', 'lowergamma', 'uppergamma', 'polygamma', 'loggamma', 'digamma',
'trigamma', 'multigamma',
'dirichlet_eta', 'zeta', 'lerchphi', 'polylog', 'stieltjes',
'Eijk', 'LeviCivita', 'KroneckerDelta',
'SingularityFunction',
'DiracDelta', 'Heaviside',
'bspline_basis', 'bspline_basis_set', 'interpolating_spline',
'besselj', 'bessely', 'besseli', 'besselk', 'hankel1', 'hankel2', 'jn',
'yn', 'jn_zeros', 'hn1', 'hn2', 'airyai', 'airybi', 'airyaiprime',
'airybiprime', 'marcumq',
'hyper', 'meijerg', 'appellf1',
'legendre', 'assoc_legendre', 'hermite', 'chebyshevt', 'chebyshevu',
'chebyshevu_root', 'chebyshevt_root', 'laguerre', 'assoc_laguerre',
'gegenbauer', 'jacobi', 'jacobi_normalized',
'Ynm', 'Ynm_c', 'Znm',
'elliptic_k', 'elliptic_f', 'elliptic_e', 'elliptic_pi',
'beta',
'mathieus', 'mathieuc', 'mathieusprime', 'mathieucprime',
]
| bsd-3-clause | -5,652,737,146,578,908,000 | 44.136364 | 104 | 0.69003 | false |
gem/oq-engine | openquake/hazardlib/gsim/bindi_2011.py | 1 | 14198 | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2014-2021 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`BindiEtAl2011`.
"""
import numpy as np
from scipy.constants import g
from openquake.hazardlib.gsim.base import GMPE, CoeffsTable
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGA, PGV, SA
def _compute_distance(rup, dists, C):
"""
    Compute the second term of equation 1, described in paragraph 3:
``c1 + c2 * (M-Mref) * log(sqrt(Rjb ** 2 + h ** 2)/Rref) -
c3*(sqrt(Rjb ** 2 + h ** 2)-Rref)``
"""
mref = 5.0
rref = 1.0
rval = np.sqrt(dists.rjb ** 2 + C['h'] ** 2)
return (C['c1'] + C['c2'] * (rup.mag - mref)) *\
np.log10(rval / rref) - C['c3'] * (rval - rref)
def _compute_magnitude(rup, C):
"""
    Compute the third term of equation 1:
e1 + b1 * (M-Mh) + b2 * (M-Mh)**2 for M<=Mh
e1 + b3 * (M-Mh) otherwise
"""
m_h = 6.75
b_3 = 0.0
if rup.mag <= m_h:
return C["e1"] + (C['b1'] * (rup.mag - m_h)) +\
(C['b2'] * (rup.mag - m_h) ** 2)
else:
return C["e1"] + (b_3 * (rup.mag - m_h))
def _get_delta(coeffs, imt, mag):
# Get the coefficients needed to compute the delta used for scaling
tmp = coeffs['a']*mag**2. + coeffs['b']*mag + coeffs['c']
return tmp
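# Added commentary: this delta is a magnitude-dependent adjustment
# (a*M**2 + b*M + c, coefficients from COEFFS_DELTA) that the Ita19 lower/upper
# subclasses subtract from or add to the mean (via their sgn attribute in
# get_mean_and_stddevs) to form the two branches of the backbone model.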
def _get_fault_type_dummy_variables(rup):
"""
Fault type (Strike-slip, Normal, Thrust/reverse) is
derived from rake angle.
Rakes angles within 30 of horizontal are strike-slip,
angles from 30 to 150 are reverse, and angles from
-30 to -150 are normal.
Note that the 'Unspecified' case is not considered,
because rake is always given.
"""
U, SS, NS, RS = 0, 0, 0, 0
if np.abs(rup.rake) <= 30.0 or (180.0 - np.abs(rup.rake)) <= 30.0:
# strike-slip
SS = 1
elif rup.rake > 30.0 and rup.rake < 150.0:
# reverse
RS = 1
else:
# normal
NS = 1
return U, SS, NS, RS
def _get_mechanism(rup, C):
"""
    Compute the fifth term of equation 1:
Get fault type dummy variables, see Table 1
"""
U, SS, NS, RS = _get_fault_type_dummy_variables(rup)
return C['f1'] * NS + C['f2'] * RS + C['f3'] * SS
def _get_site_amplification(sites, C):
"""
    Compute the fourth term of equation 1:
The functional form Fs in Eq. (1) represents the site amplification and
it is given by FS = sj Cj , for j = 1,...,5, where sj are the
coefficients to be determined through the regression analysis,
while Cj are dummy variables used to denote the five different EC8
site classes
"""
ssa, ssb, ssc, ssd, sse = _get_site_type_dummy_variables(sites)
return (C['sA'] * ssa) + (C['sB'] * ssb) + (C['sC'] * ssc) + \
(C['sD'] * ssd) + (C['sE'] * sse)
def _get_site_type_dummy_variables(sites):
"""
Get site type dummy variables, five different EC8 site classes
he recording sites are classified into 5 classes,
based on the shear wave velocity intervals in the uppermost 30 m, Vs30,
according to the EC8 (CEN 2003):
class A: Vs30 > 800 m/s
class B: Vs30 = 360 − 800 m/s
class C: Vs30 = 180 - 360 m/s
class D: Vs30 < 180 m/s
class E: 5 to 20 m of C- or D-type alluvium underlain by
stiffer material with Vs30 > 800 m/s.
"""
ssa = np.zeros(len(sites.vs30))
ssb = np.zeros(len(sites.vs30))
ssc = np.zeros(len(sites.vs30))
ssd = np.zeros(len(sites.vs30))
sse = np.zeros(len(sites.vs30))
# Class E Vs30 = 0 m/s. We fixed this value to define class E
idx = (np.fabs(sites.vs30) < 1E-10)
sse[idx] = 1.0
# Class D; Vs30 < 180 m/s.
idx = (sites.vs30 >= 1E-10) & (sites.vs30 < 180.0)
ssd[idx] = 1.0
# SClass C; 180 m/s <= Vs30 <= 360 m/s.
idx = (sites.vs30 >= 180.0) & (sites.vs30 < 360.0)
ssc[idx] = 1.0
# Class B; 360 m/s <= Vs30 <= 800 m/s.
idx = (sites.vs30 >= 360.0) & (sites.vs30 < 800)
ssb[idx] = 1.0
# Class A; Vs30 > 800 m/s.
idx = (sites.vs30 >= 800.0)
ssa[idx] = 1.0
return ssa, ssb, ssc, ssd, sse
def _get_stddevs(C, stddev_types, num_sites):
"""
Return standard deviations as defined in table 1.
"""
stddevs = []
for stddev_type in stddev_types:
if stddev_type == const.StdDev.TOTAL:
stddevs.append(C['SigmaTot'] + np.zeros(num_sites))
elif stddev_type == const.StdDev.INTRA_EVENT:
stddevs.append(C['SigmaW'] + np.zeros(num_sites))
elif stddev_type == const.StdDev.INTER_EVENT:
stddevs.append(C['SigmaB'] + np.zeros(num_sites))
return stddevs
class BindiEtAl2011(GMPE):
"""
Implements GMPE developed by D.Bindi, F.Pacor, L.Luzi, R.Puglia,
M.Massa, G. Ameri, R. Paolucci and published as "Ground motion
prediction equations derived from the Italian strong motion data",
Bull Earthquake Eng, DOI 10.1007/s10518-011-9313-z.
SA are given up to 2 s.
The regressions are developed considering the geometrical mean of the
as-recorded horizontal components
"""
#: Supported tectonic region type is 'active shallow crust' because the
#: equations have been derived from data from Italian database ITACA, as
#: explained in the 'Introduction'.
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.ACTIVE_SHALLOW_CRUST
#: Set of :mod:`intensity measure types <openquake.hazardlib.imt>`
#: this GSIM can calculate. A set should contain classes from module
#: :mod:`openquake.hazardlib.imt`.
DEFINED_FOR_INTENSITY_MEASURE_TYPES = {PGA, PGV, SA}
#: Supported intensity measure component is the geometric mean of two
#: horizontal components
DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.AVERAGE_HORIZONTAL
#: Supported standard deviation types are inter-event, intra-event
#: and total, page 1904
DEFINED_FOR_STANDARD_DEVIATION_TYPES = {
const.StdDev.TOTAL, const.StdDev.INTER_EVENT, const.StdDev.INTRA_EVENT}
#: Required site parameter is only Vs30
REQUIRES_SITES_PARAMETERS = {'vs30'}
#: Required rupture parameters are magnitude and rake (eq. 1).
REQUIRES_RUPTURE_PARAMETERS = {'rake', 'mag'}
#: Required distance measure is RRup (eq. 1).
REQUIRES_DISTANCES = {'rjb'}
sgn = 0
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# extracting dictionary of coefficients specific to required
# intensity measure type.
C = self.COEFFS[imt]
imean = (_compute_magnitude(rup, C) +
_compute_distance(rup, dists, C) +
_get_site_amplification(sites, C) +
_get_mechanism(rup, C))
istddevs = _get_stddevs(C, stddev_types, len(sites.vs30))
# Convert units to g,
# but only for PGA and SA (not PGV):
if imt.string.startswith(('PGA', 'SA')):
mean = np.log((10.0 ** (imean - 2.0)) / g)
else:
# PGV:
mean = np.log(10.0 ** imean)
# Return stddevs in terms of natural log scaling
stddevs = np.log(10.0 ** np.array(istddevs))
# mean_LogNaturale = np.log((10 ** mean) * 1e-2 / g)
if self.sgn:
mean += self.sgn * _get_delta(self.COEFFS_DELTA[imt], imt, rup.mag)
return mean, stddevs
#: Coefficients from SA from Table 1
#: Coefficients from PGA e PGV from Table 5
COEFFS = CoeffsTable(sa_damping=5, table="""
IMT e1 c1 c2 h c3 b1 b2 sA sB sC sD sE f1 f2 f3 f4 SigmaB SigmaW SigmaTot
pgv 2.305 -1.5170 0.3260 7.879 0.000000 0.2360 -0.00686 0.0 0.2050 0.269 0.321 0.428 -0.0308 0.0754 -0.0446 0.0 0.194 0.270 0.332
pga 3.672 -1.9400 0.4130 10.322 0.000134 -0.2620 -0.07070 0.0 0.1620 0.240 0.105 0.570 -0.0503 0.1050 -0.0544 0.0 0.172 0.290 0.337
0.04 3.725 -1.9760 0.4220 9.445 0.000270 -0.3150 -0.07870 0.0 0.1610 0.240 0.060 0.614 -0.0442 0.1060 -0.0615 0.0 0.154 0.307 0.343
0.07 3.906 -2.0500 0.4460 9.810 0.000758 -0.3750 -0.07730 0.0 0.1540 0.235 0.057 0.536 -0.0454 0.1030 -0.0576 0.0 0.152 0.324 0.358
0.10 3.796 -1.7940 0.4150 9.500 0.002550 -0.2900 -0.06510 0.0 0.1780 0.247 0.037 0.599 -0.0656 0.1110 -0.0451 0.0 0.154 0.328 0.363
0.15 3.799 -1.5210 0.3200 9.163 0.003720 -0.0987 -0.05740 0.0 0.1740 0.240 0.148 0.740 -0.0755 0.1230 -0.0477 0.0 0.179 0.318 0.365
0.20 3.750 -1.3790 0.2800 8.502 0.003840 0.0094 -0.05170 0.0 0.1560 0.234 0.115 0.556 -0.0733 0.1060 -0.0328 0.0 0.209 0.320 0.382
0.25 3.699 -1.3400 0.2540 7.912 0.003260 0.0860 -0.04570 0.0 0.1820 0.245 0.154 0.414 -0.0568 0.1100 -0.0534 0.0 0.212 0.308 0.374
0.30 3.753 -1.4140 0.2550 8.215 0.002190 0.1240 -0.04350 0.0 0.2010 0.244 0.213 0.301 -0.0564 0.0877 -0.0313 0.0 0.218 0.290 0.363
0.35 3.600 -1.3200 0.2530 7.507 0.002320 0.1540 -0.04370 0.0 0.2200 0.257 0.243 0.235 -0.0523 0.0905 -0.0382 0.0 0.221 0.283 0.359
0.40 3.549 -1.2620 0.2330 6.760 0.002190 0.2250 -0.04060 0.0 0.2290 0.255 0.226 0.202 -0.0565 0.0927 -0.0363 0.0 0.210 0.279 0.349
0.45 3.550 -1.2610 0.2230 6.775 0.001760 0.2920 -0.03060 0.0 0.2260 0.271 0.237 0.181 -0.0597 0.0886 -0.0289 0.0 0.204 0.284 0.350
0.50 3.526 -1.1810 0.1840 5.992 0.001860 0.3840 -0.02500 0.0 0.2180 0.280 0.263 0.168 -0.0599 0.0850 -0.0252 0.0 0.203 0.283 0.349
0.60 3.561 -1.2300 0.1780 6.382 0.001140 0.4360 -0.02270 0.0 0.2190 0.296 0.355 0.142 -0.0559 0.0790 -0.0231 0.0 0.203 0.283 0.348
0.70 3.485 -1.1720 0.1540 5.574 0.000942 0.5290 -0.01850 0.0 0.2100 0.303 0.496 0.134 -0.0461 0.0896 -0.0435 0.0 0.212 0.283 0.354
0.80 3.325 -1.1150 0.1630 4.998 0.000909 0.5450 -0.02150 0.0 0.2100 0.304 0.621 0.150 -0.0457 0.0795 -0.0338 0.0 0.213 0.284 0.355
0.90 3.318 -1.1370 0.1540 5.231 0.000483 0.5630 -0.02630 0.0 0.2120 0.315 0.680 0.154 -0.0351 0.0715 -0.0364 0.0 0.214 0.286 0.357
1.00 3.264 -1.1140 0.1400 5.002 0.000254 0.5990 -0.02700 0.0 0.2210 0.332 0.707 0.152 -0.0298 0.0660 -0.0362 0.0 0.222 0.283 0.360
1.25 2.896 -0.9860 0.1730 4.340 0.000783 0.5790 -0.03360 0.0 0.2440 0.365 0.717 0.183 -0.0207 0.0614 -0.0407 0.0 0.227 0.290 0.368
1.50 2.675 -0.9600 0.1920 4.117 0.000802 0.5750 -0.03530 0.0 0.2510 0.375 0.667 0.203 -0.0140 0.0505 -0.0365 0.0 0.218 0.303 0.373
1.75 2.584 -1.0060 0.2050 4.505 0.000427 0.5740 -0.03710 0.0 0.2520 0.357 0.593 0.220 0.00154 0.0370 -0.0385 0.0 0.219 0.305 0.376
2.00 2.537 -1.0090 0.1930 4.373 0.000164 0.5970 -0.03670 0.0 0.2450 0.352 0.540 0.226 0.00512 0.0350 -0.0401 0.0 0.211 0.308 0.373
2.50 2.425 -1.0290 0.1790 4.484 -0.000348 0.6550 -0.02620 0.0 0.2440 0.336 0.460 0.229 0.00561 0.0275 -0.0331 0.0 0.212 0.309 0.375
2.75 2.331 -1.0430 0.1830 4.581 -0.000617 0.6780 -0.01820 0.0 0.2320 0.335 0.416 0.232 0.01350 0.0263 -0.0398 0.0 0.203 0.310 0.370
4.00 2.058 -1.0840 0.2000 4.876 -0.000843 0.6740 -0.00621 0.0 0.1950 0.300 0.350 0.230 0.02950 0.0255 -0.0550 0.0 0.197 0.300 0.359
""")
COEFFS_DELTA = CoeffsTable(sa_damping=5, table="""
imt a b c
pga 0.101 -1.136 3.555
pgv 0.066 -0.741 2.400
0.05 0.105 -1.190 3.691
0.1 0.112 -1.284 4.001
0.15 0.094 -1.033 3.177
0.2 0.085 -0.907 2.831
0.3 0.086 -0.927 2.869
0.4 0.088 -0.974 3.076
0.5 0.083 -0.916 2.933
0.75 0.073 -0.808 2.628
1.00 0.066 -0.736 2.420
2.00 0.041 -0.512 1.888
3.00 0.050 -0.616 2.193
4.00 0.076 -0.906 3.046
""")
class BindiEtAl2011Ita19Low(BindiEtAl2011):
"""
Implements the lower term of the ITA19 backbone model.
"""
sgn = -1
class BindiEtAl2011Ita19Upp(BindiEtAl2011):
"""
Implements the upper term of the ITA19 backbone model.
"""
sgn = +1
| agpl-3.0 | 1,384,862,206,094,783,200 | 46.006623 | 194 | 0.551564 | false |
yuichi-nadawaki/sakurakocity | sakurakocity/plugins/listen.py | 1 | 3366 | # -*- coding: utf-8 -*-
from slackbot.bot import listen_to
from slackbot.bot import respond_to
import random
from .dictionaries import *
import datetime
@listen_to('らこしてぃ|さく|らこすて')
def rakosute(message):
message.send(random.choice(['なんだ?', 'よんだ?']))
@listen_to('よしよし')
def yoshiyoshi(message):
message.send(random.choice(['よしよしまきゎ']))
@listen_to('ちゎ|ちわ|ちぁ|ちあ')
def chiwa(message):
message.send(random.choice(['ちゎ!']))
@listen_to('のゎ|まきゎ|まきわ|のわ|のゎ|ちゎしてぃ|のゎしてぃ|のゎたしてぃ')
def nowa(message):
message.send(random.choice(['ちゎしてぃ!']))
@listen_to('らふこふ')
def listen(message):
message.send('らこしてぃだぞ')
@listen_to('ありがと')
def thankyou(message):
message.react('まきちゎ')
@listen_to('user_info')
def user_info(message):
user = get_user(message)
message.send(str(user))
@listen_to('しごおわ')
def shigoowa(message):
user = get_user(message)
message.send(user_dict[user['name']] + 'おつかれさまきゎだぞ。:こちたまん:')
@listen_to('結婚して|けっこんして|marrige')
@respond_to('結婚して|けっこんして|marrige')
def marrige_count(message):
diff_d = diff_day(day_dict['marrige_day'], datetime.date.today())
message.send('結婚して' + str(diff_d) + u'日だぞ。')
print(diff_year(day_dict['marrige_day'], datetime.date.today()))
@listen_to('付き合って|つきあって|couple|カップル')
@respond_to('付き合って|つきあって|couple|カップル')
def couple_count(message):
diff_d = diff_day(day_dict['couple_day'], datetime.date.today())
message.send('付き合って' + str(diff_d) + u'日だぞ。')
@listen_to('何の日|なんのひ')
@respond_to('何の日|なんのひ')
def what_day(message):
today = datetime.date.today()
if today.month == 3 and today.day == 7:
message.send('記念日だぞ')
if today.month == 10 and today.day == 10:
message.send('プロポーズの日だぞ')
if today.month == 2 and today.day == 4:
message.send('結婚式の日だぞ')
if today.month == 1 and today.day == 1:
message.send('まきちゎの誕生日だぞ')
if today.month == 1 and today.day == 13:
message.send('ゆきちゎの誕生日だぞ')
else:
message.send('ん?')
@listen_to('anniv')
@respond_to('anniv')
def anniversary(message):
message.send(str(day_dict))
@listen_to('何日目')
@respond_to('何日目')
def day_count(message):
diff_couple = diff_day(day_dict['couple_day'], datetime.date.today())
diff_marrige = diff_day(day_dict['marrige_day'], datetime.date.today())
message.send('付き合って' + str(diff_couple + 1) + u'日目、結婚して' + str(diff_marrige + 1) + u'日目だぞ。')
def diff_day(d1: datetime.date, d2: datetime.date) -> int:
if d1 > d2:
d1, d2 = d2, d1
return (d2 - d1).days
def diff_month(d1: datetime.date, d2: datetime.date) -> int:
if d1 > d2:
d1, d2 = d2, d1
return (d2.year - d1.year) * 12 + d2.month - d1.month
def diff_year(d1: datetime.date, d2: datetime.date) -> float:
if d1 > d2:
d1, d2 = d2, d1
diff_m = (d2.year - d1.year) * 12 + d2.month - d1.month
return diff_m/12
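# A few illustrative results for the date helpers above (dates invented for
# the example, not taken from the plugin itself):
#   diff_day(datetime.date(2020, 1, 1), datetime.date(2020, 1, 11))    -> 10
#   diff_month(datetime.date(2020, 1, 31), datetime.date(2020, 3, 1))  -> 2
#   diff_year(datetime.date(2019, 1, 1), datetime.date(2020, 7, 1))    -> 1.5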
| apache-2.0 | 2,115,304,670,349,832,200 | 28.061224 | 96 | 0.638343 | false |
hillwithsmallfields/qs | financial/finperiodic.py | 1 | 2706 | #!/usr/bin/python
# Program to detect periodic payments and spot gaps in them
import argparse
import csv
import datetime
import os
import qsutils
# See notes in finconv.py for config file format
secs_per_day = 24 * 60 * 60
def finperiodic_setup(app_data, input_format):
return ['payee'], {}
def finperiodic_row(timestamp, row, output_rows, scratch):
timestamp = datetime.datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S")
payee = row['payee']
amount = row.get('amount',
row.get('debits',
row.get('credits')))
if payee in scratch:
scratch[payee][timestamp] = amount
else:
scratch[payee] = {timestamp: amount}
def finperiodic_tidyup(columns, rows, scratch):
for payee, transactions in scratch.iteritems():
# print payee, transactions
dates = sorted(transactions.keys())
prev = dates[0]
intervals = []
for when in dates[1:]:
interval = int((when-prev).total_seconds() / secs_per_day)
if interval > 0: # ignore further transactions on the same day
intervals.append(interval)
prev = when
if len(intervals) > 1:
counts = {}
for interval in intervals:
counts[interval] = counts.get(interval, 0) + 1
print payee
for k in sorted(counts.keys()):
print " ", k, counts[k]
            total = sum(counts.values())
            approx_weekly = sum(counts.get(d, 0) for d in range(6, 8)) / float(total)
            approx_monthly = sum(counts.get(d, 0) for d in range(26, 34)) / float(total)
print "approx_weekly", approx_weekly
print "approx_monthly", approx_monthly
return None, None
def main():
    parser = qsutils.program_argparser()
    parser.add_argument("input_files", nargs='*')
    parser.add_argument("-o", "--output")
    parser.add_argument("-f", "--format",
                        default=None)
    parser.add_argument("input_file")
    # parse once, after all of the arguments have been declared
    args = parser.parse_args()
    config = qsutils.program_load_config(args)
# todo: deduce format of input file; should normally be financisto, or have similar data
qsutils.process_fin_csv({'args': args,
'config': qsutils.load_config(
args.verbose,
None,
None,
qsutils.program_load_config(args),
*args.config or ())},
finperiodic_setup,
finperiodic_row,
finperiodic_tidyup)
if __name__ == "__main__":
main()
| gpl-3.0 | -6,405,568,724,214,841,000 | 32.825 | 92 | 0.538803 | false |
channprj/wiki.chann.kr-source | plugin/pelican-page-hierarchy/page_hierarchy.py | 1 | 3165 | from pelican import signals, contents
import os.path
from copy import copy
from itertools import chain
'''
This plugin creates a URL hierarchy for pages that matches the
directory hierarchy of their sources.
'''
class UnexpectedException(Exception): pass
def get_path(page, settings):
''' Return the dirname relative to PAGE_PATHS prefix. '''
path = os.path.split(page.get_relative_source_path())[0] + '/'
path = path.replace( os.path.sep, '/' )
# Try to lstrip the longest prefix first
for prefix in sorted(settings['PAGE_PATHS'], key=len, reverse=True):
if not prefix.endswith('/'): prefix += '/'
if path.startswith(prefix):
return path[len(prefix):-1]
raise UnexpectedException('Page outside of PAGE_PATHS ?!?')
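# Illustrative example (settings values assumed, not from this plugin):
# with PAGE_PATHS = ['pages'] and a page whose relative source path is
# 'pages/projects/alpha/index.md', the loop strips the 'pages/' prefix and
# get_path() returns 'projects/alpha'.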
def in_default_lang(page):
# page.in_default_lang property is undocumented (=unstable) interface
return page.lang == page.settings['DEFAULT_LANG']
def override_metadata(content_object):
if type(content_object) is not contents.Page:
return
page = content_object
path = get_path(page, page.settings)
def _override_value(page, key):
metadata = copy(page.metadata)
# We override the slug to include the path up to the filename
metadata['slug'] = os.path.join(path, page.slug)
# We have to account for non-default language and format either,
# e.g., PAGE_SAVE_AS or PAGE_LANG_SAVE_AS
infix = '' if in_default_lang(page) else 'LANG_'
return page.settings['PAGE_' + infix + key.upper()].format(**metadata)
for key in ('save_as', 'url'):
if not hasattr(page, 'override_' + key):
setattr(page, 'override_' + key, _override_value(page, key))
def set_relationships(generator):
def _all_pages():
return chain(generator.pages, generator.translations)
# initialize parents and children lists
for page in _all_pages():
page.parent = None
page.parents = []
page.children = []
# set immediate parents and children
for page in _all_pages():
# Parent of /a/b/ is /a/, parent of /a/b.html is /a/
parent_url = os.path.dirname(page.url[:-1])
if parent_url: parent_url += '/'
for page2 in _all_pages():
if page2.url == parent_url and page2 != page:
page.parent = page2
page2.children.append(page)
# If no parent found, try the parent of the default language page
if not page.parent and not in_default_lang(page):
for page2 in generator.pages:
if (page.slug == page2.slug and
os.path.dirname(page.source_path) ==
os.path.dirname(page2.source_path)):
# Only set the parent but not the children, obviously
page.parent = page2.parent
# set all parents (ancestors)
for page in _all_pages():
p = page
while p.parent:
page.parents.insert(0, p.parent)
p = p.parent
def register():
signals.content_object_init.connect(override_metadata)
signals.page_generator_finalized.connect(set_relationships)
| mit | -176,065,135,195,253,900 | 36.235294 | 78 | 0.624013 | false |
epinna/weevely3 | tests/test_file_read.py | 1 | 2238 | from tests.base_test import BaseTest
from testfixtures import log_capture
from tests import config
from core.sessions import SessionURL
from core import modules
from core import messages
import subprocess
import tempfile
import datetime
import logging
import os
def setUpModule():
subprocess.check_output("""
BASE_FOLDER="{config.base_folder}/test_file_read/"
rm -rf "$BASE_FOLDER"
mkdir -p "$BASE_FOLDER"
echo -n 'OK' > "$BASE_FOLDER/ok.test"
echo -n 'KO' > "$BASE_FOLDER/ko.test"
# Set ko.test to ---x--x--x 0111 execute, should be no readable
chmod 0111 "$BASE_FOLDER/ko.test"
""".format(
config = config
), shell=True)
class FileRead(BaseTest):
def setUp(self):
session = SessionURL(self.url, self.password, volatile = True)
modules.load_modules(session)
self.run_argv = modules.loaded['file_read'].run_argv
def test_read_php(self):
# Simple download
self.assertEqual(self.run_argv(['test_file_read/ok.test']), b'OK')
        # Download binary. Skip the check because I don't know the remote content, and
# the md5 check is already done inside file_download.
self.assertTrue(self.run_argv(['/bin/ls']))
# Download of an unreadable file
self.assertEqual(self.run_argv(['test_file_read/ko.test']), None)
# Download of an remote unexistant file
self.assertEqual(self.run_argv(['bogus']), None)
def test_read_allvectors(self):
for vect in modules.loaded['file_download'].vectors.get_names():
self.assertEqual(self.run_argv(['-vector', vect, 'test_file_read/ok.test']), b'OK')
def test_read_sh(self):
# Simple download
self.assertEqual(self.run_argv(['-vector', 'base64', 'test_file_read/ok.test']), b'OK')
        # Download binary. Skip the check because I don't know the remote content, and
# the md5 check is already done inside file_download.
self.assertTrue(self.run_argv(['-vector', 'base64', '/bin/ls']))
# Download of an unreadable file
self.assertEqual(self.run_argv(['-vector', 'base64', 'test_file_read/ko.test']), None)
        # Download of a remote nonexistent file
self.assertEqual(self.run_argv(['-vector', 'base64', 'bogus']), None)
| gpl-3.0 | 322,385,826,608,075,500 | 31.434783 | 95 | 0.663539 | false |
valdt/tallefjant | labbar/lab4/tictactoe_functions.py | 1 | 1496 | # -*- coding: utf-8 -*-
import random,sys,time
def isBoxBusy(gamePlan,row, col, EMPTY):
if gamePlan[row][col] == EMPTY:
return False
return True
def computerSelectABox(gamePlan,sign,EMPTY):
size = len(gamePlan)
print("\n---Datorns tur ("+str(sign)+")---")
row = random.randrange(0,size)
col = random.randrange(0,size)
while isBoxBusy(gamePlan, row, col,EMPTY):
row = random.randrange(0,size)
col = random.randrange(0,size)
print("Ange raden:",end = " ")
sys.stdout.flush()
time.sleep(0.6)
print(row)
print("Ange kolumnen:",end = " ")
sys.stdout.flush()
time.sleep(1)
print(col)
time.sleep(0.6)
return row,col
def count(spelplan,x,y, xr, yr, tecken):
if -1<x+xr<len(spelplan) and -1<y+yr<len(spelplan):
if spelplan[x+xr][y+yr] != tecken :
return 0
else:
return 1+count(spelplan,x+xr,y+yr,xr,yr,tecken)
else:
return 0
def lookForWinner(spelplan,x,y,VINRAD):
t=spelplan[x][y]
if (count(spelplan,x,y,1,0,t) + count(spelplan,x,y,-1,0,t)+1>=VINRAD):
return True
if (count(spelplan,x,y,0,1,t) + count(spelplan,x,y,0,-1,t)+1>=VINRAD):
return True
if (count(spelplan,x,y,1,1,t) + count(spelplan,x,y,-1,-1,t)+1>=VINRAD):
return True
if (count(spelplan,x,y,-1,1,t) + count(spelplan,x,y,1,-1,t)+1>=VINRAD):
return True
else: return False
if __name__=="__main__":
pass | gpl-3.0 | 6,601,901,868,171,264,000 | 27.788462 | 79 | 0.576872 | false |
praveenv253/polyproject | tests/test_polyproject.py | 1 | 1338 | #!/usr/bin/env python3
from __future__ import print_function, division
import numpy as np
from polyproject import polyproject
def test_diamond():
a = np.array([[2, 1],
[1, 2],
[2, 3],
[3, 2]])
point = np.array([0, 0])
from scipy.spatial import ConvexHull
c = ConvexHull(a)
x, _ = polyproject(point, polyhedron=c.equations)
assert np.allclose(x, np.array([1.5, 1.5]))
def test_square():
a = np.array([[1, 1],
[1, 2],
[2, 1],
[2, 2]])
point = np.array([0, 0])
x, _ = polyproject(point, vertices=a)
assert np.allclose(x, np.array([1, 1]))
def test_value_error():
import pytest
point = np.array([0, 0])
with pytest.raises(ValueError, message='Expecting ValueError'):
polyproject(point)
#def test_plot():
# import matplotlib.pyplot as plt
# a = np.array([[2, 1],
# [1, 2],
# [2, 3],
# [3, 2]])
# point = np.array([0, 0])
#
# from scipy.spatial import ConvexHull, convex_hull_plot_2d
# c = ConvexHull(a)
#
# x, _ = polyproject(point, polyhedron=c.equations)
#
# convex_hull_plot_2d(c, plt.gca())
# plt.plot([point[0], x[0]], [point[1], x[1]], 'C1x-')
#
# # TODO: What do we assert?
| mit | 2,402,585,770,296,189,000 | 21.3 | 67 | 0.516442 | false |
Schluucht/Destiny | destiny/test/test_api_call.py | 1 | 3437 | import destiny.settings as settings
from destiny.main.api_call import do_query, get_challenger, get_league_by_summoner, get_acount_id, get_matchlist, \
get_match, get_timeline, get_champion
import pytest
from destiny.main.destinyexception import DestinyApiCallException
@pytest.fixture
def id_summoner():
return 56947948
@pytest.fixture
def id_account():
return 209493252
@pytest.fixture
def id_match():
return 3181575441
def test_do_query():
"""
Tests `api_call.do_query` function.
Use the function against prepared urls and check that the returned results are not empty.
"""
urls = {
"timelines":
settings.REGION + "/lol/match/v3/timelines/by-match/3181575441?api_key=" + settings.API_KEY,
"matches":
settings.REGION + "/lol/match/v3/matches/3181575441?api_key=" + settings.API_KEY,
"summoners":
settings.REGION + "/lol/summoner/v3/summoners/56947948?api_key=" + settings.API_KEY,
"matchlist":
settings.REGION + "/lol/match/v3/matchlists/by-account/209493252/recent?api_key=" + settings.API_KEY
}
for _type, url in urls.items():
assert len(do_query(url)) > 0
with pytest.raises(DestinyApiCallException) as DE401:
url_401 = "https://euw1.api.riotgames.com//lol/unauthorized/"
do_query(url_401)
assert DE401.value.err_code == 401
with pytest.raises(DestinyApiCallException) as DE404:
url_404 = "https://euw1.api.riotgames.com//lol/match/v3/matches/31815751235441?api_key=" + settings.API_KEY
do_query(url_404)
assert DE404.value.err_code == 404
with pytest.raises(DestinyApiCallException) as DE403:
url_403 = "https://euw1.api.riotgames.com//lol/match/v3/matches/31815751235441?api_key=invalid"
do_query(url_403)
assert DE403.value.err_code == 403
def test_get_challenger():
"""
Tests `api_call.get_challenger()` function.
Tests if the returned dict contains something.
:return:
"""
assert len(get_challenger()) > 0
def test_get_league_by_summoner(id_summoner):
"""
API documentation: https://developer.riotgames.com/api-methods/#league-v3/GET_getAllLeaguesForSummoner
:param id_summoner:
:return:
"""
assert len(get_league_by_summoner(id_summoner)) > 0
def test_get_acount_id(id_summoner):
"""
API documentation: https://developer.riotgames.com/api-methods/#summoner-v3/GET_getBySummonerId
:param id_summoner:
:return:
"""
assert len(get_acount_id(id_summoner)) > 0
def test_get_matchlist(id_account):
"""
API documentation: https://developer.riotgames.com/api-methods/#match-v3/GET_getRecentMatchlist
:param id_account:
:return:
"""
assert len(get_matchlist(id_account)) > 0
def test_get_match(id_match):
"""
API documentation: https://developer.riotgames.com/api-methods/#match-v3/GET_getMatch
:param id_match:
:return:
"""
assert len(get_match(id_match)) > 0
def test_get_timeline(id_match):
"""
API documentation: https://developer.riotgames.com/api-methods/#match-v3/GET_getMatchTimeline
:param id_match:
:return:
"""
assert len(get_timeline(id_match)) > 0
def test_get_champion():
"""
API documentation: https://developer.riotgames.com/api-methods/#static-data-v3/GET_getChampionList
:return:
"""
assert len(get_champion()) > 0
| mit | -6,181,509,911,528,919,000 | 26.496 | 115 | 0.668606 | false |
DTOcean/dtocean-core | tests/test_data_definitions_cartesianlistdict.py | 1 | 5501 | import pytest
from aneris.control.factory import InterfaceFactory
from dtocean_core.core import (AutoFileInput,
AutoFileOutput,
AutoQuery,
Core)
from dtocean_core.data import CoreMetaData
from dtocean_core.data.definitions import (CartesianListDict,
CartesianListDictColumn)
def test_CartesianListDict_available():
new_core = Core()
all_objs = new_core.control._store._structures
assert "CartesianListDict" in all_objs.keys()
def test_CartesianListDict():
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test"})
test = CartesianListDict()
raw = {"a": [(0, 1), (1, 2)], "b": [(3, 4), (4, 5)]}
a = test.get_data(raw, meta)
b = test.get_value(a)
assert len(b) == 2
assert b["a"][0][0] == 0
assert b["a"][0][1] == 1
raw = {"a": [(0, 1, -1), (1, 2, -2)], "b": [(3, 4, -3), (4, 5, -5)]}
a = test.get_data(raw, meta)
b = test.get_value(a)
assert len(b) == 2
assert b["a"][0][0] == 0
assert b["a"][0][1] == 1
assert b["a"][0][2] == -1
raw = {"a": [(0, 1, -1, 1)]}
with pytest.raises(ValueError):
test.get_data(raw, meta)
def test_get_None():
test = CartesianListDict()
result = test.get_value(None)
assert result is None
@pytest.mark.parametrize("fext", [".csv", ".xls", ".xlsx"])
def test_CartesianListDict_auto_file(tmpdir, fext):
test_path = tmpdir.mkdir("sub").join("test{}".format(fext))
test_path_str = str(test_path)
raws = [{"a": [(0, 1), (1, 2)], "b": [(3, 4), (4, 5)]},
{"a": [(0, 1, -1), (1, 2, -2)], "b": [(3, 4, -3), (4, 5, -5)]}]
for raw in raws:
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test"})
test = CartesianListDict()
fout_factory = InterfaceFactory(AutoFileOutput)
FOutCls = fout_factory(meta, test)
fout = FOutCls()
fout._path = test_path_str
fout.data.result = test.get_data(raw, meta)
fout.connect()
assert len(tmpdir.listdir()) == 1
fin_factory = InterfaceFactory(AutoFileInput)
FInCls = fin_factory(meta, test)
fin = FInCls()
fin._path = test_path_str
fin.connect()
result = test.get_data(fin.data.result, meta)
assert len(result) == 2
assert result["a"][0][0] == 0
assert result["a"][0][1] == 1
def test_CartesianListDictColumn_available():
new_core = Core()
all_objs = new_core.control._store._structures
assert "CartesianListDictColumn" in all_objs.keys()
def test_CartesianListDictColumn_auto_db(mocker):
raws = [{"a": [(0, 1), (1, 2)], "b": [(3, 4), (4, 5)]},
{"a": [(0, 1, -1), (1, 2, -2)], "b": [(3, 4, -3), (4, 5, -5)]}]
for raw in raws:
mock_lists = [raw.keys(), raw.values()]
mocker.patch('dtocean_core.data.definitions.get_all_from_columns',
return_value=mock_lists,
autospec=True)
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"tables": ["mock.mock", "name", "position"]})
test = CartesianListDictColumn()
query_factory = InterfaceFactory(AutoQuery)
QueryCls = query_factory(meta, test)
query = QueryCls()
query.meta.result = meta
query.connect()
result = test.get_data(query.data.result, meta)
assert len(result) == 2
assert result["a"][0][0] == 0
assert result["a"][0][1] == 1
def test_CartesianListDictColumn_auto_db_empty(mocker):
mock_lists = [[], []]
mocker.patch('dtocean_core.data.definitions.get_all_from_columns',
return_value=mock_lists,
autospec=True)
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"tables": ["mock.mock", "position"]})
test = CartesianListDictColumn()
query_factory = InterfaceFactory(AutoQuery)
QueryCls = query_factory(meta, test)
query = QueryCls()
query.meta.result = meta
query.connect()
assert query.data.result is None
def test_CartesianListDictColumn_auto_db_none(mocker):
mock_lists = [[None, None], [None, None]]
mocker.patch('dtocean_core.data.definitions.get_all_from_columns',
return_value=mock_lists,
autospec=True)
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"tables": ["mock.mock", "position"]})
test = CartesianListDictColumn()
query_factory = InterfaceFactory(AutoQuery)
QueryCls = query_factory(meta, test)
query = QueryCls()
query.meta.result = meta
query.connect()
assert query.data.result is None
| gpl-3.0 | 2,841,235,507,680,374,300 | 27.35567 | 75 | 0.497728 | false |
jianwei1216/my-scripts | mytest/python/operator.py | 1 | 1290 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# Bitwise operators
print " & | ^ ~ << >>"
# Logical operators
a = 10
b = 20
if (a and b):
print 'a and b = ', a and b
if (a or b):
print 'a or b = ', a or b
a = 0
if (not a):
print 'not a = ', not a
a = 21
b = 10
c = 0
# Assignment operators
print "Assignment operators"
c = a + b
c += a
print "1 - c = ", c
c *= a
print "2 - c = ", c
c /= a
print "3 - c = ", c
c = 2
c %= c
print "4 - c = ", c
c **= a
print "5 - c = ", c
c //= a
print "6 - c = ", c
# Comparison operators
print "Comparison operators"
if (a == b):
print "1 - a == b"
else:
print "1 - a != b"
if (a != b):
print "2 - a != b"
else:
print "2 - a == b"
if (a <> b):
print "3 - a != b"
else:
print "3 - a == b"
if (a < b):
print "4 - a < b"
else:
print "4 - a >= b"
if (a > b):
print "5 - a > b"
else:
print "5 - a <= b"
if (a >= b):
print "6 - a >= b"
else:
print "6 - a < b"
if (a <= b):
print "7 - a <= b"
else:
print "7 - a > b"
# + - * ** / // operators
print "+ - * / ** //"
c = a + b
print "1 - c = ", c # 31
c = a - b
print "2 - c = ", c # 11
c = a * b
print "3 - c = ", c # 210
c = a / b
print "4 - c = ", c # 2
a = 2
b = 3
c = a ** b
print "6 - c = ", c # 8
a = 10
b = 5
c = a // b
print "7 - c = ", c # 2
| gpl-2.0 | -8,295,208,516,471,838,000 | 10.566038 | 31 | 0.381729 | false |
vassilux/odin | pyodin/sys/tools/asterisk.py | 1 | 1268 | #
#
#
#
import sys
import os
import socket
import fcntl
import struct
import subprocess
#asterisk bin place
ASTERISK_BIN="/usr/sbin/asterisk"
def _run_asterisk_command(command):
pipe = subprocess.Popen(['/usr/sbin/asterisk', '-nrx', command], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result = pipe.communicate()[0]
try:
pipe.terminate()
except:
pass
return result
def get_asterisk_version():
result = _run_asterisk_command("core show version")
version = result.split(" ")[1]
return version
def get_asterisk_times():
result = _run_asterisk_command("core show uptime")
uptime="0"
reloadtime = "0"
try:
uptime = result.split("\n")[0].split(":")[1]
reloadtime = result.split("\n")[1].split(":")[1]
except Exception, e:
pass
else:
pass
finally:
pass
info = {}
info['uptime'] = uptime
info['reloadtime'] = reloadtime
return info
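# The split(":")-based parsing above assumes "core show uptime" output of
# roughly this shape (illustrative only; wording varies by Asterisk version):
#
#   System uptime: 2 weeks, 3 days, 4 hours
#   Last reload: 1 day, 2 hours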
def get_asterisk_active_channels():
pass
def get_asterisk_calls():
result = _run_asterisk_command("core show calls")
active="-1"
processed="-1"
try:
active = result.split("\n")[0].split(" ")[0]
processed = result.split("\n")[1].split(" ")[0]
except Exception, e:
pass
else:
pass
finally:
pass
info = {}
info['active'] = active
info['processed'] = processed
return info
| mit | -5,730,188,208,835,077,000 | 17.376812 | 113 | 0.671136 | false |
gnachman/iTerm2 | api/library/python/iterm2/iterm2/mainmenu.py | 1 | 23375 | """Defines interfaces for accessing menu items."""
import enum
import iterm2.api_pb2
import iterm2.rpc
import typing
class MenuItemException(Exception):
"""A problem was encountered while selecting a menu item."""
class MenuItemState:
"""Describes the current state of a menu item."""
def __init__(self, checked: bool, enabled: bool):
self.__checked = checked
self.__enabled = enabled
@property
def checked(self):
"""Is the menu item checked? A `bool` property."""
return self.__checked
@property
def enabled(self):
"""
Is the menu item enabled (i.e., it can be selected)? A `bool`
property.
"""
return self.__enabled
class MenuItemIdentifier:
def __init__(self, title, identifier):
self.__title = title
self.__identifier = identifier
@property
def title(self) -> str:
return self.__title
@property
def identifier(self) -> typing.Optional[str]:
return self.__identifier
def _encode(self):
# Encodes to a key binding parameter.
if self.__identifier is None:
return self.__title
return self.__title + "\n" + self.__identifier
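    # For illustration: an item with both a title and an identifier encodes to
    # "<title>\n<identifier>" (e.g. "Copy\nCopy" for the Edit > Copy item
    # defined below), while an identifier-less item encodes to its title alone.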
class MainMenu:
"""Represents the app's main menu."""
@staticmethod
async def async_select_menu_item(connection, identifier: str):
"""Selects a menu item.
:param identifier: A string. See list of identifiers in :doc:`menu_ids`
:throws MenuItemException: if something goes wrong.
.. seealso:: Example ":ref:`zoom_on_screen_example`"
"""
response = await iterm2.rpc.async_menu_item(
connection, identifier, False)
status = response.menu_item_response.status
# pylint: disable=no-member
if status != iterm2.api_pb2.MenuItemResponse.Status.Value("OK"):
raise MenuItemException(
iterm2.api_pb2.MenuItemResponse.Status.Name(status))
@staticmethod
async def async_get_menu_item_state(
connection, identifier: str) -> MenuItemState:
"""Queries a menu item for its state.
:param identifier: A string. See list of identifiers in :doc:`menu_ids`
:throws MenuItemException: if something goes wrong.
"""
response = await iterm2.rpc.async_menu_item(
connection, identifier, True)
status = response.menu_item_response.status
# pylint: disable=no-member
if status != iterm2.api_pb2.MenuItemResponse.Status.Value("OK"):
raise MenuItemException(
iterm2.api_pb2.MenuItemResponse.Status.Name(status))
return iterm2.MenuItemState(
response.menu_item_response.checked,
response.menu_item_response.enabled)
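    # Hedged usage sketch (assumes the usual iterm2 async entry point; the
    # "Clear Buffer" identifier is taken from the Edit enum defined below):
    #
    #   async def main(connection):
    #       await MainMenu.async_select_menu_item(connection, "Clear Buffer")
    #       state = await MainMenu.async_get_menu_item_state(connection,
    #                                                        "Clear Buffer")
    #       print(state.enabled, state.checked)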
class iTerm2(enum.Enum):
ABOUT_ITERM2 = MenuItemIdentifier("About iTerm2", "About iTerm2")
SHOW_TIP_OF_THE_DAY = MenuItemIdentifier("Show Tip of the Day", "Show Tip of the Day")
CHECK_FOR_UPDATES = MenuItemIdentifier("Check For Updates…", "Check For Updates…")
TOGGLE_DEBUG_LOGGING = MenuItemIdentifier("Toggle Debug Logging", "Toggle Debug Logging")
COPY_PERFORMANCE_STATS = MenuItemIdentifier("Copy Performance Stats", "Copy Performance Stats")
CAPTURE_GPU_FRAME = MenuItemIdentifier("Capture GPU Frame", "Capture Metal Frame")
PREFERENCES = MenuItemIdentifier("Preferences...", "Preferences...")
HIDE_ITERM2 = MenuItemIdentifier("Hide iTerm2", "Hide iTerm2")
HIDE_OTHERS = MenuItemIdentifier("Hide Others", "Hide Others")
SHOW_ALL = MenuItemIdentifier("Show All", "Show All")
SECURE_KEYBOARD_ENTRY = MenuItemIdentifier("Secure Keyboard Entry", "Secure Keyboard Entry")
MAKE_ITERM2_DEFAULT_TERM = MenuItemIdentifier("Make iTerm2 Default Term", "Make iTerm2 Default Term")
MAKE_TERMINAL_DEFAULT_TERM = MenuItemIdentifier("Make Terminal Default Term", "Make Terminal Default Term")
INSTALL_SHELL_INTEGRATION = MenuItemIdentifier("Install Shell Integration", "Install Shell Integration")
QUIT_ITERM2 = MenuItemIdentifier("Quit iTerm2", "Quit iTerm2")
class Shell(enum.Enum):
NEW_WINDOW = MenuItemIdentifier("New Window", "New Window")
NEW_WINDOW_WITH_CURRENT_PROFILE = MenuItemIdentifier("New Window with Current Profile", "New Window with Current Profile")
NEW_TAB = MenuItemIdentifier("New Tab", "New Tab")
NEW_TAB_WITH_CURRENT_PROFILE = MenuItemIdentifier("New Tab with Current Profile", "New Tab with Current Profile")
DUPLICATE_TAB = MenuItemIdentifier("Duplicate Tab", "Duplicate Tab")
SPLIT_HORIZONTALLY_WITH_CURRENT_PROFILE = MenuItemIdentifier("Split Horizontally with Current Profile", "Split Horizontally with Current Profile")
SPLIT_VERTICALLY_WITH_CURRENT_PROFILE = MenuItemIdentifier("Split Vertically with Current Profile", "Split Vertically with Current Profile")
SPLIT_HORIZONTALLY = MenuItemIdentifier("Split Horizontally…", "Split Horizontally…")
SPLIT_VERTICALLY = MenuItemIdentifier("Split Vertically…", "Split Vertically…")
SAVE_SELECTED_TEXT = MenuItemIdentifier("Save Selected Text…", "Save Selected Text…")
CLOSE = MenuItemIdentifier("Close", "Close")
CLOSE_TERMINAL_WINDOW = MenuItemIdentifier("Close Terminal Window", "Close Terminal Window")
CLOSE_ALL_PANES_IN_TAB = MenuItemIdentifier("Close All Panes in Tab", "Close All Panes in Tab")
UNDO_CLOSE = MenuItemIdentifier("Undo Close", "Undo Close")
class BroadcastInput(enum.Enum):
SEND_INPUT_TO_CURRENT_SESSION_ONLY = MenuItemIdentifier("Send Input to Current Session Only", "Broadcast Input.Send Input to Current Session Only")
BROADCAST_INPUT_TO_ALL_PANES_IN_ALL_TABS = MenuItemIdentifier("Broadcast Input to All Panes in All Tabs", "Broadcast Input.Broadcast Input to All Panes in All Tabs")
BROADCAST_INPUT_TO_ALL_PANES_IN_CURRENT_TAB = MenuItemIdentifier("Broadcast Input to All Panes in Current Tab", "Broadcast Input.Broadcast Input to All Panes in Current Tab")
TOGGLE_BROADCAST_INPUT_TO_CURRENT_SESSION = MenuItemIdentifier("Toggle Broadcast Input to Current Session", "Broadcast Input.Toggle Broadcast Input to Current Session")
SHOW_BACKGROUND_PATTERN_INDICATOR = MenuItemIdentifier("Show Background Pattern Indicator", "Broadcast Input.Show Background Pattern Indicator")
class tmux(enum.Enum):
DETACH = MenuItemIdentifier("Detach", "tmux.Detach")
FORCE_DETACH = MenuItemIdentifier("Force Detach", "tmux.Force Detach")
NEW_TMUX_WINDOW = MenuItemIdentifier("New Tmux Window", "tmux.New Tmux Window")
NEW_TMUX_TAB = MenuItemIdentifier("New Tmux Tab", "tmux.New Tmux Tab")
PAUSE_PANE = MenuItemIdentifier("Pause Pane", "trmux.Pause Pane")
DASHBOARD = MenuItemIdentifier("Dashboard", "tmux.Dashboard")
PAGE_SETUP = MenuItemIdentifier("Page Setup...", "Page Setup...")
class Print(enum.Enum):
SCREEN = MenuItemIdentifier("Screen", "Print.Screen")
SELECTION = MenuItemIdentifier("Selection", "Print.Selection")
BUFFER = MenuItemIdentifier("Buffer", "Print.Buffer")
class Edit(enum.Enum):
UNDO = MenuItemIdentifier("Undo", "Undo")
REDO = MenuItemIdentifier("Redo", "Redo")
CUT = MenuItemIdentifier("Cut", "Cut")
COPY = MenuItemIdentifier("Copy", "Copy")
COPY_WITH_STYLES = MenuItemIdentifier("Copy with Styles", "Copy with Styles")
COPY_WITH_CONTROL_SEQUENCES = MenuItemIdentifier("Copy with Control Sequences", "Copy with Control Sequences")
COPY_MODE = MenuItemIdentifier("Copy Mode", "Copy Mode")
PASTE = MenuItemIdentifier("Paste", "Paste")
class PasteSpecial(enum.Enum):
ADVANCED_PASTE = MenuItemIdentifier("Advanced Paste…", "Paste Special.Advanced Paste…")
PASTE_SELECTION = MenuItemIdentifier("Paste Selection", "Paste Special.Paste Selection")
PASTE_FILE_BASE64ENCODED = MenuItemIdentifier("Paste File Base64-Encoded", "Paste Special.Paste File Base64-Encoded")
PASTE_SLOWLY = MenuItemIdentifier("Paste Slowly", "Paste Special.Paste Slowly")
PASTE_FASTER = MenuItemIdentifier("Paste Faster", "Paste Special.Paste Faster")
PASTE_SLOWLY_FASTER = MenuItemIdentifier("Paste Slowly Faster", "Paste Special.Paste Slowly Faster")
PASTE_SLOWER = MenuItemIdentifier("Paste Slower", "Paste Special.Paste Slower")
PASTE_SLOWLY_SLOWER = MenuItemIdentifier("Paste Slowly Slower", "Paste Special.Paste Slowly Slower")
WARN_BEFORE_MULTILINE_PASTE = MenuItemIdentifier("Warn Before Multi-Line Paste", "Paste Special.Warn Before Multi-Line Paste")
PROMPT_TO_CONVERT_TABS_TO_SPACES_WHEN_PASTING = MenuItemIdentifier("Prompt to Convert Tabs to Spaces when Pasting", "Paste Special.Prompt to Convert Tabs to Spaces when Pasting")
LIMIT_MULTILINE_PASTE_WARNING_TO_SHELL_PROMPT = MenuItemIdentifier("Limit Multi-Line Paste Warning to Shell Prompt", "Paste Special.Limit Multi-Line Paste Warning to Shell Prompt")
WARN_BEFORE_PASTING_ONE_LINE_ENDING_IN_A_NEWLINE_AT_SHELL_PROMPT = MenuItemIdentifier("Warn Before Pasting One Line Ending in a Newline at Shell Prompt", "Paste Special.Warn Before Pasting One Line Ending in a Newline at Shell Prompt")
OPEN_SELECTION = MenuItemIdentifier("Open Selection", "Open Selection")
JUMP_TO_SELECTION = MenuItemIdentifier("Jump to Selection", "Find.Jump to Selection")
SELECT_ALL = MenuItemIdentifier("Select All", "Select All")
SELECTION_RESPECTS_SOFT_BOUNDARIES = MenuItemIdentifier("Selection Respects Soft Boundaries", "Selection Respects Soft Boundaries")
SELECT_OUTPUT_OF_LAST_COMMAND = MenuItemIdentifier("Select Output of Last Command", "Select Output of Last Command")
SELECT_CURRENT_COMMAND = MenuItemIdentifier("Select Current Command", "Select Current Command")
class Find(enum.Enum):
FIND = MenuItemIdentifier("Find...", "Find.Find...")
FIND_NEXT = MenuItemIdentifier("Find Next", "Find.Find Next")
FIND_PREVIOUS = MenuItemIdentifier("Find Previous", "Find.Find Previous")
USE_SELECTION_FOR_FIND = MenuItemIdentifier("Use Selection for Find", "Find.Use Selection for Find")
FIND_GLOBALLY = MenuItemIdentifier("Find Globally...", "Find.Find Globally...")
FIND_URLS = MenuItemIdentifier("Find URLs", "Find.Find URLs")
class MarksandAnnotations(enum.Enum):
SET_MARK = MenuItemIdentifier("Set Mark", "Marks and Annotations.Set Mark")
JUMP_TO_MARK = MenuItemIdentifier("Jump to Mark", "Marks and Annotations.Jump to Mark")
NEXT_MARK = MenuItemIdentifier("Next Mark", "Marks and Annotations.Next Mark")
PREVIOUS_MARK = MenuItemIdentifier("Previous Mark", "Marks and Annotations.Previous Mark")
ADD_ANNOTATION_AT_CURSOR = MenuItemIdentifier("Add Annotation at Cursor", "Marks and Annotations.Add Annotation at Cursor")
NEXT_ANNOTATION = MenuItemIdentifier("Next Annotation", "Marks and Annotations.Next Annotation")
PREVIOUS_ANNOTATION = MenuItemIdentifier("Previous Annotation", "Marks and Annotations.Previous Annotation")
class Alerts(enum.Enum):
ALERT_ON_NEXT_MARK = MenuItemIdentifier("Alert on Next Mark", "Marks and Annotations.Alerts.Alert on Next Mark")
SHOW_MODAL_ALERT_BOX = MenuItemIdentifier("Show Modal Alert Box", "Marks and Annotations.Alerts.Show Modal Alert Box")
POST_NOTIFICATION = MenuItemIdentifier("Post Notification", "Marks and Annotations.Alerts.Post Notification")
CLEAR_BUFFER = MenuItemIdentifier("Clear Buffer", "Clear Buffer")
CLEAR_SCROLLBACK_BUFFER = MenuItemIdentifier("Clear Scrollback Buffer", "Clear Scrollback Buffer")
CLEAR_TO_START_OF_SELECTION = MenuItemIdentifier("Clear to Start of Selection", "Clear to Start of Selection")
CLEAR_TO_LAST_MARK = MenuItemIdentifier("Clear to Last Mark", "Clear to Last Mark")
class View(enum.Enum):
SHOW_TABS_IN_FULLSCREEN = MenuItemIdentifier("Show Tabs in Fullscreen", "Show Tabs in Fullscreen")
TOGGLE_FULL_SCREEN = MenuItemIdentifier("Toggle Full Screen", "Toggle Full Screen")
USE_TRANSPARENCY = MenuItemIdentifier("Use Transparency", "Use Transparency")
ZOOM_IN_ON_SELECTION = MenuItemIdentifier("Zoom In on Selection", "Zoom In on Selection")
ZOOM_OUT = MenuItemIdentifier("Zoom Out", "Zoom Out")
FIND_CURSOR = MenuItemIdentifier("Find Cursor", "Find Cursor")
SHOW_CURSOR_GUIDE = MenuItemIdentifier("Show Cursor Guide", "Show Cursor Guide")
SHOW_TIMESTAMPS = MenuItemIdentifier("Show Timestamps", "Show Timestamps")
SHOW_ANNOTATIONS = MenuItemIdentifier("Show Annotations", "Show Annotations")
AUTO_COMMAND_COMPLETION = MenuItemIdentifier("Auto Command Completion", "Auto Command Completion")
COMPOSER = MenuItemIdentifier("Composer", "Composer")
OPEN_QUICKLY = MenuItemIdentifier("Open Quickly", "Open Quickly")
MAXIMIZE_ACTIVE_PANE = MenuItemIdentifier("Maximize Active Pane", "Maximize Active Pane")
MAKE_TEXT_BIGGER = MenuItemIdentifier("Make Text Bigger", "Make Text Bigger")
MAKE_TEXT_NORMAL_SIZE = MenuItemIdentifier("Make Text Normal Size", "Make Text Normal Size")
RESTORE_TEXT_AND_SESSION_SIZE = MenuItemIdentifier("Restore Text and Session Size", "Restore Text and Session Size")
MAKE_TEXT_SMALLER = MenuItemIdentifier("Make Text Smaller", "Make Text Smaller")
SIZE_CHANGES_UPDATE_PROFILE = MenuItemIdentifier("Size Changes Update Profile", "Size Changes Update Profile")
START_INSTANT_REPLAY = MenuItemIdentifier("Start Instant Replay", "Start Instant Replay")
class Session(enum.Enum):
EDIT_SESSION = MenuItemIdentifier("Edit Session…", "Edit Session…")
RUN_COPROCESS = MenuItemIdentifier("Run Coprocess…", "Run Coprocess…")
STOP_COPROCESS = MenuItemIdentifier("Stop Coprocess", "Stop Coprocess")
RESTART_SESSION = MenuItemIdentifier("Restart Session", "Restart Session")
OPEN_AUTOCOMPLETE = MenuItemIdentifier("Open Autocomplete…", "Open Autocomplete…")
OPEN_COMMAND_HISTORY = MenuItemIdentifier("Open Command History…", "Open Command History…")
OPEN_RECENT_DIRECTORIES = MenuItemIdentifier("Open Recent Directories…", "Open Recent Directories…")
OPEN_PASTE_HISTORY = MenuItemIdentifier("Open Paste History…", "Open Paste History…")
class Triggers(enum.Enum):
ADD_TRIGGER = MenuItemIdentifier("Add Trigger…", "Add Trigger")
EDIT_TRIGGERS = MenuItemIdentifier("Edit Triggers", "Edit Triggers")
ENABLE_TRIGGERS_IN_INTERACTIVE_APPS = MenuItemIdentifier("Enable Triggers in Interactive Apps", "Enable Triggers in Interactive Apps")
ENABLE_ALL = MenuItemIdentifier("Enable All", "Triggers.Enable All")
DISABLE_ALL = MenuItemIdentifier("Disable All", "Triggers.Disable All")
RESET = MenuItemIdentifier("Reset", "Reset")
RESET_CHARACTER_SET = MenuItemIdentifier("Reset Character Set", "Reset Character Set")
class Log(enum.Enum):
LOG_TO_FILE = MenuItemIdentifier("Log to File", "Log.Toggle")
IMPORT_RECORDING = MenuItemIdentifier("Import Recording", "Log.ImportRecording")
EXPORT_RECORDING = MenuItemIdentifier("Export Recording", "Log.ExportRecording")
SAVE_CONTENTS = MenuItemIdentifier("Save Contents…", "Log.SaveContents")
class TerminalState(enum.Enum):
ALTERNATE_SCREEN = MenuItemIdentifier("Alternate Screen", "Alternate Screen")
FOCUS_REPORTING = MenuItemIdentifier("Focus Reporting", "Focus Reporting")
MOUSE_REPORTING = MenuItemIdentifier("Mouse Reporting", "Mouse Reporting")
PASTE_BRACKETING = MenuItemIdentifier("Paste Bracketing", "Paste Bracketing")
APPLICATION_CURSOR = MenuItemIdentifier("Application Cursor", "Application Cursor")
APPLICATION_KEYPAD = MenuItemIdentifier("Application Keypad", "Application Keypad")
STANDARD_KEY_REPORTING_MODE = MenuItemIdentifier("Standard Key Reporting Mode", "Terminal State.Standard Key Reporting")
MODIFYOTHERKEYS_MODE_1 = MenuItemIdentifier("modifyOtherKeys Mode 1", "Terminal State.Report Modifiers like xterm 1")
MODIFYOTHERKEYS_MODE_2 = MenuItemIdentifier("modifyOtherKeys Mode 2", "Terminal State.Report Modifiers like xterm 2")
CSI_U_MODE = MenuItemIdentifier("CSI u Mode", "Terminal State.Report Modifiers with CSI u")
RAW_KEY_REPORTING_MODE = MenuItemIdentifier("Raw Key Reporting Mode", "Terminal State.Raw Key Reporting")
RESET = MenuItemIdentifier("Reset", "Reset Terminal State")
BURY_SESSION = MenuItemIdentifier("Bury Session", "Bury Session")
class Scripts(enum.Enum):
class Manage(enum.Enum):
NEW_PYTHON_SCRIPT = MenuItemIdentifier("New Python Script", "New Python Script")
OPEN_PYTHON_REPL = MenuItemIdentifier("Open Python REPL", "Open Interactive Window")
MANAGE_DEPENDENCIES = MenuItemIdentifier("Manage Dependencies…", "Manage Dependencies")
INSTALL_PYTHON_RUNTIME = MenuItemIdentifier("Install Python Runtime", "Install Python Runtime")
REVEAL_SCRIPTS_IN_FINDER = MenuItemIdentifier("Reveal Scripts in Finder", "Reveal in Finder")
IMPORT = MenuItemIdentifier("Import…", "Import Script")
EXPORT = MenuItemIdentifier("Export…", "Export Script")
CONSOLE = MenuItemIdentifier("Console", "Script Console")
class Profiles(enum.Enum):
OPEN_PROFILES = MenuItemIdentifier("Open Profiles…", "Open Profiles…")
PRESS_OPTION_FOR_NEW_WINDOW = MenuItemIdentifier("Press Option for New Window", "Press Option for New Window")
OPEN_IN_NEW_WINDOW = MenuItemIdentifier("Open In New Window", "Open In New Window")
class Toolbelt(enum.Enum):
SHOW_TOOLBELT = MenuItemIdentifier("Show Toolbelt", "Show Toolbelt")
SET_DEFAULT_WIDTH = MenuItemIdentifier("Set Default Width", "Set Default Width")
class Window(enum.Enum):
MINIMIZE = MenuItemIdentifier("Minimize", "Minimize")
ZOOM = MenuItemIdentifier("Zoom", "Zoom")
EDIT_TAB_TITLE = MenuItemIdentifier("Edit Tab Title", "Edit Tab Title")
EDIT_WINDOW_TITLE = MenuItemIdentifier("Edit Window Title", "Edit Window Title")
class WindowStyle(enum.Enum):
NORMAL = MenuItemIdentifier("Normal", "Window Style.Normal")
FULL_SCREEN = MenuItemIdentifier("Full Screen", "Window Style.Full Screen")
MAXIMIZED = MenuItemIdentifier("Maximized", "Window Style.Maximized")
NO_TITLE_BAR = MenuItemIdentifier("No Title Bar", "Window Style.No Title Bar")
FULLWIDTH_BOTTOM_OF_SCREEN = MenuItemIdentifier("Full-Width Bottom of Screen", "Window Style.FullWidth Bottom of Screen")
FULLWIDTH_TOP_OF_SCREEN = MenuItemIdentifier("Full-Width Top of Screen", "Window Style.FullWidth Top of Screen")
FULLHEIGHT_LEFT_OF_SCREEN = MenuItemIdentifier("Full-Height Left of Screen", "Window Style..FullHeight Left of Screen")
FULLHEIGHT_RIGHT_OF_SCREEN = MenuItemIdentifier("Full-Height Right of Screen", "Window Style.FullHeight Right of Screen")
BOTTOM_OF_SCREEN = MenuItemIdentifier("Bottom of Screen", "Window Style.Bottom of Screen")
TOP_OF_SCREEN = MenuItemIdentifier("Top of Screen", "Window Style.Top of Screen")
LEFT_OF_SCREEN = MenuItemIdentifier("Left of Screen", "Window Style.Left of Screen")
RIGHT_OF_SCREEN = MenuItemIdentifier("Right of Screen", "Window Style.Right of Screen")
MERGE_ALL_WINDOWS = MenuItemIdentifier("Merge All Windows", "Merge All Windows")
ARRANGE_WINDOWS_HORIZONTALLY = MenuItemIdentifier("Arrange Windows Horizontally", "Arrange Windows Horizontally")
ARRANGE_SPLIT_PANES_EVENLY = MenuItemIdentifier("Arrange Split Panes Evenly", "Arrange Split Panes Evenly")
MOVE_SESSION_TO_WINDOW = MenuItemIdentifier("Move Session to Window", "Move Session to Window")
SAVE_WINDOW_ARRANGEMENT = MenuItemIdentifier("Save Window Arrangement", "Save Window Arrangement")
SAVE_CURRENT_WINDOW_AS_ARRANGEMENT = MenuItemIdentifier("Save Current Window as Arrangement", "Save Current Window as Arrangement")
class SelectSplitPane(enum.Enum):
SELECT_PANE_ABOVE = MenuItemIdentifier("Select Pane Above", "Select Split Pane.Select Pane Above")
SELECT_PANE_BELOW = MenuItemIdentifier("Select Pane Below", "Select Split Pane.Select Pane Below")
SELECT_PANE_LEFT = MenuItemIdentifier("Select Pane Left", "Select Split Pane.Select Pane Left")
SELECT_PANE_RIGHT = MenuItemIdentifier("Select Pane Right", "Select Split Pane.Select Pane Right")
NEXT_PANE = MenuItemIdentifier("Next Pane", "Select Split Pane.Next Pane")
PREVIOUS_PANE = MenuItemIdentifier("Previous Pane", "Select Split Pane.Previous Pane")
class ResizeSplitPane(enum.Enum):
MOVE_DIVIDER_UP = MenuItemIdentifier("Move Divider Up", "Resize Split Pane.Move Divider Up")
MOVE_DIVIDER_DOWN = MenuItemIdentifier("Move Divider Down", "Resize Split Pane.Move Divider Down")
MOVE_DIVIDER_LEFT = MenuItemIdentifier("Move Divider Left", "Resize Split Pane.Move Divider Left")
MOVE_DIVIDER_RIGHT = MenuItemIdentifier("Move Divider Right", "Resize Split Pane.Move Divider Right")
class ResizeWindow(enum.Enum):
DECREASE_HEIGHT = MenuItemIdentifier("Decrease Height", "Resize Window.Decrease Height")
INCREASE_HEIGHT = MenuItemIdentifier("Increase Height", "Resize Window.Increase Height")
DECREASE_WIDTH = MenuItemIdentifier("Decrease Width", "Resize Window.Decrease Width")
INCREASE_WIDTH = MenuItemIdentifier("Increase Width", "Resize Window.Increase Width")
SELECT_NEXT_TAB = MenuItemIdentifier("Select Next Tab", "Select Next Tab")
SELECT_PREVIOUS_TAB = MenuItemIdentifier("Select Previous Tab", "Select Previous Tab")
MOVE_TAB_LEFT = MenuItemIdentifier("Move Tab Left", "Move Tab Left")
MOVE_TAB_RIGHT = MenuItemIdentifier("Move Tab Right", "Move Tab Right")
PASSWORD_MANAGER = MenuItemIdentifier("Password Manager", "Password Manager")
PIN_HOTKEY_WINDOW = MenuItemIdentifier("Pin Hotkey Window", "Pin Hotkey Window")
BRING_ALL_TO_FRONT = MenuItemIdentifier("Bring All To Front", "Bring All To Front")
class Help(enum.Enum):
ITERM2_HELP = MenuItemIdentifier("iTerm2 Help", "iTerm2 Help")
COPY_MODE_SHORTCUTS = MenuItemIdentifier("Copy Mode Shortcuts", "Copy Mode Shortcuts")
OPEN_SOURCE_LICENSES = MenuItemIdentifier("Open Source Licenses", "Open Source Licenses")
GPU_RENDERER_AVAILABILITY = MenuItemIdentifier("GPU Renderer Availability", "GPU Renderer Availability")
| gpl-2.0 | -4,293,014,743,592,645,000 | 64.867232 | 247 | 0.692799 | false |
hoxmark/TDT4501-Specialization-Project | reinforcement/datasets/digit/model.py | 1 | 4930 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from pprint import pprint
import time
from utils import pairwise_distances, batchify
from config import opt, data, loaders
class SimpleClassifier(nn.Module):
def __init__(self):
super(SimpleClassifier, self).__init__()
self.input_size = 64
# TODO params
self.hidden_size = 256
self.output_size = 10
self.relu = nn.ReLU()
self.fc1 = nn.Linear(self.input_size, self.hidden_size)
self.fc3 = nn.Linear(self.hidden_size, self.output_size)
self.reset()
if opt.cuda:
self.cuda()
def reset(self):
torch.nn.init.xavier_normal_(self.fc1.weight)
torch.nn.init.xavier_normal_(self.fc3.weight)
def forward(self, inp):
if opt.cuda:
inp = inp.cuda()
output = self.fc1(inp)
output = self.relu(output)
output = self.fc3(output)
return output
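    # Hedged usage sketch (batch size and tensors invented; shapes follow
    # input_size=64 and output_size=10, i.e. flattened 8x8 digit images):
    #
    #   model = SimpleClassifier()
    #   batch = torch.randn(32, 64)        # 32 flattened 8x8 images
    #   logits = model.forward(batch)      # -> shape (32, 10)
    #   probs = model.predict_prob(batch)  # softmax over the 10 classes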
def train_model(self, train_data, epochs):
optimizer = optim.Adadelta(self.parameters(), 0.1)
criterion = nn.CrossEntropyLoss()
self.train()
size = len(train_data[0])
if size > 0:
for e in range(epochs):
avg_loss = 0
corrects = 0
for i, (features, targets) in enumerate(batchify(train_data)):
features = torch.FloatTensor(features)
targets = torch.LongTensor(targets)
if opt.cuda:
features, targets = features.cuda(), targets.cuda()
output = self.forward(features)
optimizer.zero_grad()
loss = criterion(output, targets)
loss.backward()
optimizer.step()
avg_loss += loss.item()
corrects += (torch.max(output, 1)
[1].view(targets.size()) == targets).sum()
avg_loss = avg_loss / opt.batch_size
accuracy = 100.0 * corrects / size
def predict_prob(self, inp):
with torch.no_grad():
output = self.forward(inp)
output = torch.nn.functional.softmax(output, dim=1)
return output
def validate(self, data):
corrects, avg_loss = 0, 0
with torch.no_grad():
for i, (features, targets) in enumerate(batchify(data)):
features = torch.FloatTensor(features)
targets = torch.LongTensor(targets)
if opt.cuda:
features = features.cuda()
targets = targets.cuda()
logit = self.forward(features)
loss = torch.nn.functional.cross_entropy(logit, targets, size_average=False)
avg_loss += loss.item()
corrects += (torch.max(logit, 1)[1].view(targets.size()) == targets).sum()
size = len(data[0])
avg_loss = avg_loss / size
accuracy = 100.0 * float(corrects) / float(size)
metrics = {
'accuracy': accuracy,
'avg_loss': avg_loss,
'performance': accuracy
}
return metrics
def performance_validate(self, data):
return self.validate(data)
def get_state(self, index):
img = torch.Tensor(data["train"][0][index])
if opt.cuda:
img = img.cuda()
preds = self.forward(img)
state = torch.cat((img, preds)).view(1, -1)
return state
def encode_episode_data(self):
pass
# images = []
# # for i, (features, targets) in enumerate(loaders["train_loader"]):
# all_states = torch.Tensor(data["train"][0])
# for i, (features, targets) in enumerate(batchify(data["train"])):
# features = Variable(torch.FloatTensor(features))
# preds = self.predict_prob(features)
# images.append(preds)
#
# images = torch.cat(images, dim=0)
#
# # data["all_predictions"] = images
# data["all_states"] = torch.cat((all_states, images.cpu()), dim=1)
def query(self, index):
# current_state = data["all_states"][index].view(1, -1)
# all_states = data["all_states"]
# current_all_dist = pairwise_distances(current_state, all_states)
# similar_indices = torch.topk(current_all_dist, opt.selection_radius, 1, largest=False)[1]
# similar_indices = similar_indices.data[0].cpu().numpy()
# for idx in similar_indices:
self.add_index(index)
return [index]
def add_index(self, index):
image = data["train"][0][index]
caption = data["train"][1][index]
data["active"][0].append(image)
data["active"][1].append(caption)
| mit | -8,798,934,896,517,320,000 | 33.236111 | 99 | 0.541988 | false |
qgis/QGIS-Django | qgis-app/styles/migrations/0002_auto_20201108_0521.py | 1 | 1337 | # Generated by Django 2.2 on 2020-11-08 05:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('styles', '0001_initial'),
]
operations = [
migrations.AlterModelManagers(
name='style',
managers=[
],
),
migrations.CreateModel(
name='StyleReview',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('review_date', models.DateTimeField(auto_now_add=True, help_text='The review date. Automatically added on style review.', verbose_name='Reviewed on')),
('reviewer', models.ForeignKey(help_text='The user who reviewed this style.', on_delete=django.db.models.deletion.CASCADE, related_name='styles_reviewed_by', to=settings.AUTH_USER_MODEL, verbose_name='Reviewed by')),
('style', models.ForeignKey(blank=True, help_text='The type of this style, this will automatically be read from the XML file.', null=True, on_delete=django.db.models.deletion.CASCADE, to='styles.Style', verbose_name='Style')),
],
),
]
| gpl-2.0 | -1,898,174,964,722,884,000 | 43.566667 | 242 | 0.642483 | false |
SuperSuperSuperSuper5/everyweak-auto-punch | test/mobile_module.py | 1 | 3052 | #!/usr/bin/python
#coding:utf-8
import time
import requests
import send_mail
import random_position
import all_headers
import sys
def main(status):
"""
The main function
"""
mail_addr = ""
name = ""
host_addr = "[email protected]"
# Now we get the token and userid first
get_token_headers = all_headers.gth("get_headers")
#get_token_headers["Content-Length"] = ""
#get_token_headers["User-Agent"] = ""
get_token_data = all_headers.gth("get_data")
#get_token_data["system"] = ""
#get_token_data["password"] = ""
#get_token_data["account"] = ""
#get_token_data["serialNumber"] = ""
#get_token_data["version"] = ""
#get_token_data["model"] = ""
token_req = requests.post("http://www.ddtech.com.cn:7777/mobile/login", headers=get_token_headers, data=get_token_data)
#print(token_req.status_code)
if token_req.status_code == 200:
#print("Get the token is ok")
token = token_req.json()['data'][0]['token']
userid = token_req.json()['data'][0]['userid']
else:
send_mail.send_mail(to_addr=mail_addr, subject="The program want login but failed", text="LOGIN TIME: %s\nHTTP code: %d" % (time.strftime('%Y-%m-%d-%H:%M:%S'), token_req.status_code))
send_mail.send_mail(to_addr=host_addr, subject="%s program want login but failed" % name, text="LOGIN TIME: %s\nHTTP code: %d" % (time.strftime('%Y-%m-%d-%H:%M:%S'), token_req.status_code))
return 1
    # Now we send the check-in ("da ka") request
pos_headers = all_headers.gth("pos_headers")
#pos_headers["Content-Length"] = ""
#pos_headers["User-Agent"] = ""
position = random_position.get_position()
pos_data = all_headers.gth("pos_data")
pos_data["token"] = token
pos_data["userId"] = userid
pos_data["longitude"] = position[0]
pos_data["latitude"] = position[1]
#pos_data["isStart"] = "%s" % status
#pos_data["from"] = "IOS"
pos_req = requests.post("http://www.ddtech.com.cn:7777/mobile/busUserClock/saveOrUpdateNewUserClock", headers=pos_headers, data=pos_data)
if pos_req.status_code == 200:
send_mail.send_mail(to_addr=mail_addr, subject="Checked in success", text="CHECK IN TIME: %s\nHTTP code: %d" % (time.strftime('%Y-%m-%d-%H:%M:%S'), pos_req.status_code))
send_mail.send_mail(to_addr=host_addr, subject="%s checked in success" % name, text="CHECK IN TIME: %s\nHTTP code: %d" % (time.strftime('%Y-%m-%d-%H:%M:%S'), pos_req.status_code))
else:
send_mail.send_mail(to_addr=mail_addr, subject="Checked in failure", text="CHECK IN TIME: %s\nHTTP code: %d" % (time.strftime('%Y-%m-%d-%H:%M:%S'), pos_req.status_code))
send_mail.send_mail(to_addr=host_addr, subject="%s checked in failure" % name, text="CHECK IN TIME: %s\nHTTP code: %d" % (time.strftime('%Y-%m-%d-%H:%M:%S'), pos_req.status_code))
if __name__ == "__main__":
if len(sys.argv) != 2:
sys.exit(0)
status = sys.argv[1]
if status == "up":
main("1")
elif status == "down":
main("0")
| gpl-3.0 | -8,474,352,799,188,116,000 | 38.128205 | 197 | 0.612713 | false |
kuscsik/naclports | lib/naclports/package_index.py | 1 | 4237 | # Copyright (c) 2014 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import hashlib
import configuration
import naclports
import package
EXTRA_KEYS = [ 'BIN_URL', 'BIN_SIZE', 'BIN_SHA1' ]
VALID_KEYS = naclports.binary_package.VALID_KEYS + EXTRA_KEYS
REQUIRED_KEYS = naclports.binary_package.REQUIRED_KEYS + EXTRA_KEYS
DEFAULT_INDEX = os.path.join(naclports.NACLPORTS_ROOT, 'lib', 'prebuilt.txt')
def VerifyHash(filename, sha1):
"""Return True if the sha1 of the given file match the sha1 passed in."""
with open(filename) as f:
file_sha1 = hashlib.sha1(f.read()).hexdigest()
return sha1 == file_sha1
def WriteIndex(index_filename, binaries):
"""Create a package index file from set of binaries on disk.
Returns:
A PackageIndex object based on the contents of the newly written file.
"""
# Write index to a temporary file and then rename it, to avoid
# leaving a partial index file on disk.
tmp_name = index_filename + '.tmp'
with open(tmp_name, 'w') as output_file:
for i, (filename, url) in enumerate(binaries):
package = naclports.binary_package.BinaryPackage(filename)
with open(filename) as f:
sha1 = hashlib.sha1(f.read()).hexdigest()
if i != 0:
output_file.write('\n')
output_file.write(package.GetPkgInfo())
output_file.write('BIN_URL=%s\n' % url)
output_file.write('BIN_SIZE=%s\n' % os.path.getsize(filename))
output_file.write('BIN_SHA1=%s\n' % sha1)
os.rename(tmp_name, index_filename)
return IndexFromFile(index_filename)
def IndexFromFile(filename):
with open(filename) as f:
contents = f.read()
return PackageIndex(filename, contents)
def GetCurrentIndex():
return IndexFromFile(DEFAULT_INDEX)
class PackageIndex(object):
"""In memory representation of a package index file.
This class is used to read a package index of disk and stores
it in memory as dictionary keys on package name + configuration.
"""
def __init__(self, filename, index_data):
self.filename = filename
self.packages = {}
self.ParseIndex(index_data)
def Contains(self, package_name, config):
"""Returns True if the index contains the given package in the given
configuration, False otherwise."""
return (package_name, config) in self.packages
def Installable(self, package_name, config):
"""Returns True if the index contains the given package and it is
installable in the currently configured SDK."""
info = self.packages.get((package_name, config))
if not info:
return False
version = naclports.GetSDKVersion()
if info['BUILD_SDK_VERSION'] != version:
naclports.Trace('Prebuilt package was built with different SDK version: '
'%s vs %s' % (info['BUILD_SDK_VERSION'], version))
return False
return True
def Download(self, package_name, config):
PREBUILT_ROOT = os.path.join(package.PACKAGES_ROOT, 'prebuilt')
if not os.path.exists(PREBUILT_ROOT):
os.makedirs(PREBUILT_ROOT)
info = self.packages[(package_name, config)]
filename = os.path.join(PREBUILT_ROOT, os.path.basename(info['BIN_URL']))
if os.path.exists(filename):
if VerifyHash(filename, info['BIN_SHA1']):
return filename
naclports.Log('Downloading prebuilt binary ...')
naclports.DownloadFile(filename, info['BIN_URL'])
if not VerifyHash(filename, info['BIN_SHA1']):
      raise naclports.Error('Unexpected SHA1: %s' % filename)
return filename
def ParseIndex(self, index_data):
if not index_data:
return
for pkg_info in index_data.split('\n\n'):
info = naclports.ParsePkgInfo(pkg_info, self.filename,
VALID_KEYS, EXTRA_KEYS)
debug = info['BUILD_CONFIG'] == 'debug'
config = configuration.Configuration(info['BUILD_ARCH'],
info['BUILD_TOOLCHAIN'],
debug)
key = (info['NAME'], config)
if key in self.packages:
naclports.Error('package index contains duplicate: %s' % str(key))
self.packages[key] = info
| bsd-3-clause | -2,090,408,763,854,018,000 | 34.605042 | 79 | 0.666981 | false |
puruckertom/poptox | poptox/loons/loons_description.py | 1 | 1357 |
import webapp2 as webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
import os
class loonsDescriptionPage(webapp.RequestHandler):
def get(self):
text_file2 = open('loons/loons_text.txt','r')
xx = text_file2.read()
templatepath = os.path.dirname(__file__) + '/../templates/'
html = template.render(templatepath + '01pop_uberheader.html', {'title':'Ubertool'})
html = html + template.render(templatepath + '02pop_uberintroblock_wmodellinks.html', {'model':'loons','page':'description'})
html = html + template.render (templatepath + '03pop_ubertext_links_left.html', {})
html = html + template.render(templatepath + '04ubertext_start.html', {
'model_page':'',
'model_attributes':'Loons Population Model','text_paragraph':xx})
html = html + template.render(templatepath + '04ubertext_end.html', {})
html = html + template.render(templatepath + '05pop_ubertext_links_right.html', {})
html = html + template.render(templatepath + '06pop_uberfooter.html', {'links': ''})
self.response.out.write(html)
app = webapp.WSGIApplication([('/.*', loonsDescriptionPage)], debug=True)
def main():
run_wsgi_app(app)
if __name__ == '__main__':
main()
| unlicense | 8,686,122,703,163,753,000 | 45.793103 | 133 | 0.638172 | false |
nop33/indico-plugins | livesync/indico_livesync/simplify.py | 1 | 6947 | # This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import itertools
from collections import defaultdict
from sqlalchemy.orm import joinedload
from indico.core.db import db
from indico.modules.categories.models.categories import Category
from indico.modules.events.models.events import Event
from indico.modules.events.contributions.models.contributions import Contribution
from indico.modules.events.contributions.models.subcontributions import SubContribution
from indico.util.struct.enum import IndicoEnum
from indico_livesync.models.queue import ChangeType, EntryType
class SimpleChange(int, IndicoEnum):
deleted = 1
created = 2
updated = 4
def process_records(records):
"""Converts queue entries into object changes.
:param records: an iterable containing `LiveSyncQueueEntry` objects
:return: a dict mapping object references to `SimpleChange` bitsets
"""
changes = defaultdict(int)
cascaded_update_records = set()
cascaded_delete_records = set()
for record in records:
if record.change != ChangeType.deleted and record.object is None:
# Skip entries which are not deletions but have no corresponding objects.
# Probably they are updates for objects that got deleted afterwards.
continue
if record.change == ChangeType.created:
assert record.type != EntryType.category
changes[record.object] |= SimpleChange.created
elif record.change == ChangeType.deleted:
assert record.type != EntryType.category
cascaded_delete_records.add(record)
elif record.change in {ChangeType.moved, ChangeType.protection_changed}:
cascaded_update_records.add(record)
elif record.change == ChangeType.data_changed:
assert record.type != EntryType.category
changes[record.object] |= SimpleChange.updated
for obj in _process_cascaded_category_contents(cascaded_update_records):
changes[obj] |= SimpleChange.updated
for obj in _process_cascaded_event_contents(cascaded_delete_records):
changes[obj] |= SimpleChange.deleted
return changes
def _process_cascaded_category_contents(records):
"""
Travel from categories to subcontributions, flattening the whole event structure.
Yields everything that it finds (except for elements whose protection has changed
but are not inheriting their protection settings from anywhere).
:param records: queue records to process
"""
category_prot_records = {rec.category_id for rec in records if rec.type == EntryType.category
and rec.change == ChangeType.protection_changed}
category_move_records = {rec.category_id for rec in records if rec.type == EntryType.category
and rec.change == ChangeType.moved}
changed_events = set()
category_prot_records -= category_move_records # A move already implies sending the whole record
# Protection changes are handled differently, as there may not be the need to re-generate the record
if category_prot_records:
for categ in Category.find(Category.id.in_(category_prot_records)):
cte = categ.get_protection_parent_cte()
# Update only children that inherit
inheriting_categ_children = (Event.query
.join(cte, db.and_((Event.category_id == cte.c.id),
(cte.c.protection_parent == categ.id))))
inheriting_direct_children = Event.find((Event.category_id == categ.id) & Event.is_inheriting)
changed_events.update(itertools.chain(inheriting_direct_children, inheriting_categ_children))
# Add move operations and explicitly-passed event records
if category_move_records:
changed_events.update(Event.find(Event.category_chain_overlaps(category_move_records)))
for elem in _process_cascaded_event_contents(records, additional_events=changed_events):
yield elem
def _process_cascaded_event_contents(records, additional_events=None):
"""
Flatten a series of records into its most basic elements (subcontribution level).
Yields results.
:param records: queue records to process
:param additional_events: events whose content will be included in addition to those
found in records
"""
changed_events = additional_events or set()
changed_contributions = set()
changed_subcontributions = set()
session_records = {rec.session_id for rec in records if rec.type == EntryType.session}
contribution_records = {rec.contrib_id for rec in records if rec.type == EntryType.contribution}
subcontribution_records = {rec.subcontrib_id for rec in records if rec.type == EntryType.subcontribution}
event_records = {rec.event_id for rec in records if rec.type == EntryType.event}
if event_records:
changed_events.update(Event.find(Event.id.in_(event_records)))
for event in changed_events:
yield event
# Sessions are added (explicitly changed only, since they don't need to be sent anywhere)
if session_records:
changed_contributions.update(Contribution
.find(Contribution.session_id.in_(session_records), ~Contribution.is_deleted))
    # Contributions are added (implicitly + explicitly changed)
changed_event_ids = {ev.id for ev in changed_events}
condition = Contribution.event_id.in_(changed_event_ids) & ~Contribution.is_deleted
if contribution_records:
condition = db.or_(condition, Contribution.id.in_(contribution_records))
contrib_query = Contribution.find(condition).options(joinedload('subcontributions'))
for contribution in contrib_query:
yield contribution
changed_subcontributions.update(contribution.subcontributions)
# Same for subcontributions
if subcontribution_records:
changed_subcontributions.update(SubContribution
.find(SubContribution.contribution_id.in_(subcontribution_records)))
for subcontrib in changed_subcontributions:
yield subcontrib
| gpl-3.0 | -887,382,400,869,119,900 | 42.149068 | 115 | 0.702174 | false |
ncliam/serverpos | openerp/custom_modules/website_sms_authentication/controllers/__init__.py | 1 | 1067 | # -*- coding: utf-8 -*-
###############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2015-Today Julius Network Solutions SARL <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from . import main
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -1,163,223,188,907,162,000 | 43.458333 | 79 | 0.617619 | false |
RayleighChen/SummerVac | yang/学习/0724-高阶函数1.py | 1 | 2113 | Python 2.7.13 (v2.7.13:a06454b1afa1, Dec 17 2016, 20:42:59) [MSC v.1500 32 bit (Intel)] on win32
Type "copyright", "credits" or "license()" for more information.
>>> # higher-order functions
>>> x=abs(-10)
>>> x
10
>>> f=abs
>>> f
<built-in function abs>
>>> f(-9)
9
>>> # passing a function as an argument
>>> # since a variable can point to a function and a function's parameters can receive variables, a function can take another function as an argument; such a function is called a higher-order function
>>> def add(x,y,f):
return f(x)+f(y)
>>> add(-4,-9,abs)
13
>>> # map and reduce
>>> def f(x):
return x*x
>>> r=map(f,[1,2,3,4,5,6,7,8,9])
>>> list(r)
[1, 4, 9, 16, 25, 36, 49, 64, 81]
>>> list(map(str,[1,2,3,4,5,6]))
['1', '2', '3', '4', '5', '6']
>>> # now look at how reduce is used: reduce applies a function to a sequence [x1, x2, x3, ...]; the function must take two arguments, and reduce keeps combining the accumulated result with the next element of the sequence, so the effect is
>>> reduce(f, [x1, x2, x3, x4]) = f(f(f(x1, x2), x3), x4)
SyntaxError: invalid syntax
>>> #?
>>> # for example, summing a sequence can be implemented with reduce:
>>> from functools import reduce
>>> def add(x,y):
return x+y
>>> reduce(add,[1,3,5,7,9,11])
36
>>> # for summation you can just use Python's built-in sum(), no need for reduce; but to turn the sequence [1, 3, 5, 7, 9] into the integer 13579, reduce comes in handy:
>>> from functools import reduce
>>> def fn(x,y):
return x*10+y
>>> reduce(fn,[1,3,5,6,4])
13564
>>> def char2num(s):
return {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}[s]
>>> reduce(fn,map(char2num,'123467'))
123467
>>> # wrapped up as a str2int function this becomes:
>>> def str2int(s):
def fn(x,y):
return x*10+y
def char2num(s):
return {'0':0,'1':1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}[s]
return reduce(fn,map(char2num,s))
>>> str2int('9830')
9830
>>> # it can be simplified further with a lambda function:
>>> def char2num(s):
return {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}[s]
def str2int(s):
SyntaxError: invalid syntax
>>> def char2num(s):
return {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}[s]
>>> def str2int(s):
return reduce(lambda x,y:x*10+y,map(char2num,s))
>>> str2int('32331434657680')
32331434657680L
>>>
| gpl-2.0 | -8,190,772,357,273,260,000 | 26.441558 | 156 | 0.557028 | false |
stxent/kmodgen | packages/sop.py | 1 | 3193 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# sop.py
# Copyright (C) 2016 xent
# Project is distributed under the terms of the GNU General Public License v3.0
import math
from wrlconv import model
import primitives
class SOP:
BODY_CHAMFER = primitives.hmils(0.1)
BODY_OFFSET_Z = primitives.hmils(0.1)
BAND_OFFSET = primitives.hmils(0.0)
BAND_WIDTH = primitives.hmils(0.1)
CHAMFER_RESOLUTION = 1
LINE_RESOLUTION = 1
EDGE_RESOLUTION = 3
@staticmethod
def generate_package_pins(pattern, count, size, offset, pitch):
def make_pin(x, y, angle, number): # pylint: disable=invalid-name
pin = model.Mesh(parent=pattern, name='Pin{:d}'.format(number))
pin.translate([x, y, 0.0])
pin.rotate([0.0, 0.0, 1.0], angle - math.pi / 2.0)
return pin
rows = int(count / 2)
pins = []
# Pins
y_offset = size[1] / 2.0 + offset
for i in range(0, rows):
x_offset = pitch * (i - (rows - 1) / 2.0)
pins.append(make_pin(x_offset, y_offset, math.pi, i + 1 + rows))
pins.append(make_pin(-x_offset, -y_offset, 0.0, i + 1))
return pins
def generate(self, materials, _, descriptor):
body_size = primitives.hmils(descriptor['body']['size'])
pin_height = body_size[2] / 2.0 + SOP.BODY_OFFSET_Z
pin_shape = primitives.hmils(descriptor['pins']['shape'])
band_width_proj = SOP.BAND_WIDTH * math.sqrt(0.5)
body_slope = math.atan(2.0 * band_width_proj / body_size[2])
pin_offset = pin_shape[1] * math.sin(body_slope) / 2.0
body_transform = model.Transform()
body_transform.rotate([0.0, 0.0, 1.0], math.pi)
body_transform.translate([0.0, 0.0, pin_height])
body_mesh = primitives.make_sloped_box(
size=body_size,
chamfer=SOP.BODY_CHAMFER,
slope=math.pi / 4.0,
slope_height=body_size[2] / 5.0,
edge_resolution=SOP.EDGE_RESOLUTION,
line_resolution=SOP.LINE_RESOLUTION,
band=SOP.BAND_OFFSET,
band_width=SOP.BAND_WIDTH)
if 'Body' in materials:
body_mesh.appearance().material = materials['Body']
body_mesh.apply(body_transform)
body_mesh.rename('Body')
pin_mesh = primitives.make_pin_mesh(
pin_shape_size=pin_shape,
pin_height=pin_height + pin_shape[1] * math.cos(body_slope) / 2.0,
pin_length=primitives.hmils(descriptor['pins']['length']) + pin_offset,
pin_slope=math.pi * (10.0 / 180.0),
end_slope=body_slope,
chamfer_resolution=SOP.CHAMFER_RESOLUTION,
edge_resolution=SOP.EDGE_RESOLUTION)
if 'Pin' in materials:
pin_mesh.appearance().material = materials['Pin']
pins = SOP.generate_package_pins(
pattern=pin_mesh,
count=descriptor['pins']['count'],
size=body_size,
offset=band_width_proj - pin_offset,
pitch=primitives.hmils(descriptor['pins']['pitch']))
return pins + [body_mesh]
types = [SOP]
| gpl-3.0 | -857,548,429,318,372,200 | 32.968085 | 87 | 0.571876 | false |
Micronaet/micronaet-utility | excel_export/__openerp__.py | 1 | 1433 | ###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'Export XLSX files',
'version': '0.1',
'category': 'Utility',
'description': '''
Utility for managing external XLSX files
''',
'author': 'Micronaet S.r.l. - Nicola Riolini',
'website': 'http://www.micronaet.it',
'license': 'AGPL-3',
'depends': [
'base',
],
'init_xml': [],
'demo': [],
'data': [
'data/config_data.xml',
],
'active': False,
'installable': True,
'auto_install': False,
}
| agpl-3.0 | -5,440,369,799,933,118,000 | 33.95122 | 79 | 0.552687 | false |
liberation/django-registration | registration/forms.py | 1 | 5027 | """
Forms and validation code for user registration.
Note that all of these forms assume Django's bundled default ``User``
model; since it's not possible for a form to anticipate in advance the
needs of custom user models, you will need to write your own forms if
you're using a custom model.
"""
from django.contrib.auth.models import User
from django import forms
from django.utils.translation import ugettext_lazy as _
# I put this on all required fields, because it's easier to pick up
# on them with CSS or JavaScript if they have a class of "required"
# in the HTML. Your mileage may vary. If/when Django ticket #3515
# lands in trunk, this will no longer be necessary.
attrs_dict = {'class': 'required'}
class RegistrationForm(forms.Form):
"""
Form for registering a new user account.
Validates that the requested username is not already in use, and
requires the password to be entered twice to catch typos.
Subclasses should feel free to add any additional validation they
need, but should avoid defining a ``save()`` method -- the actual
saving of collected user data is delegated to the active
registration backend.
"""
required_css_class = 'required'
username = forms.RegexField(regex=r'^[\w.@+-]+$',
max_length=30,
label=_("Username"),
error_messages={'invalid': _("This value may contain only letters, numbers and @/./+/-/_ characters.")})
email = forms.EmailField(widget=forms.TextInput(attrs=dict(attrs_dict,
maxlength=75)),
label=_("Email address"))
password1 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
label=_("Password"))
password2 = forms.CharField(widget=forms.PasswordInput,
label=_("Password (again)"))
def clean_username(self):
"""
Validate that the username is alphanumeric and is not already
in use.
"""
existing = User.objects.filter(username__iexact=self.cleaned_data['username'])
if existing.exists():
raise forms.ValidationError(_("A user with that username already exists."))
else:
return self.cleaned_data['username']
def clean(self):
"""
        Verify that the values entered into the two password fields
match. Note that an error here will end up in
``non_field_errors()`` because it doesn't apply to a single
field.
"""
if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
if self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise forms.ValidationError(_("The two password fields didn't match."))
return self.cleaned_data
class RegistrationFormTermsOfService(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which adds a required checkbox
for agreeing to a site's Terms of Service.
"""
tos = forms.BooleanField(widget=forms.CheckboxInput,
label=_(u'I have read and agree to the Terms of Service'),
error_messages={'required': _("You must agree to the terms to register")})
class RegistrationFormUniqueEmail(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which enforces uniqueness of
email addresses.
"""
def clean_email(self):
"""
Validate that the supplied email address is unique for the
site.
"""
if User.objects.filter(email__iexact=self.cleaned_data['email']):
raise forms.ValidationError(_("This email address is already in use. Please supply a different email address."))
return self.cleaned_data['email']
class RegistrationFormNoFreeEmail(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which disallows registration with
email addresses from popular free webmail services; moderately
useful for preventing automated spam registrations.
To change the list of banned domains, subclass this form and
override the attribute ``bad_domains``.
"""
bad_domains = ['aim.com', 'aol.com', 'email.com', 'gmail.com',
'googlemail.com', 'hotmail.com', 'hushmail.com',
'msn.com', 'mail.ru', 'mailinator.com', 'live.com',
'yahoo.com']
def clean_email(self):
"""
Check the supplied email address against a list of known free
webmail domains.
"""
email_domain = self.cleaned_data['email'].split('@')[1]
if email_domain in self.bad_domains:
raise forms.ValidationError(_("Registration using free email addresses is prohibited. Please supply a different email address."))
return self.cleaned_data['email']
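# Illustrative sketch only (not part of the original module): as the docstring above
# notes, the banned-domain list can be customised by subclassing and overriding
# ``bad_domains``. The extra domains below are arbitrary examples.
class RegistrationFormCustomDomains(RegistrationFormNoFreeEmail):
    bad_domains = RegistrationFormNoFreeEmail.bad_domains + ['example.com', 'example.org']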
| bsd-3-clause | -6,836,387,276,208,603,000 | 37.968992 | 141 | 0.623831 | false |
Symbian9/ysfs_2_0 | explode_srf.py | 1 | 5198 | import os
import bpy
import bmesh
import mathutils
from bpy.props import (BoolProperty, FloatProperty, StringProperty, EnumProperty)
from bpy_extras.io_utils import (ImportHelper, ExportHelper, unpack_list, unpack_face_list, axis_conversion)
# Information
bl_info = {
'name' : 'YSFS 2.0 - DNM Parts as SRF file',
'description': 'YSFlight scripts | Export all objects in scene to DNM with separated parts as SRF files.',
'author' : 'Symbian9, Mr Mofumofu',
'version' : (2, 0, 1),
'blender' : (2, 75, 0),
'location' : 'File > Import-Export',
'warning' : '',
'wiki_url' : '',
'tracker_url': 'http://github.com/Symbian9/ysfs_2_0/issues/new',
'category' : 'Airplanes 3D',
}
# Export Form
class ExplodeSRF(bpy.types.Operator, ExportHelper):
# Settings
bl_idname = 'export_model.expsrf'
bl_label = 'Export DNM Parts(SURF)'
filter_glob = StringProperty(
default = '*.srf',
options = {'HIDDEN'},
)
check_extension = True
filename_ext = '.srf'
# On Click Save Button
def execute(self, context):
# ==============================
# Getting Data
# ==============================
# Currently Scene
scene = context.scene
# Rotation(Option)
global_matrix = mathutils.Matrix((
(-1.0, 0.0, 0.0, 0.0),
( 0.0, 0.0, 1.0, 0.0),
( 0.0, -1.0, 0.0, 0.0),
( 0.0, 0.0, 0.0, 1.0),
))
# Selected Object
for object in scene.objects:
export(object, self.filepath, global_matrix)
return {'FINISHED'}
def export(object, filepath, global_matrix):
me = object.data
for objects in object.children:
export(objects, filepath, global_matrix)
if isinstance(me, bpy.types.Mesh):
# Convert to BMesh(For N-Sided Polygon)
bm = bmesh.new()
bm.from_mesh(me)
# Rotation(Option)
bm.transform(global_matrix * object.matrix_world)
bm.normal_update()
# Vertexs and Faces
verts = bm.verts
faces = bm.faces
# ==============================
# Output
# ==============================
# Save File
filepath = '{0}/{1}.srf'.format(os.path.dirname(filepath), object.name)
filepath = os.fsencode(filepath)
fp = open(filepath, 'w')
# For Transparent
za = ''
zacount = 0
# Header
fp.write('SURF\n')
# Vertexs
for vert in verts:
fp.write('V {:.4f} {:.4f} {:.4f} '.format(*vert.co))
# Smoothing
smooth = True
for edge in vert.link_edges:
if edge.smooth == False:
smooth = False
break
if smooth:
for face in vert.link_faces:
if face.smooth:
fp.write('R')
break
fp.write('\n')
# Faces
for face in faces:
fp.write('F\n')
# Has Material?
if len(object.material_slots):
# Getting Material
material = object.material_slots[face.material_index].material
# Color
color = material.diffuse_color * 255.0
fp.write('C {:.0f} {:.0f} {:.0f}\n'.format(*color))
# Lighting
if material.emit > 0.0:
fp.write('B\n')
# Transparent
if material.alpha < 1.0:
if zacount == 0:
za = 'ZA {:d} {:.0f}'.format(face.index, (1.0 - material.alpha) * 228.0)
elif zacount % 8 == 0:
za += '\nZA {:d} {:.0f}'.format(face.index, (1.0 - material.alpha) * 228.0)
else:
za += ' {:d} {:.0f}'.format(face.index, (1.0 - material.alpha) * 228.0)
zacount = zacount + 1
# Median and Normal
median = face.calc_center_median_weighted()
normal = -face.normal
fp.write('N {:.4f} {:.4f} {:.4f} '.format(*median))
fp.write('{:.4f} {:.4f} {:.4f}\n'.format(*normal))
# Vertexs consist Face
fp.write('V')
for vid in face.verts:
fp.write(' {:d}'.format(vid.index))
fp.write('\n')
fp.write('E\n')
# Footer
fp.write('E\n')
# For Transparent
if za != '':
fp.write(za + '\n')
# ==============================
# Close
# ==============================
fp.close()
bm.free()
return {'FINISHED'}
# Menu Button
def menu_func_export(self, context):
self.layout.operator(ExplodeSRF.bl_idname, text = 'DNM Parts (.srf)')
# Regist
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_export.append(menu_func_export)
# Unregist
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_export.remove(menu_func_export)
if __name__ == '__main__':
register()
| mit | -1,173,587,664,364,313,600 | 29.940476 | 110 | 0.481147 | false |
DoWhatILove/turtle | programming/python/library/nltk/tagging.py | 1 | 4417 | #%%
from collections import defaultdict
from nltk.corpus import brown
from nltk import bigrams
pos = defaultdict(lambda:defaultdict(int))
brown_news_tagged = brown.tagged_words(categories='news',tagset='universal')
for ((w1,t1),(w2,t2)) in bigrams(brown_news_tagged):
pos[(t1,w2)][t2] += 1
pos[('DET','right')]
#%%
# the tag of a word depends on the word and its context within a sentence.
import nltk
brown_tagged_sents = brown.tagged_sents(categories='news')
brown_sents = brown.sents(categories='news')
tags = [tag for (word,tag) in brown.tagged_words(categories='news')]
nltk.FreqDist(tags).max()
#%%
from nltk import word_tokenize
raw = "I do not like green eggs and ham, I do not like them Sam I am!"
tokens = word_tokenize(raw)
default_tagger = nltk.DefaultTagger('NN')
default_tagger.tag(tokens)
#%%
default_tagger.evaluate(brown_tagged_sents)
#%%
brown_tagged_sents[1]
#%%
patterns = [
(r'.*ing$','VBG'),
(r'.*ed$','VBD'),
(r'.*es$','VBZ'),
(r'.*ould$','MD'),
(r'.*\'s$','NN$'),
(r'.*s$','NNS'),
(r'^in$','IN'),
(r'^-?[0-9]+(.[0-9]+)?$','CD'),
(r'.*','NN')
]
regexp_tagger = nltk.RegexpTagger(patterns)
regexp_tagger.tag(brown_sents[3])
regexp_tagger.evaluate(brown_tagged_sents)
#%%
fd = nltk.FreqDist(brown.words(categories='news'))
cfd = nltk.ConditionalFreqDist(brown.tagged_words(categories='news'))
most_freq_words = fd.most_common(100)
likely_tags = dict((word, cfd[word].max()) for (word,_) in most_freq_words)
baseline_tagger = nltk.UnigramTagger(model=likely_tags,backoff=nltk.DefaultTagger('NN'))
baseline_tagger.evaluate(brown_tagged_sents)
#%%
def performance(cfd, wordlist):
lt = dict((word, cfd[word].max()) for word in wordlist)
baseline_tagger = nltk.UnigramTagger(model=lt, backoff=nltk.DefaultTagger('NN'))
return baseline_tagger.evaluate(brown.tagged_sents(categories='news'))
def display():
import pylab
word_freqs = nltk.FreqDist(brown.words(categories='news')).most_common()
words_by_freq = [w for (w,_) in word_freqs]
cfd = nltk.ConditionalFreqDist(brown.tagged_words(categories='news'))
sizes = 2 ** pylab.arange(15)
perfs = [performance(cfd, words_by_freq[:size]) for size in sizes]
pylab.plot(sizes,perfs,'-bo')
pylab.title('Lookup Tagger performance with Varying model size')
pylab.xlabel('Model Size')
pylab.ylabel('Performance')
pylab.show()
display()
#%%
# Unigram tagging
from nltk.corpus import brown
brown_tagged_sents = brown.tagged_sents(categories='news')
brown_sents = brown.sents(categories='news')
train_size = int(len(brown_tagged_sents)*0.9)
train_sents = brown_tagged_sents[:train_size]
test_sents=brown_tagged_sents[train_size:]
unigram_tagger = nltk.UnigramTagger(train_sents)
unigram_tagger.evaluate(test_sents)
#%%
# sparse problem: with bigram tagger, we cannot observe the <context> in the training data
bigram_tagger = nltk.BigramTagger(train_sents)
bigram_tagger.evaluate(test_sents)
#%%
# use the unigram as back-off model
# one way to address the trade-off between accuracy and coverage is to use the more accurate algorithms,
# when we can, but to fall back on algorithms with wider coverage when necessary.
# 1. try tagging the token with the bigram tagger
# 2. if the bigram tagger is unable to find a tag for the token, try the unigram tagger
# 3. if the unigram tagger is also unable to find a tag, use a default tagger
t0 = nltk.DefaultTagger('NN')
print(t0.evaluate(test_sents))
t1 = nltk.UnigramTagger(train_sents,backoff=t0)
print(t1.evaluate(test_sents))
t2 = nltk.BigramTagger(train_sents,backoff=t1)
print(t2.evaluate(test_sents))
t3 = nltk.TrigramTagger(train_sents,backoff=t2)
t3.evaluate(test_sents)
#%%
# dump and load
from pickle import dump,load
stored = open(r'E:\temp\t2.pkl','wb')
dump(t2,stored,-1)
stored.close()
model = open(r'E:\temp\t2.pkl','rb')
tagger = load(model)
model.close()
tagger.tag(test_sents[0])
#%%
# Brill tagging
# unlike n-gram tagging, it does not count observations but compiles a list of transformational correction rules
# all rules are generated from a template of the following form: replace T1 with T2 in the context C
# typical context are the identity or the tag of the preceding or following word
# or the appearance of a specific tag within 2-3 words of the current word
# the score of a rule is the number of broken examples it corrects minus the number of correct cases it breaks
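#%%
# A sketch, not part of the original notes: one way to train a Brill tagger in NLTK 3,
# reusing the backoff tagger t2 and the train/test split built above. fntbl37() is one of
# the stock template sets shipped with nltk; max_rules is kept small here for a quick run.
from nltk.tag.brill import fntbl37
from nltk.tag.brill_trainer import BrillTaggerTrainer
trainer = BrillTaggerTrainer(initial_tagger=t2, templates=fntbl37(), trace=1)
brill_tagger = trainer.train(train_sents, max_rules=20)
brill_tagger.evaluate(test_sents)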
| mit | 3,780,675,056,940,133,400 | 31.718519 | 112 | 0.715418 | false |
linsalrob/PyFBA | PyFBA/cmd/gapfill_from_roles.py | 1 | 22366 | """
Given a set of roles (e.g. from a genome annotation) can we gap fill those?
Largely based on From_functional_roles_to_gap-filling
"""
import os
import sys
import PyFBA
import argparse
import copy
from PyFBA import log_and_message
def run_eqn(why, md, r2r, med, bme, verbose=False):
"""
Run the fba
:param why: why are we doing this
:param md: modeldata
:param r2r: reactions to run
:param med: media object
:param bme: biomass equation
:param verbose: more output
:type verbose: bool
:return: (value, growth)
"""
status, value, growth = PyFBA.fba.run_fba(md, r2r, med, bme)
log_and_message(f"FBA run {why} has a biomass flux value of {value} --> Growth: {growth}", stderr=verbose)
return value, growth
def minimize_reactions(original_reactions_to_run, added_reactions, modeldata, media, biomass_equation, verbose=False):
"""
    Sort through all the added reactions and return a dict of new reactions
:param original_reactions_to_run: the original set from our genome
:type original_reactions_to_run: set(PyFBA.metabolism.Reaction)
:param added_reactions: new reactions we need
:type added_reactions: list[(str, set(str)]
:param modeldata: our modeldata object
:type modeldata: PyFBA.model_seed.ModelData
:param media: our media object
:type media: set[PyFBA.metabolism.Compound]
:param biomass_equation: our biomass equation
:type biomass_equation: PyFBA.metabolism.Reaction
:param verbose: more output
:type verbose: bool
:return: A dict of the minimal set of reactions and their source
:rtype: dict[str, str]
"""
reqd_additional = set()
print(f"Before we began, we had {len(original_reactions_to_run)} reactions")
rxn_source = {}
while added_reactions:
ori = copy.deepcopy(original_reactions_to_run)
ori.update(reqd_additional)
# Test next set of gap-filled reactions
# Each set is based on a method described above
how, new = added_reactions.pop()
sys.stderr.write(f"Testing reactions from {how}\n")
# Get all the other gap-filled reactions we need to add
for tple in added_reactions:
ori.update(tple[1])
for r in new:
# remember the source. It doesn't matter if we overwrite, as it will replace later with earlier
rxn_source[r] = how
# Use minimization function to determine the minimal
# set of gap-filled reactions from the current method
new_essential = PyFBA.gapfill.minimize_additional_reactions(ori, new, modeldata, media, biomass_equation,
verbose=True)
log_and_message(f"Saved {len(new_essential)} reactions from {how}", stderr=verbose)
# Record the method used to determine
# how the reaction was gap-filled
for new_r in new_essential:
modeldata.reactions[new_r].is_gapfilled = True
modeldata.reactions[new_r].gapfill_method = how
reqd_additional.update(new_essential)
# add the original set too
for r in original_reactions_to_run:
rxn_source[r] = 'genome prediction'
# Combine old and new reactions and add the source, to return a dict
return {r: rxn_source[r] for r in original_reactions_to_run.union(reqd_additional)}
def roles_to_reactions_to_run(roles, orgtype='gramnegative', verbose=False):
roles_to_reactions = PyFBA.filters.roles_to_reactions(roles, organism_type=orgtype, verbose=verbose)
reactions_to_run = set()
for role in roles_to_reactions:
reactions_to_run.update(roles_to_reactions[role])
log_and_message(f"There are {len(reactions_to_run)} unique reactions associated with this genome", stderr=verbose)
return reactions_to_run
def read_media(mediafile, modeldata, verbose=False):
"""
Read the media file and return a set of compounds
:param modeldata: the modeldata object
:type modeldata: PyFBA.model_seed.ModelData
:param mediafile: the media file to read
:param verbose: more output
:type verbose: bool
:return: a set of media compounds
:rtype: Set[PyFBA.metabolism.Compound]
"""
if mediafile in PyFBA.parse.media_files():
log_and_message(f"parsing media directly from {mediafile}", stderr=verbose)
# pyfba media already corrects the names, so we can just return it.
return PyFBA.parse.pyfba_media(mediafile, modeldata)
elif os.path.exists(mediafile):
log_and_message(f"parsing media file {mediafile}", stderr=verbose)
media = PyFBA.parse.read_media_file(mediafile)
elif 'PYFBA_MEDIA_DIR' in os.environ and os.path.exists(os.path.join(os.environ['PYFBA_MEDIA_DIR'], mediafile)):
log_and_message(f"parsing media file {os.path.join(os.environ['PYFBA_MEDIA_DIR'], mediafile)}", stderr=verbose)
media = PyFBA.parse.read_media_file(os.path.join(os.environ['PYFBA_MEDIA_DIR'], mediafile))
else:
log_and_message(f"Can't figure out how to parse media from {mediafile}", stderr=True, loglevel="CRITICAL")
sys.exit(-1)
return PyFBA.parse.correct_media_names(media, modeldata.compounds)
def update_r2r(old, new, why, verbose=False):
"""
Update the reactions to run and log the changes
:param old: the initial reactions to run
:param new: the new reactions to add
:param why: the step we are at
:param verbose: more output
:return: a set of reactions to run
:rtype: set[str]
"""
before = len(old)
old.update(new)
msg = f"Before updating reactions from {why}: {before} reactions, after {len(old)} reactions"
log_and_message(msg, stderr=verbose)
return old
def run_gapfill_from_roles(roles, reactions_to_run, modeldata, media, orgtype='gramnegative', close_orgs=None,
close_genera=None, verbose=False):
"""
gapfill growth from a set of roles in the genome
:param close_genera: the list of roles in close genera
:param close_orgs: the list of roles in close organisms
:param roles: The set of roles in this genome
:type roles: set[str[
:param reactions_to_run: The reactions to run
:type reactions_to_run: set[str]
:param modeldata: the modeldata object
:type modeldata: PyFBA.model_seed.ModelData
:param media: a set of media compounds
:type media: Set[PyFBA.metabolism.Compound]
:param orgtype: the organism type for the model
:type orgtype: str
:param verbose: more output
:type verbose: bool
:return: a dict of the reactions and what step they were added at
:rtype: dict[str, str]
"""
tempset = set()
for r in reactions_to_run:
if r in modeldata.reactions:
tempset.add(r)
else:
log_and_message(f"Reaction ID {r} is not in our reactions list. Skipped", stderr=verbose)
reactions_to_run = tempset
biomass_equation = PyFBA.metabolism.biomass_equation(orgtype)
run_eqn("Initial", modeldata, reactions_to_run, media, biomass_equation, verbose=verbose)
added_reactions = []
original_reactions_to_run = copy.deepcopy(reactions_to_run)
#############################################################################################
# Gapfilling #
# #
# We do this in the order: #
# essential reactions: because you need to have these, but it is stronger evidence if #
# your friends have it too! #
# media: because you should be importing everything in the media #
# linked_reactions: because they make sense! #
# closely related organisms: because you should have roles your friends have #
# subsystems: to complete things you already have #
# orphans: to make sure everything is produced/consumed #
# probability: because there are other reactions we can add #
# reactions with proteins: to make sure you can at least grow on the media #
# #
#############################################################################################
#############################################################################################
# ESSENTIAL PROTEINS #
#############################################################################################
log_and_message("Gap filling from Essential Reactions", stderr=verbose)
essential_reactions = PyFBA.gapfill.suggest_essential_reactions()
for r in essential_reactions:
modeldata.reactions[r].reset_bounds()
added_reactions.append(("essential", essential_reactions))
reactions_to_run = update_r2r(reactions_to_run, essential_reactions, "ESSENTIAL REACTIONS")
value, growth = run_eqn("Initial", modeldata, reactions_to_run, media, biomass_equation, verbose=verbose)
if growth:
return minimize_reactions(original_reactions_to_run, added_reactions, modeldata, media, biomass_equation,
verbose=verbose)
#############################################################################################
# LINKED REACTIONS #
#############################################################################################
log_and_message("Gap filling from Linked Reactions", stderr=verbose)
linked_reactions = PyFBA.gapfill.suggest_linked_reactions(modeldata, reactions_to_run)
for r in linked_reactions:
modeldata.reactions[r].reset_bounds()
added_reactions.append(("linked_reactions", linked_reactions))
reactions_to_run = update_r2r(reactions_to_run, linked_reactions, "LINKED REACTIONS")
value, growth = run_eqn("Initial", modeldata, reactions_to_run, media, biomass_equation, verbose=verbose)
if growth:
return minimize_reactions(original_reactions_to_run, added_reactions, modeldata, media, biomass_equation,
verbose=verbose)
#############################################################################################
# EC NUMBERS #
#############################################################################################
log_and_message("Gap filling from limited EC numbers", stderr=verbose)
ecnos = PyFBA.gapfill.suggest_reactions_using_ec(roles, modeldata, reactions_to_run, verbose=verbose)
for r in ecnos:
modeldata.reactions[r].reset_bounds()
added_reactions.append(("ec_numbers_brief", ecnos))
reactions_to_run = update_r2r(reactions_to_run, ecnos, "EC Numbers")
value, growth = run_eqn("Initial", modeldata, reactions_to_run, media, biomass_equation, verbose=verbose)
if growth:
return minimize_reactions(original_reactions_to_run, added_reactions, modeldata, media, biomass_equation,
verbose=verbose)
#############################################################################################
# Media import reactions #
#############################################################################################
log_and_message("Gap filling from MEDIA", stderr=verbose)
media_reactions = PyFBA.gapfill.suggest_from_media(modeldata, reactions_to_run, media, verbose=verbose)
added_reactions.append(("media", media_reactions))
reactions_to_run = update_r2r(reactions_to_run, media_reactions, "MEDIA REACTIONS")
value, growth = run_eqn("Initial", modeldata, reactions_to_run, media, biomass_equation, verbose=verbose)
if growth:
return minimize_reactions(original_reactions_to_run, added_reactions, modeldata, media, biomass_equation,
verbose=verbose)
#############################################################################################
# Other genomes and organisms #
#############################################################################################
log_and_message("Gap filling from CLOSE GENOMES", stderr=verbose)
if close_orgs:
# add reactions from roles in close genomes
close_reactions = PyFBA.gapfill.suggest_from_roles(close_orgs, modeldata.reactions, threshold=0,
verbose=verbose)
close_reactions.difference_update(reactions_to_run)
added_reactions.append(("close genomes ", close_reactions))
reactions_to_run = update_r2r(reactions_to_run, close_reactions, "CLOSE ORGANISMS")
value, growth = run_eqn("Initial", modeldata, reactions_to_run, media, biomass_equation, verbose=verbose)
if growth:
return minimize_reactions(original_reactions_to_run, added_reactions, modeldata, media, biomass_equation,
verbose=verbose)
if close_genera:
# add reactions from roles in similar genera
genus_reactions = PyFBA.gapfill.suggest_from_roles(close_genera, modeldata.reactions, threshold=0,
verbose=verbose)
genus_reactions.difference_update(reactions_to_run)
added_reactions.append(("other genera", genus_reactions))
reactions_to_run = update_r2r(reactions_to_run, genus_reactions, "CLOSE GENERA")
value, growth = run_eqn("Initial", modeldata, reactions_to_run, media, biomass_equation, verbose=verbose)
if growth:
return minimize_reactions(original_reactions_to_run, added_reactions, modeldata, media, biomass_equation,
verbose=verbose)
#############################################################################################
# Subsystems #
#############################################################################################
log_and_message("Gap filling from SUBSYSTEMS", stderr=verbose)
subsystem_reactions = PyFBA.gapfill.suggest_reactions_from_subsystems(modeldata.reactions, reactions_to_run,
organism_type=orgtype, threshold=0.5,
verbose=verbose)
added_reactions.append(("subsystems", subsystem_reactions))
reactions_to_run = update_r2r(reactions_to_run, subsystem_reactions, "SUBSYSTEMS")
value, growth = run_eqn("Initial", modeldata, reactions_to_run, media, biomass_equation, verbose=verbose)
if growth:
return minimize_reactions(original_reactions_to_run, added_reactions, modeldata, media, biomass_equation,
verbose=verbose)
#############################################################################################
# Orphan compounds #
#############################################################################################
log_and_message("Gap filling from ORPHANS", stderr=verbose)
orphan_compounds = PyFBA.gapfill.suggest_by_compound(modeldata, reactions_to_run, 1)
added_reactions.append(("orphans", orphan_compounds))
reactions_to_run = update_r2r(reactions_to_run, orphan_compounds, "ORPHANS")
value, growth = run_eqn("Initial", modeldata, reactions_to_run, media, biomass_equation, verbose=verbose)
if growth:
return minimize_reactions(original_reactions_to_run, added_reactions, modeldata, media, biomass_equation,
verbose=verbose)
# ## Revisit EC Numbers
#
# When we added the EC numbers before, we were a little conservative, only adding those EC numbers that appeared in
# two or less (by default) reactions. If we get here, lets be aggressive and add any EC number regardless of how
# many reactions we add. We set the `maxnumrx` variable to 0
#############################################################################################
# EC NUMBERS #
#############################################################################################
log_and_message("Gap filling from limited EC numbers", stderr=verbose)
ecnos = PyFBA.gapfill.suggest_reactions_using_ec(roles, modeldata, reactions_to_run, maxnumrx=0, verbose=verbose)
for r in ecnos:
modeldata.reactions[r].reset_bounds()
added_reactions.append(("ec_numbers_full", ecnos))
reactions_to_run = update_r2r(reactions_to_run, ecnos, "EC Numbers")
value, growth = run_eqn("Initial", modeldata, reactions_to_run, media, biomass_equation, verbose=verbose)
if growth:
return minimize_reactions(original_reactions_to_run, added_reactions, modeldata, media, biomass_equation,
verbose=verbose)
    # We revisit linked reactions once more, because now we have many more reactions in our set to run!
#############################################################################################
# LINKED REACTIONS #
#############################################################################################
log_and_message("Gap filling from Linked Reactions", stderr=verbose)
linked_reactions = PyFBA.gapfill.suggest_linked_reactions(modeldata, reactions_to_run)
for r in linked_reactions:
modeldata.reactions[r].reset_bounds()
added_reactions.append(("linked_reactions_full", linked_reactions))
reactions_to_run = update_r2r(reactions_to_run, linked_reactions, "LINKED REACTIONS")
value, growth = run_eqn("Initial", modeldata, reactions_to_run, media, biomass_equation, verbose=verbose)
if growth:
return minimize_reactions(original_reactions_to_run, added_reactions, modeldata, media, biomass_equation,
verbose=verbose)
log_and_message(f"FATAL: After compiling {len(reactions_to_run)} reactions, we still could not get growth",
stderr=True, loglevel='CRITICAL')
return set()
def gapfill_from_roles():
"""
Parse the arguments and start the gapfilling.
"""
orgtypes = ['gramnegative', 'grampositive', 'microbial', 'mycobacteria', 'plant']
parser = argparse.ArgumentParser(description='Run Flux Balance Analysis on a set of gapfilled functional roles')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-r', '--roles', help='A list of functional roles in this genome, one per line')
group.add_argument('-a', '--assigned_functions', help='RAST assigned functions (tab separated PEG/Functional Role)')
group.add_argument('-f', '--features', help='PATRIC features.txt file (with 5 columns)')
parser.add_argument('-o', '--output', help='file to save new reaction list to', required=True)
parser.add_argument('-m', '--media', help='media name', required=True)
parser.add_argument('-t', '--type', default='gramnegative',
help=f'organism type for the model (currently allowed are {orgtypes}). Default=gramnegative')
parser.add_argument('-c', '--close', help='a file with roles from close organisms')
parser.add_argument('-g', '--genera', help='a file with roles from similar genera')
parser.add_argument('-v', '--verbose', help='verbose output', action='store_true')
args = parser.parse_args(sys.argv[2:])
log_and_message(f"Running PyFBA with the parameters: {sys.argv}\n", quiet=True)
model_data = PyFBA.parse.model_seed.parse_model_seed_data(args.type)
if args.roles:
if not os.path.exists(args.roles):
sys.stderr.write(f"FATAL: {args.roles} does not exist. Please check your files\n")
sys.exit(1)
log_and_message(f"Getting the roles from {args.roles}", stderr=args.verbose)
roles = PyFBA.parse.read_functional_roles(args.roles, args.verbose)
elif args.assigned_functions:
if not os.path.exists(args.assigned_functions):
sys.stderr.write(f"FATAL: {args.assigned_functions} does not exist. Please check your files\n")
sys.exit(1)
log_and_message(f"Getting the roles from {args.assigned_functions}", stderr=args.verbose)
roles = PyFBA.parse.assigned_functions_set(args.assigned_functions)
elif args.features:
if not os.path.exists(args.features):
sys.stderr.write(f"FATAL: {args.features} does not exist. Please check your files\n")
sys.exit(1)
log_and_message(f"Getting the roles from {args.features}", stderr=args.verbose)
roles = PyFBA.parse.read_features_file(args.features, args.verbose)
else:
sys.stderr.write("FATAL. Either a roles or functions file must be provided")
sys.exit(1)
reactions_to_run = roles_to_reactions_to_run(roles, args.type, args.verbose)
media = read_media(args.media, model_data, args.verbose)
new_reactions = run_gapfill_from_roles(roles=roles, reactions_to_run=reactions_to_run, modeldata=model_data,
media=media, orgtype=args.type, close_orgs=args.close,
close_genera=args.genera, verbose=args.verbose)
if new_reactions:
with open(args.output, 'w') as out:
for r in new_reactions:
out.write(f"{r}\t{new_reactions[r]}\n")
if __name__ == "__main__":
gapfill_from_roles()
| mit | 6,733,707,812,166,747,000 | 51.874704 | 120 | 0.573281 | false |
django-extensions/django-extensions | tests/test_validators.py | 1 | 3306 | # -*- coding: utf-8 -*-
from django.core.exceptions import ValidationError
from django.test import TestCase
from django_extensions.validators import NoControlCharactersValidator, NoWhitespaceValidator
class NoControlCharactersValidatorTests(TestCase):
"""Tests for NoControlCharactersValidator."""
def test_should_raise_default_message_and_code_if_value_contains_new_line(self):
self.validator = NoControlCharactersValidator()
value_with_new_line = 'test\nvalue'
with self.assertRaises(ValidationError) as cm:
self.validator(value_with_new_line)
self.assertEqual(cm.exception.message, 'Control Characters like new lines or tabs are not allowed.')
self.assertEqual(cm.exception.code, 'no_control_characters')
self.assertDictEqual(cm.exception.params, {'value': value_with_new_line, 'whitelist': None})
def test_should_raise_custom_message_and_code_if_value_contains_tabs(self):
self.validator = NoControlCharactersValidator(message='custom message', code='custom code')
value_with_tabs = 'test\tvalue'
with self.assertRaises(ValidationError) as cm:
self.validator(value_with_tabs)
self.assertEqual(cm.exception.message, 'custom message')
self.assertEqual(cm.exception.code, 'custom code')
self.assertDictEqual(cm.exception.params, {'value': value_with_tabs, 'whitelist': None})
def test_should_not_raise_if_value_contains_characters_which_is_on_whitelist(self):
self.validator = NoControlCharactersValidator(message='custom message', code='custom code', whitelist=['\n'])
value_with_new_line = 'test\nvalue'
result = self.validator(value_with_new_line)
self.assertIsNone(result)
class NoWhiteSpaceValidatorTests(TestCase):
"""Tests for NoWhitespaceValidator."""
def test_should_raise_default_message_and_code_if_value_has_leading_whitespace(self):
self.validator = NoWhitespaceValidator()
value_with_leading_whitespace = ' test_value'
with self.assertRaises(ValidationError) as cm:
self.validator(value_with_leading_whitespace)
self.assertEqual(cm.exception.message, 'Leading and Trailing whitespaces are not allowed.')
self.assertEqual(cm.exception.code, 'no_whitespace')
self.assertDictEqual(cm.exception.params, {'value': value_with_leading_whitespace})
def test_should_raise_custom_message_and_code_if_value_has_trailing_whitespace(self):
self.validator = NoWhitespaceValidator(message='custom message', code='custom code')
value_with_trailing_whitespace = 'test value '
with self.assertRaises(ValidationError) as cm:
self.validator(value_with_trailing_whitespace)
self.assertEqual(cm.exception.message, 'custom message')
self.assertEqual(cm.exception.code, 'custom code')
self.assertDictEqual(cm.exception.params, {'value': value_with_trailing_whitespace})
def test_should_not_raise_if_value_doesnt_have_leading_or_trailing_whitespaces(self):
self.validator = NoWhitespaceValidator()
value_without_leading_or_trailing_whitespaces = 'test value'
result = self.validator(value_without_leading_or_trailing_whitespaces)
self.assertIsNone(result)
| mit | -5,568,638,572,219,079,000 | 43.675676 | 117 | 0.717786 | false |
damianpv/sfotipy | sfotipy/urls.py | 1 | 1434 | from django.conf.urls import patterns, include, url
from django.conf import settings
from artists.views import ArtistDetailView, ArtistListView
from django.contrib import admin
admin.autodiscover()
from rest_framework import routers
from artists.views import ArtistViewSet
from albums.views import AlbumViewSet
from tracks.views import TrackViewSet
router = routers.DefaultRouter()
router.register(r'artists', ArtistViewSet)
router.register(r'albums', AlbumViewSet)
router.register(r'tracks', TrackViewSet)
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'sfotipy.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^grappelli/', include('grappelli.urls')), # grappelli URLS
url(r'^admin/', include(admin.site.urls)),
url(r'^tracks/(?P<title>[\w\-\W]+)/', 'tracks.views.track_view', name='track_view'),
#url(r'^tracks/(?P<title>[\w\-]+)/', 'tracks.views.track_view', name='track_view'),
url(r'^signup/', 'userprofiles.views.signup', name='signup'),
url(r'^signin/', 'userprofiles.views.signin', name='signin'),
url(r'^artists/(?P<pk>[\d]+)', ArtistDetailView.as_view()),
url(r'^api/', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
)
if settings.DEBUG:
urlpatterns += patterns('',
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT, }),
)
| mit | -1,419,620,475,134,443,000 | 37.756757 | 108 | 0.684798 | false |
thoas/django-sequere | setup.py | 1 | 1329 | # -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
version = __import__('sequere').__version__
root = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(root, 'README.rst')) as f:
README = f.read()
setup(
name='django-sequere',
version=version,
description='A Django application to implement a follow system and a timeline using multiple backends (db, redis, etc.)',
long_description=README,
author='Florent Messa',
author_email='[email protected]',
url='http://github.com/thoas/django-sequere',
zip_safe=False,
include_package_data=True,
keywords='django libraries settings redis follow timeline'.split(),
platforms='any',
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Topic :: Utilities',
],
extras_require={
'redis': ['redis'],
'nydus': ['nydus'],
},
install_requires=['six'],
tests_require=['coverage', 'exam', 'celery', 'nydus'],
packages=find_packages(exclude=['tests']),
)
| mit | 5,351,943,238,205,519,000 | 31.414634 | 125 | 0.623777 | false |
our-city-app/oca-backend | src/solutions/common/integrations/cirklo/api.py | 1 | 13752 | # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import cloudstorage
import logging
from babel.dates import format_datetime
from datetime import datetime
from google.appengine.ext import ndb, deferred, db
from typing import List
from xlwt import Worksheet, Workbook, XFStyle
from mcfw.cache import invalidate_cache
from mcfw.consts import REST_TYPE_TO
from mcfw.exceptions import HttpBadRequestException, HttpForbiddenException, HttpNotFoundException
from mcfw.restapi import rest
from mcfw.rpc import returns, arguments
from rogerthat.bizz.gcs import get_serving_url
from rogerthat.bizz.service import re_index_map_only
from rogerthat.consts import FAST_QUEUE, HIGH_LOAD_WORKER_QUEUE
from rogerthat.models import ServiceIdentity
from rogerthat.models.settings import ServiceInfo
from rogerthat.rpc import users
from rogerthat.rpc.users import get_current_session
from rogerthat.utils import parse_date
from rogerthat.utils.cloud_tasks import schedule_tasks, create_task
from rogerthat.utils.service import create_service_identity_user
from shop.models import Customer
from solutions import translate
from solutions.common.bizz import SolutionModule, broadcast_updates_pending
from solutions.common.bizz.campaignmonitor import send_smart_email_without_check
from solutions.common.consts import OCA_FILES_BUCKET
from solutions.common.dal import get_solution_settings
from solutions.common.integrations.cirklo.cirklo import get_city_id_by_service_email, whitelist_merchant, \
list_whitelisted_merchants, list_cirklo_cities
from solutions.common.integrations.cirklo.models import CirkloCity, CirkloMerchant, SignupLanguageProperty, \
SignupMails, CirkloAppInfo
from solutions.common.integrations.cirklo.to import CirkloCityTO, CirkloVoucherListTO, CirkloVoucherServiceTO, \
WhitelistVoucherServiceTO
from solutions.common.restapi.services import _check_is_city
def _check_permission(city_sln_settings):
if SolutionModule.CIRKLO_VOUCHERS not in city_sln_settings.modules:
raise HttpForbiddenException()
if len(city_sln_settings.modules) != 1:
_check_is_city(city_sln_settings.service_user)
@rest('/common/vouchers/cities', 'get', silent_result=True)
@returns([dict])
@arguments(staging=bool)
def api_list_cirklo_cities(staging=False):
return list_cirklo_cities(staging)
@rest('/common/vouchers/services', 'get', silent_result=True)
@returns(CirkloVoucherListTO)
@arguments()
def get_cirklo_vouchers_services():
city_service_user = users.get_current_user()
city_sln_settings = get_solution_settings(city_service_user)
_check_permission(city_sln_settings)
to = CirkloVoucherListTO()
to.total = 0
to.results = []
to.cursor = None
to.more = False
cirklo_city = CirkloCity.get_by_service_email(city_service_user.email())
if not cirklo_city:
return to
cirklo_merchants = list_whitelisted_merchants(cirklo_city.city_id)
cirklo_dict = {}
cirklo_emails = []
for merchant in cirklo_merchants:
if merchant['email'] in cirklo_emails:
logging.error('Duplicate found %s', merchant['email'])
continue
cirklo_emails.append(merchant['email'])
cirklo_dict[merchant['email']] = merchant
qry = CirkloMerchant.list_by_city_id(cirklo_city.city_id) # type: List[CirkloMerchant]
osa_merchants = []
merchants_to_put = []
for merchant in qry:
if merchant.service_user_email:
osa_merchants.append(merchant)
else:
cirklo_merchant = cirklo_dict.get(merchant.data['company']['email'])
changed = merchant.populate_from_cirklo(cirklo_merchant)
if changed:
merchants_to_put.append(merchant)
if cirklo_merchant:
if merchant.data['company']['email'] in cirklo_emails:
cirklo_emails.remove(merchant.data['company']['email'])
whitelist_date = cirklo_merchant['createdAt'] if cirklo_merchant else None
to.results.append(
CirkloVoucherServiceTO.from_model(merchant, whitelist_date, merchant.registered, u'Cirklo signup'))
if osa_merchants:
customer_to_get = [Customer.create_key(merchant.customer_id) for merchant in osa_merchants]
customers_dict = {customer.id: customer for customer in db.get(customer_to_get)}
info_keys = [ServiceInfo.create_key(users.User(merchant.service_user_email), ServiceIdentity.DEFAULT)
for merchant in osa_merchants]
models = ndb.get_multi(info_keys)
for service_info, merchant in zip(models, osa_merchants):
customer = customers_dict[merchant.customer_id]
if not customer.service_user:
merchant.key.delete()
continue
cirklo_merchant = cirklo_dict.get(customer.user_email)
changed = merchant.populate_from_cirklo(cirklo_merchant)
if changed:
merchants_to_put.append(merchant)
if cirklo_merchant:
if customer.user_email in cirklo_emails:
cirklo_emails.remove(customer.user_email)
whitelist_date = cirklo_merchant['createdAt'] if cirklo_merchant else None
service_to = CirkloVoucherServiceTO.from_model(merchant, whitelist_date, merchant.registered, u'OSA signup')
service_to.populate_from_info(service_info, customer)
to.results.append(service_to)
if merchants_to_put:
logging.debug('Updating merchants: %s', merchants_to_put)
ndb.put_multi(merchants_to_put)
tasks = [create_task(re_index_map_only, create_service_identity_user(users.User(merchant.service_user_email)))
for merchant in merchants_to_put if merchant.service_user_email]
schedule_tasks(tasks, HIGH_LOAD_WORKER_QUEUE)
for email in cirklo_emails:
cirklo_merchant = cirklo_dict[email]
to.results.append(CirkloVoucherServiceTO.from_cirklo_info(cirklo_merchant))
return to
@rest('/common/vouchers/services/whitelist', 'put', type=REST_TYPE_TO)
@returns(CirkloVoucherServiceTO)
@arguments(data=WhitelistVoucherServiceTO)
def whitelist_voucher_service(data):
city_service_user = users.get_current_user()
city_sln_settings = get_solution_settings(city_service_user)
_check_permission(city_sln_settings)
cirklo_city = CirkloCity.get_by_service_email(city_service_user.email()) # type: CirkloCity
if not cirklo_city:
raise HttpNotFoundException('No cirklo settings found.')
is_cirklo_only_merchant = '@' not in data.id
if is_cirklo_only_merchant:
merchant = CirkloMerchant.create_key(long(data.id)).get() # type: CirkloMerchant
language = merchant.get_language()
else:
merchant = CirkloMerchant.create_key(data.id).get()
language = get_solution_settings(users.User(merchant.service_user_email)).main_language
if data.accepted:
email_id = cirklo_city.get_signup_accepted_mail(language)
if not email_id:
raise HttpBadRequestException(
'The "Signup accepted" email for the language %s is not configured yet' % language)
whitelist_merchant(cirklo_city.city_id, data.email)
else:
email_id = cirklo_city.get_signup_denied_mail(language)
if not email_id:
raise HttpBadRequestException(
'The "Signup denied" email for the language %s is not configured yet' % language)
deferred.defer(send_smart_email_without_check, email_id, [data.email], _countdown=1,
_queue=FAST_QUEUE)
whitelist_date = datetime.now().isoformat() + 'Z' if data.accepted else None
if not is_cirklo_only_merchant:
if data.accepted:
merchant.whitelisted = True
else:
merchant.denied = True
merchant.put()
service_info = ServiceInfo.create_key(users.User(merchant.service_user_email), ServiceIdentity.DEFAULT).get()
customer = Customer.get_by_id(merchant.customer_id) # type: Customer
if data.accepted:
service_identity_user = create_service_identity_user(customer.service_user)
deferred.defer(re_index_map_only, service_identity_user)
to = CirkloVoucherServiceTO.from_model(merchant, whitelist_date, False, u'OSA signup')
to.populate_from_info(service_info, customer)
return to
else:
if data.accepted:
merchant.whitelisted = True
else:
merchant.denied = True
merchant.put()
return CirkloVoucherServiceTO.from_model(merchant, whitelist_date, False, u'Cirklo signup')
@rest('/common/vouchers/cirklo', 'get')
@returns(CirkloCityTO)
@arguments()
def api_vouchers_get_cirklo_settings():
service_user = users.get_current_user()
city = CirkloCity.get_by_service_email(service_user.email())
return CirkloCityTO.from_model(city)
@rest('/common/vouchers/cirklo', 'put')
@returns(CirkloCityTO)
@arguments(data=CirkloCityTO)
def api_vouchers_save_cirklo_settings(data):
service_user = users.get_current_user()
if not get_current_session().shop:
lang = get_solution_settings(service_user).main_language
raise HttpForbiddenException(translate(lang, 'no_permission'))
other_city = CirkloCity.get_by_service_email(service_user.email()) # type: CirkloCity
if not data.city_id:
if other_city:
other_city.key.delete()
return CirkloCityTO.from_model(None)
key = CirkloCity.create_key(data.city_id)
city = key.get()
if not city:
city = CirkloCity(key=key, service_user_email=service_user.email())
elif city.service_user_email != service_user.email():
raise HttpBadRequestException('City id %s is already in use by another service' % data.city_id)
if other_city and other_city.key != key:
other_city.key.delete()
invalidate_cache(get_city_id_by_service_email, service_user.email())
city.logo_url = data.logo_url
city.signup_enabled = data.signup_enabled
city.signup_logo_url = data.signup_logo_url
city.signup_names = None
city.signup_mail = SignupMails.from_to(data.signup_mail)
if data.signup_name_nl and data.signup_name_fr:
city.signup_names = SignupLanguageProperty(nl=data.signup_name_nl,
fr=data.signup_name_fr)
elif data.signup_name_nl:
city.signup_names = SignupLanguageProperty(nl=data.signup_name_nl,
fr=data.signup_name_nl)
elif data.signup_name_fr:
city.signup_names = SignupLanguageProperty(nl=data.signup_name_fr,
fr=data.signup_name_fr)
og_info = city.app_info and city.app_info.to_dict()
info = CirkloAppInfo(enabled=data.app_info.enabled,
title=data.app_info.title,
buttons=data.app_info.buttons)
sln_settings = get_solution_settings(service_user)
if info.to_dict() != og_info and not sln_settings.ciklo_vouchers_only():
city.app_info = info
sln_settings.updates_pending = True
sln_settings.put()
broadcast_updates_pending(sln_settings)
city.put()
return CirkloCityTO.from_model(city)
@rest('/common/vouchers/cirklo/export', 'post')
@returns(dict)
@arguments()
def api_export_cirklo_services():
service_user = users.get_current_user()
city_sln_settings = get_solution_settings(service_user)
_check_permission(city_sln_settings)
all_services = get_cirklo_vouchers_services()
if all_services.cursor:
raise NotImplementedError()
book = Workbook(encoding='utf-8')
sheet = book.add_sheet('Cirklo') # type: Worksheet
language = city_sln_settings.main_language
sheet.write(0, 0, translate(language, 'reservation-name'))
sheet.write(0, 1, translate(language, 'Email'))
sheet.write(0, 2, translate(language, 'address'))
sheet.write(0, 3, translate(language, 'Phone number'))
sheet.write(0, 4, translate(language, 'created'))
sheet.write(0, 5, translate(language, 'merchant_registered'))
date_format = XFStyle()
date_format.num_format_str = 'dd/mm/yyyy'
row = 0
for service in all_services.results:
row += 1
sheet.write(row, 0, service.name)
sheet.write(row, 1, service.email)
sheet.write(row, 2, service.address)
sheet.write(row, 3, service.phone_number)
sheet.write(row, 4, parse_date(service.creation_date), date_format)
sheet.write(row, 5, translate(language, 'Yes') if service.merchant_registered else translate(language, 'No'))
date = format_datetime(datetime.now(), format='medium', locale='en_GB')
gcs_path = '/%s/tmp/cirklo/export-cirklo-%s.xls' % (OCA_FILES_BUCKET, date.replace(' ', '-'))
content_type = 'application/vnd.ms-excel'
with cloudstorage.open(gcs_path, 'w', content_type=content_type) as gcs_file:
book.save(gcs_file)
deferred.defer(cloudstorage.delete, gcs_path, _countdown=86400)
return {
'url': get_serving_url(gcs_path),
}
| apache-2.0 | -1,141,183,746,215,933,700 | 42.109718 | 120 | 0.686009 | false |
kirankaranth1/ShareIt | myproject/myapp/views.py | 1 | 2533 | # -*- coding: utf-8 -*-
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from myproject.myapp.forms import EmailForm
from myproject.myapp.models import Document
from myproject.myapp.forms import DocumentForm
from django.shortcuts import render, get_object_or_404, redirect
from django.core.mail import send_mail
def list(request):
# Handle file upload
if request.method == 'POST':
form = DocumentForm(request.POST, request.FILES)
if form.is_valid():
newdoc = Document(docfile = request.FILES['docfile'])
newdoc.save()
url=newdoc.docfile.url
request.session['file_url'] = url
# Redirect to the document list after POST
return HttpResponseRedirect(reverse('myproject.myapp.views.email_url'))
else:
        form = DocumentForm() # An empty, unbound form
# Load documents for the list page
documents = Document.objects.all()
# Render list page with the documents and the form
return render_to_response(
'myapp/list.html',
{'documents': documents, 'form': form},
context_instance=RequestContext(request)
)
def send_url(email,name,url):
#Need to put mail function here
#send_mail('Subject here', 'Here is the message.', '[email protected]',['[email protected]'], fail_silently=False)
print("Sharing %s with %s as %s" %(url,email,name))
def email_url(request):
file_url = request.session.get('file_url')
hostname = request.get_host()
file_url = str(hostname) + str(file_url)
eform = EmailForm(request.POST or None)
if eform.is_valid():
email = eform.cleaned_data["email"]
name = eform.cleaned_data["name"]
send_url(email,name,file_url)
request.session['recipentEmail'] = email
request.session['name'] = name
request.session['file_url'] = file_url
return HttpResponseRedirect(reverse('myproject.myapp.views.thank_you'))
context = { "eform": eform, "file_url":file_url,}
return render(request,"myapp/email_share.html",context)
def thank_you(request):
recipentEmail = request.session.get('recipentEmail')
recipentName = request.session.get('name')
file_url = request.session.get('file_url')
context = { "recipentName": recipentName,"recipentEmail": recipentEmail, "file_url":file_url}
return render(request,"myapp/thank_you.html",context) | gpl-2.0 | -4,222,012,508,288,221,000 | 37.393939 | 121 | 0.680616 | false |
rjfellman/molecule | molecule/driver/proxmoxdriver.py | 1 | 1362 | # Copyright (c) 2015-2016 Cisco Systems
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from molecule.driver import basedriver
# Place holder for Proxmox, partially implemented
class ProxmoxDriver(basedriver.BaseDriver):
def __init__(self, molecule):
super(ProxmoxDriver, self).__init__()
self.molecule = molecule
| mit | -483,145,222,922,522,240 | 47.642857 | 80 | 0.759178 | false |
santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/SQLAlchemy-0.4.5-py2.5.egg/sqlalchemy/util.py | 1 | 35617 | # util.py
# Copyright (C) 2005, 2006, 2007, 2008 Michael Bayer [email protected]
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import inspect, itertools, new, sets, sys, warnings, weakref
import __builtin__
types = __import__('types')
from sqlalchemy import exceptions
try:
import thread, threading
except ImportError:
import dummy_thread as thread
import dummy_threading as threading
try:
Set = set
set_types = set, sets.Set
except NameError:
set_types = sets.Set,
# layer some of __builtin__.set's binop behavior onto sets.Set
class Set(sets.Set):
def _binary_sanity_check(self, other):
pass
def issubset(self, iterable):
other = type(self)(iterable)
return sets.Set.issubset(self, other)
def __le__(self, other):
sets.Set._binary_sanity_check(self, other)
return sets.Set.__le__(self, other)
def issuperset(self, iterable):
other = type(self)(iterable)
return sets.Set.issuperset(self, other)
def __ge__(self, other):
sets.Set._binary_sanity_check(self, other)
return sets.Set.__ge__(self, other)
# lt and gt still require a BaseSet
def __lt__(self, other):
sets.Set._binary_sanity_check(self, other)
return sets.Set.__lt__(self, other)
def __gt__(self, other):
sets.Set._binary_sanity_check(self, other)
return sets.Set.__gt__(self, other)
def __ior__(self, other):
if not isinstance(other, sets.BaseSet):
return NotImplemented
return sets.Set.__ior__(self, other)
def __iand__(self, other):
if not isinstance(other, sets.BaseSet):
return NotImplemented
return sets.Set.__iand__(self, other)
def __ixor__(self, other):
if not isinstance(other, sets.BaseSet):
return NotImplemented
return sets.Set.__ixor__(self, other)
def __isub__(self, other):
if not isinstance(other, sets.BaseSet):
return NotImplemented
return sets.Set.__isub__(self, other)
try:
import cPickle as pickle
except ImportError:
import pickle
try:
reversed = __builtin__.reversed
except AttributeError:
def reversed(seq):
i = len(seq) -1
while i >= 0:
yield seq[i]
i -= 1
raise StopIteration()
try:
# Try the standard decimal for > 2.3 or the compatibility module
# for 2.3, if installed.
from decimal import Decimal
decimal_type = Decimal
except ImportError:
def Decimal(arg):
if Decimal.warn:
warn("True Decimal types not available on this Python, "
"falling back to floats.")
Decimal.warn = False
return float(arg)
Decimal.warn = True
decimal_type = float
try:
from operator import attrgetter
except:
def attrgetter(attribute):
return lambda value: getattr(value, attribute)
if sys.version_info >= (2, 5):
class PopulateDict(dict):
"""a dict which populates missing values via a creation function.
note the creation function takes a key, unlike collections.defaultdict.
"""
def __init__(self, creator):
self.creator = creator
def __missing__(self, key):
self[key] = val = self.creator(key)
return val
else:
class PopulateDict(dict):
"""a dict which populates missing values via a creation function."""
def __init__(self, creator):
self.creator = creator
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
self[key] = value = self.creator(key)
return value
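# Illustrative usage sketch (not part of the original module): unlike
# collections.defaultdict, the creation function receives the missing key
# itself.
_pd_example = PopulateDict(lambda key: key * key)
assert _pd_example[3] == 9      # value built on first access...
assert 3 in _pd_example         # ...and cached for later lookups
del _pd_example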
try:
from collections import defaultdict
except ImportError:
class defaultdict(dict):
def __init__(self, default_factory=None, *a, **kw):
if (default_factory is not None and
not hasattr(default_factory, '__call__')):
raise TypeError('first argument must be callable')
dict.__init__(self, *a, **kw)
self.default_factory = default_factory
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
if self.default_factory is None:
args = tuple()
else:
args = self.default_factory,
return type(self), args, None, None, self.iteritems()
def copy(self):
return self.__copy__()
def __copy__(self):
return type(self)(self.default_factory, self)
def __deepcopy__(self, memo):
import copy
return type(self)(self.default_factory,
copy.deepcopy(self.items()))
def __repr__(self):
return 'defaultdict(%s, %s)' % (self.default_factory,
dict.__repr__(self))
try:
from collections import deque
except ImportError:
class deque(list):
def appendleft(self, x):
self.insert(0, x)
def extendleft(self, iterable):
self[0:0] = list(iterable)
def popleft(self):
return self.pop(0)
def rotate(self, n):
for i in xrange(n):
self.appendleft(self.pop())
def to_list(x, default=None):
if x is None:
return default
if not isinstance(x, (list, tuple)):
return [x]
else:
return x
def array_as_starargs_decorator(func):
"""Interpret a single positional array argument as
*args for the decorated method.
"""
def starargs_as_list(self, *args, **kwargs):
if len(args) == 1:
return func(self, *to_list(args[0], []), **kwargs)
else:
return func(self, *args, **kwargs)
return starargs_as_list
def to_set(x):
if x is None:
return Set()
if not isinstance(x, Set):
return Set(to_list(x))
else:
return x
def to_ascii(x):
"""Convert Unicode or a string with unknown encoding into ASCII."""
if isinstance(x, str):
return x.encode('string_escape')
elif isinstance(x, unicode):
return x.encode('unicode_escape')
else:
raise TypeError
def flatten_iterator(x):
"""Given an iterator of which further sub-elements may also be
iterators, flatten the sub-elements into a single iterator.
"""
for elem in x:
if hasattr(elem, '__iter__'):
for y in flatten_iterator(elem):
yield y
else:
yield elem
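# Illustrative usage sketch (not part of the original module): nested
# iterables are flattened depth-first into a single stream of elements.
assert list(flatten_iterator([1, [2, [3, 4]], 5])) == [1, 2, 3, 4, 5]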
class ArgSingleton(type):
instances = weakref.WeakValueDictionary()
def dispose(cls):
for key in list(ArgSingleton.instances):
if key[0] is cls:
del ArgSingleton.instances[key]
dispose = staticmethod(dispose)
def __call__(self, *args):
hashkey = (self, args)
try:
return ArgSingleton.instances[hashkey]
except KeyError:
instance = type.__call__(self, *args)
ArgSingleton.instances[hashkey] = instance
return instance
def get_cls_kwargs(cls):
"""Return the full set of inherited kwargs for the given `cls`.
Probes a class's __init__ method, collecting all named arguments. If the
__init__ defines a **kwargs catch-all, then the constructor is presumed to
    pass along unrecognized keywords to its base classes, and the collection
process is repeated recursively on each of the bases.
"""
for c in cls.__mro__:
if '__init__' in c.__dict__:
stack = Set([c])
break
else:
return []
args = Set()
while stack:
class_ = stack.pop()
ctr = class_.__dict__.get('__init__', False)
if not ctr or not isinstance(ctr, types.FunctionType):
continue
names, _, has_kw, _ = inspect.getargspec(ctr)
args.update(names)
if has_kw:
stack.update(class_.__bases__)
args.discard('self')
return list(args)
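# Illustrative usage sketch (the classes below are assumptions for the
# example, not from the original source): keyword names are collected up the
# inheritance chain whenever an __init__ accepts **kwargs.
class _ExampleBase(object):
    def __init__(self, color=None, **kwargs):
        pass
class _ExampleChild(_ExampleBase):
    def __init__(self, size=None, **kwargs):
        pass
assert Set(get_cls_kwargs(_ExampleChild)) == Set(['size', 'color'])
del _ExampleBase, _ExampleChild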
def get_func_kwargs(func):
"""Return the full set of legal kwargs for the given `func`."""
return inspect.getargspec(func)[0]
def unbound_method_to_callable(func_or_cls):
"""Adjust the incoming callable such that a 'self' argument is not required."""
if isinstance(func_or_cls, types.MethodType) and not func_or_cls.im_self:
return func_or_cls.im_func
else:
return func_or_cls
# from paste.deploy.converters
def asbool(obj):
if isinstance(obj, (str, unicode)):
obj = obj.strip().lower()
if obj in ['true', 'yes', 'on', 'y', 't', '1']:
return True
elif obj in ['false', 'no', 'off', 'n', 'f', '0']:
return False
else:
raise ValueError("String is not true/false: %r" % obj)
return bool(obj)
def coerce_kw_type(kw, key, type_, flexi_bool=True):
"""If 'key' is present in dict 'kw', coerce its value to type 'type_' if
necessary. If 'flexi_bool' is True, the string '0' is considered false
when coercing to boolean.
"""
if key in kw and type(kw[key]) is not type_ and kw[key] is not None:
if type_ is bool and flexi_bool:
kw[key] = asbool(kw[key])
else:
kw[key] = type_(kw[key])
def duck_type_collection(specimen, default=None):
"""Given an instance or class, guess if it is or is acting as one of
the basic collection types: list, set and dict. If the __emulates__
property is present, return that preferentially.
"""
if hasattr(specimen, '__emulates__'):
# canonicalize set vs sets.Set to a standard: util.Set
if (specimen.__emulates__ is not None and
issubclass(specimen.__emulates__, set_types)):
return Set
else:
return specimen.__emulates__
isa = isinstance(specimen, type) and issubclass or isinstance
if isa(specimen, list): return list
if isa(specimen, set_types): return Set
if isa(specimen, dict): return dict
if hasattr(specimen, 'append'):
return list
elif hasattr(specimen, 'add'):
return Set
elif hasattr(specimen, 'set'):
return dict
else:
return default
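# Illustrative usage sketch (the class below is an assumption for the
# example): an explicit __emulates__ wins over sniffing append()/add()/set().
class _ExampleBag(object):
    __emulates__ = Set
    def append(self, item):
        pass
assert duck_type_collection(_ExampleBag) is Set
assert duck_type_collection([]) is list
assert duck_type_collection({}) is dict
del _ExampleBag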
def dictlike_iteritems(dictlike):
"""Return a (key, value) iterator for almost any dict-like object."""
if hasattr(dictlike, 'iteritems'):
return dictlike.iteritems()
elif hasattr(dictlike, 'items'):
return iter(dictlike.items())
getter = getattr(dictlike, '__getitem__', getattr(dictlike, 'get', None))
if getter is None:
raise TypeError(
"Object '%r' is not dict-like" % dictlike)
if hasattr(dictlike, 'iterkeys'):
def iterator():
for key in dictlike.iterkeys():
yield key, getter(key)
return iterator()
elif hasattr(dictlike, 'keys'):
return iter([(key, getter(key)) for key in dictlike.keys()])
else:
raise TypeError(
"Object '%r' is not dict-like" % dictlike)
def assert_arg_type(arg, argtype, name):
if isinstance(arg, argtype):
return arg
else:
if isinstance(argtype, tuple):
raise exceptions.ArgumentError("Argument '%s' is expected to be one of type %s, got '%s'" % (name, ' or '.join(["'%s'" % str(a) for a in argtype]), str(type(arg))))
else:
raise exceptions.ArgumentError("Argument '%s' is expected to be of type '%s', got '%s'" % (name, str(argtype), str(type(arg))))
def warn_exception(func, *args, **kwargs):
"""executes the given function, catches all exceptions and converts to a warning."""
try:
return func(*args, **kwargs)
except:
warn("%s('%s') ignored" % sys.exc_info()[0:2])
def monkeypatch_proxied_specials(into_cls, from_cls, skip=None, only=None,
name='self.proxy', from_instance=None):
"""Automates delegation of __specials__ for a proxying type."""
if only:
dunders = only
else:
if skip is None:
skip = ('__slots__', '__del__', '__getattribute__',
'__metaclass__', '__getstate__', '__setstate__')
dunders = [m for m in dir(from_cls)
if (m.startswith('__') and m.endswith('__') and
not hasattr(into_cls, m) and m not in skip)]
for method in dunders:
try:
spec = inspect.getargspec(getattr(from_cls, method))
fn_args = inspect.formatargspec(spec[0])
d_args = inspect.formatargspec(spec[0][1:])
except TypeError:
fn_args = '(self, *args, **kw)'
d_args = '(*args, **kw)'
py = ("def %(method)s%(fn_args)s: "
"return %(name)s.%(method)s%(d_args)s" % locals())
env = from_instance is not None and {name: from_instance} or {}
exec py in env
setattr(into_cls, method, env[method])
class SimpleProperty(object):
"""A *default* property accessor."""
def __init__(self, key):
self.key = key
def __set__(self, obj, value):
setattr(obj, self.key, value)
def __delete__(self, obj):
delattr(obj, self.key)
def __get__(self, obj, owner):
if obj is None:
return self
else:
return getattr(obj, self.key)
class NotImplProperty(object):
"""a property that raises ``NotImplementedError``."""
def __init__(self, doc):
self.__doc__ = doc
def __set__(self, obj, value):
raise NotImplementedError()
def __delete__(self, obj):
raise NotImplementedError()
def __get__(self, obj, owner):
if obj is None:
return self
else:
raise NotImplementedError()
class OrderedProperties(object):
"""An object that maintains the order in which attributes are set upon it.
Also provides an iterator and a very basic getitem/setitem
interface to those attributes.
(Not really a dict, since it iterates over values, not keys. Not really
a list, either, since each value must have a key associated; hence there is
no append or extend.)
"""
def __init__(self):
self.__dict__['_data'] = OrderedDict()
def __len__(self):
return len(self._data)
def __iter__(self):
return self._data.itervalues()
def __add__(self, other):
return list(self) + list(other)
def __setitem__(self, key, object):
self._data[key] = object
def __getitem__(self, key):
return self._data[key]
def __delitem__(self, key):
del self._data[key]
def __setattr__(self, key, object):
self._data[key] = object
def __getstate__(self):
return {'_data': self.__dict__['_data']}
def __setstate__(self, state):
self.__dict__['_data'] = state['_data']
def __getattr__(self, key):
try:
return self._data[key]
except KeyError:
raise AttributeError(key)
def __contains__(self, key):
return key in self._data
def get(self, key, default=None):
if key in self:
return self[key]
else:
return default
def keys(self):
return self._data.keys()
def has_key(self, key):
return self._data.has_key(key)
def clear(self):
self._data.clear()
class OrderedDict(dict):
"""A Dictionary that returns keys/values/items in the order they were added."""
def __init__(self, ____sequence=None, **kwargs):
self._list = []
if ____sequence is None:
if kwargs:
self.update(**kwargs)
else:
self.update(____sequence, **kwargs)
def clear(self):
self._list = []
dict.clear(self)
def update(self, ____sequence=None, **kwargs):
if ____sequence is not None:
if hasattr(____sequence, 'keys'):
for key in ____sequence.keys():
self.__setitem__(key, ____sequence[key])
else:
for key, value in ____sequence:
self[key] = value
if kwargs:
self.update(kwargs)
def setdefault(self, key, value):
if key not in self:
self.__setitem__(key, value)
return value
else:
return self.__getitem__(key)
def __iter__(self):
return iter(self._list)
def values(self):
return [self[key] for key in self._list]
def itervalues(self):
return iter(self.values())
def keys(self):
return list(self._list)
def iterkeys(self):
return iter(self.keys())
def items(self):
return [(key, self[key]) for key in self.keys()]
def iteritems(self):
return iter(self.items())
def __setitem__(self, key, object):
if key not in self:
self._list.append(key)
dict.__setitem__(self, key, object)
def __delitem__(self, key):
dict.__delitem__(self, key)
self._list.remove(key)
def pop(self, key):
value = dict.pop(self, key)
self._list.remove(key)
return value
def popitem(self):
item = dict.popitem(self)
self._list.remove(item[0])
return item
try:
from threading import local as ThreadLocal
except ImportError:
try:
from dummy_threading import local as ThreadLocal
except ImportError:
class ThreadLocal(object):
"""An object in which attribute access occurs only within the context of the current thread."""
def __init__(self):
self.__dict__['_tdict'] = {}
def __delattr__(self, key):
try:
del self._tdict[(thread.get_ident(), key)]
except KeyError:
raise AttributeError(key)
def __getattr__(self, key):
try:
return self._tdict[(thread.get_ident(), key)]
except KeyError:
raise AttributeError(key)
def __setattr__(self, key, value):
self._tdict[(thread.get_ident(), key)] = value
class OrderedSet(Set):
def __init__(self, d=None):
Set.__init__(self)
self._list = []
if d is not None:
self.update(d)
def add(self, key):
if key not in self:
self._list.append(key)
Set.add(self, key)
def remove(self, element):
Set.remove(self, element)
self._list.remove(element)
def discard(self, element):
try:
Set.remove(self, element)
except KeyError:
pass
else:
self._list.remove(element)
def clear(self):
Set.clear(self)
self._list = []
def __getitem__(self, key):
return self._list[key]
def __iter__(self):
return iter(self._list)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self._list)
__str__ = __repr__
def update(self, iterable):
add = self.add
for i in iterable:
add(i)
return self
__ior__ = update
def union(self, other):
result = self.__class__(self)
result.update(other)
return result
__or__ = union
def intersection(self, other):
other = Set(other)
return self.__class__([a for a in self if a in other])
__and__ = intersection
def symmetric_difference(self, other):
other = Set(other)
result = self.__class__([a for a in self if a not in other])
result.update([a for a in other if a not in self])
return result
__xor__ = symmetric_difference
def difference(self, other):
other = Set(other)
return self.__class__([a for a in self if a not in other])
__sub__ = difference
def intersection_update(self, other):
other = Set(other)
Set.intersection_update(self, other)
self._list = [ a for a in self._list if a in other]
return self
__iand__ = intersection_update
def symmetric_difference_update(self, other):
Set.symmetric_difference_update(self, other)
self._list = [ a for a in self._list if a in self]
self._list += [ a for a in other._list if a in self]
return self
__ixor__ = symmetric_difference_update
def difference_update(self, other):
Set.difference_update(self, other)
self._list = [ a for a in self._list if a in self]
return self
__isub__ = difference_update
if hasattr(Set, '__getstate__'):
def __getstate__(self):
base = Set.__getstate__(self)
return base, self._list
def __setstate__(self, state):
Set.__setstate__(self, state[0])
self._list = state[1]
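# Illustrative usage sketch (not part of the original module): set semantics
# with stable insertion order, unlike the unordered builtin set.
_os_example = OrderedSet(['b', 'a', 'b', 'c'])
assert list(_os_example) == ['b', 'a', 'c']
assert list(_os_example.union(['d'])) == ['b', 'a', 'c', 'd']
del _os_example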
class IdentitySet(object):
"""A set that considers only object id() for uniqueness.
This strategy has edge cases for builtin types- it's possible to have
two 'foo' strings in one of these sets, for example. Use sparingly.
"""
_working_set = Set
def __init__(self, iterable=None):
self._members = _IterableUpdatableDict()
if iterable:
for o in iterable:
self.add(o)
def add(self, value):
self._members[id(value)] = value
def __contains__(self, value):
return id(value) in self._members
def remove(self, value):
del self._members[id(value)]
def discard(self, value):
try:
self.remove(value)
except KeyError:
pass
def pop(self):
try:
pair = self._members.popitem()
return pair[1]
except KeyError:
raise KeyError('pop from an empty set')
def clear(self):
self._members.clear()
def __cmp__(self, other):
raise TypeError('cannot compare sets using cmp()')
def __eq__(self, other):
if isinstance(other, IdentitySet):
return self._members == other._members
else:
return False
def __ne__(self, other):
if isinstance(other, IdentitySet):
return self._members != other._members
else:
return True
def issubset(self, iterable):
other = type(self)(iterable)
if len(self) > len(other):
return False
for m in itertools.ifilterfalse(other._members.has_key,
self._members.iterkeys()):
return False
return True
def __le__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.issubset(other)
def __lt__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return len(self) < len(other) and self.issubset(other)
def issuperset(self, iterable):
other = type(self)(iterable)
if len(self) < len(other):
return False
for m in itertools.ifilterfalse(self._members.has_key,
other._members.iterkeys()):
return False
return True
def __ge__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.issuperset(other)
def __gt__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return len(self) > len(other) and self.issuperset(other)
def union(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
result._members.update(
self._working_set(self._members.iteritems()).union(_iter_id(iterable)))
return result
def __or__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.union(other)
def update(self, iterable):
self._members = self.union(iterable)._members
def __ior__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.update(other)
return self
def difference(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
result._members.update(
self._working_set(self._members.iteritems()).difference(_iter_id(iterable)))
return result
def __sub__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.difference(other)
def difference_update(self, iterable):
self._members = self.difference(iterable)._members
def __isub__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.difference_update(other)
return self
def intersection(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
result._members.update(
self._working_set(self._members.iteritems()).intersection(_iter_id(iterable)))
return result
def __and__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.intersection(other)
def intersection_update(self, iterable):
self._members = self.intersection(iterable)._members
def __iand__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.intersection_update(other)
return self
def symmetric_difference(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
result._members.update(
self._working_set(self._members.iteritems()).symmetric_difference(_iter_id(iterable)))
return result
def __xor__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.symmetric_difference(other)
def symmetric_difference_update(self, iterable):
self._members = self.symmetric_difference(iterable)._members
def __ixor__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.symmetric_difference(other)
return self
def copy(self):
return type(self)(self._members.itervalues())
__copy__ = copy
def __len__(self):
return len(self._members)
def __iter__(self):
return self._members.itervalues()
def __hash__(self):
raise TypeError('set objects are unhashable')
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self._members.values())
if sys.version_info >= (2, 4):
_IterableUpdatableDict = dict
else:
class _IterableUpdatableDict(dict):
"""A dict that can update(iterable) like Python 2.4+'s dict."""
def update(self, __iterable=None, **kw):
if __iterable is not None:
if not isinstance(__iterable, dict):
__iterable = dict(__iterable)
dict.update(self, __iterable)
if kw:
dict.update(self, **kw)
def _iter_id(iterable):
"""Generator: ((id(o), o) for o in iterable)."""
for item in iterable:
yield id(item), item
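# Illustrative usage sketch (not part of the original module): membership in
# an IdentitySet is by object identity, so equal but distinct objects are
# both retained.
_ids_a, _ids_b = [1, 2], [1, 2]
_ids_example = IdentitySet([_ids_a, _ids_b, _ids_a])
assert len(_ids_example) == 2
assert _ids_a in _ids_example and _ids_b in _ids_example
del _ids_a, _ids_b, _ids_example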
class OrderedIdentitySet(IdentitySet):
class _working_set(OrderedSet):
# a testing pragma: exempt the OIDS working set from the test suite's
# "never call the user's __hash__" assertions. this is a big hammer,
# but it's safe here: IDS operates on (id, instance) tuples in the
# working set.
__sa_hash_exempt__ = True
def __init__(self, iterable=None):
IdentitySet.__init__(self)
self._members = OrderedDict()
if iterable:
for o in iterable:
self.add(o)
class UniqueAppender(object):
"""Only adds items to a collection once.
Additional appends() of the same object are ignored. Membership is
    determined by identity (``is``), not equality (``==``).
"""
def __init__(self, data, via=None):
self.data = data
self._unique = IdentitySet()
if via:
self._data_appender = getattr(data, via)
elif hasattr(data, 'append'):
self._data_appender = data.append
elif hasattr(data, 'add'):
# TODO: we think its a set here. bypass unneeded uniquing logic ?
self._data_appender = data.add
def append(self, item):
if item not in self._unique:
self._data_appender(item)
self._unique.add(item)
def __iter__(self):
return iter(self.data)
class ScopedRegistry(object):
"""A Registry that can store one or multiple instances of a single
class on a per-thread scoped basis, or on a customized scope.
createfunc
a callable that returns a new object to be placed in the registry
scopefunc
a callable that will return a key to store/retrieve an object,
defaults to ``thread.get_ident`` for thread-local objects. Use
a value like ``lambda: True`` for application scope.
"""
def __init__(self, createfunc, scopefunc=None):
self.createfunc = createfunc
if scopefunc is None:
self.scopefunc = thread.get_ident
else:
self.scopefunc = scopefunc
self.registry = {}
def __call__(self):
key = self._get_key()
try:
return self.registry[key]
except KeyError:
return self.registry.setdefault(key, self.createfunc())
def has(self):
return self._get_key() in self.registry
def set(self, obj):
self.registry[self._get_key()] = obj
def clear(self):
try:
del self.registry[self._get_key()]
except KeyError:
pass
def _get_key(self):
return self.scopefunc()
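# Illustrative usage sketch (not part of the original module): one object per
# scope key; the default scopefunc keys instances by the current thread.
_reg_example = ScopedRegistry(createfunc=dict, scopefunc=lambda: 'app')
assert _reg_example() is _reg_example()   # same instance for the same key
assert _reg_example.has()
_reg_example.clear()
del _reg_example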
class _symbol(object):
def __init__(self, name):
"""Construct a new named symbol."""
assert isinstance(name, str)
self.name = name
def __reduce__(self):
return symbol, (self.name,)
def __repr__(self):
return "<symbol '%s>" % self.name
_symbol.__name__ = 'symbol'
class symbol(object):
"""A constant symbol.
>>> symbol('foo') is symbol('foo')
True
>>> symbol('foo')
<symbol 'foo>
A slight refinement of the MAGICCOOKIE=object() pattern. The primary
advantage of symbol() is its repr(). They are also singletons.
Repeated calls of symbol('name') will all return the same instance.
"""
symbols = {}
_lock = threading.Lock()
def __new__(cls, name):
cls._lock.acquire()
try:
sym = cls.symbols.get(name)
if sym is None:
cls.symbols[name] = sym = _symbol(name)
return sym
finally:
symbol._lock.release()
def function_named(fn, name):
"""Return a function with a given __name__.
Will assign to __name__ and return the original function if possible on
the Python implementation, otherwise a new function will be constructed.
"""
try:
fn.__name__ = name
except TypeError:
fn = new.function(fn.func_code, fn.func_globals, name,
fn.func_defaults, fn.func_closure)
return fn
def conditional_cache_decorator(func):
"""apply conditional caching to the return value of a function."""
return cache_decorator(func, conditional=True)
def cache_decorator(func, conditional=False):
"""apply caching to the return value of a function."""
name = '_cached_' + func.__name__
def do_with_cache(self, *args, **kwargs):
if conditional:
cache = kwargs.pop('cache', False)
if not cache:
return func(self, *args, **kwargs)
try:
return getattr(self, name)
except AttributeError:
value = func(self, *args, **kwargs)
setattr(self, name, value)
return value
return do_with_cache
def reset_cached(instance, name):
try:
delattr(instance, '_cached_' + name)
except AttributeError:
pass
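# Illustrative usage sketch (the class below is an assumption for the
# example): the first call stores its result on the instance under
# '_cached_<name>'; reset_cached() discards it.
class _ExampleCalc(object):
    def total(self):
        return object()
    total = cache_decorator(total)
_calc_example = _ExampleCalc()
assert _calc_example.total() is _calc_example.total()
reset_cached(_calc_example, 'total')
del _ExampleCalc, _calc_example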
def warn(msg):
if isinstance(msg, basestring):
warnings.warn(msg, exceptions.SAWarning, stacklevel=3)
else:
warnings.warn(msg, stacklevel=3)
def warn_deprecated(msg):
warnings.warn(msg, exceptions.SADeprecationWarning, stacklevel=3)
def deprecated(message=None, add_deprecation_to_docstring=True):
"""Decorates a function and issues a deprecation warning on use.
message
If provided, issue message in the warning. A sensible default
is used if not provided.
add_deprecation_to_docstring
Default True. If False, the wrapped function's __doc__ is left
as-is. If True, the 'message' is prepended to the docs if
provided, or sensible default if message is omitted.
"""
if add_deprecation_to_docstring:
header = message is not None and message or 'Deprecated.'
else:
header = None
if message is None:
message = "Call to deprecated function %(func)s"
def decorate(fn):
return _decorate_with_warning(
fn, exceptions.SADeprecationWarning,
message % dict(func=fn.__name__), header)
return decorate
def pending_deprecation(version, message=None,
add_deprecation_to_docstring=True):
"""Decorates a function and issues a pending deprecation warning on use.
version
An approximate future version at which point the pending deprecation
will become deprecated. Not used in messaging.
message
If provided, issue message in the warning. A sensible default
is used if not provided.
add_deprecation_to_docstring
Default True. If False, the wrapped function's __doc__ is left
as-is. If True, the 'message' is prepended to the docs if
provided, or sensible default if message is omitted.
"""
if add_deprecation_to_docstring:
header = message is not None and message or 'Deprecated.'
else:
header = None
if message is None:
message = "Call to deprecated function %(func)s"
def decorate(fn):
return _decorate_with_warning(
fn, exceptions.SAPendingDeprecationWarning,
message % dict(func=fn.__name__), header)
return decorate
def _decorate_with_warning(func, wtype, message, docstring_header=None):
"""Wrap a function with a warnings.warn and augmented docstring."""
def func_with_warning(*args, **kwargs):
warnings.warn(wtype(message), stacklevel=2)
return func(*args, **kwargs)
doc = func.__doc__ is not None and func.__doc__ or ''
if docstring_header is not None:
doc = '\n'.join((docstring_header.rstrip(), doc))
func_with_warning.__doc__ = doc
func_with_warning.__dict__.update(func.__dict__)
return function_named(func_with_warning, func.__name__)
| bsd-3-clause | -2,789,202,377,800,578,600 | 29.05654 | 176 | 0.574613 | false |
fernandoacorreia/DjangoWAWSLogging | DjangoWAWSLogging/DjangoWAWSLogging/wsgi.py | 1 | 1156 | """
WSGI config for DjangoWAWSLogging project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "DjangoWAWSLogging.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| mit | -1,895,272,311,485,901,600 | 40.285714 | 79 | 0.803633 | false |
dj95/telegram-autokick | autokick.py | 1 | 3167 | #!/bin/env python3
#
# Telegram Auto Kick
#
# © 2015 Daniel Jankowski
import subprocess
import os
import re
import json
import sqlite3
import db
from threading import Thread,Event
DATABASE = '/home/neo/Projekte/Python/telegram-autokick/banned_usernames.db'
GROUP_NAME = 'Linux_statt_Windows'
class kicker(Thread):
def __init__(self):
super().__init__()
self.stop_event = Event()
self.__db = db.db_handler(DATABASE)
self.__db.create_table()
self.__db.close_database()
def add_username(self, username):
self.__db.add_user(username)
def remove_shit(self, data):
data = re.sub('\r', '', data)
data = re.sub('\x1b\[K', '', data)
data = re.sub('>\s*', '', data)
data = re.sub('All done\. Exit\n', '', data)
data = re.sub('halt\n*', '', data)
data = re.sub('\n*$', '', data)
return json.loads(data)
def run(self):
while not self.stop_event.is_set():
# get data from telegram-cli
cmd = ['telegram-cli','-b','-W','-D','--json','-e chat_info ' + GROUP_NAME]
s = subprocess.Popen(cmd, stdout=subprocess.PIPE)
data = s.communicate()[0].decode('utf-8')
data = self.remove_shit(data)
# processing data
members = data['members']
self.__db = db.db_handler(DATABASE)
banned_users = self.__db.get_banned_usernames()
self.__db.close_database()
banned_usernames = []
banned_ids = []
for user in banned_users:
banned_usernames.append(user[0])
banned_ids.append(user[1])
for member in members:
if 'username' in member:
if member['username'] in banned_usernames:
if member['id'] not in banned_ids:
self.__db = db.db_handler(DATABASE)
self.__db.add_user_id(member['id'], member['username'])
self.__db.close_database()
if 'print_name' in member:
if member['print_name'] in banned_usernames:
if member['id'] not in banned_ids:
self.__db = db.db_handler(DATABASE)
self.__db.add_user_id(member['id'], member['username'])
self.__db.close_database()
if member['id'] in banned_ids:
cmd = ['telegram-cli','-b','-W','-D','--json','-e chat_del_user ' + GROUP_NAME + ' ' + member['print_name']]
s = subprocess.Popen(cmd, stdout=subprocess.PIPE)
data = s.communicate()[0].decode('utf-8')
data = self.remove_shit(data)
self.stop_event.wait(1.0)
def stop(self):
self.stop_event.set()
def main():
print('Telegram Auto Kick')
bot = kicker()
bot.start()
inp = ''
while inp != "exit":
inp = input()
bot.stop()
bot.join()
return
if __name__ == '__main__':
main()
| lgpl-3.0 | -4,750,373,982,801,555,000 | 30.66 | 128 | 0.49463 | false |
joelmpiper/bill_taxonomy | src/report/make_roc_curve.py | 1 | 1830 | from sklearn.cross_validation import train_test_split
from sklearn import metrics
import matplotlib.pyplot as plt
import pickle
from src.utils.get_time_stamp import get_time_stamp
from sklearn.grid_search import GridSearchCV
def make_roc_curve(pipeline, X, y, train_frac, subject, cfg):
X_train, X_test, y_train, y_test = train_test_split(X, y,
train_size=train_frac,
random_state=1,
stratify=y)
grid_search = GridSearchCV(pipeline, {}, scoring=cfg['scoring'],
verbose=10)
grid_search.fit(X_train, y_train)
y_pred_class = grid_search.predict(X_test)
y_pred_prob = grid_search.predict_proba(X_test)[:, 1]
acc_score = metrics.accuracy_score(y_test, y_pred_class)
print(acc_score)
conf_mat = metrics.confusion_matrix(y_test, y_pred_class)
print(conf_mat)
fpr, tpr, thresholds = metrics.roc_curve(y_test, y_pred_prob)
roc_auc = metrics.auc(fpr, tpr)
# method I: plt
plt.title(subject + '\nReceiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label='AUC = %0.2f' % roc_auc)
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
fig_dir = cfg['fig_dir']
plt.savefig(fig_dir + '/roc_curve_' + subject.lower() +
'_' + get_time_stamp() + '.png')
results_save = (grid_search, X_test, y_test, acc_score, conf_mat,
y_pred_class, y_pred_prob)
pickle.dump(results_save, open(fig_dir + '/split_data_' + subject.lower() +
'_' + get_time_stamp() + '.p', 'wb'))
| mit | -2,897,342,823,128,817,000 | 42.571429 | 79 | 0.562295 | false |
sfcta/CountDracula | scripts/updateCountsWorkbooks.py | 1 | 9336 | """
Created on Jul 25, 2011
@author: lmz
This script reads counts data from input Excel workbooks and inserts the info into the CountDracula dataabase.
"""
import getopt, logging, os, re, shutil, sys, time, traceback, xlrd, xlwt
libdir = os.path.realpath(os.path.join(os.path.split(__file__)[0], "..", "geodjango"))
sys.path.append(libdir)
os.environ['DJANGO_SETTINGS_MODULE'] = 'geodjango.settings'
from django.core.management import setup_environ
from geodjango import settings
from django.contrib.auth.models import User
import countdracula.models
from countdracula.parsers.CountsWorkbookParser import CountsWorkbookParser
USAGE = """
python updateCountsWorkbooks.py v1.0_toprocess_dir v1.0_outdated_dir v1.1_new_dir
"""
DATE_REGEX = re.compile(r"(\d\d\d\d)\.(\d{1,2})\.(\d{1,2})")
MAINLINE_NODES = re.compile(r"(\d{5,6}) (\d{5,6})")
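# Illustrative sketch (not part of the original script): sheet names are
# expected to look like '2011.7.25' and mainline node headers like
# '12345 67890'.
assert DATE_REGEX.match('2011.7.25') is not None
assert MAINLINE_NODES.match('12345 67890') is not None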
CALIBRI_10PT = xlwt.easyxf('font: name Calibri, height 200;')
CALIBRI_10PT_RED = xlwt.easyxf('font: name Calibri, height 200, color-index red;')
CALIBRI_10PT_ORANGE_CENTER = xlwt.easyxf('font: name Calibri, height 200; pattern: pattern solid, fore_color 0x33; alignment: horz center;')
CALIBRI_10PT_LIME_CENTER = xlwt.easyxf('font: name Calibri, height 200; pattern: pattern solid, fore_color 0x32; alignment: horz center;')
def copysheet(rb, r_sheet, wb):
w_sheet = wb.add_sheet(r_sheet.name)
for rownum in range(r_sheet.nrows):
for colnum in range(r_sheet.ncols):
w_sheet.write(rownum, colnum, r_sheet.cell_value(rownum,colnum), CALIBRI_10PT_RED)
def isRowEmpty(r_sheet, r_rownum):
"""
Is the row empty? (aside for the first column)
"""
for colnum in range(1,r_sheet.ncols):
# logger.debug("cell_type=%d cell_value=[%s]" % (r_sheet.cell_type(r_rownum,colnum), str(r_sheet.cell_value(r_rownum,colnum))))
if r_sheet.cell_type(r_rownum,colnum) in [xlrd.XL_CELL_BLANK,xlrd.XL_CELL_EMPTY]:
continue
if r_sheet.cell_value(r_rownum,colnum) != "":
# found something!
return False
return True # didn't find anything
def isColumnZeros(r_sheet, colnum):
"""
Starts at row 2. Breaks on empty row.
"""
for r_rownum in range(2,r_sheet.nrows):
if r_sheet.cell_type(r_rownum,colnum) in [xlrd.XL_CELL_BLANK,xlrd.XL_CELL_EMPTY]: break
elif r_sheet.cell_type(r_rownum,colnum) in [xlrd.XL_CELL_NUMBER]:
if float(r_sheet.cell_value(r_rownum,colnum)) > 0.0: return False
else:
raise Exception("Didn't understand cell value at (%d,%d)" % (r_rownum, colnum))
return True
def updateWorkbook(logger, DIR_TOPROCESS, DIR_OLDV10, DIR_NEWV11, file, mainline_or_turns):
"""
Converts a v1.0 workbook to a v1.1 workbook. For anything unexpected, logs and error and returns.
For success only, the new workbook will be placed in *DIR_NEWV11* and the old one will be placed in *DIR_OLDV10*.
"""
assert(mainline_or_turns in ["MAINLINE","TURNS"])
rb = xlrd.open_workbook(os.path.join(DIR_TOPROCESS, file), formatting_info=True)
wb = xlwt.Workbook(encoding='utf-8')
# go through the sheets
for sheet_idx in range(rb.nsheets):
r_sheet = rb.sheet_by_index(sheet_idx)
sheet_name = r_sheet.name
        logger.info("  Reading sheet [%s]" % sheet_name)
# just copy the source sheet
if sheet_name == "source":
copysheet(rb, r_sheet, wb)
continue
match_obj = re.match(DATE_REGEX, sheet_name)
if match_obj.group(0) != sheet_name:
logger.error("Sheetname [%s] is not the standard date format! Skipping this workbook." % sheet_name)
return
w_sheet = wb.add_sheet(sheet_name)
# check what we're copying over
for colnum in range(r_sheet.ncols):
if mainline_or_turns == "MAINLINE":
# nodes ok
if r_sheet.cell_type(1,colnum) == xlrd.XL_CELL_TEXT and re.match(MAINLINE_NODES, str(r_sheet.cell_value(1,colnum))) != None:
continue
if r_sheet.cell_value(1,colnum) not in [1.0, 2.0, ""]:
logger.warn("Unexpected MAINLINE row 1 cell value = [%s]! Skipping this workbook." % r_sheet.cell_value(1,colnum))
return
if mainline_or_turns == "TURNS" and colnum==0 and r_sheet.cell_value(1,colnum) not in [3.0, 4.0, ""]:
logger.warn("Unexpected TURNS row 1 cell value = [%s]! Skipping this workbook." % r_sheet.cell_value(1,colnum))
return
# copy first line down; make sure its MAINLINE|TURNS, [dir1], [dir2], ...
for colnum in range(r_sheet.ncols):
if colnum == 0 and r_sheet.cell_value(0, colnum) != mainline_or_turns:
logger.warn("Unexpected row 0 cell value = [%s]! Skipping this workbook." % r_sheet.cell_value(0,colnum))
return
if mainline_or_turns == "MAINLINE" and colnum > 0 and r_sheet.cell_value(0,colnum) not in ["NB","SB","EB","WB", ""]:
logger.warn("Unexpected mainline row 0 cell value = [%s]! Skipping this workbook." % r_sheet.cell_value(0,colnum))
return
if mainline_or_turns == "TURNS" and colnum > 0 and r_sheet.cell_value(0,colnum) not in ["NBLT", "NBRT", "NBTH",
"SBLT", "SBRT", "SBTH",
"EBLT", "EBRT", "EBTH",
"WBLT", "WBRT", "WBTH"]:
logger.warn("Unexpected turns row 0 cell value = [%s]! Skipping this workbook." % r_sheet.cell_value(0,colnum))
return
w_sheet.write(1, colnum, r_sheet.cell_value(0,colnum), CALIBRI_10PT_ORANGE_CENTER)
if colnum != 0: w_sheet.write(0, colnum, "")
w_sheet.write(0,0, "All", CALIBRI_10PT_LIME_CENTER)
# mainline - copy over non-empty rows
if mainline_or_turns == "MAINLINE":
w_rownum = 2
for r_rownum in range(2,r_sheet.nrows):
# don't copy the empty rows
if isRowEmpty(r_sheet, r_rownum): continue
# copy this row
for colnum in range(r_sheet.ncols):
w_sheet.write(w_rownum, colnum, r_sheet.cell_value(r_rownum,colnum), CALIBRI_10PT)
w_rownum += 1
# turns - error non-zero columns
else:
# look for zero columns and abort if found
for colnum in range(1,r_sheet.ncols):
if isColumnZeros(r_sheet, colnum):
logger.warn("Zero column found! Skipping this workbook.")
return
# copy over everything
for r_rownum in range(2,r_sheet.nrows):
for colnum in range(r_sheet.ncols):
w_sheet.write(r_rownum, colnum, r_sheet.cell_value(r_rownum,colnum), CALIBRI_10PT)
if os.path.exists(os.path.join(DIR_NEWV11, file)):
logger.warn("File %s already exists! Skipping." % os.path.join(DIR_NEWV11, file))
return
wb.default_style.font.height = 20*10
wb.save(os.path.join(DIR_NEWV11, file))
# move the old one to the deprecated dir
shutil.move(os.path.join(DIR_TOPROCESS,file),
os.path.join(DIR_OLDV10,file))
if __name__ == '__main__':
optlist, args = getopt.getopt(sys.argv[1:], '')
if len(args) < 2:
print USAGE
sys.exit(2)
if len(args) != 3:
print USAGE
sys.exit(2)
DIR_TOPROCESS = args[0]
DIR_OLDV10 = args[1]
DIR_NEWV11 = args[2]
logger = logging.getLogger('countdracula')
logger.setLevel(logging.DEBUG)
consolehandler = logging.StreamHandler()
consolehandler.setLevel(logging.DEBUG)
consolehandler.setFormatter(logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s'))
logger.addHandler(consolehandler)
debugFilename = "updateCountsWorkbooks.DEBUG.log"
debugloghandler = logging.StreamHandler(open(debugFilename, 'w'))
debugloghandler.setLevel(logging.DEBUG)
debugloghandler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%Y-%m-%d %H:%M'))
logger.addHandler(debugloghandler)
files_to_process = sorted(os.listdir(DIR_TOPROCESS))
for file in files_to_process:
if file[-4:] !='.xls':
print "File suffix is not .xls: %s -- skipping" % file[-4:]
continue
logger.info("")
logger.info("Processing file %s" % file)
streetlist = CountsWorkbookParser.parseFilename(file)
# mainline
if len(streetlist) in [2,3]:
updateWorkbook(logger, DIR_TOPROCESS, DIR_OLDV10, DIR_NEWV11, file, "MAINLINE" if len(streetlist)==3 else "TURNS")
else:
logger.info(" Invalid workbook name %s" % file)
| gpl-3.0 | -5,795,683,601,665,664,000 | 41.244344 | 140 | 0.576478 | false |
pombredanne/django-rest-framework-fine-permissions | rest_framework_fine_permissions/fields.py | 1 | 1121 | # -*- coding: utf-8 -*-
"""
"""
import logging
import six
import collections
from django.db import models
from rest_framework import serializers
from rest_framework.fields import Field, empty
from .utils import get_serializer
logger = logging.getLogger(__name__)
class ModelPermissionsField(Field):
""" Field that acts as a ModelPermissionsSerializer for relations. """
def __init__(self, serializer, **kwargs):
self.serializer = get_serializer(serializer)
super(ModelPermissionsField, self).__init__(**kwargs)
def to_representation(self, obj):
""" Represent data for the field. """
many = isinstance(obj, collections.Iterable) \
or isinstance(obj, models.Manager) \
and not isinstance(obj, dict)
serializer_cls = get_serializer(self.serializer)
assert serializer_cls is not None \
and issubclass(serializer_cls, serializers.ModelSerializer), (
"Bad serializer defined %s" % serializer_cls
)
ser = self.serializer(obj, context=self.context, many=many)
return ser.data
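# Illustrative usage sketch (hypothetical serializer and model names, not part
# of this package): expose a relation through a permissions-aware serializer.
#
#   class BookSerializer(serializers.ModelSerializer):
#       author = ModelPermissionsField(AuthorSerializer)
#
#       class Meta:
#           model = Book
#           fields = ('title', 'author')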
| gpl-2.0 | -2,109,886,766,022,636,800 | 26.341463 | 74 | 0.659233 | false |
ucdavis-bioinformatics/proc10xG | profile_mapping.py | 1 | 10092 | #!/usr/bin/env python
"""
Copyright 2017 Matt Settles
Created June 8, 2017
"""
from optparse import OptionParser
from collections import Counter
import os
import sys
import time
import traceback
import signal
from subprocess import Popen
from subprocess import PIPE
# Handle PE:
# logic: 0x1 = multiple segments in sequencing, 0x4 = segment unmapped, 0x8 = next segment unmapped
if (flag & 0x1): # PE READ
if (not (flag & 0x4) and not (flag & 0x8)): # both pairs mapped
if (flag & 0x40): # is this PE1 (first segment in template)
# PE1 read, check that PE2 is in dict
ID = line2[0]
if ID in PE2:
if mq >= self.minMQ and int(PE2[ID].strip().split()[4]) >= self.minMQ: # check MQ of both reads
self.ok_bc_lines.append(line)
self.ok_bc_lines.append(PE2[ID])
del PE2[ID]
# TODO: NEED to determine read cloud for read
mapped_pairs_count += 1
else:
if (flag & 0x10): # reverse complement
line2[9] = reverseComplement(line2[9])
line2[10] = reverse(line2[10])
r1 = '\n'.join(['@' + line2[0] + ' 1:N:O', line2[9], '+', line2[10]]) # sequence + qual
rl2 = PE2[ID].strip().split()
if (int(rl2[1]) & 0x10): # reverse complement
rl2[9] = reverseComplement(rl2[9])
rl2[10] = reverse(rl2[10])
r2 = '\n'.join(['@' + rl2[0] + ' 2:N:O', rl2[9], '+', rl2[10]]) # sequence + qual
self.addRead('\n'.join([r1, r2]))
del PE2[ID]
remapped_pairs_count += 1
else:
PE1[ID] = line
elif (flag & 0x80): # is this PE2 (last segment in template)
# PE2 read, check that PE1 is in dict and write out
ID = line2[0]
if ID in PE1:
if mq >= self.minMQ and int(PE1[ID].strip().split()[4]) >= self.minMQ: # check MQ of both reads
self.ok_bc_lines.append(line)
self.ok_bc_lines.append(PE1[ID])
del PE1[ID]
# TODO: NEED to determine read cloud for read
mapped_pairs_count += 1
else:
if (flag & 0x10): # reverse complement
line2[9] = reverseComplement(line2[9])
line2[10] = reverse(line2[10])
r2 = '\n'.join(['@' + line2[0] + ' 2:N:O', line2[9], '+', line2[10]]) # sequence + qual
rl1 = PE1[ID].strip().split()
if (int(rl1[1]) & 0x10): # reverse complement
rl1[9] = reverseComplement(rl1[9])
rl1[10] = reverse(rl1[10])
r1 = '\n'.join(['@' + rl1[0] + ' 1:N:O', rl1[9], '+', rl1[10]]) # sequence + qual
self.addRead('\n'.join([r1, r2]))
del PE1[ID]
remapped_pairs_count += 1
else:
PE2[ID] = line
else: # an 'unmapped' pair, at least 1 unmapped
if (flag & 0x40): # is this PE1 (first segment in template)
# PE1 read, check that PE2 is in dict and write out
ID = line2[0]
if ID in PE2:
if (flag & 0x10): # reverse complement
line2[9] = reverseComplement(line2[9])
line2[10] = reverse(line2[10])
r1 = '\n'.join(['@' + line2[0] + ' 1:N:O', line2[9], '+', line2[10]]) # sequence + qual
rl2 = PE2[ID].strip().split()
if (int(rl2[1]) & 0x10): # reverse complement
rl2[9] = reverseComplement(rl2[9])
rl2[10] = reverse(rl2[10])
r2 = '\n'.join(['@' + rl2[0] + ' 2:N:O', rl2[9], '+', rl2[10]]) # sequence + qual
self.addRead('\n'.join([r1, r2]))
del PE2[ID]
remapped_pairs_count += 1
else:
PE1[ID] = line
elif (flag & 0x80): # is this PE2 (last segment in template)
# PE2 read, check that PE1 is in dict and write out
ID = line2[0]
if ID in PE1:
if (flag & 0x10): # reverse complement
line2[9] = reverseComplement(line2[9])
line2[10] = reverse(line2[10])
r1 = '\n'.join(['@' + line2[0] + ' 1:N:O', line2[9], '+', line2[10]]) # sequence + qual
rl2 = PE2[ID].strip().split()
if (int(rl2[1]) & 0x10): # reverse complement
rl2[9] = reverseComplement(rl2[9])
rl2[10] = reverse(rl2[10])
r2 = '\n'.join(['@' + rl2[0] + ' 2:N:O', rl2[9], '+', rl2[10]]) # sequence + qual
self.addRead('\n'.join([r1, r2]))
del PE2[ID]
remapped_pairs_count += 1
else:
PE2[ID] = line
def main(insam, outsam, output_all, verbose):
global file_path
refDict = {}
bcDict = {}
line_count = 0
bc_count = 0
for line in insam:
# Comment/header lines start with @
if line[0] == "@":
# pass header directly to output
if line[0:3] == "@SQ":
# reference sequence id
sp = line.split()
refDict[sp[1][3:]] = int(sp[2][3:])
elif line[0] != "@" and len(line.strip().split()) > 2:
line_count += 1
bc = line.split(":")[0]
# instead check the ST:Z:GOOD for GOOD or MATCH or MISMATCH1
if line.split()[15][5:] not in ['GOOD', 'MATCH', 'MISMATCH1']:
# if seqToHash(bc) not in gbcDict:
# barcode does not match whitelist
if output_all:
# if output_all pass line directly to output
outsam.write(line)
elif bc == current_bc:
# add line to bc processing
proc_bc.addLine(line)
current_bc_count += 1
elif current_bc is None:
current_bc = bc
# add line to bc processing
proc_bc.addLine(line)
current_bc_count += 1
else:
# this is a new barcode
# can add a check to see if seen bc before, which is a no-no
# process the bc
proc_bc.process()
# record having processed the barcode
# output to sam file
bc_count += 1
proc_bc.clearbc()
current_bc = bc
# add line to bc processing
current_bc_count = 1
proc_bc.addLine(line)
else:
# Not sure what happened
sys.stderr.write("Unknown line: %s" % line)
if line_count % 100000 == 0 and line_count > 0 and verbose:
print "Records processed: %s" % (line_count)
#####################################
# Parse options and setup #
usage = "usage %prog -o [output file prefix (path + name)] -(a) --quiet samfile"
usage += "%prog will process alignment file produced by processing_10xReads and do profile each barcode"
parser = OptionParser(usage=usage, version="%prog 0.0.1")
parser.add_option('-o', '--output', help="Directory + filename to output bc stats",
action="store", type="str", dest="outfile", default="bc_profile.txt")
parser.add_option('-a', '--all', help="output all barcodes, not just those with valid gem barcode (STATUS is UNKNOWN, or AMBIGUOUS)",
action="store_true", dest="output_all", default=False)
parser.add_option('--quiet', help="turn off verbose output",
action="store_false", dest="verbose", default=True)
(options, args) = parser.parse_args()
if len(args) == 1:
infile = args[0]
# Start opening input/output files:
if not os.path.exists(infile):
sys.exit("Error, can't find input file %s" % infile)
insam = open(infile, 'r')
else:
# reading from stdin
insam = sys.stdin
outfile = options.outfile
if outfile == "stdout":
outf = sys.stdout
else:
    outf = open(outfile, 'w')
output_all = options.output_all
verbose = options.verbose
# need to check, can write to output folder
# global variables
file_path = os.path.dirname(os.path.realpath(__file__))
stime = time.time()
main(insam, outf, output_all, verbose)
sys.exit(0)
| apache-2.0 | -3,586,046,802,234,102,300 | 46.380282 | 133 | 0.419144 | false |
hotdogee/gff3-py | docs/conf.py | 1 | 8340 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# gff3 documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import gff3
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'gff3-py'
copyright = u'2014, Han Lin'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = gff3.__version__
# The full version, including alpha/beta/rc tags.
release = gff3.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'gff3doc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'gff3.tex',
u'gff3-py Documentation',
u'Han Lin', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'gff3',
u'gff3-py Documentation',
[u'Han Lin'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'gff3',
u'gff3-py Documentation',
u'Han Lin',
'gff3',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| isc | 6,599,077,651,162,951,000 | 29.327273 | 76 | 0.701918 | false |
Clebeuf/MAPE-K-Python | MAPE-K/ManagedScraper/ManagedScraper/settings.py | 1 | 3357 | # -*- coding: utf-8 -*-
# Scrapy settings for ManagedScraper project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'ManagedScraper'
SPIDER_MODULES = ['ManagedScraper.spiders']
NEWSPIDER_MODULE = 'ManagedScraper.spiders'
#ITEM_PIPELINES = {'ManagedScraper.pipelines.MetadataFilterPipeline': 100}
DOWNLOAD_DELAY = 1
#RANDOM_DOWNLOAD_DELAY = TRUE
#CONCURRENT_REQUESTS = 5
#CONCURRENT_REQUESTS_PER_IP = 1
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'ResearchSurveyCrawler (SAS)'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'ManagedScraper (+http://www.yourdomain.com)'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS=32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY=3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN=16
#CONCURRENT_REQUESTS_PER_IP=16
# Disable cookies (enabled by default)
#COOKIES_ENABLED=False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED=False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'ManagedScraper.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'ManagedScraper.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'ManagedScraper.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
#AUTOTHROTTLE_ENABLED=True
# The initial download delay
#AUTOTHROTTLE_START_DELAY=5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY=60
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG=False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED=True
#HTTPCACHE_EXPIRATION_SECS=0
#HTTPCACHE_DIR='httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES=[]
#HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'
| mit | 6,122,486,349,990,847,000 | 35.096774 | 109 | 0.777778 | false |
adfernandes/pcp | src/pcp/pidstat/test/cpu_usage_reporter_test.py | 6 | 5965 | #!/usr/bin/env pmpython
#
# Copyright (C) 2016 Sitaram Shelke.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
from mock import Mock
import unittest
from pcp_pidstat import CpuUsageReporter
class TestCpuUsageReporter(unittest.TestCase):
def setUp(self):
self.options = Mock(
per_processor_usage = False,
show_process_user = None)
process_1 = Mock(pid = Mock(return_value = 1),
process_name = Mock(return_value = "process_1"),
user_name = Mock(return_value='pcp'),
user_id = Mock(return_value=1000),
user_percent = Mock(return_value=2.43),
system_percent = Mock(return_value=1.24),
guest_percent = Mock(return_value=0.00),
total_percent = Mock(return_value=3.67),
cpu_number = Mock(return_value=1),)
self.processes = [process_1]
def test_print_report_without_filtering(self):
cpu_usage = Mock()
process_filter = Mock()
printer = Mock()
process_filter.filter_processes = Mock(return_value=self.processes)
reporter = CpuUsageReporter(cpu_usage, process_filter, 1, printer, self.options)
reporter.print_report(123, 4, " ", " ")
printer.assert_called_with("123 1000\t1\t2.43\t1.24\t0.0\t3.67\t1\tprocess_1")
def test_print_report_with_user_name(self):
self.options.show_process_user = 'pcp'
cpu_usage = Mock()
process_filter = Mock()
printer = Mock()
process_filter.filter_processes = Mock(return_value=self.processes)
reporter = CpuUsageReporter(cpu_usage, process_filter, 1, printer, self.options)
reporter.print_report(123, 4, " ", " ")
printer.assert_called_with("123 pcp\t1\t2.43\t1.24\t0.0\t3.67\t1\tprocess_1")
def test_print_report_with_per_processor_usage(self):
self.options.per_processor_usage = True
cpu_usage = Mock()
process_filter = Mock()
printer = Mock()
process_filter.filter_processes = Mock(return_value=self.processes)
reporter = CpuUsageReporter(cpu_usage, process_filter, 1, printer, self.options)
reporter.print_report(123, 4, " ", " ")
printer.assert_called_with("123 1000\t1\t2.43\t1.24\t0.0\t0.92\t1\tprocess_1")
def test_print_report_with_user_percent_none(self):
cpu_usage = Mock()
process_filter = Mock()
printer = Mock()
self.processes[0].user_percent = Mock(return_value=None)
process_filter.filter_processes = Mock(return_value=self.processes)
reporter = CpuUsageReporter(cpu_usage, process_filter, 1, printer, self.options)
reporter.print_report(123, 4, " ", " ")
printer.assert_called_with("123 1000\t1\tNone\t1.24\t0.0\t3.67\t1\tprocess_1")
def test_print_report_with_guest_percent_none(self):
cpu_usage = Mock()
process_filter = Mock()
printer = Mock()
self.processes[0].guest_percent = Mock(return_value=None)
process_filter.filter_processes = Mock(return_value=self.processes)
reporter = CpuUsageReporter(cpu_usage, process_filter, 1, printer, self.options)
reporter.print_report(123, 4, " ", " ")
printer.assert_called_with("123 1000\t1\t2.43\t1.24\tNone\t3.67\t1\tprocess_1")
def test_print_report_with_system_percent_none(self):
cpu_usage = Mock()
process_filter = Mock()
printer = Mock()
self.processes[0].system_percent = Mock(return_value=None)
process_filter.filter_processes = Mock(return_value=self.processes)
reporter = CpuUsageReporter(cpu_usage, process_filter, 1, printer, self.options)
reporter.print_report(123, 4, " ", " ")
printer.assert_called_with("123 1000\t1\t2.43\tNone\t0.0\t3.67\t1\tprocess_1")
def test_print_report_with_total_percent_none(self):
cpu_usage = Mock()
process_filter = Mock()
printer = Mock()
self.processes[0].total_percent = Mock(return_value=None)
process_filter.filter_processes = Mock(return_value=self.processes)
reporter = CpuUsageReporter(cpu_usage, process_filter, 1, printer, self.options)
reporter.print_report(123, 4, " ", " ")
printer.assert_called_with("123 1000\t1\t2.43\t1.24\t0.0\tNone\t1\tprocess_1")
def test_print_report_header_without_process_user(self):
cpu_usage = Mock()
process_filter = Mock()
printer = Mock()
process_filter.filter_processes = Mock(return_value=self.processes)
reporter = CpuUsageReporter(cpu_usage, process_filter, 1, printer, self.options)
reporter.print_report(123, 4, " ", " ")
printer.assert_any_call("Timestamp UID\tPID\tusr\tsystem\tguest\t%CPU\tCPU\tCommand")
def test_print_report_header_with_process_user(self):
self.options.show_process_user = 'pcp'
cpu_usage = Mock()
process_filter = Mock()
printer = Mock()
process_filter.filter_processes = Mock(return_value=self.processes)
reporter = CpuUsageReporter(cpu_usage, process_filter, 1, printer, self.options)
reporter.print_report(123, 4, " ", " ")
printer.assert_any_call("Timestamp UName\tPID\tusr\tsystem\tguest\t%CPU\tCPU\tCommand")
if __name__ == "__main__":
unittest.main()
| lgpl-2.1 | -211,338,768,710,201,150 | 39.856164 | 96 | 0.628164 | false |
prophittcorey/Artificial-Intelligence | assign2/search.py | 1 | 1618 | #!/usr/bin/env python
'''
File: search.py
Author: Corey Prophitt <[email protected]>
Class: CS440, Colorado State University.
License: GPLv3, see license.txt for more details.
Description:
The iterative deepening search algorithm.
'''
#
# Standard module imports
#
from copy import copy
def depthLimitedSearchHelper(state, actionsF, takeActionF, goalTestF, depthLimit):
'''A helper function for the iterative deepening search. Does the recursive
calls. This is almost verbatim from the class notes.
'''
if goalTestF(state):
return state
if depthLimit == 0:
return "cutoff"
    cutoffOccurred = False
    for action in actionsF(state):
childState = takeActionF(copy(state), action)
result = depthLimitedSearchHelper(childState, actionsF, takeActionF, goalTestF, depthLimit-1) # <-- Some problem here
if result == "cutoff":
cutoffOccurred = True
elif result != "failure":
return result # <-- Some problem here
if cutoffOccurred:
return "cutoff"
else:
return "failure"
def iterativeDeepeningSearch(startState, actionsF, takeActionF, goalTestF, maxDepth):
'''The iterative portion of the search. Iterates through the possible "depths".
This is almost verbatim from the class notes.'''
for depth in range(maxDepth):
result = depthLimitedSearchHelper(startState, actionsF, takeActionF, goalTestF, depth)
if result != "cutoff":
return [result]
return "cutoff"
if __name__ == '__main__':
pass | gpl-3.0 | -6,677,673,130,061,893,000 | 28.981481 | 126 | 0.645859 | false |
QingkaiLu/RAMCloud | scripts/smux.py | 1 | 5252 | #!/usr/bin/python
# Copyright (c) 2010-2014 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import os
import sys
import time
totalWindows = 0
MAX_WINDOWS=500
def tcmd(cmd):
os.system("tmux %s" % cmd)
def splitWindow():
global totalWindows
global MAX_WINDOWS
if totalWindows < MAX_WINDOWS:
tcmd("split-window -d -h")
totalWindows += 1
def newWindow():
global totalWindows
global MAX_WINDOWS
if totalWindows < MAX_WINDOWS:
tcmd("new-window")
totalWindows += 1
def carvePanes(numPerWindow, layout):
for i in xrange(numPerWindow - 1):
splitWindow()
tcmd("select-layout %s" % layout)
tcmd("select-layout %s" % layout)
def sendCommand(cmd, pane = 0, ex = True):
time.sleep(0.1)
if ex:
tcmd("send-keys -t %d '%s ' Enter" % (pane,cmd))
else:
tcmd("send-keys -t %d '%s'" % (pane,cmd))
# Commands is a list of lists, where each list is a sequence of
# commands to give to particular window.
# executeBeforeAttach is a function that a client can pass in to be executed
# before the attach (assuming we are not inside a tmux already), because
# nothing can be run after the attach.
def create(numPanesPerWindow, commands, layout = 'tiled', executeBeforeAttach = None):
# Defend against forkbombs
if not numPanesPerWindow > 0:
print "Forkbomb attempt detected!"
return
if numPanesPerWindow > 30:
print "Number per window must be less than 30!"
return
tmux = True
    if not os.environ.get('TMUX'): # not inside a tmux session, so create one
tcmd("new-session -d")
tmux = False
else:
newWindow()
panesNeeded = len(commands)
index = 0
while panesNeeded > 0:
carvePanes(numPanesPerWindow, layout)
panesNeeded -= numPanesPerWindow
# Send the commands in with CR
for i in xrange(min(numPanesPerWindow, len(commands))):
print i
for x in commands[i]:
sendCommand(x,i)
# Pop off the commands we just finished with
for i in xrange(min(numPanesPerWindow, len(commands))):
commands.pop(0)
# Create a new window if necessary
if panesNeeded > 0:
newWindow()
if executeBeforeAttach: executeBeforeAttach()
if not tmux:
tcmd("attach-session")
def startSession(file):
cmds = []
# default args in place
args = {"PANES_PER_WINDOW" : "4", "LAYOUT" : "tiled"}
cur_cmds = None
for line in file:
line = line.strip()
# comments
if line == '' or line.startswith("#"): continue
# Start a new pane specification
if line.startswith("---"):
if cur_cmds is not None:
cmds.append(cur_cmds)
cur_cmds = []
continue
# Configuration part
if cur_cmds == None:
try:
left,right = line.split('=',1)
args[left.strip()] = right.strip()
except:
print "Argment '%s' ignored" % line
print "Arguments must be in the form of key = value"
continue
else: # Actual session is being added to
cur_cmds.append(line.strip())
if cur_cmds:
cmds.append(cur_cmds)
# Start the sessions
create(int(args['PANES_PER_WINDOW']), cmds, args['LAYOUT'])
def usage():
doc_string = '''
mux.py <session_spec_file>
The format of session_spec_file consists of ini-style parameters followed by
lists of commands delimited by lines beginning with '---'.
Any line starting with a # is considered a comment and ignored.
Currently there are two supported parameters.
PANES_PER_WINDOW,
The number of panes that each window will be carved into
LAYOUT,
One of the five standard tmux layouts, given below.
even-horizontal, even-vertical, main-horizontal, main-vertical, tiled.
Sample Input File:
# This is a comment
PANES_PER_WINDOW = 4
LAYOUT = tiled
----------
echo 'This is pane 1'
cat /proc/cpuinfo | less
----------
echo 'This is pane 2'
cat /proc/meminfo
----------
echo 'This is pane 3'
uname -a
----------
echo "This is pane 4"
cat /etc/issue
----------
'''
print doc_string
sys.exit(1)
def main():
if len(sys.argv) < 2 or sys.argv[1] in ['--help', '-h','-?'] : usage()
try:
with open(sys.argv[1]) as f:
startSession(f)
except:
print >>sys.stderr, 'File "%s" does not exist.' % sys.argv[1]
sys.exit(2)
if __name__ == "__main__": main()
| isc | -7,239,412,484,352,983,000 | 27.085561 | 86 | 0.624334 | false |
patriziotufarolo/testagent | tests/test_testagent.py | 1 | 2215 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
"""
test_testagent
----------------------------------
Tests for `testagent` module.
"""
import unittest
from testagent.tasks import start_certification
class TestTestagent(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
assert (start_certification.delay('''
<collector id="1" cmid="1" probe_driver="EmptyProbeDelay">
<TestCases>
<TestCase>
<ID>1</ID>
<Description>TestCase1</Description>
<TestInstance Operation="1">
<Preconditions/>
<HiddenCommunications/>
<Input>
<Item key="Input1" value="Value1" />
<Item key="Input2" value="Value2" />
</Input>
<ExpectedOutput/>
<PostConditions/>
</TestInstance>
<TestInstance Operation="3">
<Preconditions/>
<HiddenCommunications/>
<Input>
<Item key="Input6" value="Value6" />
</Input>
<ExpectedOutput/>
<PostConditions/>
</TestInstance>
<TestInstance Operation="2">
<Preconditions/>
<HiddenCommunications/>
<Input>
<Item key="Input8" value="Value8" />
<Item key="Input5" value="Value9" />
</Input>
<ExpectedOutput/>
<PostConditions/>
</TestInstance>
</TestCase>
</TestCases>
</collector>
'''))
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 1,536,859,947,711,901,400 | 33.076923 | 68 | 0.381038 | false |
linuxrocks123/MailTask | mt_utils.py | 1 | 13214 | #! /usr/bin/env python
# MailTask Alpha: The Email Manager
# Copyright (C) 2015 Patrick Simmons
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import base64
import email
import email.parser
import email.utils
import sys
import time
#Dead-Simple CaseInsensitiveList class
class CaseInsensitiveList(list):
def index(self,key):
lowered_key = key.lower()
for i in range(len(self)):
if self[i].lower()==lowered_key:
return i
raise ValueError
def __contains__(self, key):
try:
self.index(key)
except ValueError:
return False
return True
##Stupidly simple method to turn a sequence type's .index() method into .find()
def find(seqobj, needle):
try:
return seqobj.index(needle)
except ValueError:
return -1
##Returns a date/time string appropriate for use in email main browser
def browser_time(tstr,formatstr="%m/%d/%Y %H:%M"):
tztmt = email.utils.parsedate_tz(tstr)
if tztmt!=None:
return time.strftime(formatstr,time.localtime(email.utils.mktime_tz(tztmt)))
else:
return time.strftime(formatstr,time.localtime(0))
##Given an X-MailTask-Date-Info string, return a 1-tuple of the epoch time deadline for a Deadline task, and a 2-tuple of the beginning epoch time and ending epoch time of a Meeting task.
def gtstfxmdis(dinfo_str):
dinfo = dinfo_str.split("/")
if len(dinfo)==1: #(Deadline)
return (email.utils.mktime_tz(email.utils.parsedate_tz(dinfo[0])),)
else: #len(dinfo)==2 (Meeting)
return (email.utils.mktime_tz(email.utils.parsedate_tz(dinfo[0])),
email.utils.mktime_tz(email.utils.parsedate_tz(dinfo[1])))
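# Illustrative examples (hypothetical dates, not from the original source):
#   gtstfxmdis("Mon, 02 Mar 2015 10:00:00 -0700")
#       -> 1-tuple: (deadline epoch seconds,)
#   gtstfxmdis("Mon, 02 Mar 2015 10:00:00 -0700/Mon, 02 Mar 2015 11:00:00 -0700")
#       -> 2-tuple: (meeting start epoch seconds, meeting end epoch seconds)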
##Given an email header, find all instances of commas in nicknames and turn them into
# ASCII character Device Control 1 (0x11)
def decomma(tstr):
to_return=""
in_quotes=False
prev_char_backslash=False
for char in tstr:
if prev_char_backslash and not in_quotes:
if char==',':
to_return+='\x11'
else:
to_return+=char
prev_char_backslash=False
elif char=='\\':
prev_char_backslash=True
elif char=='"':
in_quotes = not in_quotes
elif in_quotes and char==',':
to_return+='\x11'
else:
to_return+=char
return to_return
def recomma(tstr):
return tstr.replace('\x11',',')
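# Illustrative example (hypothetical address): a comma inside a quoted display
# name is protected so a later split(',') keeps the mailbox intact; note that
# decomma() also drops the surrounding quotes.
#   decomma('"Doe, John" <[email protected]>') -> 'Doe\x11 John <[email protected]>'
#   recomma('Doe\x11 John <[email protected]>') -> 'Doe, John <[email protected]>'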
##Return the MIME Message-ID, or generate and return NONMIME ID from timestamp.
def get_message_id(msg,folder):
if "Message-ID" in msg:
return msg['Message-ID'].replace(' ','').replace('\t','').replace('\r','').replace('\n','')
else: #generate nonmime-id
sanitized_folder=folder.replace('/','-')
return "<NONMIME-"+base64.b32encode(repr(hash(msg.as_string())))+"@"+sanitized_folder+".mailtask"
##Generate a unique Message-ID for given message
def gen_message_id(msg,params):
messageID = ("<"+base64.b32encode(repr(hash(msg.as_string())))+"@"+base64.b32encode(repr(hash(msg["From"])))+repr(int(params[0]))+".mailtask"+">").replace("=","")
del msg["Message-ID"]
msg["Message-ID"]=messageID
##Get list of MIME IDs of related messages
def get_related_ids(msg):
return msg["References"].replace('\t',' ').replace('\r',' ').replace('\n',' ').replace(',',' ').split() if 'References' in msg else []
##Set "References" header to specified list of MIME IDs
def set_related_ids(msg,related_list):
del msg["References"]
msg["References"]=",".join(set(related_list))
##Encapsulates a message object into an RFC822 attachment
def rfc822_encapsulate(msg,filename=""):
lines = msg.as_string().splitlines()
for header in ("Content-Type","Content-Transfer-Encoding"):
splitpoint=-1
for i in range(len(lines)):
if lines[i]=="":
splitpoint=i
break
for i in range(splitpoint):
if lines[i].lower().find((header+": ").lower())==0:
lines.insert(splitpoint,lines[i])
del lines[i]
#Handle multi-line Content-Type/Content-Transfer-Encoding headers
while len(lines[i]) and lines[i][0] in (' ','\t'):
lines.insert(splitpoint,lines[i])
del lines[i]
break
for i in range(len(lines)):
if lines[i].lower().find("Content-Type: ".lower())==0:
lines.insert(i,"")
break
return email.parser.Parser().parsestr('Content-Type: message/rfc822'+('; name="'+filename+'"' if filename!="" else "")+'\n'+"\n".join(lines))
##Attaches a message object to the payload of another message object
# If the parent message object is not of multipart type, restructure
# the message such that its current payload is the first subpayload of
# the parent message, and change the parent payload's content type to
# multipart/mixed.
def attach_payload(parent,child):
#message/rfc822 encapsulation requires the payload's sole list element to be
#the target of the attachment instead of the encapsulated message
if parent.get_content_type()=="message/rfc822":
attach_payload(parent.get_payload()[0],child)
return
if 'X-MailTask-Virgin' in parent:
del parent['X-MailTask-Virgin']
if 'Content-Type' not in child:
child.set_type("text/plain")
if ('To' in child or 'Cc' in child or 'Bcc' in child or 'Message-ID' in child) and child.get_content_type()!="message/rfc822":
child = rfc822_encapsulate(child)
if isinstance(parent.get_payload(),str):
first_payload = email.message.Message()
first_payload['Content-Type']=parent['Content-Type']
first_payload['Content-Transfer-Encoding']=parent['Content-Transfer-Encoding']
if 'Content-Disposition' in parent:
first_payload['Content-Disposition']=parent['Content-Disposition']
first_payload.set_payload(parent.get_payload())
parent.set_type("multipart/mixed")
parent.set_payload([first_payload])
parent.attach(child)
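# Illustrative usage (hypothetical messages, minimal sketch): attaching a part
# to a single-part parent converts the parent to multipart/mixed with the old
# body as its first sub-part.
#   parent = email.parser.Parser().parsestr("Content-Type: text/plain\n\nbody")
#   note = email.message.Message()
#   note.set_payload("extra part")
#   attach_payload(parent, note)  # parent is now multipart/mixed with two parts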
##Take a message embedded in another message (such as a message of type
# multipart/x.MailTask) and delete the message/rfc822 header. Replace
# it with the message internal header. This is complicated by the fact
# that the message's internal header must be moved up to before the
# Message-ID header in order to be accepted.
# Precondition: message must already have Message-ID header
def unrfc822(message):
msgstr = message.as_string()
msg_parts = msgstr.split("\n")
del msg_parts[0]
insert_idx = -1
fields_to_move = set(["Content-Type","MIME-Version"])
for i in range(len(msg_parts)):
if msg_parts[i].lower().find("Message-ID".lower())==0 and insert_idx==-1:
insert_idx=i
move_this_line = False
for field in fields_to_move:
if msg_parts[i].lower().find(field.lower())==0:
move_this_line = True
fields_to_move.remove(field)
break
if move_this_line:
if insert_idx!=-1:
magic_str = msg_parts[i]
del msg_parts[i]
msg_parts.insert(insert_idx,magic_str)
else:
print "BUG: Content-Type before Message-ID in unrfc822"
return email.parser.Parser().parsestr("\n".join(msg_parts))
##Flatten a message according to RFC2822 by stupidly inserting newlines everywhere.
# Do the minimum necessary because this is asinine but Microsoft SMTP seems to require it.
# I DON'T CARE if it's the standard IT'S 2015 AND ARBITRARY LINE LENGTH LIMITS MAKE NO SENSE!
def rfc2822_flatten(mstring):
to_return=""
for line in mstring.split("\n"):
if len(line)<998:
to_return+=line+"\n"
else:
to_dispose = line
while len(to_dispose):
if len(to_dispose)<998:
to_return+=to_dispose+"\n"
to_dispose=""
else:
if to_dispose[:998].rfind("\n")!=-1:
split_idx = to_dispose[:998].rfind("\n")
else:
split_idx = 998
to_return+=to_dispose[:split_idx]+"\n"
to_dispose = to_dispose[split_idx:]
return to_return
##Deletes the passed object from the payload of message object.
# Handles changing message content type from multipart to single-part if necessary
def delete_payload_component(parent,child):
if parent.get_content_type()=="message/rfc822":
delete_payload_component(parent.get_payload()[0],child)
return
payload = parent.get_payload()
del payload[payload.index(child)]
if len(payload)==1:
sole_component = payload[0]
parent.set_payload(sole_component.get_payload())
if 'Content-Type' in sole_component:
parent.replace_header('Content-Type',sole_component['Content-Type'])
else:
parent.set_type("text/plain")
if 'Content-Transfer-Encoding' in sole_component:
del parent['Content-Transfer-Encoding']
parent['Content-Transfer-Encoding']=sole_component['Content-Transfer-Encoding']
#Get best submessage from an email to use as a body. Return it
def get_body(msg):
#Internal method to rank content types of bodies
def rank_body(ctype):
TYPE_RANKING = ["text/plain","text/html","text/"]
for i in range(len(TYPE_RANKING)):
if ctype.get_content_type().find(TYPE_RANKING[i])==0:
return i
return len(TYPE_RANKING)
full_payload = msg.get_payload()
if isinstance(full_payload,str):
return msg
#Best body found so far
best_body = None
best_body_ranking = sys.maxint
#Check all direct payload subcomponents
for candidate in full_payload:
if 'Content-Type' in candidate and not ('Content-Disposition' in candidate and candidate['Content-Disposition'].lower().find("attachment")!=-1):
if rank_body(candidate) < best_body_ranking:
best_body = candidate
best_body_ranking = rank_body(candidate)
#Check if we have multipart/alternative subpart. Examine it if so.
for node in full_payload:
if 'Content-Type' in node and node.get_content_type().find("multipart/")==0:
subpayload = node.get_payload()
for candidate in subpayload:
if 'Content-Type' in candidate and not ('Content-Disposition' in candidate and candidate['Content-Disposition'].find("attachment")!=-1):
if rank_body(candidate) < best_body_ranking:
best_body = candidate
best_body_ranking = rank_body(candidate)
return best_body
##Returns string representing which type of task we are
def get_task_type(task):
if 'X-MailTask-Date-Info' not in task:
return "Checklist"
elif task['X-MailTask-Date-Info'].find("/")==-1:
return "Deadline"
else:
return "Meeting"
#Search message cache for specific MIDs
def search_cache(mid,cache):
for record in cache:
rdict = record[1]
if 'Message-ID' in rdict and get_message_id(rdict,None)==mid:
return record
return None
##Walk the body of a message and process each submessage
def walk_attachments(submsg,process_single_submsg,force_decomp=False):
if not isinstance(submsg.get_payload(),str) and (force_decomp or submsg.get_content_type().find("multipart/")==0):
for component in submsg.get_payload():
if component.get_content_type().find("multipart/")==0:
for subsubmsg in component.get_payload():
walk_attachments(subsubmsg,process_single_submsg)
else:
process_single_submsg(component)
else:
process_single_submsg(submsg)
##Gets MIME type of file
# Uses magic if available, otherwise mimetypes
try:
import magic
has_magic=True
except ImportError:
has_magic=False
import mimetypes
mimetypes.init()
def get_mime_type(fname):
if has_magic:
return magic.from_file(fname,mime=True)
else:
if fname.find(".")!=-1:
simple_suffix=fname.rsplit(".")[1]
simple_name=fname.split(".")[0]+"."+simple_suffix
else:
simple_name=fname
to_return = mimetypes.guess_type(simple_name,strict=False)[0]
if to_return==None:
to_return = "application/octet-stream"
return to_return
| gpl-3.0 | 5,728,171,730,024,221,000 | 38.681682 | 187 | 0.629484 | false |
tinkerinestudio/Tinkerine-Suite | TinkerineSuite/Cura/util/stl2.py | 1 | 2253 | from __future__ import absolute_import
import sys
import os
import struct
import time
from Cura.util import mesh2
class stlModel(mesh2.mesh):
def __init__(self):
super(stlModel, self).__init__()
def load(self, filename):
f = open(filename, "rb")
if f.read(5).lower() == "solid":
self._loadAscii(f)
if self.vertexCount < 3:
f.seek(5, os.SEEK_SET)
self._loadBinary(f)
else:
self._loadBinary(f)
f.close()
self._postProcessAfterLoad()
return self
def _loadAscii(self, f):
cnt = 0
for lines in f:
for line in lines.split('\r'):
if 'vertex' in line:
cnt += 1
self._prepareVertexCount(int(cnt))
f.seek(5, os.SEEK_SET)
cnt = 0
for lines in f:
for line in lines.split('\r'):
if 'vertex' in line:
data = line.split()
self.addVertex(float(data[1]), float(data[2]), float(data[3]))
def _loadBinary(self, f):
#Skip the header
f.read(80-5)
faceCount = struct.unpack('<I', f.read(4))[0]
self._prepareVertexCount(faceCount * 3)
for idx in xrange(0, faceCount):
data = struct.unpack("<ffffffffffffH", f.read(50))
self.addVertex(data[3], data[4], data[5])
self.addVertex(data[6], data[7], data[8])
self.addVertex(data[9], data[10], data[11])
def saveAsSTL(mesh, filename):
f = open(filename, 'wb')
#Write the STL binary header. This can contain any info, except for "SOLID" at the start.
f.write(("CURA BINARY STL EXPORT. " + time.strftime('%a %d %b %Y %H:%M:%S')).ljust(80, '\000'))
#Next follow 4 binary bytes containing the amount of faces, and then the face information.
f.write(struct.pack("<I", int(mesh.vertexCount / 3)))
for idx in xrange(0, mesh.vertexCount, 3):
v1 = mesh.origonalVertexes[idx]
v2 = mesh.origonalVertexes[idx+1]
v3 = mesh.origonalVertexes[idx+2]
f.write(struct.pack("<fff", 0.0, 0.0, 0.0))
f.write(struct.pack("<fff", v1[0], v1[1], v1[2]))
f.write(struct.pack("<fff", v2[0], v2[1], v2[2]))
f.write(struct.pack("<fff", v3[0], v3[1], v3[2]))
f.write(struct.pack("<H", 0))
f.close()
if __name__ == '__main__':
for filename in sys.argv[1:]:
m = stlModel().load(filename)
print("Loaded %d faces" % (m.vertexCount / 3))
parts = m.splitToParts()
for p in parts:
saveAsSTL(p, "export_%i.stl" % parts.index(p))
| agpl-3.0 | -5,708,727,742,411,427,000 | 28.25974 | 96 | 0.637372 | false |
pombredanne/pyelftools | test/test_dwarf_lineprogram.py | 1 | 4171 | #-------------------------------------------------------------------------------
# elftools tests
#
# Eli Bendersky ([email protected])
# This code is in the public domain
#-------------------------------------------------------------------------------
import unittest
from elftools.common.py3compat import BytesIO, iteritems
from elftools.dwarf.lineprogram import LineProgram, LineState, LineProgramEntry
from elftools.dwarf.structs import DWARFStructs
from elftools.dwarf.constants import *
class TestLineProgram(unittest.TestCase):
def _make_program_in_stream(self, stream):
""" Create a LineProgram from the given program encoded in a stream
"""
ds = DWARFStructs(little_endian=True, dwarf_format=32, address_size=4)
header = ds.Dwarf_lineprog_header.parse(
b'\x04\x10\x00\x00' + # initial lenght
b'\x03\x00' + # version
b'\x20\x00\x00\x00' + # header length
b'\x01\x01\x01\x0F' + # flags
b'\x0A' + # opcode_base
b'\x00\x01\x04\x08\x0C\x01\x01\x01\x00' + # standard_opcode_lengths
# 2 dir names followed by a NULL
b'\x61\x62\x00\x70\x00\x00' +
# a file entry
b'\x61\x72\x00\x0C\x0D\x0F' +
# and another entry
b'\x45\x50\x51\x00\x86\x12\x07\x08' +
# followed by NULL
b'\x00')
lp = LineProgram(header, stream, ds, 0, len(stream.getvalue()))
return lp
def assertLineState(self, state, **kwargs):
""" Assert that the state attributes specified in kwargs have the given
values (the rest are default).
"""
for k, v in iteritems(kwargs):
self.assertEqual(getattr(state, k), v)
def test_spec_sample_59(self):
# Sample in figure 59 of DWARFv3
s = BytesIO()
s.write(
b'\x02\xb9\x04' +
b'\x0b' +
b'\x38' +
b'\x82' +
b'\x73' +
b'\x02\x02' +
b'\x00\x01\x01')
lp = self._make_program_in_stream(s)
linetable = lp.get_entries()
self.assertEqual(len(linetable), 7)
self.assertIs(linetable[0].state, None) # doesn't modify state
self.assertEqual(linetable[0].command, DW_LNS_advance_pc)
self.assertEqual(linetable[0].args, [0x239])
self.assertLineState(linetable[1].state, address=0x239, line=3)
self.assertEqual(linetable[1].command, 0xb)
self.assertEqual(linetable[1].args, [2, 0, 0])
self.assertLineState(linetable[2].state, address=0x23c, line=5)
self.assertLineState(linetable[3].state, address=0x244, line=6)
self.assertLineState(linetable[4].state, address=0x24b, line=7, end_sequence=False)
self.assertEqual(linetable[5].command, DW_LNS_advance_pc)
self.assertEqual(linetable[5].args, [2])
self.assertLineState(linetable[6].state, address=0x24d, line=7, end_sequence=True)
def test_spec_sample_60(self):
# Sample in figure 60 of DWARFv3
s = BytesIO()
s.write(
b'\x09\x39\x02' +
b'\x0b' +
b'\x09\x03\x00' +
b'\x0b' +
b'\x09\x08\x00' +
b'\x0a' +
b'\x09\x07\x00' +
b'\x0a' +
b'\x09\x02\x00' +
b'\x00\x01\x01')
lp = self._make_program_in_stream(s)
linetable = lp.get_entries()
self.assertEqual(len(linetable), 10)
self.assertIs(linetable[0].state, None) # doesn't modify state
self.assertEqual(linetable[0].command, DW_LNS_fixed_advance_pc)
self.assertEqual(linetable[0].args, [0x239])
self.assertLineState(linetable[1].state, address=0x239, line=3)
self.assertLineState(linetable[3].state, address=0x23c, line=5)
self.assertLineState(linetable[5].state, address=0x244, line=6)
self.assertLineState(linetable[7].state, address=0x24b, line=7, end_sequence=False)
self.assertLineState(linetable[9].state, address=0x24d, line=7, end_sequence=True)
if __name__ == '__main__':
unittest.main()
| unlicense | 8,914,488,276,242,202,000 | 38.72381 | 91 | 0.56725 | false |