repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
jcfr/girder | tests/cases/events_test.py | 1 | 4976 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import time
import unittest
from girder import events
class EventsTestCase(unittest.TestCase):
    """
    This test case is just a unit test of the girder.events system. It does not
    require the server to be running, or any use of the database.
    """
    def setUp(self):
        events.unbindAll()
        self.ctr = 0
        self.responses = None
    def _raiseException(self, event):
        raise Exception('Failure condition')
    def _increment(self, event):
        self.ctr += event.info['amount']
    def _incrementWithResponse(self, event):
        self._increment(event)
        event.addResponse('foo')
    def _eatEvent(self, event):
        event.addResponse({'foo': 'bar'})
        event.stopPropagation()
        event.preventDefault()
    def _shouldNotBeCalled(self, event):
        self.fail('This should not be called due to stopPropagation().')
    def testSynchronousEvents(self):
        name, failname = '_test.event', '_test.failure'
        handlerName = '_test.handler'
        events.bind(name, handlerName, self._increment)
        events.bind(failname, handlerName, self._raiseException)
        # Make sure our exception propagates out of the handler
        try:
            events.trigger(failname)
            self.assertTrue(False)
        except Exception as e:
            self.assertEqual(e.message, 'Failure condition')
        # Bind an event to increment the counter
        self.assertEqual(self.ctr, 0)
        event = events.trigger(name, {'amount': 2})
        self.assertEqual(self.ctr, 2)
        self.assertTrue(event.propagate)
        self.assertFalse(event.defaultPrevented)
        self.assertEqual(event.responses, [])
        # The event should still be bound here if a different handler unbinds
        events.unbind(name, 'not the handler name')
        events.trigger(name, {'amount': 2})
        self.assertEqual(self.ctr, 4)
        # Actually unbind the event; it should now no longer execute
        events.unbind(name, handlerName)
        events.trigger(name, {'amount': 2})
        self.assertEqual(self.ctr, 4)
        # Bind an event that prevents the default action and passes a response
        events.bind(name, handlerName, self._eatEvent)
        events.bind(name, 'other handler name', self._shouldNotBeCalled)
        event = events.trigger(name)
        self.assertTrue(event.defaultPrevented)
        self.assertFalse(event.propagate)
        self.assertEqual(event.responses, [{'foo': 'bar'}])
    def testAsyncEvents(self):
        name, failname = '_test.event', '_test.failure'
        handlerName = '_test.handler'
        events.bind(failname, handlerName, self._raiseException)
        events.bind(name, handlerName, self._incrementWithResponse)
        def callback(event):
            self.ctr += 1
            self.responses = event.responses
        # Make sure an async handler that fails does not break the event loop
        # and that its callback is not triggered.
        self.assertEqual(events.daemon.eventQueue.qsize(), 0)
        events.daemon.trigger(failname, handlerName, callback)
        # Triggering the event before the daemon starts should do nothing
        self.assertEqual(events.daemon.eventQueue.qsize(), 1)
        events.daemon.trigger(name, {'amount': 2}, callback)
        self.assertEqual(events.daemon.eventQueue.qsize(), 2)
        self.assertEqual(self.ctr, 0)
        # Now run the asynchronous event handler, which should eventually
        # cause our counter to be incremented.
        events.daemon.start()
        # Ensure that all of our events have been started within a reasonable
        # amount of time. Also check the results in the loop, since the qsize
        # only indicates if all events were started, not finished.
        startTime = time.time()
        while True:
            if events.daemon.eventQueue.qsize() == 0:
                if self.ctr == 3:
                    break
            if time.time()-startTime > 15:
                break
            time.sleep(0.1)
        self.assertEqual(events.daemon.eventQueue.qsize(), 0)
        self.assertEqual(self.ctr, 3)
        self.assertEqual(self.responses, ['foo'])
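# A brief summary of the events API exercised above (inferred from this test alone, not from girder docs):
#   events.bind(name, handlerName, handler)        register a handler for an event name
#   events.trigger(name, info)                     fire handlers synchronously; returns the Event object
#   events.unbind(name, handlerName)               remove a previously bound handler
#   events.daemon.trigger(name, info, callback)    queue an event for the asynchronous daemon loop
#   events.daemon.start()                          start draining the asynchronous event queue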
| apache-2.0 | -8,410,875,523,043,463,000 | 36.984733 | 79 | 0.627814 | false |
mway08/grpc | test/core/bad_client/gen_build_json.py | 1 | 2847 | #!/usr/bin/env python
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Generates the appropriate build.json data for all the end2end tests."""
import simplejson
import collections
TestOptions = collections.namedtuple('TestOptions', 'flaky')
default_test_options = TestOptions(False)
# maps test names to options
BAD_CLIENT_TESTS = {
    'connection_prefix': default_test_options,
}
def main():
    json = {
        '#': 'generated with test/bad_client/gen_build_json.py',
        'libs': [
            {
                'name': 'bad_client_test',
                'build': 'private',
                'language': 'c',
                'src': [
                    'test/core/bad_client/bad_client.c'
                ]
            }],
        'targets': [
            {
                'name': '%s_bad_client_test' % t,
                'build': 'test',
                'language': 'c',
                'secure': 'no',
                'src': ['test/core/bad_client/tests/%s.c' % t],
                'flaky': 'invoke_large_request' in t,
                'deps': [
                    'bad_client_test',
                    'grpc_test_util_unsecure',
                    'grpc_unsecure',
                    'gpr_test_util',
                    'gpr'
                ]
            }
            for t in sorted(BAD_CLIENT_TESTS.keys())]}
    print simplejson.dumps(json, sort_keys=True, indent=2 * ' ')
if __name__ == '__main__':
    main()
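# Assumed usage: running this module prints the generated JSON to stdout, which the
# gRPC build tooling is expected to capture (e.g. `python gen_build_json.py > build.json`).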
| bsd-3-clause | -4,322,752,907,040,113,000 | 35.037975 | 74 | 0.643484 | false |
ccpgames/eve-metrics | web2py/gluon/contrib/fpdf/fpdf.py | 1 | 73164 | #!/usr/bin/env python
# -*- coding: latin-1 -*-
# ****************************************************************************
# * Software: FPDF for python *
# * Version: 1.7.1 *
# * Date: 2010-09-10 *
# * Last update: 2012-08-16 *
# * License: LGPL v3.0 *
# * *
# * Original Author (PHP): Olivier PLATHEY 2004-12-31 *
# * Ported to Python 2.4 by Max ([email protected]) on 2006-05 *
# * Maintainer: Mariano Reingart ([email protected]) et al since 2008 est. *
# * NOTE: 'I' and 'D' destinations are disabled, and simply print to STDOUT *
# ****************************************************************************
from datetime import datetime
import math
import errno
import os, sys, zlib, struct, re, tempfile, struct
try:
import cPickle as pickle
except ImportError:
import pickle
# Check if PIL is available (tries importing both pypi version and corrected or manually installed versions).
# Necessary for JPEG and GIF support.
try:
try:
import Image
except:
from PIL import Image
except ImportError:
Image = None
from ttfonts import TTFontFile
from fonts import fpdf_charwidths
from php import substr, sprintf, print_r, UTF8ToUTF16BE, UTF8StringToArray
# Global variables
FPDF_VERSION = '1.7.1'
FPDF_FONT_DIR = os.path.join(os.path.dirname(__file__),'font')
SYSTEM_TTFONTS = None
PY3K = sys.version_info >= (3, 0)
def set_global(var, val):
globals()[var] = val
class FPDF(object):
"PDF Generation class"
def __init__(self, orientation='P',unit='mm',format='A4'):
# Some checks
self._dochecks()
# Initialization of properties
self.offsets={} # array of object offsets
self.page=0 # current page number
self.n=2 # current object number
self.buffer='' # buffer holding in-memory PDF
self.pages={} # array containing pages
self.orientation_changes={} # array indicating orientation changes
self.state=0 # current document state
self.fonts={} # array of used fonts
self.font_files={} # array of font files
self.diffs={} # array of encoding differences
self.images={} # array of used images
self.page_links={} # array of links in pages
self.links={} # array of internal links
self.in_footer=0 # flag set when processing footer
self.lastw=0
self.lasth=0 # height of last cell printed
self.font_family='' # current font family
self.font_style='' # current font style
self.font_size_pt=12 # current font size in points
self.underline=0 # underlining flag
self.draw_color='0 G'
self.fill_color='0 g'
self.text_color='0 g'
self.color_flag=0 # indicates whether fill and text colors are different
self.ws=0 # word spacing
self.angle=0
# Standard fonts
self.core_fonts={'courier':'Courier','courierB':'Courier-Bold','courierI':'Courier-Oblique','courierBI':'Courier-BoldOblique',
'helvetica':'Helvetica','helveticaB':'Helvetica-Bold','helveticaI':'Helvetica-Oblique','helveticaBI':'Helvetica-BoldOblique',
'times':'Times-Roman','timesB':'Times-Bold','timesI':'Times-Italic','timesBI':'Times-BoldItalic',
'symbol':'Symbol','zapfdingbats':'ZapfDingbats'}
# Scale factor
if(unit=='pt'):
self.k=1
elif(unit=='mm'):
self.k=72/25.4
elif(unit=='cm'):
self.k=72/2.54
elif(unit=='in'):
self.k=72
else:
self.error('Incorrect unit: '+unit)
# Page format
if(isinstance(format,basestring)):
format=format.lower()
if(format=='a3'):
format=(841.89,1190.55)
elif(format=='a4'):
format=(595.28,841.89)
elif(format=='a5'):
format=(420.94,595.28)
elif(format=='letter'):
format=(612,792)
elif(format=='legal'):
format=(612,1008)
else:
self.error('Unknown page format: '+format)
self.fw_pt=format[0]
self.fh_pt=format[1]
else:
self.fw_pt=format[0]*self.k
self.fh_pt=format[1]*self.k
self.fw=self.fw_pt/self.k
self.fh=self.fh_pt/self.k
# Page orientation
orientation=orientation.lower()
if(orientation=='p' or orientation=='portrait'):
self.def_orientation='P'
self.w_pt=self.fw_pt
self.h_pt=self.fh_pt
elif(orientation=='l' or orientation=='landscape'):
self.def_orientation='L'
self.w_pt=self.fh_pt
self.h_pt=self.fw_pt
else:
self.error('Incorrect orientation: '+orientation)
self.cur_orientation=self.def_orientation
self.w=self.w_pt/self.k
self.h=self.h_pt/self.k
# Page margins (1 cm)
margin=28.35/self.k
self.set_margins(margin,margin)
# Interior cell margin (1 mm)
self.c_margin=margin/10.0
# line width (0.2 mm)
self.line_width=.567/self.k
# Automatic page break
self.set_auto_page_break(1,2*margin)
# Full width display mode
self.set_display_mode('fullwidth')
# Enable compression
self.set_compression(1)
# Set default PDF version number
self.pdf_version='1.3'
def set_margins(self, left,top,right=-1):
"Set left, top and right margins"
self.l_margin=left
self.t_margin=top
if(right==-1):
right=left
self.r_margin=right
def set_left_margin(self, margin):
"Set left margin"
self.l_margin=margin
if(self.page>0 and self.x<margin):
self.x=margin
def set_top_margin(self, margin):
"Set top margin"
self.t_margin=margin
def set_right_margin(self, margin):
"Set right margin"
self.r_margin=margin
def set_auto_page_break(self, auto,margin=0):
"Set auto page break mode and triggering margin"
self.auto_page_break=auto
self.b_margin=margin
self.page_break_trigger=self.h-margin
def set_display_mode(self, zoom,layout='continuous'):
"Set display mode in viewer"
if(zoom=='fullpage' or zoom=='fullwidth' or zoom=='real' or zoom=='default' or not isinstance(zoom,basestring)):
self.zoom_mode=zoom
else:
self.error('Incorrect zoom display mode: '+zoom)
if(layout=='single' or layout=='continuous' or layout=='two' or layout=='default'):
self.layout_mode=layout
else:
self.error('Incorrect layout display mode: '+layout)
def set_compression(self, compress):
"Set page compression"
self.compress=compress
def set_title(self, title):
"Title of document"
self.title=title
def set_subject(self, subject):
"Subject of document"
self.subject=subject
def set_author(self, author):
"Author of document"
self.author=author
def set_keywords(self, keywords):
"Keywords of document"
self.keywords=keywords
def set_creator(self, creator):
"Creator of document"
self.creator=creator
def alias_nb_pages(self, alias='{nb}'):
"Define an alias for total number of pages"
self.str_alias_nb_pages=alias
return alias
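# The alias (default '{nb}') is written into the page content as literal text and is
# replaced with the real page count in _putpages() when the document is closed.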
def error(self, msg):
"Fatal error"
raise RuntimeError('FPDF error: '+msg)
def open(self):
"Begin document"
self.state=1
def close(self):
"Terminate document"
if(self.state==3):
return
if(self.page==0):
self.add_page()
#Page footer
self.in_footer=1
self.footer()
self.in_footer=0
#close page
self._endpage()
#close document
self._enddoc()
def add_page(self, orientation=''):
"Start a new page"
if(self.state==0):
self.open()
family=self.font_family
if self.underline:
style = self.font_style + 'U'
else:
style = self.font_style
size=self.font_size_pt
lw=self.line_width
dc=self.draw_color
fc=self.fill_color
tc=self.text_color
cf=self.color_flag
if(self.page>0):
#Page footer
self.in_footer=1
self.footer()
self.in_footer=0
#close page
self._endpage()
#Start new page
self._beginpage(orientation)
#Set line cap style to square
self._out('2 J')
#Set line width
self.line_width=lw
self._out(sprintf('%.2f w',lw*self.k))
#Set font
if(family):
self.set_font(family,style,size)
#Set colors
self.draw_color=dc
if(dc!='0 G'):
self._out(dc)
self.fill_color=fc
if(fc!='0 g'):
self._out(fc)
self.text_color=tc
self.color_flag=cf
#Page header
self.header()
#Restore line width
if(self.line_width!=lw):
self.line_width=lw
self._out(sprintf('%.2f w',lw*self.k))
#Restore font
if(family):
self.set_font(family,style,size)
#Restore colors
if(self.draw_color!=dc):
self.draw_color=dc
self._out(dc)
if(self.fill_color!=fc):
self.fill_color=fc
self._out(fc)
self.text_color=tc
self.color_flag=cf
def header(self):
"Header to be implemented in your own inherited class"
pass
def footer(self):
"Footer to be implemented in your own inherited class"
pass
def page_no(self):
"Get current page number"
return self.page
def set_draw_color(self, r,g=-1,b=-1):
"Set color for all stroking operations"
if((r==0 and g==0 and b==0) or g==-1):
self.draw_color=sprintf('%.3f G',r/255.0)
else:
self.draw_color=sprintf('%.3f %.3f %.3f RG',r/255.0,g/255.0,b/255.0)
if(self.page>0):
self._out(self.draw_color)
def set_fill_color(self,r,g=-1,b=-1):
"Set color for all filling operations"
if((r==0 and g==0 and b==0) or g==-1):
self.fill_color=sprintf('%.3f g',r/255.0)
else:
self.fill_color=sprintf('%.3f %.3f %.3f rg',r/255.0,g/255.0,b/255.0)
self.color_flag=(self.fill_color!=self.text_color)
if(self.page>0):
self._out(self.fill_color)
def set_text_color(self, r,g=-1,b=-1):
"Set color for text"
if((r==0 and g==0 and b==0) or g==-1):
self.text_color=sprintf('%.3f g',r/255.0)
else:
self.text_color=sprintf('%.3f %.3f %.3f rg',r/255.0,g/255.0,b/255.0)
self.color_flag=(self.fill_color!=self.text_color)
def get_string_width(self, s):
"Get width of a string in the current font"
cw=self.current_font['cw']
w=0
l=len(s)
if self.unifontsubset:
for char in s:
char = ord(char)
if len(cw) > char:
w += cw[char] # ord(cw[2*char])<<8 + ord(cw[2*char+1])
#elif (char>0 and char<128 and isset($cw[chr($char)])) { $w += $cw[chr($char)]; }
elif (self.current_font['desc']['MissingWidth']) :
w += self.current_font['desc']['MissingWidth']
#elif (isset($this->CurrentFont['MissingWidth'])) { $w += $this->CurrentFont['MissingWidth']; }
else:
w += 500
else:
for i in xrange(0, l):
w += cw.get(s[i],0)
return w*self.font_size/1000.0
def set_line_width(self, width):
"Set line width"
self.line_width=width
if(self.page>0):
self._out(sprintf('%.2f w',width*self.k))
def line(self, x1,y1,x2,y2):
"Draw a line"
self._out(sprintf('%.2f %.2f m %.2f %.2f l S',x1*self.k,(self.h-y1)*self.k,x2*self.k,(self.h-y2)*self.k))
def _set_dash(self, dash_length=False, space_length=False):
if(dash_length and space_length):
s = sprintf('[%.3f %.3f] 0 d', dash_length*self.k, space_length*self.k)
else:
s = '[] 0 d'
self._out(s)
def dashed_line(self, x1,y1,x2,y2, dash_length=1, space_length=1):
"""Draw a dashed line. Same interface as line() except:
- dash_length: Length of the dash
- space_length: Length of the space between dashes"""
self._set_dash(dash_length, space_length)
self.line(x1, y1, x2, y2)
self._set_dash()
def rect(self, x,y,w,h,style=''):
"Draw a rectangle"
if(style=='F'):
op='f'
elif(style=='FD' or style=='DF'):
op='B'
else:
op='S'
self._out(sprintf('%.2f %.2f %.2f %.2f re %s',x*self.k,(self.h-y)*self.k,w*self.k,-h*self.k,op))
def add_font(self, family, style='', fname='', uni=False):
"Add a TrueType or Type1 font"
family = family.lower()
if (fname == ''):
fname = family.replace(' ','') + style.lower() + '.pkl'
if (family == 'arial'):
family = 'helvetica'
style = style.upper()
if (style == 'IB'):
style = 'BI'
fontkey = family+style
if fontkey in self.fonts:
# Font already added!
return
if (uni):
global SYSTEM_TTFONTS
if os.path.exists(fname):
ttffilename = fname
elif (FPDF_FONT_DIR and
os.path.exists(os.path.join(FPDF_FONT_DIR, fname))):
ttffilename = os.path.join(FPDF_FONT_DIR, fname)
elif (SYSTEM_TTFONTS and
os.path.exists(os.path.join(SYSTEM_TTFONTS, fname))):
ttffilename = os.path.join(SYSTEM_TTFONTS, fname)
else:
raise RuntimeError("TTF Font file not found: %s" % fname)
unifilename = os.path.splitext(ttffilename)[0] + '.pkl'
name = ''
if os.path.exists(unifilename):
fh = open(unifilename)
try:
font_dict = pickle.load(fh)
finally:
fh.close()
else:
ttf = TTFontFile()
ttf.getMetrics(ttffilename)
desc = {
'Ascent': int(round(ttf.ascent, 0)),
'Descent': int(round(ttf.descent, 0)),
'CapHeight': int(round(ttf.capHeight, 0)),
'Flags': ttf.flags,
'FontBBox': "[%s %s %s %s]" % (
int(round(ttf.bbox[0], 0)),
int(round(ttf.bbox[1], 0)),
int(round(ttf.bbox[2], 0)),
int(round(ttf.bbox[3], 0))),
'ItalicAngle': int(ttf.italicAngle),
'StemV': int(round(ttf.stemV, 0)),
'MissingWidth': int(round(ttf.defaultWidth, 0)),
}
# Generate metrics .pkl file
font_dict = {
'name': re.sub('[ ()]', '', ttf.fullName),
'type': 'TTF',
'desc': desc,
'up': round(ttf.underlinePosition),
'ut': round(ttf.underlineThickness),
'ttffile': ttffilename,
'fontkey': fontkey,
'originalsize': os.stat(ttffilename).st_size,
'cw': ttf.charWidths,
}
try:
fh = open(unifilename, "w")
pickle.dump(font_dict, fh)
fh.close()
except IOError, e:
if not e.errno == errno.EACCES:
raise # Not a permission error.
del ttf
if hasattr(self,'str_alias_nb_pages'):
sbarr = range(0,57) # include numbers in the subset!
else:
sbarr = range(0,32)
self.fonts[fontkey] = {
'i': len(self.fonts)+1, 'type': font_dict['type'],
'name': font_dict['name'], 'desc': font_dict['desc'],
'up': font_dict['up'], 'ut': font_dict['ut'],
'cw': font_dict['cw'],
'ttffile': font_dict['ttffile'], 'fontkey': fontkey,
'subset': sbarr, 'unifilename': unifilename,
}
self.font_files[fontkey] = {'length1': font_dict['originalsize'],
'type': "TTF", 'ttffile': ttffilename}
self.font_files[fname] = {'type': "TTF"}
else:
fontfile = open(fname)
try:
font_dict = pickle.load(fontfile)
finally:
fontfile.close()
self.fonts[fontkey] = {'i': len(self.fonts)+1}
self.fonts[fontkey].update(font_dict)
if (diff):
#Search existing encodings
d = 0
nb = len(self.diffs)
for i in xrange(1, nb+1):
if(self.diffs[i] == diff):
d = i
break
if (d == 0):
d = nb + 1
self.diffs[d] = diff
self.fonts[fontkey]['diff'] = d
filename = font_dict.get('filename')
if (filename):
if (type == 'TrueType'):
self.font_files[filename]={'length1': originalsize}
else:
self.font_files[filename]={'length1': size1,
'length2': size2}
def set_font(self, family,style='',size=0):
"Select a font; size given in points"
family=family.lower()
if(family==''):
family=self.font_family
if(family=='arial'):
family='helvetica'
elif(family=='symbol' or family=='zapfdingbats'):
style=''
style=style.upper()
if('U' in style):
self.underline=1
style=style.replace('U','')
else:
self.underline=0
if(style=='IB'):
style='BI'
if(size==0):
size=self.font_size_pt
#Test if font is already selected
if(self.font_family==family and self.font_style==style and self.font_size_pt==size):
return
#Test if used for the first time
fontkey=family+style
if fontkey not in self.fonts:
#Check if one of the standard fonts
if fontkey in self.core_fonts:
if fontkey not in fpdf_charwidths:
#Load metric file
name=os.path.join(FPDF_FONT_DIR,family)
if(family=='times' or family=='helvetica'):
name+=style.lower()
execfile(name+'.font')
if fontkey not in fpdf_charwidths:
self.error('Could not include font metric file for'+fontkey)
i=len(self.fonts)+1
self.fonts[fontkey]={'i':i,'type':'core','name':self.core_fonts[fontkey],'up':-100,'ut':50,'cw':fpdf_charwidths[fontkey]}
else:
self.error('Undefined font: '+family+' '+style)
#Select it
self.font_family=family
self.font_style=style
self.font_size_pt=size
self.font_size=size/self.k
self.current_font=self.fonts[fontkey]
self.unifontsubset = (self.fonts[fontkey]['type'] == 'TTF')
if(self.page>0):
self._out(sprintf('BT /F%d %.2f Tf ET',self.current_font['i'],self.font_size_pt))
def set_font_size(self, size):
"Set font size in points"
if(self.font_size_pt==size):
return
self.font_size_pt=size
self.font_size=size/self.k
if(self.page>0):
self._out(sprintf('BT /F%d %.2f Tf ET',self.current_font['i'],self.font_size_pt))
def add_link(self):
"Create a new internal link"
n=len(self.links)+1
self.links[n]=(0,0)
return n
def set_link(self, link,y=0,page=-1):
"Set destination of internal link"
if(y==-1):
y=self.y
if(page==-1):
page=self.page
self.links[link]=[page,y]
def link(self, x,y,w,h,link):
"Put a link on the page"
if not self.page in self.page_links:
self.page_links[self.page] = []
self.page_links[self.page] += [(x*self.k,self.h_pt-y*self.k,w*self.k,h*self.k,link),]
def text(self, x, y, txt=''):
"Output a string"
txt = self.normalize_text(txt)
if (self.unifontsubset):
txt2 = self._escape(UTF8ToUTF16BE(txt, False))
for uni in UTF8StringToArray(txt):
self.current_font['subset'].append(uni)
else:
txt2 = self._escape(txt)
s=sprintf('BT %.2f %.2f Td (%s) Tj ET',x*self.k,(self.h-y)*self.k, txt2)
if(self.underline and txt!=''):
s+=' '+self._dounderline(x,y,txt)
if(self.color_flag):
s='q '+self.text_color+' '+s+' Q'
self._out(s)
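# Note on rotate() below: it opens a q ... cm block around subsequent output; the
# matching Q is only emitted by the next call to rotate() (typically rotate(0)),
# so the rotation should be explicitly reset when finished.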
def rotate(self, angle, x=None, y=None):
if x is None:
x = self.x
if y is None:
y = self.y;
if self.angle!=0:
self._out('Q')
self.angle = angle
if angle!=0:
angle *= math.pi/180;
c = math.cos(angle);
s = math.sin(angle);
cx = x*self.k;
cy = (self.h-y)*self.k
s = sprintf('q %.5F %.5F %.5F %.5F %.2F %.2F cm 1 0 0 1 %.2F %.2F cm',c,s,-s,c,cx,cy,-cx,-cy)
self._out(s)
def accept_page_break(self):
"Accept automatic page break or not"
return self.auto_page_break
def cell(self, w,h=0,txt='',border=0,ln=0,align='',fill=0,link=''):
"Output a cell"
txt = self.normalize_text(txt)
k=self.k
if(self.y+h>self.page_break_trigger and not self.in_footer and self.accept_page_break()):
#Automatic page break
x=self.x
ws=self.ws
if(ws>0):
self.ws=0
self._out('0 Tw')
self.add_page(self.cur_orientation)
self.x=x
if(ws>0):
self.ws=ws
self._out(sprintf('%.3f Tw',ws*k))
if(w==0):
w=self.w-self.r_margin-self.x
s=''
if(fill==1 or border==1):
if(fill==1):
if border==1:
op='B'
else:
op='f'
else:
op='S'
s=sprintf('%.2f %.2f %.2f %.2f re %s ',self.x*k,(self.h-self.y)*k,w*k,-h*k,op)
if(isinstance(border,basestring)):
x=self.x
y=self.y
if('L' in border):
s+=sprintf('%.2f %.2f m %.2f %.2f l S ',x*k,(self.h-y)*k,x*k,(self.h-(y+h))*k)
if('T' in border):
s+=sprintf('%.2f %.2f m %.2f %.2f l S ',x*k,(self.h-y)*k,(x+w)*k,(self.h-y)*k)
if('R' in border):
s+=sprintf('%.2f %.2f m %.2f %.2f l S ',(x+w)*k,(self.h-y)*k,(x+w)*k,(self.h-(y+h))*k)
if('B' in border):
s+=sprintf('%.2f %.2f m %.2f %.2f l S ',x*k,(self.h-(y+h))*k,(x+w)*k,(self.h-(y+h))*k)
if(txt!=''):
if(align=='R'):
dx=w-self.c_margin-self.get_string_width(txt)
elif(align=='C'):
dx=(w-self.get_string_width(txt))/2.0
else:
dx=self.c_margin
if(self.color_flag):
s+='q '+self.text_color+' '
# If multibyte, Tw has no effect - do word spacing using an adjustment before each space
if (self.ws and self.unifontsubset):
for uni in UTF8StringToArray(txt):
self.current_font['subset'].append(uni)
space = self._escape(UTF8ToUTF16BE(' ', False))
s += sprintf('BT 0 Tw %.2F %.2F Td [',(self.x + dx) * k,(self.h - (self.y + 0.5*h+ 0.3 * self.font_size)) * k)
t = txt.split(' ')
numt = len(t)
for i in range(numt):
tx = t[i]
tx = '(' + self._escape(UTF8ToUTF16BE(tx, False)) + ')'
s += sprintf('%s ', tx);
if ((i+1)<numt):
adj = -(self.ws * self.k) * 1000 / self.font_size_pt
s += sprintf('%d(%s) ', adj, space)
s += '] TJ'
s += ' ET'
else:
if (self.unifontsubset):
txt2 = self._escape(UTF8ToUTF16BE(txt, False))
for uni in UTF8StringToArray(txt):
self.current_font['subset'].append(uni)
else:
txt2 = self._escape(txt)
s += sprintf('BT %.2f %.2f Td (%s) Tj ET',(self.x+dx)*k,(self.h-(self.y+.5*h+.3*self.font_size))*k,txt2)
if(self.underline):
s+=' '+self._dounderline(self.x+dx,self.y+.5*h+.3*self.font_size,txt)
if(self.color_flag):
s+=' Q'
if(link):
self.link(self.x+dx,self.y+.5*h-.5*self.font_size,self.get_string_width(txt),self.font_size,link)
if(s):
self._out(s)
self.lasth=h
if(ln>0):
#Go to next line
self.y+=h
if(ln==1):
self.x=self.l_margin
else:
self.x+=w
def multi_cell(self, w, h, txt='', border=0, align='J', fill=0, split_only=False):
"Output text with automatic or explicit line breaks"
txt = self.normalize_text(txt)
ret = [] # if split_only = True, returns splited text cells
cw=self.current_font['cw']
if(w==0):
w=self.w-self.r_margin-self.x
wmax=(w-2*self.c_margin)*1000.0/self.font_size
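# wmax is the usable cell width expressed in the same 1/1000-of-font-size units as the
# character width table, so it can be compared directly with the running line width l below.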
s=txt.replace("\r",'')
nb=len(s)
if(nb>0 and s[nb-1]=="\n"):
nb-=1
b=0
if(border):
if(border==1):
border='LTRB'
b='LRT'
b2='LR'
else:
b2=''
if('L' in border):
b2+='L'
if('R' in border):
b2+='R'
if ('T' in border):
b=b2+'T'
else:
b=b2
sep=-1
i=0
j=0
l=0
ns=0
nl=1
while(i<nb):
#Get next character
c=s[i]
if(c=="\n"):
#Explicit line break
if(self.ws>0):
self.ws=0
if not split_only:
self._out('0 Tw')
if not split_only:
self.cell(w,h,substr(s,j,i-j),b,2,align,fill)
else:
ret.append(substr(s,j,i-j))
i+=1
sep=-1
j=i
l=0
ns=0
nl+=1
if(border and nl==2):
b=b2
continue
if(c==' '):
sep=i
ls=l
ns+=1
if self.unifontsubset:
l += self.get_string_width(c) / self.font_size*1000.0
else:
l += cw.get(c,0)
if(l>wmax):
#Automatic line break
if(sep==-1):
if(i==j):
i+=1
if(self.ws>0):
self.ws=0
if not split_only:
self._out('0 Tw')
if not split_only:
self.cell(w,h,substr(s,j,i-j),b,2,align,fill)
else:
ret.append(substr(s,j,i-j))
else:
if(align=='J'):
if ns>1:
self.ws=(wmax-ls)/1000.0*self.font_size/(ns-1)
else:
self.ws=0
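# Justified line: the leftover width (wmax minus the width at the last space, converted
# back to user units) is spread evenly across the ns-1 inter-word spaces via the Tw
# word-spacing operator emitted just below.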
if not split_only:
self._out(sprintf('%.3f Tw',self.ws*self.k))
if not split_only:
self.cell(w,h,substr(s,j,sep-j),b,2,align,fill)
else:
ret.append(substr(s,j,sep-j))
i=sep+1
sep=-1
j=i
l=0
ns=0
nl+=1
if(border and nl==2):
b=b2
else:
i+=1
#Last chunk
if(self.ws>0):
self.ws=0
if not split_only:
self._out('0 Tw')
if(border and 'B' in border):
b+='B'
if not split_only:
self.cell(w,h,substr(s,j,i-j),b,2,align,fill)
self.x=self.l_margin
else:
ret.append(substr(s,j,i-j))
return ret
def write(self, h, txt='', link=''):
"Output text in flowing mode"
txt = self.normalize_text(txt)
cw=self.current_font['cw']
w=self.w-self.r_margin-self.x
wmax=(w-2*self.c_margin)*1000.0/self.font_size
s=txt.replace("\r",'')
nb=len(s)
sep=-1
i=0
j=0
l=0
nl=1
while(i<nb):
#Get next character
c=s[i]
if(c=="\n"):
#Explicit line break
self.cell(w,h,substr(s,j,i-j),0,2,'',0,link)
i+=1
sep=-1
j=i
l=0
if(nl==1):
self.x=self.l_margin
w=self.w-self.r_margin-self.x
wmax=(w-2*self.c_margin)*1000.0/self.font_size
nl+=1
continue
if(c==' '):
sep=i
if self.unifontsubset:
l += self.get_string_width(c) / self.font_size*1000.0
else:
l += cw.get(c,0)
if(l>wmax):
#Automatic line break
if(sep==-1):
if(self.x>self.l_margin):
#Move to next line
self.x=self.l_margin
self.y+=h
w=self.w-self.r_margin-self.x
wmax=(w-2*self.c_margin)*1000.0/self.font_size
i+=1
nl+=1
continue
if(i==j):
i+=1
self.cell(w,h,substr(s,j,i-j),0,2,'',0,link)
else:
self.cell(w,h,substr(s,j,sep-j),0,2,'',0,link)
i=sep+1
sep=-1
j=i
l=0
if(nl==1):
self.x=self.l_margin
w=self.w-self.r_margin-self.x
wmax=(w-2*self.c_margin)*1000.0/self.font_size
nl+=1
else:
i+=1
#Last chunk
if(i!=j):
self.cell(l/1000.0*self.font_size,h,substr(s,j),0,0,'',0,link)
def image(self, name, x=None, y=None, w=0,h=0,type='',link=''):
"Put an image on the page"
if not name in self.images:
#First use of image, get info
if(type==''):
pos=name.rfind('.')
if(not pos):
self.error('image file has no extension and no type was specified: '+name)
type=substr(name,pos+1)
type=type.lower()
if(type=='jpg' or type=='jpeg'):
info=self._parsejpg(name)
elif(type=='png'):
info=self._parsepng(name)
else:
#Allow for additional formats
#maybe the image is not showing the correct extension,
#but the header is OK,
succeed_parsing = False
#try all the parsing functions
parsing_functions = [self._parsejpg,self._parsepng,self._parsegif]
for pf in parsing_functions:
try:
info = pf(name)
succeed_parsing = True
break;
except:
pass
#last resource
if not succeed_parsing:
mtd='_parse'+type
if not hasattr(self,mtd):
self.error('Unsupported image type: '+type)
info=getattr(self, mtd)(name)
mtd='_parse'+type
if not hasattr(self,mtd):
self.error('Unsupported image type: '+type)
info=getattr(self, mtd)(name)
info['i']=len(self.images)+1
self.images[name]=info
else:
info=self.images[name]
#Automatic width and height calculation if needed
if(w==0 and h==0):
#Put image at 72 dpi
w=info['w']/self.k
h=info['h']/self.k
elif(w==0):
w=h*info['w']/info['h']
elif(h==0):
h=w*info['h']/info['w']
# Flowing mode
if y is None:
if (self.y + h > self.page_break_trigger and not self.in_footer and self.accept_page_break()):
#Automatic page break
x = self.x
self.add_page(self.cur_orientation)
self.x = x
y = self.y
self.y += h
if x is None:
x = self.x
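# The cm matrix below scales the 1x1 unit image square to w x h points and places it at
# (x, y), converted to PDF user space whose origin is the bottom-left corner of the page.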
self._out(sprintf('q %.2f 0 0 %.2f %.2f %.2f cm /I%d Do Q',w*self.k,h*self.k,x*self.k,(self.h-(y+h))*self.k,info['i']))
if(link):
self.link(x,y,w,h,link)
def ln(self, h=''):
"Line Feed; default value is last cell height"
self.x=self.l_margin
if(isinstance(h, basestring)):
self.y+=self.lasth
else:
self.y+=h
def get_x(self):
"Get x position"
return self.x
def set_x(self, x):
"Set x position"
if(x>=0):
self.x=x
else:
self.x=self.w+x
def get_y(self):
"Get y position"
return self.y
def set_y(self, y):
"Set y position and reset x"
self.x=self.l_margin
if(y>=0):
self.y=y
else:
self.y=self.h+y
def set_xy(self, x,y):
"Set x and y positions"
self.set_y(y)
self.set_x(x)
def output(self, name='',dest=''):
"Output PDF to some destination"
#Finish document if necessary
if(self.state<3):
self.close()
dest=dest.upper()
if(dest==''):
if(name==''):
name='doc.pdf'
dest='I'
else:
dest='F'
if dest=='I':
print self.buffer
elif dest=='D':
print self.buffer
elif dest=='F':
#Save to local file
f=open(name,'wb')
if(not f):
self.error('Unable to create output file: '+name)
if PY3K:
# TODO: proper unicode support
f.write(self.buffer.encode("latin1"))
else:
f.write(self.buffer)
f.close()
elif dest=='S':
#Return as a string
return self.buffer
else:
self.error('Incorrect output destination: '+dest)
return ''
def normalize_text(self, txt):
"Check that text input is in the correct format/encoding"
# - for TTF unicode fonts: unicode object (utf8 encoding)
# - for built-in fonts: string instances (latin 1 encoding)
if self.unifontsubset and isinstance(txt, str):
txt = txt.decode('utf8')
elif not self.unifontsubset and isinstance(txt, unicode) and not PY3K:
txt = txt.encode('latin1')
return txt
def _dochecks(self):
#Check for locale-related bug
# if(1.1==1):
# self.error("Don\'t alter the locale before including class file");
#Check for decimal separator
if(sprintf('%.1f',1.0)!='1.0'):
import locale
locale.setlocale(locale.LC_NUMERIC,'C')
def _getfontpath(self):
return FPDF_FONT_DIR+'/'
def _putpages(self):
nb=self.page
if hasattr(self,'str_alias_nb_pages'):
# Replace number of pages in fonts using subsets (unicode)
alias = UTF8ToUTF16BE(self.str_alias_nb_pages, False);
r = UTF8ToUTF16BE(str(nb), False)
for n in xrange(1, nb+1):
self.pages[n] = self.pages[n].replace(alias, r)
# Now repeat the page-count replacement for non-subset fonts
for n in xrange(1,nb+1):
self.pages[n]=self.pages[n].replace(self.str_alias_nb_pages,str(nb))
if(self.def_orientation=='P'):
w_pt=self.fw_pt
h_pt=self.fh_pt
else:
w_pt=self.fh_pt
h_pt=self.fw_pt
if self.compress:
filter='/Filter /FlateDecode '
else:
filter=''
for n in xrange(1,nb+1):
#Page
self._newobj()
self._out('<</Type /Page')
self._out('/Parent 1 0 R')
if n in self.orientation_changes:
self._out(sprintf('/MediaBox [0 0 %.2f %.2f]',h_pt,w_pt))
self._out('/Resources 2 0 R')
if self.page_links and n in self.page_links:
#Links
annots='/Annots ['
for pl in self.page_links[n]:
rect=sprintf('%.2f %.2f %.2f %.2f',pl[0],pl[1],pl[0]+pl[2],pl[1]-pl[3])
annots+='<</Type /Annot /Subtype /Link /Rect ['+rect+'] /Border [0 0 0] '
if(isinstance(pl[4],basestring)):
annots+='/A <</S /URI /URI '+self._textstring(pl[4])+'>>>>'
else:
l=self.links[pl[4]]
if l[0] in self.orientation_changes:
h=w_pt
else:
h=h_pt
annots+=sprintf('/Dest [%d 0 R /XYZ 0 %.2f null]>>',1+2*l[0],h-l[1]*self.k)
self._out(annots+']')
if(self.pdf_version>'1.3'):
self._out('/Group <</Type /Group /S /Transparency /CS /DeviceRGB>>')
self._out('/Contents '+str(self.n+1)+' 0 R>>')
self._out('endobj')
#Page content
if self.compress:
p = zlib.compress(self.pages[n])
else:
p = self.pages[n]
self._newobj()
self._out('<<'+filter+'/Length '+str(len(p))+'>>')
self._putstream(p)
self._out('endobj')
#Pages root
self.offsets[1]=len(self.buffer)
self._out('1 0 obj')
self._out('<</Type /Pages')
kids='/Kids ['
for i in xrange(0,nb):
kids+=str(3+2*i)+' 0 R '
self._out(kids+']')
self._out('/Count '+str(nb))
self._out(sprintf('/MediaBox [0 0 %.2f %.2f]',w_pt,h_pt))
self._out('>>')
self._out('endobj')
def _putfonts(self):
nf=self.n
for diff in self.diffs:
#Encodings
self._newobj()
self._out('<</Type /Encoding /BaseEncoding /WinAnsiEncoding /Differences ['+self.diffs[diff]+']>>')
self._out('endobj')
for name,info in self.font_files.iteritems():
if 'type' in info and info['type'] != 'TTF':
#Font file embedding
self._newobj()
self.font_files[name]['n']=self.n
font=''
f=open(self._getfontpath()+name,'rb',1)
if(not f):
self.error('Font file not found')
font=f.read()
f.close()
compressed=(substr(name,-2)=='.z')
if(not compressed and 'length2' in info):
header=(ord(font[0])==128)
if(header):
#Strip first binary header
font=substr(font,6)
if(header and ord(font[info['length1']])==128):
#Strip second binary header
font=substr(font,0,info['length1'])+substr(font,info['length1']+6)
self._out('<</Length '+str(len(font)))
if(compressed):
self._out('/Filter /FlateDecode')
self._out('/Length1 '+str(info['length1']))
if('length2' in info):
self._out('/Length2 '+str(info['length2'])+' /Length3 0')
self._out('>>')
self._putstream(font)
self._out('endobj')
for k,font in self.fonts.iteritems():
#Font objects
self.fonts[k]['n']=self.n+1
type=font['type']
name=font['name']
if(type=='core'):
#Standard font
self._newobj()
self._out('<</Type /Font')
self._out('/BaseFont /'+name)
self._out('/Subtype /Type1')
if(name!='Symbol' and name!='ZapfDingbats'):
self._out('/Encoding /WinAnsiEncoding')
self._out('>>')
self._out('endobj')
elif(type=='Type1' or type=='TrueType'):
#Additional Type1 or TrueType font
self._newobj()
self._out('<</Type /Font')
self._out('/BaseFont /'+name)
self._out('/Subtype /'+type)
self._out('/FirstChar 32 /LastChar 255')
self._out('/Widths '+str(self.n+1)+' 0 R')
self._out('/FontDescriptor '+str(self.n+2)+' 0 R')
if(font['enc']):
if('diff' in font):
self._out('/Encoding '+str(nf+font['diff'])+' 0 R')
else:
self._out('/Encoding /WinAnsiEncoding')
self._out('>>')
self._out('endobj')
#Widths
self._newobj()
cw=font['cw']
s='['
for i in xrange(32,256):
# Get doesn't raise an exception; returns 0 instead of None if not set
s+=str(cw.get(chr(i)) or 0)+' '
self._out(s+']')
self._out('endobj')
#Descriptor
self._newobj()
s='<</Type /FontDescriptor /FontName /'+name
for k in ('Ascent', 'Descent', 'CapHeight', 'Flags', 'FontBBox', 'ItalicAngle', 'StemV', 'MissingWidth'):
s += ' /%s %s' % (k, font['desc'][k])
filename=font['file']
if(filename):
s+=' /FontFile'
if type!='Type1':
s+='2'
s+=' '+str(self.font_files[filename]['n'])+' 0 R'
self._out(s+'>>')
self._out('endobj')
elif (type == 'TTF'):
self.fonts[k]['n'] = self.n + 1
ttf = TTFontFile()
fontname = 'MPDFAA' + '+' + font['name']
subset = font['subset']
del subset[0]
ttfontstream = ttf.makeSubset(font['ttffile'], subset)
ttfontsize = len(ttfontstream)
fontstream = zlib.compress(ttfontstream)
codeToGlyph = ttf.codeToGlyph
##del codeToGlyph[0]
# Type0 Font
# A composite font - a font composed of other fonts, organized hierarchically
self._newobj()
self._out('<</Type /Font');
self._out('/Subtype /Type0');
self._out('/BaseFont /' + fontname + '');
self._out('/Encoding /Identity-H');
self._out('/DescendantFonts [' + str(self.n + 1) + ' 0 R]')
self._out('/ToUnicode ' + str(self.n + 2) + ' 0 R')
self._out('>>')
self._out('endobj')
# CIDFontType2
# A CIDFont whose glyph descriptions are based on TrueType font technology
self._newobj()
self._out('<</Type /Font')
self._out('/Subtype /CIDFontType2')
self._out('/BaseFont /' + fontname + '')
self._out('/CIDSystemInfo ' + str(self.n + 2) + ' 0 R')
self._out('/FontDescriptor ' + str(self.n + 3) + ' 0 R')
if (font['desc'].get('MissingWidth')):
self._out('/DW %d' % font['desc']['MissingWidth'])
self._putTTfontwidths(font, ttf.maxUni)
self._out('/CIDToGIDMap ' + str(self.n + 4) + ' 0 R')
self._out('>>')
self._out('endobj')
# ToUnicode
self._newobj()
toUni = "/CIDInit /ProcSet findresource begin\n" \
"12 dict begin\n" \
"begincmap\n" \
"/CIDSystemInfo\n" \
"<</Registry (Adobe)\n" \
"/Ordering (UCS)\n" \
"/Supplement 0\n" \
">> def\n" \
"/CMapName /Adobe-Identity-UCS def\n" \
"/CMapType 2 def\n" \
"1 begincodespacerange\n" \
"<0000> <FFFF>\n" \
"endcodespacerange\n" \
"1 beginbfrange\n" \
"<0000> <FFFF> <0000>\n" \
"endbfrange\n" \
"endcmap\n" \
"CMapName currentdict /CMap defineresource pop\n" \
"end\n" \
"end"
self._out('<</Length ' + str(len(toUni)) + '>>')
self._putstream(toUni)
self._out('endobj')
# CIDSystemInfo dictionary
self._newobj()
self._out('<</Registry (Adobe)')
self._out('/Ordering (UCS)')
self._out('/Supplement 0')
self._out('>>')
self._out('endobj')
# Font descriptor
self._newobj()
self._out('<</Type /FontDescriptor')
self._out('/FontName /' + fontname)
for kd in ('Ascent', 'Descent', 'CapHeight', 'Flags', 'FontBBox', 'ItalicAngle', 'StemV', 'MissingWidth'):
v = font['desc'][kd]
if (kd == 'Flags'):
v = v | 4;
v = v & ~32; # SYMBOLIC font flag
self._out(' /%s %s' % (kd, v))
self._out('/FontFile2 ' + str(self.n + 2) + ' 0 R')
self._out('>>')
self._out('endobj')
# Embed CIDToGIDMap
# A specification of the mapping from CIDs to glyph indices
cidtogidmap = '';
cidtogidmap = ["\x00"] * 256*256*2
for cc, glyph in codeToGlyph.items():
cidtogidmap[cc*2] = chr(glyph >> 8)
cidtogidmap[cc*2 + 1] = chr(glyph & 0xFF)
cidtogidmap = zlib.compress(''.join(cidtogidmap));
self._newobj()
self._out('<</Length ' + str(len(cidtogidmap)) + '')
self._out('/Filter /FlateDecode')
self._out('>>')
self._putstream(cidtogidmap)
self._out('endobj')
#Font file
self._newobj()
self._out('<</Length ' + str(len(fontstream)))
self._out('/Filter /FlateDecode')
self._out('/Length1 ' + str(ttfontsize))
self._out('>>')
self._putstream(fontstream)
self._out('endobj')
del ttf
else:
#Allow for additional types
mtd='_put'+type.lower()
if(not method_exists(self,mtd)):
self.error('Unsupported font type: '+type)
self.mtd(font)
def _putTTfontwidths(self, font, maxUni):
cw127fname = os.path.splitext(font['unifilename'])[0] + '.cw127.pkl'
if (os.path.exists(cw127fname)):
fh = open(cw127fname);
try:
font_dict = pickle.load(fh)
finally:
fh.close()
rangeid = font_dict['rangeid']
range_ = font_dict['range']
prevcid = font_dict['prevcid']
prevwidth = font_dict['prevwidth']
interval = font_dict['interval']
range_interval = font_dict['range_interval']
startcid = 128
else:
rangeid = 0
range_ = {}
range_interval = {}
prevcid = -2
prevwidth = -1
interval = False
startcid = 1
cwlen = maxUni + 1
# for each character
for cid in range(startcid, cwlen):
if (cid==128 and not os.path.exists(cw127fname)):
try:
fh = open(cw127fname, "wb")
font_dict = {}
font_dict['rangeid'] = rangeid
font_dict['prevcid'] = prevcid
font_dict['prevwidth'] = prevwidth
font_dict['interval'] = interval
font_dict['range_interval'] = range_interval
font_dict['range'] = range_
pickle.dump(font_dict, fh)
fh.close()
except IOError, e:
if not e.errno == errno.EACCES:
raise # Not a permission error.
if (font['cw'][cid] == 0):
continue
width = font['cw'][cid]
if (width == 65535): width = 0
if (cid > 255 and (cid not in font['subset']) or not cid): #
continue
if ('dw' not in font or (font['dw'] and width != font['dw'])):
if (cid == (prevcid + 1)):
if (width == prevwidth):
if (width == range_[rangeid][0]):
range_.setdefault(rangeid, []).append(width)
else:
range_[rangeid].pop()
# new range
rangeid = prevcid
range_[rangeid] = [prevwidth, width]
interval = True
range_interval[rangeid] = True
else:
if (interval):
# new range
rangeid = cid
range_[rangeid] = [width]
else:
range_[rangeid].append(width)
interval = False
else:
rangeid = cid
range_[rangeid] = [width]
interval = False
prevcid = cid
prevwidth = width
prevk = -1
nextk = -1
prevint = False
for k, ws in sorted(range_.items()):
cws = len(ws)
if (k == nextk and not prevint and (not k in range_interval or cws < 3)):
if (k in range_interval):
del range_interval[k]
range_[prevk] = range_[prevk] + range_[k]
del range_[k]
else:
prevk = k
nextk = k + cws
if (k in range_interval):
prevint = (cws > 3)
del range_interval[k]
nextk -= 1
else:
prevint = False
w = []
for k, ws in sorted(range_.items()):
if (len(set(ws)) == 1):
w.append(' %s %s %s' % (k, k + len(ws) - 1, ws[0]))
else:
w.append(' %s [ %s ]\n' % (k, ' '.join([str(int(h)) for h in ws]))) ##
self._out('/W [%s]' % ''.join(w))
def _putimages(self):
filter=''
if self.compress:
filter='/Filter /FlateDecode '
for filename,info in self.images.iteritems():
self._putimage(info)
del info['data']
if 'smask' in info:
del info['smask']
def _putimage(self, info):
if 'data' in info:
self._newobj()
info['n']=self.n
self._out('<</Type /XObject')
self._out('/Subtype /Image')
self._out('/Width '+str(info['w']))
self._out('/Height '+str(info['h']))
if(info['cs']=='Indexed'):
self._out('/ColorSpace [/Indexed /DeviceRGB '+str(len(info['pal'])/3-1)+' '+str(self.n+1)+' 0 R]')
else:
self._out('/ColorSpace /'+info['cs'])
if(info['cs']=='DeviceCMYK'):
self._out('/Decode [1 0 1 0 1 0 1 0]')
self._out('/BitsPerComponent '+str(info['bpc']))
if 'f' in info:
self._out('/Filter /'+info['f'])
if 'dp' in info:
self._out('/DecodeParms <<' + info['dp'] + '>>')
if('trns' in info and isinstance(info['trns'], list)):
trns=''
for i in xrange(0,len(info['trns'])):
trns+=str(info['trns'][i])+' '+str(info['trns'][i])+' '
self._out('/Mask ['+trns+']')
if('smask' in info):
self._out('/SMask ' + str(self.n+1) + ' 0 R');
self._out('/Length '+str(len(info['data']))+'>>')
self._putstream(info['data'])
self._out('endobj')
# Soft mask
if('smask' in info):
dp = '/Predictor 15 /Colors 1 /BitsPerComponent 8 /Columns ' + str(info['w'])
smask = {'w': info['w'], 'h': info['h'], 'cs': 'DeviceGray', 'bpc': 8, 'f': info['f'], 'dp': dp, 'data': info['smask']}
self._putimage(smask)
#Palette
if(info['cs']=='Indexed'):
self._newobj()
filter = self.compress and '/Filter /FlateDecode ' or ''
if self.compress:
pal=zlib.compress(info['pal'])
else:
pal=info['pal']
self._out('<<'+filter+'/Length '+str(len(pal))+'>>')
self._putstream(pal)
self._out('endobj')
def _putxobjectdict(self):
for image in self.images.values():
self._out('/I'+str(image['i'])+' '+str(image['n'])+' 0 R')
def _putresourcedict(self):
self._out('/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]')
self._out('/Font <<')
for font in self.fonts.values():
self._out('/F'+str(font['i'])+' '+str(font['n'])+' 0 R')
self._out('>>')
self._out('/XObject <<')
self._putxobjectdict()
self._out('>>')
def _putresources(self):
self._putfonts()
self._putimages()
#Resource dictionary
self.offsets[2]=len(self.buffer)
self._out('2 0 obj')
self._out('<<')
self._putresourcedict()
self._out('>>')
self._out('endobj')
def _putinfo(self):
self._out('/Producer '+self._textstring('PyFPDF '+FPDF_VERSION+' http://pyfpdf.googlecode.com/'))
if hasattr(self,'title'):
self._out('/Title '+self._textstring(self.title))
if hasattr(self,'subject'):
self._out('/Subject '+self._textstring(self.subject))
if hasattr(self,'author'):
self._out('/Author '+self._textstring(self.author))
if hasattr (self,'keywords'):
self._out('/Keywords '+self._textstring(self.keywords))
if hasattr(self,'creator'):
self._out('/Creator '+self._textstring(self.creator))
self._out('/CreationDate '+self._textstring('D:'+datetime.now().strftime('%Y%m%d%H%M%S')))
def _putcatalog(self):
self._out('/Type /Catalog')
self._out('/Pages 1 0 R')
if(self.zoom_mode=='fullpage'):
self._out('/OpenAction [3 0 R /Fit]')
elif(self.zoom_mode=='fullwidth'):
self._out('/OpenAction [3 0 R /FitH null]')
elif(self.zoom_mode=='real'):
self._out('/OpenAction [3 0 R /XYZ null null 1]')
elif(not isinstance(self.zoom_mode,basestring)):
self._out('/OpenAction [3 0 R /XYZ null null '+(self.zoom_mode/100)+']')
if(self.layout_mode=='single'):
self._out('/PageLayout /SinglePage')
elif(self.layout_mode=='continuous'):
self._out('/PageLayout /OneColumn')
elif(self.layout_mode=='two'):
self._out('/PageLayout /TwoColumnLeft')
def _putheader(self):
self._out('%PDF-'+self.pdf_version)
def _puttrailer(self):
self._out('/Size '+str(self.n+1))
self._out('/Root '+str(self.n)+' 0 R')
self._out('/Info '+str(self.n-1)+' 0 R')
def _enddoc(self):
self._putheader()
self._putpages()
self._putresources()
#Info
self._newobj()
self._out('<<')
self._putinfo()
self._out('>>')
self._out('endobj')
#Catalog
self._newobj()
self._out('<<')
self._putcatalog()
self._out('>>')
self._out('endobj')
#Cross-ref
o=len(self.buffer)
self._out('xref')
self._out('0 '+(str(self.n+1)))
self._out('0000000000 65535 f ')
for i in xrange(1,self.n+1):
self._out(sprintf('%010d 00000 n ',self.offsets[i]))
#Trailer
self._out('trailer')
self._out('<<')
self._puttrailer()
self._out('>>')
self._out('startxref')
self._out(o)
self._out('%%EOF')
self.state=3
def _beginpage(self, orientation):
self.page+=1
self.pages[self.page]=''
self.state=2
self.x=self.l_margin
self.y=self.t_margin
self.font_family=''
#Page orientation
if(not orientation):
orientation=self.def_orientation
else:
orientation=orientation[0].upper()
if(orientation!=self.def_orientation):
self.orientation_changes[self.page]=1
if(orientation!=self.cur_orientation):
#Change orientation
if(orientation=='P'):
self.w_pt=self.fw_pt
self.h_pt=self.fh_pt
self.w=self.fw
self.h=self.fh
else:
self.w_pt=self.fh_pt
self.h_pt=self.fw_pt
self.w=self.fh
self.h=self.fw
self.page_break_trigger=self.h-self.b_margin
self.cur_orientation=orientation
def _endpage(self):
#End of page contents
self.state=1
def _newobj(self):
#Begin a new object
self.n+=1
self.offsets[self.n]=len(self.buffer)
self._out(str(self.n)+' 0 obj')
def _dounderline(self, x,y,txt):
#Underline text
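# up/ut come from the font metrics: underline position and thickness in 1/1000 of the
# font size; the underline itself is emitted as a thin filled rectangle (re f).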
up=self.current_font['up']
ut=self.current_font['ut']
w=self.get_string_width(txt)+self.ws*txt.count(' ')
return sprintf('%.2f %.2f %.2f %.2f re f',x*self.k,(self.h-(y-up/1000.0*self.font_size))*self.k,w*self.k,-ut/1000.0*self.font_size_pt)
def _parsejpg(self, filename):
# Extract info from a JPEG file
if Image is None:
self.error('PIL not installed')
try:
f = open(filename, 'rb')
im = Image.open(f)
except Exception, e:
self.error('Missing or incorrect image file: %s. error: %s' % (filename, str(e)))
else:
a = im.size
# We shouldn't get into here, as Jpeg is RGB=8bpp right(?), but, just in case...
bpc=8
if im.mode == 'RGB':
colspace='DeviceRGB'
elif im.mode == 'CMYK':
colspace='DeviceCMYK'
else:
colspace='DeviceGray'
# Read whole file from the start
f.seek(0)
data = f.read()
f.close()
return {'w':a[0],'h':a[1],'cs':colspace,'bpc':bpc,'f':'DCTDecode','data':data}
def _parsegif(self, filename):
# Extract info from a GIF file (via PNG conversion)
if Image is None:
self.error('PIL is required for GIF support')
try:
im = Image.open(filename)
except Exception, e:
self.error('Missing or incorrect image file: %s. error: %s' % (filename, str(e)))
else:
# Use temporary file
f = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
tmp = f.name
f.close()
if "transparency" in im.info:
im.save(tmp, transparency = im.info['transparency'])
else:
im.save(tmp)
info = self._parsepng(tmp)
os.unlink(tmp)
return info
def _parsepng(self, name):
#Extract info from a PNG file
if name.startswith("http://") or name.startswith("https://"):
import urllib
f = urllib.urlopen(name)
else:
f=open(name,'rb')
if(not f):
self.error("Can't open image file: "+name)
#Check signature
if(f.read(8)!='\x89'+'PNG'+'\r'+'\n'+'\x1a'+'\n'):
self.error('Not a PNG file: '+name)
#Read header chunk
f.read(4)
if(f.read(4)!='IHDR'):
self.error('Incorrect PNG file: '+name)
w=self._freadint(f)
h=self._freadint(f)
bpc=ord(f.read(1))
if(bpc>8):
self.error('16-bit depth not supported: '+name)
ct=ord(f.read(1))
if(ct==0 or ct==4):
colspace='DeviceGray'
elif(ct==2 or ct==6):
colspace='DeviceRGB'
elif(ct==3):
colspace='Indexed'
else:
self.error('Unknown color type: '+name)
if(ord(f.read(1))!=0):
self.error('Unknown compression method: '+name)
if(ord(f.read(1))!=0):
self.error('Unknown filter method: '+name)
if(ord(f.read(1))!=0):
self.error('Interlacing not supported: '+name)
f.read(4)
dp='/Predictor 15 /Colors '
if colspace == 'DeviceRGB':
dp+='3'
else:
dp+='1'
dp+=' /BitsPerComponent '+str(bpc)+' /Columns '+str(w)+''
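# /Predictor 15 declares per-scanline PNG filtering, so the deflated IDAT data can be
# embedded as-is and undone by the PDF reader's FlateDecode filter.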
#Scan chunks looking for palette, transparency and image data
pal=''
trns=''
data=''
n=1
while n != None:
n=self._freadint(f)
type=f.read(4)
if(type=='PLTE'):
#Read palette
pal=f.read(n)
f.read(4)
elif(type=='tRNS'):
#Read transparency info
t=f.read(n)
if(ct==0):
trns=[ord(substr(t,1,1)),]
elif(ct==2):
trns=[ord(substr(t,1,1)),ord(substr(t,3,1)),ord(substr(t,5,1))]
else:
pos=t.find('\x00')
if(pos!=-1):
trns=[pos,]
f.read(4)
elif(type=='IDAT'):
#Read image data block
data+=f.read(n)
f.read(4)
elif(type=='IEND'):
break
else:
f.read(n+4)
if(colspace=='Indexed' and not pal):
self.error('Missing palette in '+name)
f.close()
info = {'w':w,'h':h,'cs':colspace,'bpc':bpc,'f':'FlateDecode','dp':dp,'pal':pal,'trns':trns,}
if(ct>=4):
# Extract alpha channel
data = zlib.decompress(data)
color = '';
alpha = '';
if(ct==4):
# Gray image
length = 2*w
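# Each gray+alpha row is one PNG filter byte followed by w (gray, alpha) byte pairs; the
# filter byte is copied to both planes and the regexes below keep alternating bytes to
# separate the gray data from the alpha mask.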
for i in range(h):
pos = (1+length)*i
color += data[pos]
alpha += data[pos]
line = substr(data, pos+1, length)
color += re.sub('(.).',lambda m: m.group(1),line, flags=re.DOTALL)
alpha += re.sub('.(.)',lambda m: m.group(1),line, flags=re.DOTALL)
else:
# RGB image
length = 4*w
for i in range(h):
pos = (1+length)*i
color += data[pos]
alpha += data[pos]
line = substr(data, pos+1, length)
color += re.sub('(.{3}).',lambda m: m.group(1),line, flags=re.DOTALL)
alpha += re.sub('.{3}(.)',lambda m: m.group(1),line, flags=re.DOTALL)
del data
data = zlib.compress(color)
info['smask'] = zlib.compress(alpha)
if (self.pdf_version < '1.4'):
self.pdf_version = '1.4'
info['data'] = data
return info
def _freadint(self, f):
#Read a 4-byte integer from file
try:
return struct.unpack('>I', f.read(4))[0]
except:
return None
def _textstring(self, s):
#Format a text string
return '('+self._escape(s)+')'
def _escape(self, s):
#Add \ before \, ( and )
return s.replace('\\','\\\\').replace(')','\\)').replace('(','\\(').replace('\r','\\r')
def _putstream(self, s):
self._out('stream')
self._out(s)
self._out('endstream')
def _out(self, s):
#Add a line to the document
if(self.state==2):
self.pages[self.page]+=s+"\n"
else:
self.buffer+=str(s)+"\n"
def interleaved2of5(self, txt, x, y, w=1.0, h=10.0):
"Barcode I2of5 (numeric), adds a 0 if odd lenght"
narrow = w / 3.0
wide = w
# wide/narrow codes for the digits
bar_char={'0': 'nnwwn', '1': 'wnnnw', '2': 'nwnnw', '3': 'wwnnn',
'4': 'nnwnw', '5': 'wnwnn', '6': 'nwwnn', '7': 'nnnww',
'8': 'wnnwn', '9': 'nwnwn', 'A': 'nn', 'Z': 'wn'}
self.set_fill_color(0)
code = txt
# add leading zero if code-length is odd
if len(code) % 2 != 0:
code = '0' + code
# add start and stop codes
code = 'AA' + code.lower() + 'ZA'
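# 'A' and 'Z' are shorthand entries in bar_char used only to build the start ('AA') and
# stop ('ZA') guard patterns around the payload digits.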
for i in xrange(0, len(code), 2):
# choose next pair of digits
char_bar = code[i]
char_space = code[i+1]
# check whether it is a valid digit
if not char_bar in bar_char.keys():
raise RuntimeError ('Char "%s" invalid for I25: ' % char_bar)
if not char_space in bar_char.keys():
raise RuntimeError ('Char "%s" invalid for I25: ' % char_space)
# create a wide/narrow-seq (first digit=bars, second digit=spaces)
seq = ''
for s in xrange(0, len(bar_char[char_bar])):
seq += bar_char[char_bar][s] + bar_char[char_space][s]
for bar in xrange(0, len(seq)):
# set line_width depending on value
if seq[bar] == 'n':
line_width = narrow
else:
line_width = wide
# draw every second value, the other is represented by space
if bar % 2 == 0:
self.rect(x, y, line_width, h, 'F')
x += line_width
def code39(self, txt, x, y, w=1.5, h=5.0):
"Barcode 3of9"
wide = w
narrow = w / 3.0
gap = narrow
bar_char={'0': 'nnnwwnwnn', '1': 'wnnwnnnnw', '2': 'nnwwnnnnw',
'3': 'wnwwnnnnn', '4': 'nnnwwnnnw', '5': 'wnnwwnnnn',
'6': 'nnwwwnnnn', '7': 'nnnwnnwnw', '8': 'wnnwnnwnn',
'9': 'nnwwnnwnn', 'A': 'wnnnnwnnw', 'B': 'nnwnnwnnw',
'C': 'wnwnnwnnn', 'D': 'nnnnwwnnw', 'E': 'wnnnwwnnn',
'F': 'nnwnwwnnn', 'G': 'nnnnnwwnw', 'H': 'wnnnnwwnn',
'I': 'nnwnnwwnn', 'J': 'nnnnwwwnn', 'K': 'wnnnnnnww',
'L': 'nnwnnnnww', 'M': 'wnwnnnnwn', 'N': 'nnnnwnnww',
'O': 'wnnnwnnwn', 'P': 'nnwnwnnwn', 'Q': 'nnnnnnwww',
'R': 'wnnnnnwwn', 'S': 'nnwnnnwwn', 'T': 'nnnnwnwwn',
'U': 'wwnnnnnnw', 'V': 'nwwnnnnnw', 'W': 'wwwnnnnnn',
'X': 'nwnnwnnnw', 'Y': 'wwnnwnnnn', 'Z': 'nwwnwnnnn',
'-': 'nwnnnnwnw', '.': 'wwnnnnwnn', ' ': 'nwwnnnwnn',
'*': 'nwnnwnwnn', '$': 'nwnwnwnnn', '/': 'nwnwnnnwn',
'+': 'nwnnnwnwn', '%': 'nnnwnwnwn'}
self.set_fill_color(0)
code = txt
code = code.upper()
for i in xrange (0, len(code), 2):
char_bar = code[i]
if not char_bar in bar_char.keys():
raise RuntimeError ('Char "%s" invalid for Code39' % char_bar)
seq= ''
for s in xrange(0, len(bar_char[char_bar])):
seq += bar_char[char_bar][s]
for bar in xrange(0, len(seq)):
if seq[bar] == 'n':
line_width = narrow
else:
line_width = wide
if bar % 2 == 0:
self.rect(x, y, line_width, h, 'F')
x += line_width
x += gap
| mit | 6,015,571,498,540,385,000 | 36.10625 | 142 | 0.439847 | false |
tsnoam/python-telegram-bot | telegram/utils/validate.py | 1 | 1375 | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2016
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains functions to validate function arguments"""
try:
type(basestring)
except NameError:
basestring = str
def validate_string(arg, name):
"""
Validate a string argument. Raises a ValueError if `arg` is neither an
instance of basestring (Python 2) or str (Python 3) nor None.
Args:
arg (basestring): The value to be tested
name (str): The name of the argument, for the error message
"""
if not isinstance(arg, basestring) and arg is not None:
raise ValueError(name + ' is not a string')
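# Illustrative use (a minimal sketch, not taken from the library docs):
#     validate_string(u'@example_channel', 'chat_id')   # accepted silently
#     validate_string(12345, 'chat_id')                 # raises ValueError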
| gpl-3.0 | -5,443,574,451,127,376,000 | 35.184211 | 74 | 0.722909 | false |
javiercantero/streamlink | src/streamlink/plugins/live_russia_tv.py | 1 | 1078 | import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import http
from streamlink.stream import HLSStream
class LiveRussia(Plugin):
url_re = re.compile(r"https?://(?:www.)?live.russia.tv/index/index/channel_id/")
iframe_re = re.compile(r"""<iframe[^>]*src=["']([^'"]+)["'][^>]*>""")
stream_re = re.compile(r"""window.pl.data.*m3u8":"(.*)"}.*};""")
@classmethod
def can_handle_url(cls, url):
return cls.url_re.match(url) is not None
def _get_streams(self):
res = http.get(self.url)
iframe_result = re.search(self.iframe_re, res.text)
if not iframe_result:
self.logger.error("The requested content is unavailable.")
return
res = http.get(iframe_result.group(1))
stream_url_result = re.search(self.stream_re, res.text)
if not stream_url_result:
self.logger.error("The requested content is unavailable.")
return
return HLSStream.parse_variant_playlist(self.session, stream_url_result.group(1))
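# Usage sketch (assumes the plugin is registered with Streamlink; the channel id
# below is illustrative):
#     streamlink "https://live.russia.tv/index/index/channel_id/1" best
# url_re matches the page URL, the iframe source and its m3u8 URL are scraped,
# and the HLS variant playlist is handed to HLSStream.parse_variant_playlist().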
__plugin__ = LiveRussia | bsd-2-clause | 8,933,361,089,661,961,000 | 31.69697 | 89 | 0.627087 | false |
Venris/crazyflie-multilink | examples/basiclog.py | 1 | 5787 | # -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2014 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Simple example that connects to the first Crazyflie found, logs the Stabilizer
and prints it to the console. After 5s the application disconnects and exits.
"""
import sys
sys.path.append("../lib")
import cflib.crtp
import logging
import time
from threading import Timer
import cflib.crtp
from cfclient.utils.logconfigreader import LogConfig
from cflib.crazyflie import Crazyflie
# Only output errors from the logging framework
logging.basicConfig(level=logging.ERROR)
class LoggingExample:
"""
Simple logging example class that logs the Stabilizer from a supplied
link uri and disconnects after 5s.
"""
def __init__(self, link_uri):
""" Initialize and run the example with the specified link_uri """
# Create a Crazyflie object without specifying any cache dirs
self._cf = Crazyflie()
# Connect some callbacks from the Crazyflie API
self._cf.connected.add_callback(self._connected)
self._cf.disconnected.add_callback(self._disconnected)
self._cf.connection_failed.add_callback(self._connection_failed)
self._cf.connection_lost.add_callback(self._connection_lost)
print "Connecting to %s" % link_uri
# Try to connect to the Crazyflie
self._cf.open_link(link_uri)
# Variable used to keep main loop occupied until disconnect
self.is_connected = True
def _connected(self, link_uri):
""" This callback is called form the Crazyflie API when a Crazyflie
has been connected and the TOCs have been downloaded."""
print "Connected to %s" % link_uri
# The definition of the logconfig can be made before connecting
self._lg_stab = LogConfig(name="Stabilizer", period_in_ms=1000)
self._lg_stab.add_variable("acc.x", "float")
self._lg_stab.add_variable("stabilizer.pitch", "float")
self._lg_stab.add_variable("stabilizer.yaw", "float")
# Adding the configuration cannot be done until a Crazyflie is
# connected, since we need to check that the variables we
# would like to log are in the TOC.
try:
self._cf.log.add_config(self._lg_stab)
# This callback will receive the data
self._lg_stab.data_received_cb.add_callback(self._stab_log_data)
# This callback will be called on errors
self._lg_stab.error_cb.add_callback(self._stab_log_error)
# Start the logging
self._lg_stab.start()
except KeyError as e:
print "Could not start log configuration," \
"{} not found in TOC\n{}".format(str(e),link_uri)
except AttributeError:
print "Could not add Stabilizer log config, bad configuration."
        # Start a timer to disconnect in 5s
t = Timer(5, self._cf.close_link)
t.start()
def _stab_log_error(self, logconf, msg):
"""Callback from the log API when an error occurs"""
print "Error when logging %s: %s" % (logconf.name, msg)
def _stab_log_data(self, timestamp, data, logconf):
"""Callback froma the log API when data arrives"""
print "[%d][%s]: %s" % (timestamp, logconf.name, data)
def _connection_failed(self, link_uri, msg):
"""Callback when connection initial connection fails (i.e no Crazyflie
at the speficied address)"""
print "Connection to %s failed: %s" % (link_uri, msg)
self.is_connected = False
def _connection_lost(self, link_uri, msg):
"""Callback when disconnected after a connection has been made (i.e
Crazyflie moves out of range)"""
print "Connection to %s lost: %s" % (link_uri, msg)
def _disconnected(self, link_uri):
"""Callback when the Crazyflie is disconnected (called in all cases)"""
print "Disconnected from %s" % link_uri
self.is_connected = False
if __name__ == '__main__':
    # Initialize the low-level drivers (the debug driver is enabled here)
cflib.crtp.init_drivers(enable_debug_driver=True)
# Scan for Crazyflies and use the first one found
print "Scanning interfaces for Crazyflies..."
available = cflib.crtp.scan_interfaces()
print "Crazyflies found:"
for i in available:
print i[0]
    if len(available) > 0:
        le = LoggingExample(available[0][0])
        # le = LoggingExample("debug://0/0")
        # The Crazyflie lib doesn't contain anything to keep the application alive,
        # so this is where your application should do something. In our case we
        # are just waiting until we are disconnected.
        while le.is_connected:
            time.sleep(1)
    else:
        print "No Crazyflies found, cannot run example"
SVilgelm/CloudFerry | cloudferry/lib/utils/qemu_img.py | 1 | 6446 | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import json
from cloudferry.lib.utils import cmd_cfg
from cloudferry.lib.utils import log
from cloudferry.lib.utils import remote
from cloudferry.lib.utils import remote_runner
from cloudferry.lib.utils import ssh_util
LOG = log.getLogger(__name__)
class QemuImgInfoParser(object):
"""Parses `qemu-img info` command human-readable output.
Tested on qemu-img v1.0 and v2.0.0.
More recent versions of qemu-img support JSON output, but many real-world
systems with old openstack releases still come with qemu-img v1.0 which
does not support JSON"""
__metaclass__ = abc.ABCMeta
def __init__(self, img_info_output):
self.info = self.parse(img_info_output)
@abc.abstractmethod
def parse(self, img_info_output):
pass
@property
def backing_filename(self):
return self.info.get('backing-filename')
@property
def format(self):
return self.info.get('format')
class TextQemuImgInfoParser(QemuImgInfoParser):
def parse(self, img_info_output):
"""Returns dictionary based on human-readable output from
`qemu-img info`
Known problem: breaks if path contains opening parenthesis `(` or
colon `:`"""
result = {}
for l in img_info_output.split('\n'):
if not l.strip():
continue
try:
name, value = l.split(':', 1)
except ValueError:
continue
name = name.strip()
if name == 'backing file':
file_end = value.find('(')
if file_end == -1:
file_end = len(value)
result['backing-filename'] = value[:file_end].strip()
elif name == 'file format':
result['format'] = value.strip()
return result
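# Illustrative input/output for TextQemuImgInfoParser (the qemu-img output below
# is a hypothetical sample, not captured from a real host):
#     sample = ('image: disk\n'
#               'file format: qcow2\n'
#               'backing file: /var/lib/base.img (actual path: /var/lib/base.img)\n')
#     TextQemuImgInfoParser(sample).format            # -> 'qcow2'
#     TextQemuImgInfoParser(sample).backing_filename  # -> '/var/lib/base.img'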
class JsonQemuImgInfoParser(QemuImgInfoParser):
def parse(self, img_info_output):
return json.loads(img_info_output)
class QemuImg(ssh_util.SshUtil):
commit_cmd = cmd_cfg.qemu_img_cmd("commit %s")
commit_cd_cmd = cmd_cfg.cd_cmd & commit_cmd
convert_cmd = cmd_cfg.qemu_img_cmd("convert %s")
convert_full_image_cmd = cmd_cfg.cd_cmd & convert_cmd("-f %s -O %s %s %s")
rebase_cmd = cmd_cfg.qemu_img_cmd("rebase -u -b %s %s")
convert_cmd = convert_cmd("-O %s %s %s")
def diff_commit(self, dest_path, filename="disk", host_compute=None):
cmd = self.commit_cd_cmd(dest_path, filename)
return self.execute(cmd, host_compute)
def convert_image(self,
disk_format,
path_to_image,
output_format="raw",
baseimage="baseimage",
baseimage_tmp="baseimage.tmp",
host_compute=None):
cmd1 = self.convert_full_image_cmd(path_to_image,
disk_format,
output_format,
baseimage,
baseimage_tmp)
cmd2 = cmd_cfg.move_cmd(path_to_image,
baseimage_tmp,
baseimage)
return \
self.execute(cmd1, host_compute), self.execute(cmd2, host_compute)
def get_info(self, dest_disk_ephemeral, host_instance):
try:
# try to use JSON first, cause it's more reliable
cmd = "qemu-img info --output=json {ephemeral}".format(
ephemeral=dest_disk_ephemeral)
qemu_img_json = self.execute(cmd=cmd,
host_exec=host_instance,
ignore_errors=False,
sudo=True)
return JsonQemuImgInfoParser(qemu_img_json)
except (remote_runner.RemoteExecutionError, TypeError, ValueError) \
as e:
# old qemu version not supporting JSON, fallback to human-readable
# qemu-img output parser
LOG.debug("Failed to get JSON from 'qemu-img info %s', error: %s",
dest_disk_ephemeral, e)
cmd = "qemu-img info {ephemeral}".format(
ephemeral=dest_disk_ephemeral)
qemu_img_output = self.execute(cmd=cmd,
host_exec=host_instance,
ignore_errors=True,
sudo=True)
return TextQemuImgInfoParser(qemu_img_output)
def detect_backing_file(self, dest_disk_ephemeral, host_instance):
return self.get_info(dest_disk_ephemeral,
host_instance).backing_filename
def diff_rebase(self, baseimage, disk, host_compute=None):
LOG.debug("rebase diff: baseimage=%s, disk=%s, host_compute=%s",
baseimage, disk, host_compute)
cmd = self.rebase_cmd(baseimage, disk)
return self.execute(cmd, host_compute, sudo=True)
# example source_path = rbd:compute/QWEQWE-QWE231-QWEWQ
def convert(self, format_to, source_path, dest_path, host_compute=None):
cmd = self.convert_cmd(format_to, source_path, dest_path)
return self.execute(cmd, host_compute)
def get_disk_info(remote_executor, path):
try:
# try to use JSON first, cause it's more reliable
json_output = remote_executor.sudo(
'qemu-img info --output=json "{path}"', path=path)
return JsonQemuImgInfoParser(json_output)
except remote.RemoteFailure:
# old qemu version not supporting JSON, fallback to human-readable
# qemu-img output parser
plain_output = remote_executor.sudo(
'qemu-img info "{path}"', path=path)
return TextQemuImgInfoParser(plain_output)
| apache-2.0 | -2,317,633,674,190,102,500 | 37.831325 | 78 | 0.580205 | false |
ruleant/buildtime-trend | generate_trend.py | 1 | 2619 | #!/usr/bin/env python
# vim: set expandtab sw=4 ts=4:
'''
Generates a trend (graph) from the buildtimes in buildtimes.xml
Usage: generate_trend.py -h --mode=native,keen
Copyright (C) 2014 Dieter Adriaenssens <[email protected]>
This file is part of buildtime-trend
<https://github.com/ruleant/buildtime-trend/>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import sys
from buildtimetrend.tools import get_logger
from buildtimetrend.travis import load_travis_env_vars
from buildtimetrend.settings import Settings
from buildtimetrend.settings import process_argv
def generate_trend(argv):
'''
    Generate trends from analysed buildtime data
'''
settings = Settings()
# load Travis environment variables and save them in settings
load_travis_env_vars()
# process command line arguments
process_argv(argv)
# run trend_keen() always,
# if $KEEN_PROJECT_ID variable is set (checked later), it will be executed
if settings.get_setting("mode_native") is True:
trend_native()
if settings.get_setting("mode_keen") is True:
trend_keen()
def trend_native():
'''
    Generate native trend with matplotlib: chart in PNG format
'''
from buildtimetrend.trend import Trend
# use parameter for timestamps file and check if file exists
result_file = os.getenv('BUILD_TREND_OUTPUTFILE', 'trends/buildtimes.xml')
chart_file = os.getenv('BUILD_TREND_TRENDFILE', 'trends/trend.png')
trend = Trend()
if trend.gather_data(result_file):
logger = get_logger()
# log number of builds and list of buildnames
logger.info('Builds (%d) : %s', len(trend.builds), trend.builds)
logger.info('Stages (%d) : %s', len(trend.stages), trend.stages)
trend.generate(chart_file)
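# Example invocation (the paths are the defaults read above and may be overridden):
#     BUILD_TREND_OUTPUTFILE=trends/buildtimes.xml \
#     BUILD_TREND_TRENDFILE=trends/trend.png \
#     python generate_trend.py --mode=native
# which gathers the stored build times and writes the PNG chart via trend.generate().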
def trend_keen():
'''
Setup trends using Keen.io API
'''
from buildtimetrend.keenio import generate_overview_config_file
generate_overview_config_file(Settings().get_project_name())
if __name__ == "__main__":
generate_trend(sys.argv)
| gpl-3.0 | -5,318,230,504,942,116,000 | 30.939024 | 78 | 0.71554 | false |
rahulguptakota/paper-To-Reviewer-Matching-System | citeSentClassifier.py | 1 | 1727 | import xml.etree.ElementTree as ET
import re
import time
import os, csv
from nltk.tokenize import sent_tokenize
from textblob.classifiers import NaiveBayesClassifier
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
stop_words = set(stopwords.words('english'))
train = []
test = []
rootDir = './data_label'
ps = PorterStemmer()
for dirName, subdirList, fileList in os.walk(rootDir, topdown=False):
try:
print(dirName)
fo = open(dirName + "/citeSents.csv", "r")
except:
continue
    lines = fo.readlines()
    fo.close()
for line in lines:
line = line.strip().lower()
# print(line)
splitsent = line.split(",,")
# print(splitsent)
word_tokens = word_tokenize(splitsent[0])
        if splitsent[1] != '1' and splitsent[1] != '0':
print(splitsent)
# elif splitsent[1] == "1":
# print(splitsent)
filtered_sentence = [w for w in word_tokens if not w in stop_words]
line = " ".join(filtered_sentence)
stemmed = [ps.stem(word) for word in line.split()]
        stemmed = filter(lambda x: not (len(x) < 3 or re.findall(r"[0-9]+", x)), stemmed)
stemmed = list(stemmed)
line = " ".join(stemmed)
# print(line)
train.append((line, splitsent[1]))
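# Each labelled sentence above is lowercased, split on ",," into (text, label),
# stripped of English stopwords, Porter-stemmed, and filtered of tokens that are
# shorter than 3 characters or contain digits before being appended to `train`.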
testindex = int(len(train)*4/5)
test = train[testindex:]
train = train[:testindex]
# print(test)
cl = NaiveBayesClassifier(train)
# print(cl.classify("It is also possible to focus on non-compositional compounds, a key point in bilingual applications (CITATION; CITATION; Lin, 99)")) # "pos"
# print(cl.classify("I don't like their pizza.")) # "neg"
for item in test:
if(cl.classify(item[0]) == '1'):
print(item, cl.classify(item[0]))
print(cl.accuracy(test))
print(cl.show_informative_features(100))
# print(train)
| mit | -5,206,490,144,670,676,000 | 29.839286 | 161 | 0.697742 | false |
skylander86/ycml | ycml/classifiers/neural_networks.py | 1 | 15862 | __all__ = ['KerasNNClassifierMixin', 'keras_f1_score', 'EarlyStopping']
import logging
import math
import os
import shutil
from tempfile import NamedTemporaryFile
try:
from keras import backend as K
from keras.callbacks import Callback, ModelCheckpoint
from keras.models import load_model
import tensorflow as tf
except ImportError:
Callback = object
import numpy as np
import scipy.sparse as sps
from sklearn.model_selection import train_test_split
from uriutils import uri_open
from ..utils import Timer
logger = logging.getLogger(__name__)
class KerasNNClassifierMixin(object):
PICKLE_IGNORED_ATTRIBUTES = set()
NN_MODEL_ATTRIBUTE = 'nn_model_'
def __init__(
self,
tf_config=None, set_session=True, # set_session should be False when you are initializing a second classifier...
epochs=10, batch_size=128, passes_per_epoch=1,
initial_weights=None, initial_epoch=0,
validation_size=0.2, verbose=0,
early_stopping=None, save_best=None, save_weights=None,
log_device_placement=False,
**kwargs
):
self.tf_config = tf_config
self.epochs = epochs
self.batch_size = batch_size
self.passes_per_epoch = passes_per_epoch
self.initial_weights = initial_weights
self.initial_epoch = initial_epoch
self.validation_size = validation_size
self.verbose = verbose
self.early_stopping = early_stopping
self.save_weights = save_weights
self.save_best = save_best
self.log_device_placement = log_device_placement
        if set_session: self.set_session(tf_config=tf_config)  # tf_config is keyword-only in set_session
#end def
def fit_binarized(self, X_featurized, Y_binarized, **kwargs):
setattr(self, self.NN_MODEL_ATTRIBUTE, self.compile_model(**kwargs))
self.keras_fit_generator(X_featurized, Y_binarized, **kwargs)
return self
#end def
def compile_model(self, X_featurized, Y_binarized, **kwargs): raise NotImplementedError('compile_model is not implemented.')
def _predict_proba(self, X_featurized, batch_size=1024, use_generator=False, **kwargs):
if X_featurized.shape[0] < batch_size: kwargs['verbose'] = 0
if use_generator or X_featurized.shape[0] > batch_size: Y_proba = self.keras_predict_generator(X_featurized, batch_size=batch_size, **kwargs)
else: Y_proba = self.keras_predict(X_featurized, batch_size=batch_size, **kwargs)
return Y_proba
#end def
def set_session(self, *, tf_config=None, init_op=True, **sess_args):
if tf_config is None:
tf_config = self.tf_config
if tf_config is None:
n_jobs = getattr(self, 'n_jobs', 1)
log_device_placement = getattr(self, 'log_device_placement', logger.getEffectiveLevel() <= logging.DEBUG)
tf_config = tf.ConfigProto(inter_op_parallelism_threads=n_jobs, intra_op_parallelism_threads=n_jobs, log_device_placement=log_device_placement, allow_soft_placement=True)
#end if
self.graph = tf.Graph()
tf_session = tf.Session(config=tf_config, graph=self.graph, **sess_args)
K.set_session(tf_session)
if init_op:
tf_session.run(tf.global_variables_initializer())
self.session = tf_session
return tf_session
#end def
def keras_fit(self, X, Y, *, nn_model=None, validation_data=None, resume=None, **fit_args):
if nn_model is None: nn_model = getattr(self, self.NN_MODEL_ATTRIBUTE)
if not self._pre_fit_setup(nn_model, resume=resume, **fit_args): return
if sps.issparse(X): X = X.toarray()
if sps.issparse(Y): Y = Y.toarray()
if validation_data is not None:
X_validation, Y_validation = validation_data
validation_data = (X_validation.toarray() if sps.issparse(X_validation) else X_validation, Y_validation.toarray() if sps.issparse(Y_validation) else Y_validation)
#end if
logger.info('{} instances used for training and {} instances used for validation.'.format(Y.shape[0], validation_data[1].shape[0] if validation_data else int(self.validation_size * Y.shape[0])))
with self.graph.as_default(), self.session.as_default():
return nn_model.fit(X, Y, validation_data=validation_data, validation_split=0.0 if validation_data is not None else self.validation_size, epochs=self.epochs, batch_size=self.batch_size, verbose=self.verbose, callbacks=self.build_callbacks(), initial_epoch=self.initial_epoch, **fit_args)
#end def
def keras_fit_generator(self, X, Y, *, nn_model=None, generator_func=None, validation_data=None, resume=None, **fit_args):
if nn_model is None: nn_model = getattr(self, self.NN_MODEL_ATTRIBUTE)
if not self._pre_fit_setup(nn_model, resume=resume, **fit_args): return
if validation_data is None:
X_train, X_validation, Y_train, Y_validation = train_test_split(X, Y, test_size=self.validation_size)
validation_data = (X_validation.toarray() if sps.issparse(X_validation) else X_validation, Y_validation.toarray() if sps.issparse(Y_validation) else Y_validation)
else:
X_train, Y_train = X, Y
X_validation, Y_validation = validation_data
validation_data = (X_validation.toarray() if sps.issparse(X_validation) else X_validation, Y_validation.toarray() if sps.issparse(Y_validation) else Y_validation)
#end if
N_train = X_train.shape[0]
logger.info('{} instances used for training and {} instances used for validation.'.format(N_train, validation_data[1].shape[0]))
steps_per_epoch = int((N_train * self.passes_per_epoch) / self.batch_size)
if steps_per_epoch <= 0: raise ValueError('steps_per_epoch ({}) is <= 0!'.format(steps_per_epoch))
logger.info('Fit generator will run {} steps per epoch with batch size of {}. This will make 1 pass through the training data in {:.2f} epochs.'.format(steps_per_epoch, self.batch_size, N_train / (steps_per_epoch * self.batch_size)))
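        # Worked example with illustrative numbers: N_train=10000, passes_per_epoch=1
        # and batch_size=128 give steps_per_epoch = int(10000 * 1 / 128) = 78, so one
        # epoch consumes 78 * 128 = 9984 samples, i.e. roughly one pass over the data.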
if generator_func is None: generator_func = self._generator
with self.graph.as_default(), self.session.as_default():
return nn_model.fit_generator(
generator_func(X_train, Y_train, batch_size=self.batch_size),
validation_data=validation_data,
initial_epoch=self.initial_epoch,
steps_per_epoch=steps_per_epoch, epochs=self.epochs, verbose=self.verbose, callbacks=self.build_callbacks(),
**fit_args
)
#end def
def _pre_fit_setup(self, nn_model, *, resume=None, **kwargs):
if resume:
self.initial_weights = resume[0]
if len(resume) > 1:
self.initial_epoch = int(resume[1])
if self.initial_epoch < 0:
logger.info('Detected negative initial epoch value. Will not perform model fitting.')
self.epochs = 0
#end if
#end if
#end if
if self.initial_weights:
with self.graph.as_default(), self.session.as_default():
with uri_open(self.initial_weights, in_memory=False) as f:
nn_model.load_weights(f.temp_name)
            logger.info('Loaded initial weights file from <{}>; will start at epoch {}.'.format(self.initial_weights, self.initial_epoch))
#end if
if self.epochs == 0:
logger.warning('Epochs is set to 0. Model fitting will not continue.')
return
#end if
return True
#end def
def _generator(self, X, Y, *, batch_size=128, shuffle=True):
N = X.shape[0]
if batch_size > N: raise ValueError('batch_size ({}) is > than number of instances ({}).'.format(batch_size, N))
sparse_X = sps.issparse(X)
if Y is None: batch_sized_zeros = np.zeros(batch_size)
if shuffle:
shuffled_indexes = np.random.permutation(N)
X_shuffled = X[shuffled_indexes, ...]
if Y is not None: Y_shuffled = Y[shuffled_indexes]
else: X_shuffled, Y_shuffled = X, Y
cur = 0
while True:
if cur + batch_size > N:
if shuffle:
shuffled_indexes = np.random.permutation(N)
X_shuffled = X[shuffled_indexes, :]
if Y is not None: Y_shuffled = Y[shuffled_indexes]
#end if
cur = 0
#end if
X_batch = X_shuffled[cur:cur + batch_size, ...].toarray() if sparse_X else X_shuffled[cur:cur + batch_size, ...]
Y_batch = batch_sized_zeros if Y is None else Y_shuffled[cur:cur + batch_size]
yield (X_batch, Y_batch)
cur += batch_size
#end while
#end def
def keras_predict(self, X, *, nn_model=None, **kwargs):
if nn_model is None: nn_model = getattr(self, self.NN_MODEL_ATTRIBUTE)
batch_size = kwargs.pop('batch_size', self.batch_size)
verbose = kwargs.pop('verbose', self.verbose)
with self.graph.as_default(), self.session.as_default():
return nn_model.predict(X.toarray() if sps.issparse(X) else X, batch_size=batch_size, verbose=verbose)
#end def
def keras_predict_generator(self, X, *, nn_model=None, generator_func=None, **kwargs):
if nn_model is None: nn_model = getattr(self, self.NN_MODEL_ATTRIBUTE)
N = X.shape[0]
batch_size = kwargs.pop('batch_size', self.batch_size)
verbose = kwargs.pop('verbose', self.verbose)
steps = int(math.ceil(N / batch_size))
if generator_func is None: generator_func = self._generator
with self.graph.as_default(), self.session.as_default():
return nn_model.predict_generator(generator_func(X, None, batch_size=batch_size, shuffle=False), steps=steps, verbose=verbose)[:N, ...]
#end def
def build_callbacks(self):
callbacks = []
if self.early_stopping is not None:
early_stopping = EarlyStopping(**self.early_stopping)
callbacks.append(early_stopping)
logger.info('Set up {}.'.format(early_stopping))
#end if
if self.save_best is not None:
monitor = self.save_best.get('monitor', 'accuracy')
filepath = self.save_best.get('path', '{epoch:04d}_{' + monitor + ':.5f}.h5')
callbacks.append(ModelCheckpoint(filepath=filepath, monitor=monitor, verbose=self.verbose, save_best_only=True, save_weights_only=True, mode='max'))
logger.info('Best `{}` model will be saved to <{}>.'.format(monitor, filepath))
#end if
if self.save_weights is not None:
path = self.save_weights.get('path', '.')
period = self.save_weights.get('period', 1)
            filepath = os.path.join(path, '{epoch:04d}_{val_keras_f1_score:.5f}.h5') if os.path.isdir(path) else path  # join on the directory path, not the settings dict
callbacks.append(ModelCheckpoint(filepath=filepath, verbose=self.verbose, save_best_only=False, save_weights_only=True, period=period))
logger.info('Weights of every {}th model will be saved to <{}>.'.format(period, filepath))
#end if
return callbacks
#end def
def save_to_tarfile(self, tar_file):
with NamedTemporaryFile(prefix=__name__ + '.', suffix='.h5', delete=False) as f:
temp_path = f.name
#end with
self.nn_model_.save(temp_path)
tar_file.add(temp_path, arcname='nn_model.h5')
os.remove(temp_path)
return self
#end def
def load_from_tarfile(self, tar_file):
self.set_session(init_op=False)
fname = None
try:
with NamedTemporaryFile(suffix='.h5', delete=False) as f:
timer = Timer()
shutil.copyfileobj(tar_file.extractfile('nn_model.h5'), f)
fname = f.name
#end with
with self.graph.as_default(), self.session.as_default():
self.nn_model_ = load_model(fname, custom_objects=self.custom_objects)
self.nn_model_._make_predict_function()
# print(self.graph, tf.get_default_graph())
# self.graph = tf.get_default_graph()
logger.debug('Loaded neural network model weights {}.'.format(timer))
finally:
if fname:
os.remove(fname)
#end try
return self
#end def
def __getstate__(self):
state = super(KerasNNClassifierMixin, self).__getstate__()
ignored_attrs = set([self.NN_MODEL_ATTRIBUTE, 'session', 'graph']) | self.PICKLE_IGNORED_ATTRIBUTES
for k in ignored_attrs:
if k in state:
del state[k]
return state
#end def
@property
def custom_objects(self): return {}
#end class
def keras_f1_score(y_true, y_pred):
'''Calculates the F score, the weighted harmonic mean of precision and recall.
This is useful for multi-label classification, where input samples can be
classified as sets of labels. By only using accuracy (precision) a model
would achieve a perfect score by simply assigning every class to every
input. In order to avoid this, a metric should penalize incorrect class
assignments as well (recall). The F-beta score (ranged from 0.0 to 1.0)
computes this, as a weighted mean of the proportion of correct class
assignments vs. the proportion of incorrect class assignments.
With beta = 1, this is equivalent to a F-measure. With beta < 1, assigning
correct classes becomes more important, and with beta > 1 the metric is
instead weighted towards penalizing incorrect class assignments.
'''
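    # In symbols, with precision p = TP / (TP + FP) and recall r = TP / (TP + FN):
    #     F_beta = (1 + beta**2) * p * r / (beta**2 * p + r)
    # which reduces to the usual F1 score 2*p*r / (p + r) for the beta = 1 used below.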
# If there are no true positives, fix the F score at 0 like sklearn.
if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
return 0
beta = 1.0
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
p = true_positives / (predicted_positives + K.epsilon())
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
r = true_positives / (possible_positives + K.epsilon())
bb = beta ** 2
fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())
return fbeta_score
#end def
class EarlyStopping(Callback):
def __init__(self, monitor='val_accuracy', patience=5, min_delta=0.01, min_epoch=50):
super(EarlyStopping, self).__init__()
self.patience = patience
self.min_delta = min_delta
self.min_epoch = min_epoch
self.monitor = monitor
self.val_scores = []
#end def
def on_epoch_end(self, epoch, logs={}):
monitor_score = logs[self.monitor]
self.val_scores.append(monitor_score)
if len(self.val_scores) < self.patience or epoch <= self.min_epoch or monitor_score < 0.2: # hard limit
return
if self.min_epoch == epoch + 1:
logger.info('Epoch {} > {} (min_epoch). Starting early stopping monitor.'.format(epoch, self.min_epoch))
m = np.mean(self.val_scores[-self.patience - 1:-1])
delta = abs(monitor_score - m)
min_delta = self.min_delta * m
# logger.debug('mean({}[-{}:])={}; delta={}; min_delta={};'.format(self.monitor, self.patience, m, delta, min_delta))
if delta < min_delta:
logger.info('Model delta fell below `min_delta` threshold. Early stopped.')
self.model.stop_training = True
#end if
#end def
def __str__(self):
return 'EarlyStopping(monitor={}, patience={}, min_delta={}, min_epoch={})'.format(self.monitor, self.patience, self.min_delta, self.min_epoch)
#end def
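# Usage sketch (illustrative values): adding
#     EarlyStopping(monitor='val_keras_f1_score', patience=5, min_delta=0.01, min_epoch=50)
# to the Keras callbacks list stops training once the monitored score, after
# `min_epoch` epochs, differs from the mean of the previous `patience` scores by
# less than min_delta (here 1%) of that mean.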
| apache-2.0 | 5,220,070,568,954,902,000 | 39.567775 | 299 | 0.621737 | false |
Silvian/samaritan | samaritan/constants.py | 1 | 2682 | """
@author: Silvian Dragan
@Date: 05/05/2016
@Copyright: Copyright 2016, Samaritan CMA - Published under GNU General Public Licence v3
@Details: https://github.com/Silvian/samaritan
Main file for storing constants classes
"""
from django.conf import settings
from django.utils.timezone import now
class SettingsConstants:
"""Settings constants."""
author = settings.AUTHOR
copyright = settings.COPYRIGHT.format(year=now().year)
licence = settings.LICENCE
version = settings.VERSION
maintainer = settings.MAINTAINER
email = settings.EMAIL
def __init__(self):
return
@classmethod
def get_settings(cls):
return {
'author': cls.author,
'copyright': cls.copyright,
'licence': cls.licence,
'version': cls.version,
'maintainer': cls.maintainer,
'email': cls.email,
}
class WriterConstants:
"""Writer constants."""
TITLE_TEXT = "Report"
FIRST_NAME = "First Name"
LAST_NAME = "Last Name"
DATE_OF_BIRTH = "Date of Birth"
TELEPHONE = "Telephone"
EMAIL = "Email"
ADDRESS_NO = "No."
ADDRESS_STREET = "Street"
ADDRESS_LOCALITY = "Locality"
ADDRESS_CITY = "City"
ADDRESS_POSTCODE = "Postcode"
DETAILS = "Details"
IS_BAPTISED = "Is Baptised"
BAPTISMAL_DATE = "Baptismal Date"
BAPTISMAL_PLACE = "Baptismal Place"
IS_MEMBER = "Is Member"
MEMBERSHIP_TYPE = "Membership Type"
MEMBERSHIP_DATE = "Membership Date"
IS_ACTIVE = "Is Active"
GDPR = "GDPR"
CHURCH_ROLE = "Church Role"
NOTES = "Notes"
YES = "Yes"
NO = "No"
NOT_APPLICABLE = "N/A"
NOT_SPECIFIED = "Not specified"
DATE_FORMAT = "%d-%m-%Y"
FILE_NAME_DATE = "%Y-%m-%d-%H.%M.%S"
def __init__(self):
return
class AuthenticationConstants:
"""Authentication constants."""
LOGOUT_SUCCESS = "You've been logged out successfully"
ACCOUNT_DISABLED = "This account has been disabled"
INVALID_CREDENTIALS = "The username or password is incorrect"
INVALID_CODE = "The code entered is invalid"
LOCKOUT_MESSAGE = (
"Your account has been locked due to repeated failed login attempts! "
"Please contact the system administrator"
)
INCORRECT_PASSWORD = "Your current password is incorrect"
PASSWORD_MISMATCH = "The new password did not match password confirmation"
SAME_PASSWORD = "The new password cannot be the same as existing password"
WEAK_PASSWORD = "The password is too weak and cannot be used"
BREACHED_PASSWORD = "The password has been breached and cannot be used"
def __init__(self):
return
| gpl-3.0 | 2,562,244,859,031,018,000 | 26.367347 | 89 | 0.645787 | false |
okolisny/integration_tests | cfme/common/provider_views.py | 1 | 16335 | # -*- coding: utf-8 -*-
from widgetastic.utils import VersionPick, Version
from widgetastic.widget import View, Text, ConditionalSwitchableView
from widgetastic_manageiq import PaginationPane
from widgetastic_patternfly import Dropdown, BootstrapSelect, FlashMessages
from cfme.base.login import BaseLoggedInPage
from widgetastic_manageiq import (BreadCrumb,
SummaryTable,
Button,
TimelinesView,
DetailsToolBarViewSelector,
ItemsToolBarViewSelector,
Checkbox,
Input,
Table,
BaseEntitiesView,
DynaTree,
BootstrapTreeview,
ProviderEntity,
BaseNonInteractiveEntitiesView)
from cfme.common.host_views import HostEntitiesView
class ProviderDetailsToolBar(View):
"""
represents provider toolbar and its controls
"""
monitoring = Dropdown(text='Monitoring')
configuration = Dropdown(text='Configuration')
reload = Button(title='Reload Current Display')
policy = Dropdown(text='Policy')
authentication = Dropdown(text='Authentication')
view_selector = View.nested(DetailsToolBarViewSelector)
class ProviderDetailsView(BaseLoggedInPage):
"""
main Details page
"""
title = Text('//div[@id="main-content"]//h1')
breadcrumb = BreadCrumb(locator='//ol[@class="breadcrumb"]')
flash = FlashMessages('.//div[@id="flash_msg_div"]/div[@id="flash_text_div" or '
'contains(@class, "flash_text_div")]')
toolbar = View.nested(ProviderDetailsToolBar)
contents = ConditionalSwitchableView(reference='toolbar.view_selector',
ignore_bad_reference=True)
@contents.register('Summary View', default=True)
class ProviderDetailsSummaryView(View):
"""
represents Details page when it is switched to Summary aka Tables view
"""
properties = SummaryTable(title="Properties")
status = SummaryTable(title="Status")
relationships = SummaryTable(title="Relationships")
overview = SummaryTable(title="Overview")
smart_management = SummaryTable(title="Smart Management")
@contents.register('Dashboard View')
class ProviderDetailsDashboardView(View):
"""
represents Details page when it is switched to Dashboard aka Widgets view
"""
# todo: need to develop this page
pass
@property
def is_displayed(self):
if (not self.toolbar.view_selector.is_displayed or
self.toolbar.view_selector.selected == 'Summary View'):
subtitle = 'Summary'
else:
subtitle = 'Dashboard'
title = '{name} ({subtitle})'.format(name=self.context['object'].name,
subtitle=subtitle)
return (self.logged_in_as_current_user and
self.breadcrumb.is_displayed and
self.breadcrumb.active_location == title)
class InfraProviderDetailsView(ProviderDetailsView):
"""
Infra Details page
"""
@property
def is_displayed(self):
return (super(InfraProviderDetailsView, self).is_displayed and
self.navigation.currently_selected == ['Compute', 'Infrastructure', 'Providers'])
class CloudProviderDetailsView(ProviderDetailsView):
"""
Cloud Details page
"""
@property
def is_displayed(self):
return (super(CloudProviderDetailsView, self).is_displayed and
self.navigation.currently_selected == ['Compute', 'Clouds', 'Providers'])
class MiddlewareProviderDetailsView(ProviderDetailsView):
"""
Middleware Details page
"""
@property
def is_displayed(self):
return (super(MiddlewareProviderDetailsView, self).is_displayed and
self.navigation.currently_selected == ['Middleware', 'Providers'])
class ProviderTimelinesView(TimelinesView, BaseLoggedInPage):
"""
represents Timelines page
"""
@property
def is_displayed(self):
return (self.logged_in_as_current_user and
self.navigation.currently_selected == ['Compute', 'Infrastructure', 'Providers'] and
TimelinesView.is_displayed)
class InfraProvidersDiscoverView(BaseLoggedInPage):
"""
Discover View from Infrastructure Providers page
"""
title = Text('//div[@id="main-content"]//h1')
vmware = Checkbox('discover_type_virtualcenter')
scvmm = Checkbox('discover_type_scvmm')
rhevm = Checkbox('discover_type_rhevm')
from_ip1 = Input('from_first')
from_ip2 = Input('from_second')
from_ip3 = Input('from_third')
from_ip4 = Input('from_fourth')
to_ip4 = Input('to_fourth')
start = Button('Start')
cancel = Button('Cancel')
@property
def is_displayed(self):
return (self.logged_in_as_current_user and
self.navigation.currently_selected == ['Compute', 'Infrastructure', 'Providers'] and
self.title.text == 'Infrastructure Providers Discovery')
class CloudProvidersDiscoverView(BaseLoggedInPage):
"""
Discover View from Infrastructure Providers page
"""
title = Text('//div[@id="main-content"]//h1')
discover_type = BootstrapSelect('discover_type_selected')
fields = ConditionalSwitchableView(reference='discover_type')
@fields.register('Amazon EC2', default=True)
class Amazon(View):
username = Input(name='userid')
password = Input(name='password')
confirm_password = Input(name='verify')
@fields.register('Azure')
class Azure(View):
client_id = Input(name='client_id')
client_key = Input(name='client_key')
tenant_id = Input(name='azure_tenant_id')
subscription = Input(name='subscription')
start = Button('Start')
cancel = Button('Cancel')
@property
def is_displayed(self):
return (self.logged_in_as_current_user and
self.navigation.currently_selected == ['Compute', 'Clouds', 'Providers'] and
self.title.text == 'Cloud Providers Discovery')
class ProvidersManagePoliciesView(BaseLoggedInPage):
"""
Provider's Manage Policies view
"""
policies = VersionPick({Version.lowest(): DynaTree('protect_treebox'),
'5.7': BootstrapTreeview('protectbox')})
@View.nested
class entities(BaseNonInteractiveEntitiesView): # noqa
@property
def entity_class(self):
return ProviderEntity().pick(self.browser.product_version)
save = Button('Save')
reset = Button('Reset')
cancel = Button('Cancel')
@property
def is_displayed(self):
return False
class NodesToolBar(View):
"""
represents nodes toolbar and its controls (exists for Infra OpenStack provider)
"""
configuration = Dropdown(text='Configuration')
policy = Dropdown(text='Policy')
power = Dropdown(text='Power')
download = Dropdown(text='Download')
view_selector = View.nested(ItemsToolBarViewSelector)
class ProviderNodesView(BaseLoggedInPage):
"""
represents main Nodes view (exists for Infra OpenStack provider)
"""
title = Text('//div[@id="main-content"]//h1')
toolbar = View.nested(NodesToolBar)
including_entities = View.include(HostEntitiesView, use_parent=True)
@property
def is_displayed(self):
title = '{name} (All Managed Hosts)'.format(name=self.context['object'].name)
return (self.logged_in_as_current_user and
self.navigation.currently_selected == ['Compute', 'Infrastructure', 'Providers'] and
self.title.text == title)
class ProviderToolBar(View):
"""
represents provider toolbar and its controls
"""
configuration = Dropdown(text='Configuration')
policy = Dropdown(text='Policy')
authentication = Dropdown(text='Authentication')
download = Dropdown(text='Download')
view_selector = View.nested(ItemsToolBarViewSelector)
class ProviderSideBar(View):
"""
represents left side bar. it usually contains navigation, filters, etc
"""
pass
class ProviderEntitiesView(BaseEntitiesView):
"""
represents child class of Entities view for Provider entities
"""
@property
def entity_class(self):
return ProviderEntity().pick(self.browser.product_version)
class ProvidersView(BaseLoggedInPage):
"""
represents Main view displaying all providers
"""
@property
def is_displayed(self):
return self.logged_in_as_current_user
paginator = PaginationPane()
toolbar = View.nested(ProviderToolBar)
sidebar = View.nested(ProviderSideBar)
including_entities = View.include(ProviderEntitiesView, use_parent=True)
class ContainersProvidersView(ProvidersView):
"""
represents Main view displaying all Containers providers
"""
table = Table(locator="//div[@id='list_grid']//table")
@property
def is_displayed(self):
return (super(ContainersProvidersView, self).is_displayed and
self.navigation.currently_selected == ['Compute', 'Containers', 'Providers'] and
self.entities.title.text == 'Containers Providers')
class InfraProvidersView(ProvidersView):
"""
represents Main view displaying all Infra providers
"""
@property
def is_displayed(self):
return (super(InfraProvidersView, self).is_displayed and
self.navigation.currently_selected == ['Compute', 'Infrastructure', 'Providers'] and
self.entities.title.text == 'Infrastructure Providers')
class CloudProvidersView(ProvidersView):
"""
represents Main view displaying all Cloud providers
"""
@property
def is_displayed(self):
return (super(CloudProvidersView, self).is_displayed and
self.navigation.currently_selected == ['Compute', 'Clouds', 'Providers'] and
self.entities.title.text == 'Cloud Providers')
class MiddlewareProvidersView(ProvidersView):
"""
represents Main view displaying all Middleware providers
"""
@property
def is_displayed(self):
return (super(MiddlewareProvidersView, self).is_displayed and
self.navigation.currently_selected == ['Middleware', 'Providers'] and
self.entities.title.text == 'Middleware Providers')
class BeforeFillMixin(object):
"""
this mixin is used to activate appropriate tab before filling this tab
"""
def before_fill(self):
if self.exists and not self.is_active():
self.select()
class ProviderAddView(BaseLoggedInPage):
"""
represents Provider Add View
"""
title = Text('//div[@id="main-content"]//h1')
name = Input('name')
prov_type = BootstrapSelect(id='emstype')
zone = Input('zone')
flash = FlashMessages('.//div[@id="flash_msg_div"]/div[@id="flash_text_div" or '
'contains(@class, "flash_text_div")]')
add = Button('Add')
cancel = Button('Cancel')
@View.nested
class endpoints(View): # NOQA
# this is switchable view that gets replaced with concrete view.
# it gets changed according to currently chosen provider type
# look at cfme.common.provider.BaseProvider.create() method
pass
@property
def is_displayed(self):
return self.logged_in_as_current_user
class InfraProviderAddView(ProviderAddView):
api_version = BootstrapSelect(id='api_version') # only for OpenStack
@property
def is_displayed(self):
return (super(InfraProviderAddView, self).is_displayed and
self.navigation.currently_selected == ['Compute', 'Infrastructure', 'Providers'] and
self.title.text == 'Add New Infrastructure Provider')
class CloudProviderAddView(ProviderAddView):
"""
represents Cloud Provider Add View
"""
# bug in cfme this field has different ids for cloud and infra add views
prov_type = BootstrapSelect(id='ems_type')
region = BootstrapSelect(id='provider_region') # Azure/AWS/GCE
tenant_id = Input('azure_tenant_id') # only for Azure
subscription = Input('subscription') # only for Azure
project_id = Input('project') # only for Azure
# bug in cfme this field has different ids for cloud and infra add views
api_version = BootstrapSelect(id='ems_api_version') # only for OpenStack
keystone_v3_domain_id = Input('keystone_v3_domain_id') # OpenStack only
infra_provider = BootstrapSelect(id='ems_infra_provider_id') # OpenStack only
tenant_mapping = Checkbox(name='tenant_mapping_enabled') # OpenStack only
@property
def is_displayed(self):
return (super(CloudProviderAddView, self).is_displayed and
self.navigation.currently_selected == ['Compute', 'Clouds', 'Providers'] and
self.title.text == 'Add New Cloud Provider')
class ContainersProviderAddView(ProviderAddView):
"""
represents Containers Provider Add View
"""
prov_type = BootstrapSelect(id='ems_type')
@property
def is_displayed(self):
return (super(ProviderAddView, self).is_displayed and
self.navigation.currently_selected == ['Compute', 'Containers', 'Providers'] and
self.title.text == 'Add New Containers Provider')
class MiddlewareProviderAddView(ProviderAddView):
"""
represents Middleware Provider Add View
"""
@property
def is_displayed(self):
return (super(MiddlewareProviderAddView, self).is_displayed and
self.navigation.currently_selected == ['Middleware', 'Providers'] and
self.title.text == 'Add New Middleware Provider')
class ProviderEditView(ProviderAddView):
"""
represents Provider Edit View
"""
prov_type = Text(locator='//label[@name="emstype"]')
# only in edit view
vnc_start_port = Input('host_default_vnc_port_start')
vnc_end_port = Input('host_default_vnc_port_end')
flash = FlashMessages('.//div[@id="flash_msg_div"]/div[@id="flash_text_div" or '
'contains(@class, "flash_text_div")]')
save = Button('Save')
reset = Button('Reset')
cancel = Button('Cancel')
@property
def is_displayed(self):
return self.logged_in_as_current_user
class InfraProviderEditView(ProviderEditView):
"""
represents Infra Provider Edit View
"""
@property
def is_displayed(self):
return (super(InfraProviderEditView, self).is_displayed and
self.navigation.currently_selected == ['Compute', 'Infrastructure', 'Providers'] and
self.title.text == 'Edit Infrastructure Provider')
class CloudProviderEditView(ProviderEditView):
"""
represents Cloud Provider Edit View
"""
@property
def is_displayed(self):
return (super(CloudProviderEditView, self).is_displayed and
self.navigation.currently_selected == ['Compute', 'Clouds', 'Providers'] and
self.title.text == 'Edit Cloud Provider')
class ContainersProviderEditView(ProviderEditView):
"""
represents Containers Provider Edit View
"""
@property
def is_displayed(self):
return (super(ProviderEditView, self).is_displayed and
self.navigation.currently_selected == ['Compute', 'Containers', 'Providers'] and
self.title.text == 'Edit Containers Provider')
class MiddlewareProviderEditView(ProviderEditView):
"""
represents Middleware Provider Edit View
"""
@property
def is_displayed(self):
expected_title = ("Edit Middleware Providers '{name}'"
.format(name=self.context['object'].name))
return (super(MiddlewareProviderEditView, self).is_displayed and
self.navigation.currently_selected == ['Middleware', 'Providers'] and
self.title.text == expected_title)
| gpl-2.0 | -1,872,359,693,813,018,000 | 33.17364 | 100 | 0.640404 | false |
dnjohnstone/hyperspy | hyperspy/tests/model/test_model.py | 1 | 28010 | # -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from unittest import mock
import numpy as np
import pytest
import hyperspy.api as hs
from hyperspy.decorators import lazifyTestClass
from hyperspy.exceptions import VisibleDeprecationWarning
from hyperspy.misc.test_utils import ignore_warning
from hyperspy.misc.utils import slugify
class TestModelJacobians:
def setup_method(self, method):
s = hs.signals.Signal1D(np.zeros(1))
m = s.create_model()
self.low_loss = 7.0
self.weights = 0.3
m.axis.axis = np.array([1, 0])
m.channel_switches = np.array([0, 1], dtype=bool)
m.append(hs.model.components1D.Gaussian())
m[0].A.value = 1
m[0].centre.value = 2.0
m[0].sigma.twin = m[0].centre
m._low_loss = mock.MagicMock()
m.low_loss.return_value = self.low_loss
self.model = m
m.convolution_axis = np.zeros(2)
def test_jacobian_not_convolved(self):
m = self.model
m.convolved = False
jac = m._jacobian((1, 2, 3), None, weights=self.weights)
np.testing.assert_array_almost_equal(
jac.squeeze(),
self.weights
* np.array([m[0].A.grad(0), m[0].sigma.grad(0) + m[0].centre.grad(0)]),
)
assert m[0].A.value == 1
assert m[0].centre.value == 2
assert m[0].sigma.value == 2
def test_jacobian_convolved(self):
m = self.model
m.convolved = True
m.append(hs.model.components1D.Gaussian())
m[0].convolved = False
m[1].convolved = True
jac = m._jacobian((1, 2, 3, 4, 5), None, weights=self.weights)
np.testing.assert_array_almost_equal(
jac.squeeze(),
self.weights
* np.array(
[
m[0].A.grad(0),
m[0].sigma.grad(0) + m[0].centre.grad(0),
m[1].A.grad(0) * self.low_loss,
m[1].centre.grad(0) * self.low_loss,
m[1].sigma.grad(0) * self.low_loss,
]
),
)
assert m[0].A.value == 1
assert m[0].centre.value == 2
assert m[0].sigma.value == 2
assert m[1].A.value == 3
assert m[1].centre.value == 4
assert m[1].sigma.value == 5
class TestModelCallMethod:
def setup_method(self, method):
s = hs.signals.Signal1D(np.empty(1))
m = s.create_model()
m.append(hs.model.components1D.Gaussian())
m.append(hs.model.components1D.Gaussian())
self.model = m
def test_call_method_no_convolutions(self):
m = self.model
m.convolved = False
m[1].active = False
r1 = m()
r2 = m(onlyactive=True)
np.testing.assert_allclose(m[0].function(0) * 2, r1)
np.testing.assert_allclose(m[0].function(0), r2)
m.convolved = True
r1 = m(non_convolved=True)
r2 = m(non_convolved=True, onlyactive=True)
np.testing.assert_allclose(m[0].function(0) * 2, r1)
np.testing.assert_allclose(m[0].function(0), r2)
def test_call_method_with_convolutions(self):
m = self.model
m._low_loss = mock.MagicMock()
m.low_loss.return_value = 0.3
m.convolved = True
m.append(hs.model.components1D.Gaussian())
m[1].active = False
m[0].convolved = True
m[1].convolved = False
m[2].convolved = False
m.convolution_axis = np.array([0.0])
r1 = m()
r2 = m(onlyactive=True)
np.testing.assert_allclose(m[0].function(0) * 2.3, r1)
np.testing.assert_allclose(m[0].function(0) * 1.3, r2)
def test_call_method_binned(self):
m = self.model
m.convolved = False
m.remove(1)
m.signal.metadata.Signal.binned = True
m.signal.axes_manager[-1].scale = 0.3
r1 = m()
np.testing.assert_allclose(m[0].function(0) * 0.3, r1)
class TestModelPlotCall:
def setup_method(self, method):
s = hs.signals.Signal1D(np.empty(1))
m = s.create_model()
m.__call__ = mock.MagicMock()
m.__call__.return_value = np.array([0.5, 0.25])
m.axis = mock.MagicMock()
m.fetch_stored_values = mock.MagicMock()
m.channel_switches = np.array([0, 1, 1, 0, 0], dtype=bool)
self.model = m
def test_model2plot_own_am(self):
m = self.model
m.axis.axis.shape = (5,)
res = m._model2plot(m.axes_manager)
np.testing.assert_array_equal(
res, np.array([np.nan, 0.5, 0.25, np.nan, np.nan])
)
assert m.__call__.called
assert m.__call__.call_args[1] == {"non_convolved": False, "onlyactive": True}
assert not m.fetch_stored_values.called
def test_model2plot_other_am(self):
m = self.model
res = m._model2plot(m.axes_manager.deepcopy(), out_of_range2nans=False)
np.testing.assert_array_equal(res, np.array([0.5, 0.25]))
assert m.__call__.called
assert m.__call__.call_args[1] == {"non_convolved": False, "onlyactive": True}
assert 2 == m.fetch_stored_values.call_count
class TestModelSettingPZero:
def setup_method(self, method):
s = hs.signals.Signal1D(np.empty(1))
m = s.create_model()
m.append(hs.model.components1D.Gaussian())
m[0].A.value = 1.1
m[0].centre._number_of_elements = 2
m[0].centre.value = (2.2, 3.3)
m[0].sigma.value = 4.4
m[0].sigma.free = False
m[0].A._bounds = (0.1, 0.11)
m[0].centre._bounds = ((0.2, 0.21), (0.3, 0.31))
m[0].sigma._bounds = (0.4, 0.41)
self.model = m
def test_setting_p0(self):
m = self.model
m.append(hs.model.components1D.Gaussian())
m[-1].active = False
m.p0 = None
m._set_p0()
assert m.p0 == (1.1, 2.2, 3.3)
def test_fetching_from_p0(self):
m = self.model
m.append(hs.model.components1D.Gaussian())
m[-1].active = False
m[-1].A.value = 100
m[-1].sigma.value = 200
m[-1].centre.value = 300
m.p0 = (1.2, 2.3, 3.4, 5.6, 6.7, 7.8)
m._fetch_values_from_p0()
assert m[0].A.value == 1.2
assert m[0].centre.value == (2.3, 3.4)
assert m[0].sigma.value == 4.4
assert m[1].A.value == 100
assert m[1].sigma.value == 200
assert m[1].centre.value == 300
def test_setting_boundaries(self):
m = self.model
m.append(hs.model.components1D.Gaussian())
m[-1].active = False
with pytest.warns(
VisibleDeprecationWarning,
match=r".* has been deprecated and will be made private",
):
m.set_boundaries()
assert m.free_parameters_boundaries == [(0.1, 0.11), (0.2, 0.21), (0.3, 0.31)]
def test_setting_mpfit_parameters_info(self):
m = self.model
m[0].A.bmax = None
m[0].centre.bmin = None
m[0].centre.bmax = 0.31
m.append(hs.model.components1D.Gaussian())
m[-1].active = False
with pytest.warns(
VisibleDeprecationWarning,
match=r".* has been deprecated and will be made private",
):
m.set_mpfit_parameters_info()
assert m.mpfit_parinfo == [
{"limited": [True, False], "limits": [0.1, 0]},
{"limited": [False, True], "limits": [0, 0.31]},
{"limited": [False, True], "limits": [0, 0.31]},
]
class TestModel1D:
def setup_method(self, method):
s = hs.signals.Signal1D(np.empty(1))
m = s.create_model()
self.model = m
def test_errfunc(self):
m = self.model
m._model_function = mock.MagicMock()
m._model_function.return_value = 3.0
np.testing.assert_equal(m._errfunc(None, 1.0, None), 2.0)
np.testing.assert_equal(m._errfunc(None, 1.0, 0.3), 0.6)
def test_errfunc_sq(self):
m = self.model
m._model_function = mock.MagicMock()
m._model_function.return_value = 3.0 * np.ones(2)
np.testing.assert_equal(m._errfunc_sq(None, np.ones(2), None), 8.0)
np.testing.assert_equal(m._errfunc_sq(None, np.ones(2), 0.3), 0.72)
def test_gradient_ls(self):
m = self.model
m._errfunc = mock.MagicMock()
m._errfunc.return_value = 0.1
m._jacobian = mock.MagicMock()
m._jacobian.return_value = np.ones((1, 2)) * 7.0
np.testing.assert_allclose(m._gradient_ls(None, None), 2.8)
def test_gradient_ml(self):
m = self.model
m._model_function = mock.MagicMock()
m._model_function.return_value = 3.0 * np.ones(2)
m._jacobian = mock.MagicMock()
m._jacobian.return_value = np.ones((1, 2)) * 7.0
np.testing.assert_allclose(m._gradient_ml(None, 1.2), 8.4)
def test_gradient_huber(self):
m = self.model
m._errfunc = mock.MagicMock()
m._errfunc.return_value = 0.1
m._jacobian = mock.MagicMock()
m._jacobian.return_value = np.ones((1, 2)) * 7.0
np.testing.assert_allclose(m._gradient_huber(None, None), 1.4)
def test_model_function(self):
m = self.model
m.append(hs.model.components1D.Gaussian())
m[0].A.value = 1.3
m[0].centre.value = 0.003
m[0].sigma.value = 0.1
param = (100, 0.1, 0.2)
np.testing.assert_array_almost_equal(176.03266338, m._model_function(param))
assert m[0].A.value == 100
assert m[0].centre.value == 0.1
assert m[0].sigma.value == 0.2
def test_append_existing_component(self):
g = hs.model.components1D.Gaussian()
m = self.model
m.append(g)
with pytest.raises(ValueError, match="Component already in model"):
m.append(g)
def test_append_component(self):
g = hs.model.components1D.Gaussian()
m = self.model
m.append(g)
assert g in m
assert g.model is m
assert g._axes_manager is m.axes_manager
assert all([hasattr(p, "map") for p in g.parameters])
def test_calculating_convolution_axis(self):
m = self.model
# setup
m.axis.offset = 10
m.axis.size = 10
ll_axis = mock.MagicMock()
ll_axis.size = 7
ll_axis.value2index.return_value = 3
m._low_loss = mock.MagicMock()
m.low_loss.axes_manager.signal_axes = [
ll_axis,
]
# calculation
m.set_convolution_axis()
# tests
np.testing.assert_array_equal(m.convolution_axis, np.arange(7, 23))
np.testing.assert_equal(ll_axis.value2index.call_args[0][0], 0)
def test_access_component_by_name(self):
m = self.model
g1 = hs.model.components1D.Gaussian()
g2 = hs.model.components1D.Gaussian()
g2.name = "test"
m.extend((g1, g2))
assert m["test"] is g2
def test_access_component_by_index(self):
m = self.model
g1 = hs.model.components1D.Gaussian()
g2 = hs.model.components1D.Gaussian()
g2.name = "test"
m.extend((g1, g2))
assert m[1] is g2
def test_component_name_when_append(self):
m = self.model
gs = [
hs.model.components1D.Gaussian(),
hs.model.components1D.Gaussian(),
hs.model.components1D.Gaussian(),
]
m.extend(gs)
assert m["Gaussian"] is gs[0]
assert m["Gaussian_0"] is gs[1]
assert m["Gaussian_1"] is gs[2]
def test_several_component_with_same_name(self):
m = self.model
gs = [
hs.model.components1D.Gaussian(),
hs.model.components1D.Gaussian(),
hs.model.components1D.Gaussian(),
]
m.extend(gs)
m[0]._name = "hs.model.components1D.Gaussian"
m[1]._name = "hs.model.components1D.Gaussian"
m[2]._name = "hs.model.components1D.Gaussian"
with pytest.raises(ValueError, match=r"Component name .* not found in model"):
m["Gaussian"]
def test_no_component_with_that_name(self):
m = self.model
with pytest.raises(ValueError, match=r"Component name .* not found in model"):
m["Voigt"]
def test_component_already_in_model(self):
m = self.model
g1 = hs.model.components1D.Gaussian()
with pytest.raises(ValueError, match="Component already in model"):
m.extend((g1, g1))
def test_remove_component(self):
m = self.model
g1 = hs.model.components1D.Gaussian()
m.append(g1)
m.remove(g1)
assert len(m) == 0
def test_remove_component_by_index(self):
m = self.model
g1 = hs.model.components1D.Gaussian()
m.append(g1)
m.remove(0)
assert len(m) == 0
def test_remove_component_by_name(self):
m = self.model
g1 = hs.model.components1D.Gaussian()
m.append(g1)
m.remove(g1.name)
assert len(m) == 0
def test_delete_component_by_index(self):
m = self.model
g1 = hs.model.components1D.Gaussian()
m.append(g1)
del m[0]
assert g1 not in m
def test_delete_component_by_name(self):
m = self.model
g1 = hs.model.components1D.Gaussian()
m.append(g1)
del m[g1.name]
assert g1 not in m
def test_delete_slice(self):
m = self.model
g1 = hs.model.components1D.Gaussian()
g2 = hs.model.components1D.Gaussian()
g3 = hs.model.components1D.Gaussian()
g3.A.twin = g1.A
g1.sigma.twin = g2.sigma
m.extend([g1, g2, g3])
del m[:2]
assert g1 not in m
assert g2 not in m
assert g3 in m
assert not g1.sigma.twin
assert not g1.A._twins
def test_get_component_by_name(self):
m = self.model
g1 = hs.model.components1D.Gaussian()
g2 = hs.model.components1D.Gaussian()
g2.name = "test"
m.extend((g1, g2))
assert m._get_component("test") is g2
def test_get_component_by_index(self):
m = self.model
g1 = hs.model.components1D.Gaussian()
g2 = hs.model.components1D.Gaussian()
g2.name = "test"
m.extend((g1, g2))
assert m._get_component(1) is g2
def test_get_component_by_component(self):
m = self.model
g1 = hs.model.components1D.Gaussian()
g2 = hs.model.components1D.Gaussian()
g2.name = "test"
m.extend((g1, g2))
assert m._get_component(g2) is g2
def test_get_component_wrong(self):
m = self.model
g1 = hs.model.components1D.Gaussian()
g2 = hs.model.components1D.Gaussian()
g2.name = "test"
m.extend((g1, g2))
with pytest.raises(ValueError, match="Not a component or component id"):
m._get_component(1.2)
def test_components_class_default(self):
m = self.model
g1 = hs.model.components1D.Gaussian()
m.append(g1)
assert getattr(m.components, g1.name) is g1
def test_components_class_change_name(self):
m = self.model
g1 = hs.model.components1D.Gaussian()
m.append(g1)
g1.name = "test"
assert getattr(m.components, g1.name) is g1
def test_components_class_change_name_del_default(self):
m = self.model
g1 = hs.model.components1D.Gaussian()
m.append(g1)
g1.name = "test"
with pytest.raises(AttributeError, match="object has no attribute 'Gaussian'"):
getattr(m.components, "Gaussian")
def test_components_class_change_invalid_name(self):
m = self.model
g1 = hs.model.components1D.Gaussian()
m.append(g1)
g1.name = "1, Test This!"
assert getattr(m.components, slugify(g1.name, valid_variable_name=True)) is g1
def test_components_class_change_name_del_default2(self):
m = self.model
g1 = hs.model.components1D.Gaussian()
m.append(g1)
invalid_name = "1, Test This!"
g1.name = invalid_name
g1.name = "test"
with pytest.raises(AttributeError, match=r"object has no attribute .*"):
getattr(m.components, slugify(invalid_name))
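    # Note added for clarity (not part of the original test suite):
    # ensure_parameters_in_bounds() is expected to clip the value of every
    # *free* parameter of every *active* component into its [bmin, bmax]
    # interval, which is what the assertions in the next test verify.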
def test_snap_parameter_bounds(self):
m = self.model
g1 = hs.model.components1D.Gaussian()
m.append(g1)
g2 = hs.model.components1D.Gaussian()
m.append(g2)
g3 = hs.model.components1D.Gaussian()
m.append(g3)
g4 = hs.model.components1D.Gaussian()
m.append(g4)
p = hs.model.components1D.Polynomial(3, legacy=False)
m.append(p)
g1.A.value = 3.0
g1.centre.bmin = 300.0
g1.centre.value = 1.0
g1.sigma.bmax = 15.0
g1.sigma.value = 30
g2.A.value = 1
g2.A.bmin = 0.0
g2.A.bmax = 3.0
g2.centre.value = 0
g2.centre.bmin = 1
g2.centre.bmax = 3.0
g2.sigma.value = 4
g2.sigma.bmin = 1
g2.sigma.bmax = 3.0
g3.A.bmin = 0
g3.A.value = -3
g3.A.free = False
g3.centre.value = 15
g3.centre.bmax = 10
g3.centre.free = False
g3.sigma.value = 1
g3.sigma.bmin = 0
g3.sigma.bmax = 0
g4.active = False
g4.A.value = 300
g4.A.bmin = 500
g4.centre.value = 0
g4.centre.bmax = -1
g4.sigma.value = 1
g4.sigma.bmin = 10
p.a0.value = 1
p.a1.value = 2
p.a2.value = 3
p.a3.value = 4
p.a0.bmin = 2
p.a1.bmin = 2
p.a2.bmin = 2
p.a3.bmin = 2
p.a0.bmax = 3
p.a1.bmax = 3
p.a2.bmax = 3
p.a3.bmax = 3
m.ensure_parameters_in_bounds()
np.testing.assert_allclose(g1.A.value, 3.0)
np.testing.assert_allclose(g2.A.value, 1.0)
np.testing.assert_allclose(g3.A.value, -3.0)
np.testing.assert_allclose(g4.A.value, 300.0)
np.testing.assert_allclose(g1.centre.value, 300.0)
np.testing.assert_allclose(g2.centre.value, 1.0)
np.testing.assert_allclose(g3.centre.value, 15.0)
np.testing.assert_allclose(g4.centre.value, 0)
np.testing.assert_allclose(g1.sigma.value, 15.0)
np.testing.assert_allclose(g2.sigma.value, 3.0)
np.testing.assert_allclose(g3.sigma.value, 0.0)
np.testing.assert_allclose(g4.sigma.value, 1)
np.testing.assert_almost_equal(p.a0.value, 2)
np.testing.assert_almost_equal(p.a1.value, 2)
np.testing.assert_almost_equal(p.a2.value, 3)
np.testing.assert_almost_equal(p.a3.value, 3)
class TestModelPrintCurrentValues:
def setup_method(self, method):
np.random.seed(1)
s = hs.signals.Signal1D(np.arange(10, 100, 0.1))
s.axes_manager[0].scale = 0.1
s.axes_manager[0].offset = 10
m = s.create_model()
with ignore_warning(message="The API of the `Polynomial` component"):
m.append(hs.model.components1D.Polynomial(1))
m.append(hs.model.components1D.Offset())
self.s = s
self.m = m
@pytest.mark.parametrize("only_free", [True, False])
@pytest.mark.parametrize("skip_multi", [True, False])
def test_print_current_values(self, only_free, skip_multi):
self.m.print_current_values(only_free, skip_multi)
def test_print_current_values_component_list(self):
self.m.print_current_values(component_list=list(self.m))
class TestStoreCurrentValues:
def setup_method(self, method):
self.m = hs.signals.Signal1D(np.arange(10)).create_model()
self.o = hs.model.components1D.Offset()
self.m.append(self.o)
def test_active(self):
self.o.offset.value = 2
self.o.offset.std = 3
self.m.store_current_values()
assert self.o.offset.map["values"][0] == 2
assert self.o.offset.map["is_set"][0]
def test_not_active(self):
self.o.active = False
self.o.offset.value = 2
self.o.offset.std = 3
self.m.store_current_values()
assert self.o.offset.map["values"][0] != 2
class TestSetCurrentValuesTo:
def setup_method(self, method):
self.m = hs.signals.Signal1D(np.arange(10).reshape(2, 5)).create_model()
self.comps = [hs.model.components1D.Offset(), hs.model.components1D.Offset()]
self.m.extend(self.comps)
def test_set_all(self):
for c in self.comps:
c.offset.value = 2
self.m.assign_current_values_to_all()
assert (self.comps[0].offset.map["values"] == 2).all()
assert (self.comps[1].offset.map["values"] == 2).all()
def test_set_1(self):
self.comps[1].offset.value = 2
self.m.assign_current_values_to_all([self.comps[1]])
assert (self.comps[0].offset.map["values"] != 2).all()
assert (self.comps[1].offset.map["values"] == 2).all()
def test_fetch_values_from_arrays():
m = hs.signals.Signal1D(np.arange(10)).create_model()
gaus = hs.model.components1D.Gaussian(A=100, sigma=10, centre=3)
m.append(gaus)
values = np.array([1.2, 3.4, 5.6])
stds = values - 1
m.fetch_values_from_array(values, array_std=stds)
parameters = sorted(gaus.free_parameters, key=lambda x: x.name)
for v, s, p in zip(values, stds, parameters):
assert p.value == v
assert p.std == s
class TestAsSignal:
def setup_method(self, method):
self.m = hs.signals.Signal1D(np.arange(20).reshape(2, 2, 5)).create_model()
self.comps = [hs.model.components1D.Offset(), hs.model.components1D.Offset()]
self.m.extend(self.comps)
for c in self.comps:
c.offset.value = 2
self.m.assign_current_values_to_all()
@pytest.mark.parallel
def test_threaded_identical(self):
# all components
s = self.m.as_signal(parallel=True)
s1 = self.m.as_signal(parallel=False)
np.testing.assert_allclose(s1.data, s.data)
# more complicated
self.m[0].active_is_multidimensional = True
self.m[0]._active_array[0] = False
for component in [0, 1]:
s = self.m.as_signal(component_list=[component], parallel=True)
s1 = self.m.as_signal(component_list=[component], parallel=False)
np.testing.assert_allclose(s1.data, s.data)
@pytest.mark.parametrize("parallel", [True, False])
def test_all_components_simple(self, parallel):
s = self.m.as_signal(parallel=parallel)
assert np.all(s.data == 4.0)
@pytest.mark.parametrize("parallel", [True, False])
def test_one_component_simple(self, parallel):
s = self.m.as_signal(component_list=[0], parallel=parallel)
assert np.all(s.data == 2.0)
assert self.m[1].active
@pytest.mark.parametrize("parallel", [True, False])
def test_all_components_multidim(self, parallel):
self.m[0].active_is_multidimensional = True
s = self.m.as_signal(parallel=parallel)
assert np.all(s.data == 4.0)
self.m[0]._active_array[0] = False
s = self.m.as_signal(parallel=parallel)
np.testing.assert_array_equal(
s.data, np.array([np.ones((2, 5)) * 2, np.ones((2, 5)) * 4])
)
assert self.m[0].active_is_multidimensional
@pytest.mark.parametrize("parallel", [True, False])
def test_one_component_multidim(self, parallel):
self.m[0].active_is_multidimensional = True
s = self.m.as_signal(component_list=[0], parallel=parallel)
assert np.all(s.data == 2.0)
assert self.m[1].active
assert not self.m[1].active_is_multidimensional
s = self.m.as_signal(component_list=[1], parallel=parallel)
np.testing.assert_equal(s.data, 2.0)
assert self.m[0].active_is_multidimensional
self.m[0]._active_array[0] = False
s = self.m.as_signal(component_list=[1], parallel=parallel)
assert np.all(s.data == 2.0)
s = self.m.as_signal(component_list=[0], parallel=parallel)
np.testing.assert_array_equal(
s.data, np.array([np.zeros((2, 5)), np.ones((2, 5)) * 2])
)
def test_as_signal_parallel():
np.random.seed(1)
s = hs.signals.Signal1D(np.random.random((50, 10)))
m = s.create_model()
m.append(hs.model.components1D.PowerLaw())
m.set_signal_range(2, 5)
# HyperSpy 2.0: remove setting iterpath='serpentine'
m.multifit(iterpath="serpentine")
s1 = m.as_signal(out_of_range_to_nan=True, parallel=True)
s2 = m.as_signal(out_of_range_to_nan=True, parallel=True)
np.testing.assert_allclose(s1, s2)
@lazifyTestClass
class TestCreateModel:
def setup_method(self, method):
self.s = hs.signals.Signal1D(np.asarray([0]))
self.im = hs.signals.Signal2D(np.ones([1, 1]))
def test_create_model(self):
from hyperspy.models.model1d import Model1D
from hyperspy.models.model2d import Model2D
assert isinstance(self.s.create_model(), Model1D)
assert isinstance(self.im.create_model(), Model2D)
class TestAdjustPosition:
def setup_method(self, method):
self.s = hs.signals.Signal1D(np.random.rand(10, 10, 20))
self.m = self.s.create_model()
def test_enable_adjust_position(self):
self.m.append(hs.model.components1D.Gaussian())
self.m.enable_adjust_position()
assert len(self.m._position_widgets) == 1
        # Check that both the line and the label widgets were added
assert len(list(self.m._position_widgets.values())[0]) == 2
def test_disable_adjust_position(self):
self.m.append(hs.model.components1D.Gaussian())
self.m.enable_adjust_position()
self.m.disable_adjust_position()
assert len(self.m._position_widgets) == 0
def test_enable_all(self):
self.m.append(hs.model.components1D.Gaussian())
self.m.enable_adjust_position()
self.m.append(hs.model.components1D.Gaussian())
assert len(self.m._position_widgets) == 2
def test_enable_all_zero_start(self):
self.m.enable_adjust_position()
self.m.append(hs.model.components1D.Gaussian())
assert len(self.m._position_widgets) == 1
def test_manual_close(self):
self.m.append(hs.model.components1D.Gaussian())
self.m.append(hs.model.components1D.Gaussian())
self.m.enable_adjust_position()
list(self.m._position_widgets.values())[0][0].close()
assert len(self.m._position_widgets) == 2
assert len(list(self.m._position_widgets.values())[0]) == 1
list(self.m._position_widgets.values())[0][0].close()
assert len(self.m._position_widgets) == 1
assert len(list(self.m._position_widgets.values())[0]) == 2
self.m.disable_adjust_position()
assert len(self.m._position_widgets) == 0
def test_deprecated_private_functions():
s = hs.signals.Signal1D(np.zeros(1))
m = s.create_model()
with pytest.warns(VisibleDeprecationWarning, match=r".* has been deprecated"):
m.set_boundaries()
with pytest.warns(VisibleDeprecationWarning, match=r".* has been deprecated"):
m.set_mpfit_parameters_info()
| gpl-3.0 | 4,234,241,289,844,927,500 | 32.746988 | 87 | 0.585362 | false |
AlexBaranosky/EmacsV2 | floobits/floo/common/migrations.py | 1 | 3449 | import os
import json
import errno
from collections import defaultdict
try:
from . import shared as G
from . import utils
except (ImportError, ValueError):
import shared as G
import utils
def rename_floobits_dir():
# TODO: one day this can be removed (once all our users have updated)
old_colab_dir = os.path.realpath(os.path.expanduser(os.path.join('~', '.floobits')))
if os.path.isdir(old_colab_dir) and not os.path.exists(G.BASE_DIR):
print('renaming %s to %s' % (old_colab_dir, G.BASE_DIR))
os.rename(old_colab_dir, G.BASE_DIR)
os.symlink(G.BASE_DIR, old_colab_dir)
def get_legacy_projects():
a = ['msgs.floobits.log', 'persistent.json']
owners = os.listdir(G.COLAB_DIR)
floorc_json = defaultdict(defaultdict)
for owner in owners:
if len(owner) > 0 and owner[0] == '.':
continue
if owner in a:
continue
workspaces_path = os.path.join(G.COLAB_DIR, owner)
try:
workspaces = os.listdir(workspaces_path)
except OSError:
continue
for workspace in workspaces:
workspace_path = os.path.join(workspaces_path, workspace)
workspace_path = os.path.realpath(workspace_path)
try:
fd = open(os.path.join(workspace_path, '.floo'), 'rb')
url = json.loads(fd.read())['url']
fd.close()
except Exception:
url = utils.to_workspace_url({
'port': 3448, 'secure': True, 'host': 'floobits.com', 'owner': owner, 'workspace': workspace
})
floorc_json[owner][workspace] = {
'path': workspace_path,
'url': url
}
return floorc_json
def migrate_symlinks():
data = {}
old_path = os.path.join(G.COLAB_DIR, 'persistent.json')
if not os.path.exists(old_path):
return
old_data = utils.get_persistent_data(old_path)
data['workspaces'] = get_legacy_projects()
data['recent_workspaces'] = old_data.get('recent_workspaces')
utils.update_persistent_data(data)
try:
os.unlink(old_path)
os.unlink(os.path.join(G.COLAB_DIR, 'msgs.floobits.log'))
except Exception:
pass
def __load_floorc():
"""try to read settings out of the .floorc file"""
s = {}
try:
fd = open(G.FLOORC_PATH, 'r')
except IOError as e:
if e.errno == errno.ENOENT:
return s
raise
default_settings = fd.read().split('\n')
fd.close()
for setting in default_settings:
# TODO: this is horrible
if len(setting) == 0 or setting[0] == '#':
continue
try:
name, value = setting.split(' ', 1)
        except ValueError:  # unpacking fails with ValueError when the line has no separator
continue
s[name.upper()] = value
return s
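# Illustrative sketch (the values are made up, not from the original source):
# a legacy ~/.floorc line such as "username alice" is parsed by __load_floorc()
# into {'USERNAME': 'alice'}; migrate_floorc() below then nests the
# username/secret/api_key keys under the default host in the new JSON format.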
def migrate_floorc():
s = __load_floorc()
default_host = s.get('DEFAULT_HOST', G.DEFAULT_HOST)
floorc_json = {
'auth': {
default_host: {}
}
}
for k, v in s.items():
k = k.lower()
try:
v = int(v)
except Exception:
pass
if k in ['username', 'secret', 'api_key']:
floorc_json['auth'][default_host][k] = v
else:
floorc_json[k] = v
with open(G.FLOORC_JSON_PATH, 'w') as fd:
fd.write(json.dumps(floorc_json, indent=4, sort_keys=True))
| gpl-3.0 | -8,162,171,141,782,456,000 | 28.228814 | 112 | 0.554364 | false |
sdanzige/cmonkey-python | test/meme430_test.py | 1 | 3547 | """meme430_test.py - integration tests for meme module
This file is part of cMonkey Python. Please see README and LICENSE for
more information and licensing details.
"""
import cmonkey.meme as meme
import cmonkey.motif as motif
import unittest
import cmonkey.util as util
import cmonkey.organism as org
import cmonkey.datamatrix as dm
import os, os.path
import testutil
class FakeMembership:
def num_clusters(self):
return 1
def rows_for_cluster(self, cluster):
return ["VNG0457G", "VNG0715G", "VNG1143G", "VNG1182H", "VNG1190G",
"VNG1408G", "VNG1551G", "VNG1562H", "VNG1698G", "VNG1760G",
"VNG2191H", "VNG2199H", "VNG2344G", "VNG2410G", "VNG2567C"]
class Meme430Test(unittest.TestCase): # pylint: disable-msg=R0904
"""This class tests a Halo setup"""
def setUp(self): # pylint: disable-msg=C0103
if not os.path.exists('out'):
os.mkdir('out')
def test_meme_simple(self):
"""simplest of all: just run meme and parse the output, just tests
if there will be appropriate output for the input"""
meme_suite = meme.MemeSuite430({'MEME': {'max_width': 24, 'background_order': 3,
'use_revcomp': 'True', 'arg_mod': 'zoops'}})
motif_infos, out = meme_suite.meme('testdata/meme_input1.fasta',
'testdata/meme1.bg',
num_motifs=1)
self.assertEquals(1, len(motif_infos))
self.assertEquals(24, motif_infos[0].width)
self.assertEquals(3, motif_infos[0].num_sites)
self.assertEquals(79, motif_infos[0].llr)
self.assertAlmostEquals(1700, motif_infos[0].evalue)
def test_motif_scoring(self):
"""tests the motif scoring in integration"""
search_distances = {'upstream': (-20, 150)}
scan_distances = {'upstream': (-30, 250)}
matrix_factory = dm.DataMatrixFactory([dm.nochange_filter, dm.center_scale_filter])
infile = util.read_dfile('example_data/hal/halo_ratios5.tsv',
has_header=True, quote='\"')
ratio_matrix = matrix_factory.create_from(infile)
organism = testutil.make_halo(search_distances, scan_distances, ratio_matrix)
membership = FakeMembership()
config_params = {'memb.min_cluster_rows_allowed': 3,
'memb.max_cluster_rows_allowed': 70,
'multiprocessing': False,
'num_clusters': 1,
'output_dir': 'out',
'debug': {},
'search_distances': {'upstream': (-20, 150)},
'num_iterations': 2000,
'MEME': {'schedule': lambda i: True,
'version': '4.3.0',
'global_background': False,
'arg_mod': 'zoops',
'nmotifs_rvec': 'c(rep(1, num_iterations/3), rep(2, num_iterations/3))',
'use_revcomp': 'True', 'max_width': 24, 'background_order': 3},
'Motifs': {'schedule': lambda i: True, 'scaling': ('scaling_const', 1.0)}}
func = motif.MemeScoringFunction(organism, membership, ratio_matrix,
config_params=config_params)
iteration_result = { 'iteration': 100 }
matrix = func.compute(iteration_result)
| lgpl-3.0 | -7,187,935,520,110,407,000 | 45.064935 | 106 | 0.548351 | false |
tangentlabs/django-fancypages | fancypages/managers.py | 1 | 2544 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
import django
from django.db import models
from django.utils.translation import get_language
from .queryset import PageQuerySet
class PageManager(models.Manager):
def get_select_related_queryset(self):
"""
Get the base query set that pulls the related ``PageNode`` whenever
the page queryset is used. The reason for this is that the page node
is essential and we don't want to have multiple queries every time.
:rtype: QuerySet
"""
return PageQuerySet(self.model).select_related('node')
def get_queryset(self):
"""
The default queryset ordering the pages by the node paths to make sure
that they are returned in the order they are in the tree.
:rtype: QuerySet
"""
return self.get_select_related_queryset().order_by('node__path')
def get_query_set(self):
"""
        Method for backwards compatibility only. Support for ``get_query_set``
will be dropped in Django 1.8.
"""
return self.get_queryset()
def top_level(self):
"""
Returns only the top level pages based on the depth provided in the
page node.
:rtype: QuerySet
"""
return self.get_queryset().filter(node__depth=1)
def visible(self, **kwargs):
return self.get_select_related_queryset().visible(**kwargs)
def visible_in(self, group):
return self.get_select_related_queryset().visible_in(group=group)
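    # Illustrative usage (the model name "FancyPage" is an assumption, not
    # taken from this file):
    #     FancyPage.objects.top_level()        # root pages, ordered by tree path
    #     FancyPage.objects.visible_in(group)  # pages visible for a given group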
class ContainerManager(models.Manager):
def get_queryset(self):
if django.VERSION[:2] == (1, 5):
return super(ContainerManager, self).get_query_set()
return super(ContainerManager, self).get_queryset()
def get_language_query_set(self, **kwargs):
if 'language_code' not in kwargs:
kwargs['language_code'] = get_language()
return self.get_queryset().filter(**kwargs)
def all(self):
return self.get_language_query_set()
def filter(self, **kwargs):
return self.get_language_query_set(**kwargs)
def create(self, **kwargs):
if 'language_code' not in kwargs:
kwargs['language_code'] = get_language()
return super(ContainerManager, self).create(**kwargs)
def get_or_create(self, **kwargs):
if 'language_code' not in kwargs:
kwargs['language_code'] = get_language()
return self.get_queryset().get_or_create(**kwargs)
| bsd-3-clause | -7,800,725,164,194,478,000 | 30.407407 | 78 | 0.632862 | false |
alex/readthedocs.org | readthedocs/doc_builder/backends/sphinx.py | 1 | 5736 | import os
import shutil
from django.template.loader import render_to_string
from django.template import Template, Context
from django.contrib.auth.models import SiteProfileNotAvailable
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from doc_builder.base import BaseBuilder, restoring_chdir
from projects.utils import safe_write, run
from core.utils import copy_to_app_servers
RTD_CONF_ADDITIONS = """
{% load projects_tags %}
#Add RTD Template Path.
if 'templates_path' in locals():
templates_path.insert(0, '{{ template_path }}')
else:
templates_path = ['{{ template_path }}', 'templates', '_templates', '.templates']
#Add RTD Static Path. Add to the end because it overwrites previous files.
if 'html_static_path' in locals():
html_static_path.append('{{ static_path }}')
else:
html_static_path = ['_static', '{{ static_path }}']
#Add RTD CSS File only if they aren't overriding it already
using_rtd_theme = False
if 'html_theme' in locals():
if html_theme in ['default']:
if not 'html_style' in locals():
html_style = 'rtd.css'
html_theme = 'default'
html_theme_options = {}
using_rtd_theme = True
else:
html_style = 'rtd.css'
html_theme = 'default'
html_theme_options = {}
using_rtd_theme = True
#Add sponsorship and project information to the template context.
context = {
'using_theme': using_rtd_theme,
'current_version': "{{ current_version.slug }}",
'MEDIA_URL': "{{ settings.MEDIA_URL }}",
'versions': [{% for version in versions|sort_version_aware %}
("{{ version.slug }}", "{{ version.get_absolute_url }}"),{% endfor %}
],
'slug': '{{ project.slug }}',
'name': '{{ project.name }}',
'badge_revsys': {{ project.sponsored }},
'analytics_code': '{{ project.analytics_code }}',
}
if 'html_context' in locals():
html_context.update(context)
else:
html_context = context
"""
TEMPLATE_DIR = '%s/readthedocs/templates/sphinx' % settings.SITE_ROOT
STATIC_DIR = '%s/_static' % TEMPLATE_DIR
class Builder(BaseBuilder):
"""
The parent for most sphinx builders.
Also handles the default sphinx output of html.
"""
def _whitelisted(self):
"""Modify the given ``conf.py`` file from a whitelisted user's project.
"""
project = self.version.project
#Open file for appending.
outfile = open(project.conf_file(self.version.slug), 'a')
outfile.write("\n")
rtd_ctx = Context({
'versions': project.active_versions(),
'current_version': self.version,
'project': project,
'settings': settings,
'static_path': STATIC_DIR,
'template_path': TEMPLATE_DIR,
})
rtd_string = Template(RTD_CONF_ADDITIONS).render(rtd_ctx)
outfile.write(rtd_string)
def _sanitize(self):
project = self.version.project
conf_template = render_to_string('sphinx/conf.py.conf',
{'project': project,
'template_dir': TEMPLATE_DIR,
'badge': project.sponsored
})
rtd_ctx = Context({
'versions': project.active_versions(),
'current_version': self.version,
'project': project,
'settings': settings,
'static_path': STATIC_DIR,
'template_path': TEMPLATE_DIR,
})
rtd_string = Template(RTD_CONF_ADDITIONS).render(rtd_ctx)
conf_template = conf_template + "\n" + rtd_string
safe_write(project.conf_file(self.version.slug), conf_template)
def clean(self):
try:
if self.version.project.whitelisted and self.version.project.is_imported:
print "Project whitelisted"
self._whitelisted()
else:
print "Writing conf to disk"
self._sanitize()
except (OSError, SiteProfileNotAvailable, ObjectDoesNotExist):
try:
print "Writing conf to disk on error."
self._sanitize()
except (OSError, IOError):
print "Conf file not found. Error writing to disk."
return ('', 'Conf file not found. Error writing to disk.', -1)
@restoring_chdir
def build(self):
project = self.version.project
os.chdir(project.conf_dir(self.version.slug))
if project.use_virtualenv and project.whitelisted:
build_command = '%s -b html . _build/html' % project.venv_bin(
version=self.version.slug, bin='sphinx-build')
else:
build_command = "sphinx-build -b html . _build/html"
build_results = run(build_command)
if 'no targets are out of date.' in build_results[1]:
self._changed = False
return build_results
def move(self):
project = self.version.project
if project.full_build_path(self.version.slug):
target = project.rtd_build_path(self.version.slug)
if getattr(settings, "MULTIPLE_APP_SERVERS", None):
print "Copying docs to remote server."
copy_to_app_servers(project.full_build_path(self.version.slug), target)
else:
if os.path.exists(target):
shutil.rmtree(target)
print "Copying docs on the local filesystem"
shutil.copytree(project.full_build_path(self.version.slug), target)
else:
print "Not moving docs, because the build dir is unknown."
| mit | -7,620,218,241,863,675,000 | 36.246753 | 87 | 0.588389 | false |
lukeroge/CloudbotX | stratus/loader/pluginloader.py | 1 | 16561 | import asyncio
import enum
import glob
import importlib
import inspect
import logging
import os
import re
import itertools
from stratus.event import Event, HookEvent
logger = logging.getLogger("stratus")
class HookType(enum.Enum):
"""
"""
on_start = 1,
on_stop = 2,
sieve = 3,
event = 4,
regex = 5,
command = 6,
irc_raw = 7,
def find_plugins(plugin_directories):
"""
Args:
        plugin_directories: A list of directory paths or glob patterns to
            search for plugin files.
    Returns:
        An iterator over the paths of the ``*.py`` plugin files found.
"""
for directory_pattern in plugin_directories:
for directory in glob.iglob(directory_pattern):
logger.info("Loading plugins from {}".format(directory))
if not os.path.exists(os.path.join(directory, "__init__.py")):
with open(os.path.join(directory, "__init__.py"), 'w') as file:
file.write('\n') # create blank __init__.py file if none exists
for plugin in glob.iglob(os.path.join(directory, '*.py')):
yield plugin
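# Illustrative sketch (the paths are made up, not from the original source):
# with plugins/admin.py and plugins/fun.py on disk,
# list(find_plugins(["plugins"])) would yield both .py paths, creating an
# empty plugins/__init__.py first if one does not already exist.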
def find_hooks(title, module):
"""
:type title: str
:type module: object
:rtype: dict[HookType, list[Hook]
"""
# set the loaded flag
module._plugins_loaded = True
hooks_dict = dict()
for hook_type in HookType:
hooks_dict[hook_type] = list()
for name, func in module.__dict__.items():
if hasattr(func, "bot_hooks"):
# if it has stratus hook
for hook in func.bot_hooks:
hook_type = hook.type
hook_class = _hook_classes[hook_type]
hooks_dict[hook_type].append(hook_class(title, hook))
# delete the hook to free memory
del func.bot_hooks
return hooks_dict
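# Note (not part of the original source): find_hooks() always returns a dict
# keyed by every HookType, e.g. {HookType.command: [CommandHook(...)], ...};
# hook types the module does not use simply map to empty lists.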
def _prepare_parameters(hook, base_event, hook_event):
"""
Prepares arguments for the given hook
:type hook: stratus.loader.Hook
:type base_event: stratus.event.Event
:type hook_event: stratus.event.HookEvent
:rtype: list
"""
parameters = []
for required_arg in hook.required_args:
if hasattr(base_event, required_arg):
value = getattr(base_event, required_arg)
parameters.append(value)
elif hasattr(hook_event, required_arg):
value = getattr(hook_event, required_arg)
parameters.append(value)
else:
logger.warning("Plugin {} asked for invalid argument '{}', cancelling execution!"
.format(hook.description, required_arg))
logger.debug("Valid arguments are: {}".format(dir(base_event) + dir(hook_event)))
return None
return parameters
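# Illustrative note (the argument names are made up for the example): for a
# hook written as "def mycmd(bot, text): ...", _prepare_parameters() resolves
# "bot" and "text" first on the base event and then on the hook event and
# returns [bot, text]; it returns None if any name cannot be resolved.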
class Loader:
"""
Loader is the core of Stratus plugin loading.
Loader loads Plugins, and adds their Hooks to easy-access dicts/lists.
Each Plugin represents a file, and loads hooks onto itself using find_hooks.
Plugins are the lowest level of abstraction in this class. There are four different plugin types:
- CommandPlugin is for bot commands
- RawPlugin hooks onto irc_raw irc lines
- RegexPlugin loads a regex parameter, and executes on irc lines which match the regex
- SievePlugin is a catch-all sieve, which all other plugins go through before being executed.
:type bot: stratus.engine.Stratus
:type commands: dict[str, CommandHook]
:type raw_triggers: dict[str, list[RawHook]]
:type catch_all_triggers: list[RawHook]
:type event_type_hooks: dict[stratus.event.EventType, list[EventHook]]
:type regex_hooks: list[(re.__Regex, RegexHook)]
:type sieves: list[SieveHook]
"""
def __init__(self, bot):
"""
Creates a new Loader. You generally only need to do this from inside stratus.bot.Stratus
:type bot: stratus.engine.Stratus
"""
self.bot = bot
self.commands = {}
self.raw_triggers = {}
self.catch_all_triggers = []
self.event_type_hooks = {}
self.regex_hooks = []
self.sieves = []
self.shutdown_hooks = []
self._hook_locks = {}
async def load_all(self, plugin_directories):
"""
Load a plugin from each *.py file in the given directory.
:type plugin_directories: collections.Iterable[str]
"""
path_list = find_plugins(plugin_directories)
# Load plugins asynchronously :O
await asyncio.gather(*(self.load_plugin(path) for path in path_list), loop=self.bot.loop)
async def load_plugin(self, path):
"""
Loads a plugin from the given path and plugin object, then registers all hooks from that plugin.
:type path: str
"""
file_path = os.path.abspath(path)
relative_path = os.path.relpath(file_path, os.path.curdir)
module_name = os.path.splitext(relative_path)[0].replace(os.path.sep, '.')
if os.path.altsep:
module_name = module_name.replace(os.path.altsep, '.')
title = module_name
if module_name.startswith('plugins.'): # if it is in the default plugin dir, don't prepend plugins. to title
title = title[len('plugins.'):]
try:
plugin_module = importlib.import_module(module_name)
except Exception:
logger.exception("Error loading {}:".format(file_path))
return
hooks = find_hooks(title, plugin_module)
# proceed to register hooks
# run on_start hooks
on_start_event = Event(bot=self.bot)
for on_start_hook in hooks[HookType.on_start]:
success = await self.launch(on_start_hook, on_start_event)
if not success:
logger.warning("Not registering hooks from plugin {}: on_start hook errored".format(title))
return
# register events
for event_hook in hooks[HookType.event]:
for event_type in event_hook.types:
if event_type in self.event_type_hooks:
self.event_type_hooks[event_type].append(event_hook)
else:
self.event_type_hooks[event_type] = [event_hook]
self._log_hook(event_hook)
# register commands
for command_hook in hooks[HookType.command]:
for alias in command_hook.aliases:
if alias in self.commands:
logger.warning(
"Plugin {} attempted to register command {} which was already registered by {}. "
"Ignoring new assignment.".format(title, alias, self.commands[alias].plugin))
else:
self.commands[alias] = command_hook
self._log_hook(command_hook)
# register raw hooks
for raw_hook in hooks[HookType.irc_raw]:
if raw_hook.is_catch_all():
self.catch_all_triggers.append(raw_hook)
else:
for trigger in raw_hook.triggers:
if trigger in self.raw_triggers:
self.raw_triggers[trigger].append(raw_hook)
else:
self.raw_triggers[trigger] = [raw_hook]
self._log_hook(raw_hook)
# register regex hooks
for regex_hook in hooks[HookType.regex]:
for regex in regex_hook.triggers:
self.regex_hooks.append((regex, regex_hook))
self._log_hook(regex_hook)
# register sieves
for sieve_hook in hooks[HookType.sieve]:
self.sieves.append(sieve_hook)
self._log_hook(sieve_hook)
# register shutdown hooks
for stop_hook in hooks[HookType.on_stop]:
self.shutdown_hooks.append(stop_hook)
self._log_hook(stop_hook)
def _log_hook(self, hook):
"""
Logs registering a given hook
:type hook: Hook
"""
if self.bot.config.get("logging", {}).get("show_plugin_loading", True):
logger.debug("Loaded {}".format(repr(hook)))
async def _execute_hook(self, hook, base_event, hook_event):
"""
Runs the specific hook with the given bot and event.
Returns False if the hook errored, True otherwise.
:type hook: stratus.loader.Hook
:type base_event: stratus.event.Event
:type hook_event: stratus.event.HookEvent
:rtype: bool
"""
parameters = _prepare_parameters(hook, base_event, hook_event)
if parameters is None:
return False
try:
# _internal_run_threaded and _internal_run_coroutine prepare the database, and run the hook.
# _internal_run_* will prepare parameters and the database session, but won't do any error catching.
if hook.threaded:
out = await self.bot.loop.run_in_executor(None, hook.function, *parameters)
else:
out = await hook.function(*parameters)
except Exception:
logger.exception("Error in hook {}".format(hook.description))
base_event.message("Error in plugin '{}'.".format(hook.plugin))
return False
if out is not None:
if isinstance(out, (list, tuple)):
# if there are multiple items in the response, return them on multiple lines
base_event.reply(*out)
else:
base_event.reply(*str(out).split('\n'))
return True
async def _sieve(self, sieve, event, hook_event):
"""
:type sieve: stratus.loader.Hook
:type event: stratus.event.Event
:type hook_event: stratus.event.HookEvent
:rtype: stratus.event.Event
"""
try:
if sieve.threaded:
result = await self.bot.loop.run_in_executor(None, sieve.function, event, hook_event)
else:
result = await sieve.function(event, hook_event)
except Exception:
logger.exception("Error running sieve {} on {}:".format(sieve.description, hook_event.hook.description))
return None
else:
return result
async def launch(self, hook, base_event, hevent=None):
"""
Dispatch a given event to a given hook using a given bot object.
Returns False if the hook didn't run successfully, and True if it ran successfully.
:type base_event: stratus.event.Event
:type hevent: stratus.event.HookEvent | stratus.event.CommandHookEvent
:type hook: stratus.loader.Hook | stratus.loader.CommandHook
:rtype: bool
"""
if hevent is None:
hevent = HookEvent(base_event=base_event, hook=hook)
if hook.type not in (HookType.on_start, HookType.on_stop): # we don't need sieves on on_start or on_stop hooks.
for sieve in self.bot.loader.sieves:
base_event = await self._sieve(sieve, base_event, hevent)
if base_event is None:
return False
if hook.type is HookType.command and hook.auto_help and not hevent.text and hook.doc is not None:
hevent.notice_doc()
return False
if hook.single_thread:
            # There should only be one instance of this hook running at a time, so let's use a lock for it.
key = (hook.plugin, hook.function_name)
if key not in self._hook_locks:
self._hook_locks[key] = asyncio.Lock(loop=self.bot.loop)
# Run the plugin with the message, and wait for it to finish
with (await self._hook_locks[key]):
result = await self._execute_hook(hook, base_event, hevent)
else:
# Run the plugin with the message, and wait for it to finish
result = await self._execute_hook(hook, base_event, hevent)
# Return the result
return result
async def run_shutdown_hooks(self):
shutdown_event = Event(bot=self.bot)
tasks = (self.launch(hook, shutdown_event) for hook in self.shutdown_hooks)
await asyncio.gather(*tasks, loop=self.bot.loop)
class Hook:
"""
Each hook is specific to one function. This class is never used by itself, rather extended.
:type type: HookType
:type plugin: str
:type function: callable
:type function_name: str
:type required_args: list[str]
:type threaded: bool
:type run_first: bool
:type permissions: list[str]
:type single_thread: bool
"""
type = None # to be assigned in subclasses
def __init__(self, plugin, hook_decorator):
"""
:type plugin: str
"""
self.plugin = plugin
self.function = hook_decorator.function
self.function_name = self.function.__name__
self.required_args = inspect.getargspec(self.function)[0]
if self.required_args is None:
self.required_args = []
if asyncio.iscoroutine(self.function) or asyncio.iscoroutinefunction(self.function):
self.threaded = False
else:
self.threaded = True
self.permissions = hook_decorator.kwargs.pop("permissions", [])
self.single_thread = hook_decorator.kwargs.pop("single_instance", False)
self.run_first = hook_decorator.kwargs.pop("run_first", False)
if hook_decorator.kwargs:
# we should have popped all the args, so warn if there are any left
logger.warning("Ignoring extra args {} from {}".format(hook_decorator.kwargs, self.description))
@property
def description(self):
return "{}:{}".format(self.plugin, self.function_name)
def __repr__(self, **kwargs):
result = "type: {}, plugin: {}, permissions: {}, run_first: {}, single_instance: {}, threaded: {}".format(
self.type.name, self.plugin, self.permissions, self.run_first, self.single_thread, self.threaded)
if kwargs:
result = ", ".join(itertools.chain(("{}: {}".format(*item) for item in kwargs.items()), (result,)))
return "{}[{}]".format(type(self).__name__, result)
class OnStartHook(Hook):
type = HookType.on_start
class OnStopHook(Hook):
type = HookType.on_stop
class SieveHook(Hook):
type = HookType.sieve
class EventHook(Hook):
"""
:type types: set[stratus.event.EventType]
"""
type = HookType.event
def __init__(self, plugin, decorator):
"""
:type plugin: Plugin
:type decorator: stratus.hook.EventDecorator
"""
self.types = decorator.triggers
super().__init__(plugin, decorator)
class RegexHook(Hook):
"""
:type triggers: set[re.__Regex]
"""
type = HookType.regex
def __init__(self, plugin, decorator):
"""
:type plugin: Plugin
:type decorator: stratus.hook.RegexDecorator
"""
self.triggers = decorator.triggers
super().__init__(plugin, decorator)
def __repr__(self):
return super().__repr__(triggers=", ".join(regex.pattern for regex in self.triggers))
class CommandHook(Hook):
"""
:type name: str
:type aliases: list[str]
:type doc: str
:type auto_help: bool
"""
type = HookType.command
def __init__(self, plugin, decorator):
"""
:type plugin: str
:type decorator: stratus.hook.CommandDecorator
"""
self.auto_help = decorator.kwargs.pop("autohelp", True)
self.name = decorator.main_alias
self.aliases = list(decorator.triggers) # turn the set into a list
self.aliases.remove(self.name)
self.aliases.insert(0, self.name) # make sure the name, or 'main alias' is in position 0
self.doc = decorator.doc
super().__init__(plugin, decorator)
def __repr__(self):
return super().__repr__(name=self.name, aliases=self.aliases[1:])
class RawHook(Hook):
"""
:type triggers: set[str]
"""
type = HookType.irc_raw
def __init__(self, plugin, decorator):
"""
:type plugin: Plugin
:type decorator: stratus.hook.IrcRawDecorator
"""
self.triggers = decorator.triggers
super().__init__(plugin, decorator)
def is_catch_all(self):
return "*" in self.triggers
def __repr__(self):
return super().__repr__(triggers=self.triggers)
_hook_classes = {
HookType.on_start: OnStartHook,
HookType.on_stop: OnStopHook,
HookType.sieve: SieveHook,
HookType.event: EventHook,
HookType.regex: RegexHook,
HookType.command: CommandHook,
HookType.irc_raw: RawHook,
}
| gpl-3.0 | 3,569,536,791,953,528,300 | 32.122 | 120 | 0.596522 | false |
ddurieux/alignak | alignak/property.py | 1 | 11612 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- mode: python ; coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# Hartmut Goebel, [email protected]
# Guillaume Bour, [email protected]
# Frédéric Vachon, [email protected]
# aviau, [email protected]
# Nicolas Dupeux, [email protected]
# Grégory Starck, [email protected]
# Gerhard Lausser, [email protected]
# Sebastien Coavoux, [email protected]
# Christophe Simon, [email protected]
# Jean Gabes, [email protected]
# Romain Forlot, [email protected]
# Christophe SIMON, [email protected]
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
import re
from alignak.util import to_float, to_split, to_char, to_int, unique_value, list_split
import logging
__all__ = ['UnusedProp', 'BoolProp', 'IntegerProp', 'FloatProp',
'CharProp', 'StringProp', 'ListProp',
'FULL_STATUS', 'CHECK_RESULT']
# Suggestion
# Is this useful? see above
__author__ = "Hartmut Goebel <[email protected]>"
__copyright__ = "Copyright 2010-2011 by Hartmut Goebel <[email protected]>"
__licence__ = "GNU Affero General Public License version 3 (AGPL v3)"
FULL_STATUS = 'full_status'
CHECK_RESULT = 'check_result'
none_object = object()
class Property(object):
"""Baseclass of all properties.
Same semantic for all subclasses (except UnusedProp): The property
is required if, and only if, the default value is `None`.
"""
def __init__(self, default=none_object, class_inherit=None,
unmanaged=False, help='', no_slots=False,
fill_brok=None, conf_send_preparation=None,
brok_transformation=None, retention=False,
retention_preparation=None, to_send=False,
override=False, managed=True, split_on_coma=True, merging='uniq'):
"""
`default`: default value to be used if this property is not set.
If default is None, this property is required.
`class_inherit`: List of 2-tuples, (Service, 'blabla'): must
set this property to the Service class with name
blabla. if (Service, None): must set this property
to the Service class with same name
`unmanaged`: ....
`help`: usage text
`no_slots`: do not take this property for __slots__
`fill_brok`: if set, send to broker. There are two categories:
FULL_STATUS for initial and update status,
CHECK_RESULT for check results
`retention`: if set, we will save this property in the retention files
        `retention_preparation`: if set, the value will be passed through this
                                 function before being saved to the retention data
        `split_on_coma`: indicates whether a list property value should be
                         split on the comma delimiter (set it to False when the
                         values contain commas that we want to keep).
Only for the initial call:
        conf_send_preparation: if set, the property will be passed through this
                               function. It's used to 'flatten' some dangerous
                               properties like realms that are too 'linked' to
                               be sent as-is.
brok_transformation: if set, will call the function with the
value of the property when flattening
data is necessary (like realm_name instead of
the realm object).
override: for scheduler, if the property must override the
value of the configuration we send it
managed: property that is managed in Nagios but not in Alignak
        merging: strategy for merging duplicate property definitions: either
                 keep only one value ('uniq', the default) or link the values
                 with a comma
"""
self.default = default
self.has_default = (default is not none_object)
self.required = not self.has_default
self.class_inherit = class_inherit or []
self.help = help or ''
self.unmanaged = unmanaged
self.no_slots = no_slots
self.fill_brok = fill_brok or []
self.conf_send_preparation = conf_send_preparation
self.brok_transformation = brok_transformation
self.retention = retention
self.retention_preparation = retention_preparation
self.to_send = to_send
self.override = override
self.managed = managed
self.unused = False
self.merging = merging
self.split_on_coma = split_on_coma
def pythonize(self, val):
return val
class UnusedProp(Property):
"""A unused Property. These are typically used by Nagios but
no longer useful/used by Alignak.
This is just to warn the user that the option he uses is no more used
in Alignak.
"""
# Since this property is not used, there is no use for other
# parameters than 'text'.
    # 'text': some usage text; if present, it is printed to explain
    # why the option is no longer useful
def __init__(self, text=None):
if text is None:
text = ("This parameter is no longer useful in the "
"Alignak architecture.")
self.text = text
self.has_default = False
self.class_inherit = []
self.unused = True
self.managed = True
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
class BoolProp(Property):
"""A Boolean Property.
Boolean values are currently case insensitively defined as 0,
false, no, off for False, and 1, true, yes, on for True).
"""
@staticmethod
def pythonize(val):
if isinstance(val, bool):
return val
val = unique_value(val).lower()
if val in _boolean_states.keys():
return _boolean_states[val]
else:
raise PythonizeError("Cannot convert '%s' to a boolean value" % val)
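    # Illustrative example (not part of the original file):
    #     BoolProp.pythonize("Yes")  -> True
    #     BoolProp.pythonize("off")  -> False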
class IntegerProp(Property):
"""Please Add a Docstring to describe the class here"""
def pythonize(self, val):
val = unique_value(val)
return to_int(val)
class FloatProp(Property):
"""Please Add a Docstring to describe the class here"""
def pythonize(self, val):
val = unique_value(val)
return to_float(val)
class CharProp(Property):
"""Please Add a Docstring to describe the class here"""
def pythonize(self, val):
val = unique_value(val)
return to_char(val)
class StringProp(Property):
"""Please Add a Docstring to describe the class here"""
def pythonize(self, val):
val = unique_value(val)
return val
class PathProp(StringProp):
""" A string property representing a "running" (== VAR) file path """
class ConfigPathProp(StringProp):
""" A string property representing a config file path """
class ListProp(Property):
"""Please Add a Docstring to describe the class here"""
def pythonize(self, val):
if isinstance(val, list):
return [s.strip() for s in list_split(val, self.split_on_coma)]
else:
return [s.strip() for s in to_split(val, self.split_on_coma)]
class LogLevelProp(StringProp):
""" A string property representing a logging level """
def pythonize(self, val):
val = unique_value(val)
return logging.getLevelName(val)
class DictProp(Property):
def __init__(self, elts_prop=None, *args, **kwargs):
"""Dictionary of values.
If elts_prop is not None, must be a Property subclass
        All dict values will be cast as elts_prop values when pythonized
elts_prop = Property of dict members
"""
super(DictProp, self).__init__(*args, **kwargs)
if elts_prop is not None and not issubclass(elts_prop, Property):
raise TypeError("DictProp constructor only accept Property"
"sub-classes as elts_prop parameter")
if elts_prop is not None:
self.elts_prop = elts_prop()
def pythonize(self, val):
val = unique_value(val)
def split(kv):
m = re.match("^\s*([^\s]+)\s*=\s*([^\s]+)\s*$", kv)
if m is None:
raise ValueError
return (
m.group(1),
# >2.4 only. we keep it for later. m.group(2) if self.elts_prop is None
# else self.elts_prop.pythonize(m.group(2))
(self.elts_prop.pythonize(m.group(2)), m.group(2))[self.elts_prop is None]
)
if val is None:
return(dict())
if self.elts_prop is None:
return val
# val is in the form "key1=addr:[port],key2=addr:[port],..."
print ">>>", dict([split(kv) for kv in to_split(val)])
return dict([split(kv) for kv in to_split(val)])
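        # Illustrative example (not part of the original file): for
        # DictProp(elts_prop=StringProp), pythonize("key1=v1,key2=v2") returns
        # {'key1': 'v1', 'key2': 'v2'}; with elts_prop=None the raw string is
        # returned unchanged.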
class AddrProp(Property):
"""Address property (host + port)"""
def pythonize(self, val):
"""
i.e: val = "192.168.10.24:445"
NOTE: port is optional
"""
val = unique_value(val)
m = re.match("^([^:]*)(?::(\d+))?$", val)
if m is None:
raise ValueError
addr = {'address': m.group(1)}
if m.group(2) is not None:
addr['port'] = int(m.group(2))
return addr
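    # Illustrative example (not part of the original file):
    #     AddrProp().pythonize("192.168.10.24:445")
    #     -> {'address': '192.168.10.24', 'port': 445}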
class ToGuessProp(Property):
"""Unknown property encountered while parsing"""
@staticmethod
def pythonize(val):
if isinstance(val, list) and len(set(val)) == 1:
# If we have a list with a unique value just use it
return val[0]
else:
            # Well, can't choose to remove something.
return val
class IntListProp(ListProp):
"""Integer List property"""
def pythonize(self, val):
val = super(IntListProp, self).pythonize(val)
try:
return [int(e) for e in val]
except ValueError as value_except:
raise PythonizeError(str(value_except))
class PythonizeError(Exception):
pass
| agpl-3.0 | 6,923,826,551,157,229,000 | 32.359195 | 90 | 0.617797 | false |
peter1010/symbols | test/test_x86_decode.py | 1 | 11213 | #!/usr/bin/env python3
import unittest
import sys
import subprocess
import random
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
sys.path.append("..")
from symbols import __main__ as symbols
from symbols import x86_decode as decode
from symbols import errors
class Arch:
def __init__(self):
self.mode = 32
def assemble(arch, lines_of_code, lines_of_mcode=[]):
machine_code = []
num_of = len(lines_of_code)
to_assemble = "\n".join(lines_of_code) + "\n"
to_assemble = to_assemble.encode("ascii")
if arch.mode == 32:
args = ["as", "-al", "--32", "--listing-lhs-width=20", "--"]
else:
args = ["as", "-al", "--64", "--listing-lhs-width=20", "--"]
proc = subprocess.Popen(args,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, stderr = proc.communicate(to_assemble)
if stderr:
for line in stderr.splitlines():
tokens = line.split(b":", maxsplit=4)
if tokens[2].endswith(b"Error"):
idx = int(tokens[1])
print(lines_of_code[idx-1], line, lines_of_mcode[idx-1])
for line in stdout.splitlines():
line = line.strip()
if not line:
continue
try:
line_num, rest = line.split(maxsplit=1)
except ValueError:
continue
if line_num == b'GAS':
continue
line_num = int(line_num)
try:
mcode, assembly = rest.split(b"\t")
except ValueError:
assert False
addr, mcode = mcode.split(b" ", maxsplit=1)
mcode = b"".join(mcode.split())
logger.debug("%i: %s %s", line_num, str(mcode), assembly)
mcode = [mcode[i:i+2] for i in range(0, len(mcode), 2)]
mcode = bytes([int(i, 16) for i in mcode])
machine_code.append(mcode)
assert num_of == len(machine_code)
return machine_code
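# Illustrative note (the instruction below is an example, not from the original
# source): assemble() shells out to GNU "as", parses the generated listing and
# returns one bytes object of machine code per line of assembly, e.g.
#     assemble(arch, ["add %al, (%eax)"])  ->  [b"\x00\x00"]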
class X86DecodeTest(unittest.TestCase):
def setUp(self):
pass
def chk_disassembler(self, arch, lines_of_code, lines_of_mcode):
if not isinstance(lines_of_code, list):
lines_of_code = [lines_of_code]
lines_of_mcode = [lines_of_mcode]
lines_of_mcode2 = assemble(arch, lines_of_code, lines_of_mcode)
for idx in range(len(lines_of_mcode)):
instruction = lines_of_code[idx]
mcode = lines_of_mcode[idx]
mcode2 = lines_of_mcode2[idx]
            if mcode != mcode2:  # some assembly forms assemble to different machine code
instruction2, idx = decode.decode(arch, mcode2)
self.assertEqual(instruction2, instruction)
else:
self.assertEqual(mcode, mcode2)
def chk_disassembler2(self, arch, lines_of_code, lines_of_mcode):
if not isinstance(lines_of_code, list):
lines_of_code = [lines_of_code]
lines_of_mcode = [lines_of_mcode]
lines_of_mcode2 = assemble(arch, lines_of_code, lines_of_mcode)
for idx in range(len(lines_of_mcode)):
instruction = lines_of_code[idx]
mcode = lines_of_mcode[idx]
mcode2 = lines_of_mcode2[idx]
self.assertEqual(mcode, mcode2)
def testAdd00_32bit_fixed_sib(self):
arch = Arch()
lines_of_code = []
lines_of_mcode = []
for i in range(256):
mcode = bytes([0, i, 1, 2, 3, 4, 5, 6, 7, 8])
instruction, idx = decode.decode(arch, mcode)
lines_of_code.append(instruction)
lines_of_mcode.append(mcode[:idx])
self.chk_disassembler(arch, lines_of_code, lines_of_mcode)
def testAdd00_32bit_var_sib(self):
arch = Arch()
lines_of_code = []
lines_of_mcode = []
for j in (0x0C, 0x4c, 0x8c):
for i in range(256):
mcode = bytes([0, j, i, 1, 2, 3, 4, 5, 6, 7])
instruction, idx = decode.decode(arch, mcode)
lines_of_code.append(instruction)
lines_of_mcode.append(mcode[:idx])
self.chk_disassembler(arch, lines_of_code, lines_of_mcode)
def testAdd00_16bit_addr(self):
arch = Arch()
lines_of_code = []
lines_of_mcode = []
for i in range(256):
mcode = bytes([0x67, 0, i, 1, 2, 3, 4, 5, 6, 7])
instruction, idx = decode.decode(arch, mcode)
lines_of_code.append(instruction)
lines_of_mcode.append(mcode[:idx])
self.chk_disassembler(arch, lines_of_code, lines_of_mcode)
def testAdd01_32bit_fixed_sib(self):
arch = Arch()
lines_of_code = []
lines_of_mcode = []
for i in range(256):
mcode = bytes([1, i, 2, 3, 4, 5, 6, 7, 8])
instruction, idx = decode.decode(arch, mcode)
lines_of_code.append(instruction)
lines_of_mcode.append(mcode[:idx])
self.chk_disassembler(arch, lines_of_code, lines_of_mcode)
def testAdd01_32bit_var_sib(self):
arch = Arch()
lines_of_code = []
lines_of_mcode = []
for j in (0x0C, 0x4c, 0x8c):
for i in range(256):
mcode = bytes([1, j, i, 1, 2, 3, 4, 5, 6, 7])
instruction, idx = decode.decode(arch, mcode)
lines_of_code.append(instruction)
lines_of_mcode.append(mcode[:idx])
self.chk_disassembler(arch, lines_of_code, lines_of_mcode)
def testAdd01_16bit_addr(self):
arch = Arch()
lines_of_code = []
lines_of_mcode = []
for i in range(256):
mcode = bytes([0x67, 1, i, 1, 2, 3, 4, 5, 6, 7])
instruction, idx = decode.decode(arch, mcode)
lines_of_code.append(instruction)
lines_of_mcode.append(mcode[:idx])
self.chk_disassembler(arch, lines_of_code, lines_of_mcode)
def testAdd01_16bit_op(self):
arch = Arch()
lines_of_code = []
lines_of_mcode = []
for i in range(256):
mcode = bytes([0x66, 1, i, 1, 2, 3, 4, 5, 6, 7])
instruction, idx = decode.decode(arch, mcode)
lines_of_code.append(instruction)
lines_of_mcode.append(mcode[:idx])
self.chk_disassembler(arch, lines_of_code, lines_of_mcode)
def testAdd01_16bit_addr_and_op(self):
arch = Arch()
lines_of_code = []
lines_of_mcode = []
for i in range(256):
mcode = bytes([0x67, 0x66, 1, i, 1, 2, 3, 4, 5, 6, 7])
instruction, idx = decode.decode(arch, mcode)
lines_of_code.append(instruction)
lines_of_mcode.append(mcode[:idx])
self.chk_disassembler(arch, lines_of_code, lines_of_mcode)
def testAdd02(self):
"""00 and 02 are same except ops are swapped"""
arch = Arch()
data = b"\x02\x05\x0a\x00\x00\x00"
instruction, idx = decode.decode(arch, data)
self.chk_disassembler(arch, instruction, data)
def testAdd03(self):
"""01 and 03 are same except ops are swapped"""
arch = Arch()
data = b"\x66\x03\x05\x0a\x00\x00\x00"
instruction, idx = decode.decode(arch, data)
self.chk_disassembler(arch, instruction, data)
def testAdd04(self):
arch = Arch()
data = b"\x04\x05"
instruction, idx = decode.decode(arch, data)
self.chk_disassembler2(arch, instruction, data)
def testAdd05(self):
arch = Arch()
lines_of_mcode = [
b"\x05\x01\x02\x03\x04",
b"\x66\x05\x01\x02"
]
lines_of_code = [decode.decode(arch, data)[0] for data in lines_of_mcode]
self.chk_disassembler2(arch, lines_of_code, lines_of_mcode)
def testOneByteCode(self):
arch = Arch()
lines_of_mcode = []
lines_of_code = []
for i in (
0x06, 0x07, 0x0E, 0x16, 0x17, 0x1e, 0x1f, 0x27, 0x2f,
0x37, 0x3F, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46,
0x47, 0x48, 0x49, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55,
0x56, 0x57, 0x58, 0x59, 0x90
):
mcode = bytes([i,])
lines_of_mcode.append(mcode)
lines_of_code.append(decode.decode(arch, mcode)[0])
self.chk_disassembler2(arch, lines_of_code, lines_of_mcode)
def test06toFF(self):
arch = Arch()
lines_of_mcode = [
b"\x08\xf1",
b"\x09\xf1",
b"\x0A\xf1",
b"\x0B\xf1",
b"\x0C\xf1",
b"\x0D\x01\x02\x03\x04",
b"\x10\xf1",
b"\x11\xf1",
b"\x12\xf1",
b"\x13\xf1",
b"\x14\xf1",
b"\x15\x01\x02\x03\x04",
b"\x18\xf1",
b"\x19\xf1",
b"\x1A\xf1",
b"\x1B\xf1",
b"\x1C\xf1",
b"\x1D\x01\x02\x03\x04",
b"\x20\xf1",
b"\x21\xf1",
b"\x22\xf1",
b"\x23\xf1",
b"\x24\xf1",
b"\x25\x01\x02\x03\x04",
b"\x28\xf1",
b"\x29\xf1",
b"\x2A\xf1",
b"\x2B\xf1",
b"\x2C\xf1",
b"\x2D\x01\x02\x03\x04",
b"\x30\xf1",
b"\x31\xf1",
b"\x32\xf1",
b"\x33\xf1",
b"\x34\xf1",
b"\x35\x01\x02\x03\x04",
b"\x38\xf1",
b"\x39\xf1",
b"\x3A\xf1",
b"\x3B\xf1",
b"\x3C\xf1",
b"\x3D\x01\x02\x03\x04",
b"\x66\x40",
b"\x60",
b"\x66\x60",
b"\x61",
b"\x66\x61",
b"\x62\x01",
b"\x63\x01",
]
lines_of_code = [decode.decode(arch, data)[0] for data in lines_of_mcode]
self.chk_disassembler(arch, lines_of_code, lines_of_mcode)
@unittest.skip("")
def test_fuzzy(self):
arch = Arch()
for i in range(1000):
data = bytes([random.randint(0, 255) for i in range(16)])
try:
instruction, idx = decode.decode(Arch(), data)
except ValueError:
continue
except TypeError:
continue
except AttributeError:
continue
except IndexError:
continue
print(instruction)
data2 = assemble(arch, [instruction], [data])[0]
if data[:idx] != data2:
instruction2, idx = decode.decode(Arch(), data2)
self.assertEqual(instruction2, instruction)
else:
self.assertEqual(data[:idx], data2)
def main():
symbols.config_logging(also_to_console=False)
unittest.main()
if __name__ == "__main__":
main()
# 4 0007 6700470A add %al, 10(%bx)
# 5 000b 00430A add %al, 10(%ebx)
# 6 000e 67660147 add %ax, 10(%bx)
# 6 0A
# 7 0013 6601430A add %ax, 10(%ebx)
# 8 0017 6701470A add %eax, 10(%bx)
# 9 001b 01430A add %eax, 10(%ebx)
# 10 001e 02050A00 add (10),%al
# 10 0000
# 11 0024 6603050A add (10),%ax
# 11 000000
# 12 002b 03050A00 add (10),%eax
# 12 0000
| gpl-3.0 | 7,103,103,392,059,291,000 | 31.78655 | 81 | 0.522786 | false |
mola/qgis | python/plugins/GdalTools/tools/doTranslate.py | 1 | 12011 | # -*- coding: utf-8 -*-
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.gui import *
from ui_widgetTranslate import Ui_GdalToolsWidget as Ui_Widget
from widgetBatchBase import GdalToolsBaseBatchWidget as BaseBatchWidget
from dialogSRS import GdalToolsSRSDialog as SRSDialog
import GdalTools_utils as Utils
class GdalToolsDialog(QWidget, Ui_Widget, BaseBatchWidget):
def __init__(self, iface):
QWidget.__init__(self)
self.iface = iface
self.canvas = self.iface.mapCanvas()
self.setupUi(self)
BaseBatchWidget.__init__(self, self.iface, "gdal_translate")
# set the default QSpinBoxes and QProgressBar value
self.outsizeSpin.setValue(25)
self.progressBar.setValue(0)
self.progressBar.hide()
self.formatLabel.hide()
self.formatCombo.hide()
if Utils.Version( Utils.GdalConfig.version() ) < "1.7":
index = self.expandCombo.findText('gray', Qt.MatchFixedString)
if index >= 0:
self.expandCombo.removeItem(index)
self.outputFormat = Utils.fillRasterOutputFormat()
self.setParamsStatus(
[
(self.inputLayerCombo, [SIGNAL("currentIndexChanged(int)"), SIGNAL("editTextChanged(const QString &)")] ),
(self.outputFileEdit, SIGNAL("textChanged(const QString &)")),
(self.targetSRSEdit, SIGNAL("textChanged(const QString &)"), self.targetSRSCheck),
(self.selectTargetSRSButton, None, self.targetSRSCheck),
(self.creationOptionsTable, [SIGNAL("cellValueChanged(int, int)"), SIGNAL("rowRemoved()")], self.creationGroupBox),
(self.outsizeSpin, SIGNAL("valueChanged(const QString &)"), self.outsizeCheck),
(self.nodataSpin, SIGNAL("valueChanged(int)"), self.nodataCheck),
(self.expandCombo, SIGNAL("currentIndexChanged(int)"), self.expandCheck, "1.6.0"),
(self.sdsCheck, SIGNAL("stateChanged(int)")),
(self.srcwinEdit, SIGNAL("textChanged(const QString &)"), self.srcwinCheck),
(self.prjwinEdit, SIGNAL("textChanged(const QString &)"), self.prjwinCheck)
]
)
#self.connect(self.canvas, SIGNAL("layersChanged()"), self.fillInputLayerCombo)
self.connect(self.inputLayerCombo, SIGNAL("currentIndexChanged(int)"), self.fillTargetSRSEditDefault)
self.connect( self.selectInputFileButton, SIGNAL( "clicked()" ), self.fillInputFile )
self.connect(self.selectOutputFileButton, SIGNAL("clicked()"), self.fillOutputFileEdit)
self.connect(self.selectTargetSRSButton, SIGNAL("clicked()"), self.fillTargetSRSEdit)
self.connect( self.batchCheck, SIGNAL( "stateChanged( int )" ), self.switchToolMode )
# add raster filters to combo
self.formatCombo.addItems( Utils.FileFilter.allRastersFilter().split( ";;" ) )
# add layers to combo
self.fillInputLayerCombo()
def switchToolMode( self ):
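    # Toggle between single-file and batch (directory) mode: relabel the
    # input/output fields, show or hide the progress bar and output format
    # combo, and re-wire the browse buttons to the directory/file pickers.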
self.setCommandViewerEnabled( not self.batchCheck.isChecked() )
self.inputLayerCombo.clear()
self.inputLayerCombo.clearEditText()
self.inputLayerCombo.setCurrentIndex(-1)
self.outputFileEdit.clear()
if self.batchCheck.isChecked():
self.inFileLabel = self.label_3.text()
self.outFileLabel = self.label_2.text()
self.label_3.setText( QCoreApplication.translate( "GdalTools", "&Input directory" ) )
self.label_2.setText( QCoreApplication.translate( "GdalTools", "&Output directory" ) )
self.progressBar.show()
self.formatLabel.show()
self.formatCombo.show()
QObject.disconnect( self.selectInputFileButton, SIGNAL( "clicked()" ), self.fillInputFile )
QObject.disconnect( self.selectOutputFileButton, SIGNAL( "clicked()" ), self.fillOutputFileEdit )
QObject.connect( self.selectInputFileButton, SIGNAL( "clicked()" ), self. fillInputDir )
QObject.connect( self.selectOutputFileButton, SIGNAL( "clicked()" ), self.fillOutputDir )
else:
self.label_3.setText( self.inFileLabel )
self.label_2.setText( self.outFileLabel )
self.base.textEditCommand.setEnabled( True )
self.fillInputLayerCombo()
self.progressBar.hide()
self.formatLabel.hide()
self.formatCombo.hide()
QObject.disconnect( self.selectInputFileButton, SIGNAL( "clicked()" ), self.fillInputDir )
QObject.disconnect( self.selectOutputFileButton, SIGNAL( "clicked()" ), self.fillOutputDir )
QObject.connect( self.selectInputFileButton, SIGNAL( "clicked()" ), self.fillInputFile )
QObject.connect( self.selectOutputFileButton, SIGNAL( "clicked()" ), self.fillOutputFileEdit )
def fillInputLayerCombo(self):
self.inputLayerCombo.clear()
( self.layers, names ) = Utils.getRasterLayers()
self.inputLayerCombo.addItems( names )
def fillInputFile( self ):
lastUsedFilter = Utils.FileFilter.lastUsedRasterFilter()
inputFile = Utils.FileDialog.getOpenFileName( self, self.tr( "Select the input file for Translate" ), Utils.FileFilter.allRastersFilter(), lastUsedFilter )
if inputFile.isEmpty():
return
Utils.FileFilter.setLastUsedRasterFilter( lastUsedFilter )
self.inputLayerCombo.setCurrentIndex(-1)
self.inputLayerCombo.setEditText( inputFile )
# get SRS for target file if necessary and possible
self.refreshTargetSRS()
def fillInputDir( self ):
inputDir = Utils.FileDialog.getExistingDirectory( self, self.tr( "Select the input directory with files to Translate" ))
if inputDir.isEmpty():
return
self.inputLayerCombo.setCurrentIndex(-1)
self.inputLayerCombo.setEditText( inputDir )
filter = Utils.getRasterExtensions()
workDir = QDir( inputDir )
workDir.setFilter( QDir.Files | QDir.NoSymLinks | QDir.NoDotAndDotDot )
workDir.setNameFilters( filter )
# search for a valid SRS, then use it as default target SRS
srs = QString()
for fname in workDir.entryList():
fl = inputDir + "/" + fname
srs = Utils.getRasterSRS( self, fl )
if not srs.isEmpty():
break
self.targetSRSEdit.setText( srs )
def fillOutputFileEdit(self):
lastUsedFilter = Utils.FileFilter.lastUsedRasterFilter()
outputFile = Utils.FileDialog.getSaveFileName(self, self.tr( "Select the raster file to save the results to" ), Utils.FileFilter.allRastersFilter(), lastUsedFilter )
if outputFile.isEmpty():
return
Utils.FileFilter.setLastUsedRasterFilter(lastUsedFilter)
self.outputFormat = Utils.fillRasterOutputFormat(lastUsedFilter, outputFile)
self.outputFileEdit.setText(outputFile)
def fillOutputDir( self ):
outputDir = Utils.FileDialog.getExistingDirectory( self, self.tr( "Select the output directory to save the results to" ) )
if outputDir.isEmpty():
return
self.outputFileEdit.setText( outputDir )
def fillTargetSRSEditDefault(self, index):
if index < 0:
return
self.refreshTargetSRS()
def refreshTargetSRS(self):
self.targetSRSEdit.setText( Utils.getRasterSRS( self, self.getInputFileName() ) )
def fillTargetSRSEdit(self):
dialog = SRSDialog( "Select the target SRS" )
if dialog.exec_():
self.targetSRSEdit.setText(dialog.getProjection())
def getArguments(self):
arguments = QStringList()
if self.targetSRSCheck.isChecked() and not self.targetSRSEdit.text().isEmpty():
arguments << "-a_srs"
arguments << self.targetSRSEdit.text()
if self.creationGroupBox.isChecked():
for opt in self.creationOptionsTable.options():
arguments << "-co"
arguments << opt
if self.outsizeCheck.isChecked() and self.outsizeSpin.value() != 100:
arguments << "-outsize"
arguments << self.outsizeSpin.text()
arguments << self.outsizeSpin.text()
if self.expandCheck.isChecked():
arguments << "-expand"
arguments << self.expandCombo.currentText().toLower()
if self.nodataCheck.isChecked():
arguments << "-a_nodata"
arguments << str(self.nodataSpin.value())
if self.sdsCheck.isChecked():
arguments << "-sds"
if self.srcwinCheck.isChecked() and not self.srcwinEdit.text().isEmpty():
#coordList = []
coordList = self.srcwinEdit.text().split( ' ', QString.SkipEmptyParts )
if len(coordList) == 4 and not coordList[3].isEmpty():
try:
for x in coordList:
test = int(x)
except ValueError:
#print "Coordinates must be integer numbers."
QMessageBox.critical(self, self.tr("Translate - srcwin"), self.tr("Image coordinates (pixels) must be integer numbers."))
else:
arguments << "-srcwin"
for x in coordList:
arguments << x
if self.prjwinCheck.isChecked() and not self.prjwinEdit.text().isEmpty():
#coordList = []
coordList = self.prjwinEdit.text().split( ' ', QString.SkipEmptyParts )
if len(coordList) == 4 and not coordList[3].isEmpty():
try:
for x in coordList:
test = float(x)
except ValueError:
#print "Coordinates must be integer numbers."
QMessageBox.critical(self, self.tr("Translate - prjwin"), self.tr("Image coordinates (geographic) must be numbers."))
else:
arguments << "-projwin"
for x in coordList:
arguments << x
if self.isBatchEnabled():
if self.formatCombo.currentIndex() != 0:
arguments << "-of"
arguments << Utils.fillRasterOutputFormat( self.formatCombo.currentText() )
return arguments
else:
return arguments
if not self.outputFileEdit.text().isEmpty():
arguments << "-of"
arguments << self.outputFormat
arguments << self.getInputFileName()
arguments << self.getOutputFileName()
return arguments
def getInputFileName(self):
if self.inputLayerCombo.currentIndex() >= 0:
return self.layers[self.inputLayerCombo.currentIndex()].source()
return self.inputLayerCombo.currentText()
def getOutputFileName(self):
return self.outputFileEdit.text()
def addLayerIntoCanvas(self, fileInfo):
self.iface.addRasterLayer(fileInfo.filePath())
def isBatchEnabled(self):
return self.batchCheck.isChecked()
def setProgressRange(self, maximum):
self.progressBar.setRange(0, maximum)
def updateProgress(self, index, total):
if index < total:
self.progressBar.setValue( index + 1 )
else:
self.progressBar.setValue( 0 )
def batchRun(self):
exts = self.formatCombo.currentText().remove( QRegExp('^.*\(') ).remove( QRegExp('\).*$') ).split( " " )
if not exts.isEmpty() and exts != "*" and exts != "*.*":
outExt = exts[ 0 ].remove( "*" )
else:
outExt = ".tif"
self.base.enableRun( False )
self.base.setCursor( Qt.WaitCursor )
inDir = self.getInputFileName()
outDir = self.getOutputFileName()
filter = Utils.getRasterExtensions()
workDir = QDir( inDir )
workDir.setFilter( QDir.Files | QDir.NoSymLinks | QDir.NoDotAndDotDot )
workDir.setNameFilters( filter )
files = workDir.entryList()
self.inFiles = []
self.outFiles = []
for f in files:
self.inFiles.append( inDir + "/" + f )
if outDir != None:
outFile = f.replace( QRegExp( "\.[a-zA-Z0-9]{2,4}" ), outExt )
self.outFiles.append( outDir + "/" + outFile )
self.errors = QStringList()
self.batchIndex = 0
self.batchTotal = len( self.inFiles )
self.setProgressRange( self.batchTotal )
self.runItem( self.batchIndex, self.batchTotal )
| gpl-2.0 | 8,092,497,440,117,212,000 | 39.577703 | 171 | 0.650737 | false |
kvirund/codingame | medium/network.cabling/solution.py | 1 | 1412 | #!/usr/bin/python
# Author: Anton Gorev aka Veei
# Date: 2014-11-06
import sys
n = int(raw_input())
ys = []
sum = 0
first = True
minx = maxx = 0
miny = maxy = 0
for i in xrange(n):
b = [int(a) for a in raw_input().split(" ")]
ys += [b[1]]
sum += b[1]
if first or minx > b[0]:
minx = b[0]
if first or maxx < b[0]:
maxx = b[0]
if first or miny > b[1]:
miny = b[1]
if first or maxy < b[1]:
maxy = b[1]
first = False
def length(ys, y):
return reduce(lambda a, b: a + abs(b - y), ys, 0)
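# The trunk cable runs horizontally from minx to maxx and every building needs
# a vertical drop of |y_i - y|, so the total cost is length(ys, y) + maxx - minx.
# length() is piecewise linear and convex in y, hence the loop below narrows
# [miny, maxy] by comparing midpoint costs until the minimizing y is found.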
result = y = miny
lmin = length(ys, miny)
lmax = length(ys, maxy)
while miny != maxy:
print >> sys.stderr, miny, maxy
if 1 == maxy - miny:
if lmin < lmax:
maxy = miny
else:
miny = maxy
break
midy = (maxy + miny)/2
lmid = length(ys, midy)
if lmid < lmin and lmid < lmax:
nl = length(ys, midy + 1)
if nl > lmid:
maxy = midy
lmax = lmid
else:
miny = midy
lmin = lmid
elif lmid < lmin and lmid >= lmax:
miny = midy
lmin = lmid
elif lmid >= lmin and lmid < lmax:
lmax = lmid
maxy = midy
else:
        print >> sys.stderr, "Broken logic", lmin, lmid, lmax, miny, midy, maxy
break
print >> sys.stderr, miny, length(ys, miny)
print length(ys, miny) + maxx - minx
| mit | 5,430,850,358,133,231,000 | 21.0625 | 79 | 0.5 | false |
e02d96ec16/CumulusCI | cumulusci/tasks/release_notes/tests/test_generator.py | 1 | 11463 | # coding=utf-8
import datetime
import httplib
import json
import os
import unittest
from github3 import login
import responses
from cumulusci.tasks.release_notes.generator import BaseReleaseNotesGenerator
from cumulusci.tasks.release_notes.generator import StaticReleaseNotesGenerator
from cumulusci.tasks.release_notes.generator import DirectoryReleaseNotesGenerator
from cumulusci.tasks.release_notes.generator import GithubReleaseNotesGenerator
from cumulusci.tasks.release_notes.parser import BaseChangeNotesParser
from cumulusci.tasks.release_notes.tests.util_github_api import GithubApiTestMixin
from cumulusci.tasks.release_notes.tests.utils import MockUtil
__location__ = os.path.split(os.path.realpath(__file__))[0]
PARSER_CONFIG = [
{
'class_path': 'cumulusci.tasks.release_notes.parser.GithubLinesParser',
'title': 'Critical Changes',
},
{
'class_path': 'cumulusci.tasks.release_notes.parser.GithubLinesParser',
'title': 'Changes',
},
{
'class_path': 'cumulusci.tasks.release_notes.parser.GithubIssuesParser',
'title': 'Issues Closed',
},
]
class DummyParser(BaseChangeNotesParser):
def parse(self, change_note):
pass
def _render(self):
        return 'dummy parser output'
class TestBaseReleaseNotesGenerator(unittest.TestCase):
def test_render_no_parsers(self):
release_notes = BaseReleaseNotesGenerator()
content = release_notes.render()
self.assertEqual(content, '')
def test_render_dummy_parsers(self):
release_notes = BaseReleaseNotesGenerator()
release_notes.parsers.append(DummyParser('Dummy 1'))
release_notes.parsers.append(DummyParser('Dummy 2'))
expected = u'# Dummy 1\r\n\r\ndummy parser output\r\n\r\n' +\
u'# Dummy 2\r\n\r\ndummy parser output'
self.assertEqual(release_notes.render(), expected)
class TestStaticReleaseNotesGenerator(unittest.TestCase):
def test_init_parser(self):
release_notes = StaticReleaseNotesGenerator([])
assert len(release_notes.parsers) == 3
class TestDirectoryReleaseNotesGenerator(unittest.TestCase):
def test_init_parser(self):
release_notes = DirectoryReleaseNotesGenerator('change_notes')
assert len(release_notes.parsers) == 3
def test_full_content(self):
change_notes_dir = os.path.join(
__location__,
'change_notes',
'full',
)
release_notes = DirectoryReleaseNotesGenerator(
change_notes_dir,
)
content = release_notes()
expected = "# Critical Changes\r\n\r\n* This will break everything!\r\n\r\n# Changes\r\n\r\nHere's something I did. It was really cool\r\nOh yeah I did something else too!\r\n\r\n# Issues Closed\r\n\r\n#2345\r\n#6236"
print(expected)
print('-------------------------------------')
print(content)
self.assertEqual(content, expected)
class TestGithubReleaseNotesGenerator(unittest.TestCase, GithubApiTestMixin):
def setUp(self):
self.current_tag = 'prod/1.4'
self.last_tag = 'prod/1.3'
self.github_info = {
'github_owner': 'TestOwner',
'github_repo': 'TestRepo',
'github_username': 'TestUser',
'github_password': 'TestPass',
}
self.gh = login('TestUser', 'TestPass')
self.mock_util = MockUtil('TestOwner', 'TestRepo')
@responses.activate
def test_init_without_last_tag(self):
github_info = self.github_info.copy()
self.mock_util.mock_get_repo()
generator = GithubReleaseNotesGenerator(
self.gh,
github_info,
PARSER_CONFIG,
self.current_tag,
)
self.assertEqual(generator.github_info, github_info)
self.assertEqual(generator.current_tag, self.current_tag)
self.assertEqual(generator.last_tag, None)
self.assertEqual(generator.change_notes.current_tag, self.current_tag)
self.assertEqual(generator.change_notes._last_tag, None)
@responses.activate
def test_init_with_last_tag(self):
github_info = self.github_info.copy()
self.mock_util.mock_get_repo()
generator = GithubReleaseNotesGenerator(
self.gh,
github_info,
PARSER_CONFIG,
self.current_tag,
self.last_tag,
)
self.assertEqual(generator.github_info, github_info)
self.assertEqual(generator.current_tag, self.current_tag)
self.assertEqual(generator.last_tag, self.last_tag)
self.assertEqual(generator.change_notes.current_tag, self.current_tag)
self.assertEqual(generator.change_notes._last_tag, self.last_tag)
class TestPublishingGithubReleaseNotesGenerator(unittest.TestCase, GithubApiTestMixin):
def setUp(self):
self.init_github()
self.github_info = {
'github_owner': 'TestOwner',
'github_repo': 'TestRepo',
'github_username': 'TestUser',
'github_password': 'TestPass',
'master_branch': 'master',
}
self.gh = login('TestUser', 'TestPass')
self.mock_util = MockUtil('TestOwner', 'TestRepo')
@responses.activate
def test_publish_update_unicode(self):
tag = 'prod/1.4'
note = u'“Unicode quotes”'
expected_release_body = u'# Changes\r\n\r\n{}'.format(note)
# mock GitHub API responses
self.mock_util.mock_get_repo()
# create generator instance
generator = self._create_generator(tag)
# inject content into Changes parser
generator.parsers[1].content.append(note)
# render content
content = generator.render()
# verify
self.assertEqual(len(responses.calls._calls), 1)
self.assertEqual(content, expected_release_body)
@responses.activate
def test_publish_update_no_body(self):
tag = 'prod/1.4'
expected_release_body = '# Changes\r\n\r\nfoo'
# mock GitHub API responses
self.mock_util.mock_get_repo()
# create generator
generator = self._create_generator(tag)
# inject content into Changes parser
generator.parsers[1].content.append('foo')
# render content
content = generator.render()
# verify
self.assertEqual(len(responses.calls._calls), 1)
self.assertEqual(content, expected_release_body)
@responses.activate
def test_publish_update_content_before(self):
tag = 'prod/1.4'
expected_release_body = 'foo\r\n# Changes\r\n\r\nbaz'
# mock GitHub API responses
self.mock_util.mock_get_repo()
self.mock_util.mock_list_releases(tag=tag, body='foo\n# Changes\nbar')
# create generator
generator = self._create_generator(tag)
# inject content into parser
generator.parsers[1].content.append('baz')
# render and update content
content = generator.render()
release = generator._get_release()
content = generator._update_release_content(release, content)
# verify
self.assertEqual(len(responses.calls._calls), 3)
self.assertEqual(content, expected_release_body)
@responses.activate
def test_publish_update_content_after(self):
tag = 'prod/1.4'
expected_release_body = '# Changes\r\n\r\nbaz\r\n\r\n# Foo\r\nfoo'
# mock GitHub API responses
self.mock_util.mock_get_repo()
self.mock_util.mock_list_releases(
tag=tag,
body='# Changes\nbar\n# Foo\nfoo',
)
# create generator
generator = self._create_generator(tag)
# inject content into parser
generator.parsers[1].content.append('baz')
# render and update content
content = generator.render()
release = generator._get_release()
content = generator._update_release_content(release, content)
# verify
self.assertEqual(len(responses.calls._calls), 3)
self.assertEqual(content, expected_release_body)
@responses.activate
def test_publish_update_content_before_and_after(self):
tag = 'prod/1.4'
expected_release_body = (
'foo\r\n# Changes\r\n\r\nbaz\r\n\r\n# Foo\r\nfoo'
)
# mock GitHub API responses
self.mock_util.mock_get_repo()
self.mock_util.mock_list_releases(
tag=tag,
body='foo\n# Changes\nbar\n# Foo\nfoo',
)
# create generator
generator = self._create_generator(tag)
# inject content into parser
generator.parsers[1].content.append('baz')
# render and update content
content = generator.render()
release = generator._get_release()
content = generator._update_release_content(release, content)
# verify
self.assertEqual(len(responses.calls._calls), 3)
self.assertEqual(content, expected_release_body)
@responses.activate
def test_publish_update_content_between(self):
tag = 'prod/1.4'
expected_release_body = (
'# Critical Changes\r\n\r\nfaz\r\n\r\n'
'# Foo\r\nfoo\r\n# Changes\r\n\r\nfiz'
)
# mock GitHub API responses
self.mock_util.mock_get_repo()
self.mock_util.mock_list_releases(
tag=tag,
body='# Critical Changes\nbar\n# Foo\nfoo\n# Changes\nbiz',
)
# create generator
generator = self._create_generator(tag)
# inject content into parser
generator.parsers[0].content.append('faz')
generator.parsers[1].content.append('fiz')
# render and update content
content = generator.render()
release = generator._get_release()
content = generator._update_release_content(release, content)
# verify
self.assertEqual(len(responses.calls._calls), 3)
self.assertEqual(content, expected_release_body)
@responses.activate
def test_publish_update_content_before_after_and_between(self):
tag = 'prod/1.4'
expected_release_body = (
'goo\r\n# Critical Changes\r\n\r\nfaz\r\n\r\n'
'# Foo\r\nfoo\r\n# Changes\r\n\r\nfiz\r\n\r\n# Zoo\r\nzoo'
)
# mock GitHub API responses
self.mock_util.mock_get_repo()
self.mock_util.mock_list_releases(
tag=tag,
body=(
'goo\n# Critical Changes\nbar\n'
'# Foo\nfoo\n# Changes\nbiz\n# Zoo\nzoo'
),
)
# create generator
generator = self._create_generator(tag)
# inject content into parser
generator.parsers[0].content.append('faz')
generator.parsers[1].content.append('fiz')
# render and update content
content = generator.render()
release = generator._get_release()
content = generator._update_release_content(release, content)
# verify
self.assertEqual(len(responses.calls._calls), 3)
self.assertEqual(content, expected_release_body)
def _create_generator(self, current_tag, last_tag=None):
generator = GithubReleaseNotesGenerator(
self.gh,
self.github_info.copy(),
PARSER_CONFIG,
current_tag,
last_tag,
)
return generator
| bsd-3-clause | 3,159,689,252,933,058,000 | 35.148265 | 225 | 0.623702 | false |
infobloxopen/infoblox-netmri | infoblox_netmri/api/remote/models/changed_port_net_explorer_inv_summary_grid_remote.py | 1 | 2979 | from ..remote import RemoteModel
class ChangedPortNetExplorerInvSummaryGridRemote(RemoteModel):
"""
| ``DeviceIPDotted:`` none
| ``attribute type:`` string
| ``DeviceIPNumeric:`` none
| ``attribute type:`` string
| ``VirtualNetworkID:`` none
| ``attribute type:`` string
| ``Network:`` none
| ``attribute type:`` string
| ``DeviceName:`` none
| ``attribute type:`` string
| ``DeviceType:`` none
| ``attribute type:`` string
| ``DeviceAssurance:`` none
| ``attribute type:`` string
| ``DeviceID:`` none
| ``attribute type:`` string
| ``InterfaceID:`` none
| ``attribute type:`` string
| ``ifName:`` none
| ``attribute type:`` string
| ``ifNameSort:`` none
| ``attribute type:`` string
| ``VirtualNetworkMemberName:`` none
| ``attribute type:`` string
| ``ifIPDotted:`` none
| ``attribute type:`` string
| ``ifIPNumeric:`` none
| ``attribute type:`` string
| ``ifType:`` none
| ``attribute type:`` string
| ``VlanIndex:`` none
| ``attribute type:`` string
| ``RootBridgeAddress:`` none
| ``attribute type:`` string
| ``VlanName:`` none
| ``attribute type:`` string
| ``VlanID:`` none
| ``attribute type:`` string
| ``RootVlanMemberID:`` none
| ``attribute type:`` string
| ``ifTrunkStatus:`` none
| ``attribute type:`` string
| ``ifSpeed:`` none
| ``attribute type:`` string
| ``State:`` none
| ``attribute type:`` string
| ``TimeSinceLastChange:`` none
| ``attribute type:`` string
| ``ifLastChange:`` none
| ``attribute type:`` string
| ``ifOperStatus:`` none
| ``attribute type:`` string
| ``ifAdminStatus:`` none
| ``attribute type:`` string
| ``ifPortControlInd:`` none
| ``attribute type:`` string
| ``ifIndex:`` none
| ``attribute type:`` string
"""
properties = ("DeviceIPDotted",
"DeviceIPNumeric",
"VirtualNetworkID",
"Network",
"DeviceName",
"DeviceType",
"DeviceAssurance",
"DeviceID",
"InterfaceID",
"ifName",
"ifNameSort",
"VirtualNetworkMemberName",
"ifIPDotted",
"ifIPNumeric",
"ifType",
"VlanIndex",
"RootBridgeAddress",
"VlanName",
"VlanID",
"RootVlanMemberID",
"ifTrunkStatus",
"ifSpeed",
"State",
"TimeSinceLastChange",
"ifLastChange",
"ifOperStatus",
"ifAdminStatus",
"ifPortControlInd",
"ifIndex",
)
| apache-2.0 | -6,505,330,156,821,366,000 | 22.456693 | 62 | 0.479355 | false |
heldergg/labs | lib/hc/draw.py | 1 | 8228 | # -*- coding: utf-8 -*-
'''
This module produces SVG files with hemicycle representations.
'''
##
# Imports
##
from pysvg.structure import svg, g, defs, use, title
from pysvg.builders import TransformBuilder, ShapeBuilder
from pysvg.shape import path
from pysvg.style import style
from math import sin, cos, pi, floor
import os.path
from chairs import Hemicycle
##
# Config
##
SVGBASE = '/home/helder/prg/hc/hc/share/hc'
TRANSX = 0
TRANSY = -50
##
# Exceptions
##
class SVGError(Exception):
pass
##
# Utils
##
def degrees(angle):
'''Converts radians to degrees'''
return angle * 180 / pi
##
# SGV
##
class HemicycleSGV(object):
'''
This class creates svg representations of hemicycles.
'''
def __init__(self, hc, parties=None):
'''
hc - hemicycle object
parties - list with the following structure:
        [ { 'initials': '<legend name>',
            'name': '<full party name>',
            'order': <seating order index>,
            'result': <number of chairs>,
            'color_1': '<head color>',
            'color_2': '<body color>'
          }, ...
        ]
'''
self.hc = hc
self.parties = parties
self.chairs = []
# Check if the number of chairs in the results matches the
# calculated hemicycle number of chairs.
nchairs = sum([party['result'] for party in parties])
if nchairs != hc.nchairs:
raise SVGError(
                'The number of chairs in the results doesn\'t match the hemicycle size.')
def chair_dist(self):
'''Chair distribution on the hemicycle'''
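        # Fill the hemicycle row by row (innermost row first): parties are
        # handled from smallest to largest and each receives a share of the row
        # roughly proportional to its share of the seats still to be placed
        # (at least one chair while it still has seats left).  The chairs in
        # each row are then ordered by the parties' 'order' field.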
def smallest(parties, first_row):
            '''Returns the number of chairs for the smallest party in parties'''
remaining = (sum([party['result'] for party in parties]) -
sum([sum(party['seats']) for party in parties]))
smallest_party = parties[0]
dist_seats = sum(smallest_party['seats'])
remaining_seats = smallest_party['result'] - dist_seats
percent = float(remaining_seats) / remaining
nc = int(floor(percent * first_row))
if sum(smallest_party['seats']) == smallest_party['result']:
return 0
return 1 if not nc else nc
def fill_row(parties, seats):
parties.sort(key=lambda party: party['result'])
# Find how many seats we have for each party on this row
for i in range(len(parties)):
party = parties[i]
party_row_seats = smallest(parties[i:], seats)
party['seats'].append(party_row_seats)
seats -= party_row_seats
parties = self.parties
for party in parties:
party['seats'] = []
hc = [row['nchairs'] for row in self.hc.rows()]
for row in hc:
fill_row(parties, row)
parties.sort(key=lambda party: party['order'])
# Create an hemicicle matrix, each row is empty, we'll fill the
# rows afterwards
chairs = []
for i in range(self.hc.nrows):
row = []
for j in range(len(parties)):
party = parties[j]
for seat in range(party['seats'][i]):
row.append(j)
chairs.append(row)
self.chairs = chairs
def svg_dimention(self):
# The SVG coord system origin is on the lower left:
height = self.hc.outer_radius()
width = self.hc.outer_radius() * 2
return width, height
def chair_svg(self, row, column, id_attr):
angle, x, y = self.hc.chair_location(row, column)
width, height = self.svg_dimention()
# This '30' is half the size of the svg chair, should be configured
x = x + width / 2 - 30 * cos(pi / 2 - angle) + TRANSX
y = height - y - 30 * sin(pi / 2 - angle) + TRANSY
# Chair translation and rotation parametrization
th = TransformBuilder()
th.setRotation('%f' % (90 - degrees(angle)))
th.setTranslation('%f,%f' % (x, y))
u = use()
u._attributes['xlink:href'] = '#%s' % id_attr
u.set_transform(th.getTransform())
return u
def chair(self, id_attr, color_1, color_2):
head = ShapeBuilder().createCircle(30, 25, 20, stroke='black', strokewidth=5.0, fill=color_1)
head.set_class('head')
body = path(pathData="M 19.264266,38.267870 C 12.892238,41.659428 9.0221978,48.396703 6.6126745,55.405840 L 51.476471,55.405840 C 49.270169,48.545436 45.682644,41.911786 39.811885,38.267870 C 33.901416,38.010889 26.459633,38.267870 19.264266,38.267870 z ")
body.set_style('stroke-width:5.0;stroke:black;fill:%s;' % color_2)
body.set_class('body')
th = TransformBuilder()
th.setScaling('0.8', '0.8')
group = g()
group.addElement(body)
group.addElement(head)
group.set_id(id_attr)
group.set_transform(th.getTransform())
return group
def defs(self):
d = defs()
for party in self.parties:
d.addElement(self.chair(party['initials'], party['color_1'], party['color_2']))
return d
def svg(self):
if not self.chairs:
raise SVGError('You need to calculate the chair distribution.')
width, height = self.svg_dimention()
# SVG doc
s = svg(height="100%", width="100%")
s.set_viewBox("0 0 %d %d" % (width, height))
t = title()
t.appendTextContent('Parlamento')
s.addElement(t)
# Create the party groups
groups = {}
for i in range(len(self.parties)):
party = self.parties[i]
groups[i] = g()
# groups[i].set_fill(party['color'])
groups[i].set_id('%s_group' % party['initials'])
t = title()
t.appendTextContent('Grupo Parlamentar do %s' % party['initials'])
groups[i].addElement(t)
# Add the chair shape definition
s.addElement(self.defs())
# Distribute the chairs
for row in range(len(self.chairs)):
for col in range(len(self.chairs[row])):
angle, x, y = self.hc.chair_location(row, col)
x = x + width / 2
y = height - y
groups[self.chairs[row][col]].addElement(self.chair_svg(
row, col, self.parties[self.chairs[row][col]]['initials']))
# Insert the party groups into the svg
for i in range(len(self.parties)):
s.addElement(groups[i])
return s.getXML()
if __name__ == '__main__':
# Vote count
parties = [{'initials': 'BE', 'order': 0, 'result': 8, 'image': 'cadeira-BE.svg'},
{'initials': 'CDU', 'order': 1, 'result': 16, 'image': 'cadeira-CDU.svg'},
{'initials': 'PS', 'order': 2, 'result': 74, 'image': 'cadeira-PS.svg'},
{'initials': 'PSD', 'order': 3, 'result': 108, 'image': 'cadeira-PSD.svg'},
{'initials': 'CDS', 'order': 4, 'result': 24, 'image': 'cadeira-CDS.svg'},
]
parties = [
{'name': 'Bloco de Esquerda', 'initials': 'BE',
'order': 0, 'result': 7, 'color_1': 'purple', 'color_2': 'red'},
{'name': 'Coligação Democratica Unitária', 'initials': 'CDU',
'order': 1, 'result': 16, 'color_1': 'red', 'color_2': 'yellow'},
{'name': 'Partido Socialista', 'initials': 'PS',
'order': 2, 'result': 74, 'color_1': 'pink', 'color_2': 'pink'},
{'name': 'Partido Social Democrata', 'initials': 'PSD',
'order': 3, 'result': 109, 'color_1': 'orange', 'color_2': 'orange'},
{'name': 'Centro Democrático Social', 'initials': 'CDS',
'order': 4, 'result': 24, 'color_1': 'blue', 'color_2': 'white'},
]
# Create the hemicycle
hc = Hemicycle(chair_width=60,
chair_height=60,
nchairs=230,
nrows=8,
hangle=(4 / 3) * pi)
# Graphical representation of the hemicycle
hc_svg = HemicycleSGV(hc, parties)
hc_svg.chair_dist()
print hc_svg.svg()
| gpl-3.0 | -4,524,365,662,919,559,700 | 30.389313 | 264 | 0.544504 | false |
DjangoUnchained-CRUD/python | tutorial/tutorial/tutorial/urls.py | 1 | 1193 | """tutorial URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from rest_framework import routers
from quickstart import views
from django.contrib import admin
router = routers.DefaultRouter()
router.register(r'users', views.UserViewSet)
router.register(r'groups', views.GroupViewSet)
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
url(r'^', include(router.urls)),
url(r'^admin/', admin.site.urls),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
| mit | -8,288,057,378,176,101,000 | 36.28125 | 82 | 0.720872 | false |
bburan/psiexperiment | psi/data/io/summarize_abr.py | 1 | 11625 | import argparse
from glob import glob
import os.path
import numpy as np
import pandas as pd
from psi.data.io import abr
columns = ['frequency', 'level', 'polarity']
def process_folder(folder, filter_settings=None):
glob_pattern = os.path.join(folder, '*abr*')
filenames = glob(glob_pattern)
process_files(filenames, filter_settings=filter_settings)
def process_files(filenames, offset=-0.001, duration=0.01,
filter_settings=None, reprocess=False):
for filename in filenames:
try:
processed = process_file(filename, offset, duration,
filter_settings, reprocess)
if processed:
print(f'\nProcessed {filename}\n')
else:
print('*', end='', flush=True)
except Exception as e:
            print(f'\nError processing {filename}\n{e}\n')
            raise
def _get_file_template(fh, offset, duration, filter_settings, suffix=None):
base_string = f'ABR {offset*1e3:.1f}ms to {(offset+duration)*1e3:.1f}ms'
if filter_settings == 'saved':
settings = _get_filter(fh)
if not settings['digital_filter']:
filter_string = None
else:
lb = settings['lb']
ub = settings['ub']
filter_string = f'{lb:.0f}Hz to {ub:.0f}Hz filter'
elif filter_settings is None:
filter_string = None
else:
lb = filter_settings['lb']
ub = filter_settings['ub']
filter_string = f'{lb:.0f}Hz to {ub:.0f}Hz filter'
order = filter_settings['order']
if order != 1:
filter_string = f'{order:.0f} order {filter_string}'
if filter_string is None:
file_string = f'{base_string}'
else:
file_string = f'{base_string} with {filter_string}'
if suffix is not None:
file_string = f'{file_string} {suffix}'
print(file_string)
return f'{file_string} {{}}.csv'
def _get_filter(fh):
if not isinstance(fh, (abr.ABRFile, abr.ABRSupersetFile)):
fh = abr.load(fh)
return {
'digital_filter': fh.get_setting_default('digital_filter', True),
'lb': fh.get_setting_default('digital_highpass', 300),
'ub': fh.get_setting_default('digital_lowpass', 3000),
# Filter order is not currently an option in the psiexperiment ABR
# program so it defaults to 1.
'order': 1,
}
def _get_epochs(fh, offset, duration, filter_settings, reject_ratio=None):
# We need to do the rejects in this code so that we can obtain the
# information for generating the CSV files. Set reject_threshold to np.inf
# to ensure that nothing gets rejected.
kwargs = {'offset': offset, 'duration': duration, 'columns': columns,
'reject_threshold': np.inf}
if filter_settings is None:
return fh.get_epochs(**kwargs)
if filter_settings == 'saved':
settings = _get_filter(fh)
if not settings['digital_filter']:
return fh.get_epochs(**kwargs)
lb = settings['lb']
ub = settings['ub']
order = settings['order']
kwargs.update({'filter_lb': lb, 'filter_ub': ub, 'filter_order': order})
return fh.get_epochs_filtered(**kwargs)
lb = filter_settings['lb']
ub = filter_settings['ub']
order = filter_settings['order']
kwargs.update({'filter_lb': lb, 'filter_ub': ub, 'filter_order': order})
return fh.get_epochs_filtered(**kwargs)
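# _match_epochs trims the epoch tables from several datasets so that, for each
# (frequency, level) cell, every dataset and polarity contributes the same
# number of epochs (the smallest count observed); cells that are not present in
# at least two datasets are dropped.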
def _match_epochs(*epochs):
def _match_n(df):
grouping = df.groupby(['dataset', 'polarity'])
n = grouping.size().unstack()
if len(n) < 2:
return None
n = n.values.ravel().min()
return pd.concat([g.iloc[:n] for _, g in grouping])
epochs = pd.concat(epochs, keys=range(len(epochs)), names=['dataset'])
matched = epochs.groupby(['frequency', 'level']).apply(_match_n)
return [d.reset_index('dataset', drop=True) for _, d in \
matched.groupby('dataset', group_keys=False)]
def is_processed(filename, offset, duration, filter_settings, suffix=None):
t = _get_file_template(filename, offset, duration, filter_settings, suffix)
file_template = os.path.join(filename, t)
raw_epoch_file = file_template.format('individual waveforms')
mean_epoch_file = file_template.format('average waveforms')
n_epoch_file = file_template.format('number of epochs')
return os.path.exists(raw_epoch_file) and \
os.path.exists(mean_epoch_file) and \
os.path.exists(n_epoch_file)
def process_files_matched(filenames, offset, duration, filter_settings,
reprocess=True, suffix=None):
epochs = []
for filename in filenames:
fh = abr.load(filename)
if len(fh.erp_metadata) == 0:
raise IOError('No data in file')
e = _get_epochs(fh, offset, duration, filter_settings)
epochs.append(e)
epochs = _match_epochs(*epochs)
for filename, e in zip(filenames, epochs):
# Generate the filenames
t = _get_file_template(fh, offset, duration, filter_settings, suffix)
file_template = os.path.join(filename, t)
raw_epoch_file = file_template.format('individual waveforms')
mean_epoch_file = file_template.format('average waveforms')
n_epoch_file = file_template.format('number of epochs')
# Check to see if all of them exist before reprocessing
if not reprocess and \
(os.path.exists(raw_epoch_file) and \
os.path.exists(mean_epoch_file) and \
os.path.exists(n_epoch_file)):
continue
epoch_n = e.groupby(columns[:-1]).size()
epoch_mean = e.groupby(columns).mean().groupby(columns[:-1]).mean()
# Write the data to CSV files
epoch_n.to_csv(n_epoch_file, header=True)
epoch_mean.columns.name = 'time'
epoch_mean.T.to_csv(mean_epoch_file)
e.columns.name = 'time'
e.T.to_csv(raw_epoch_file)
def process_file(filename, offset, duration, filter_settings, reprocess=False,
n_epochs='auto', suffix=None):
'''
Extract ABR epochs, filter and save result to CSV files
Parameters
----------
filename : path
Path to ABR experiment. If it's a set of ABR experiments, epochs across
all experiments will be combined for the analysis.
offset : sec
The start of the epoch to extract, in seconds, relative to tone pip
onset. Negative values can be used to extract a prestimulus baseline.
duration: sec
The duration of the epoch to extract, in seconds, relative to the
offset. If offset is set to -0.001 sec and duration is set to 0.01 sec,
then the epoch will be extracted from -0.001 to 0.009 sec re tone pip
onset.
filter_settings : {None, 'saved', dict}
If None, no additional filtering is done. If 'saved', uses the digital
filter settings that were saved in the ABR file. If a dictionary, must
contain 'lb' (the lower bound of the passband in Hz) and 'ub' (the
upper bound of the passband in Hz).
reprocess : bool
If True, reprocess the file even if it already has been processed for
the specified filter settings.
n_epochs : {None, 'auto', int, dict}
If None, all epochs will be used. If 'auto', use the value defined at
acquisition time. If integer, will limit the number of epochs per
frequency and level to this number. If dict, the key must be a tuple of
(frequency, level) and the value will indicate the number of epochs to
use.
suffix : {None, str}
Suffix to use when creating save filenames.
'''
fh = abr.load(filename)
if len(fh.erp_metadata) == 0:
raise IOError('No data in file')
# Generate the filenames
t = _get_file_template(fh, offset, duration, filter_settings, suffix)
file_template = os.path.join(filename, t)
raw_epoch_file = file_template.format('individual waveforms')
mean_epoch_file = file_template.format('average waveforms')
n_epoch_file = file_template.format('number of epochs')
reject_ratio_file = file_template.format('reject ratio')
# Check to see if all of them exist before reprocessing
if not reprocess and \
(os.path.exists(raw_epoch_file) and \
os.path.exists(mean_epoch_file) and \
os.path.exists(n_epoch_file) and \
os.path.exists(reject_ratio_file)):
return False
# Load the epochs
epochs = _get_epochs(fh, offset, duration, filter_settings)
# Apply the reject
reject_threshold = fh.get_setting('reject_threshold')
m = np.abs(epochs) < reject_threshold
m = m.all(axis=1)
epochs = epochs.loc[m]
if n_epochs is not None:
if n_epochs == 'auto':
n_epochs = fh.get_setting('averages')
n = int(np.floor(n_epochs / 2))
epochs = epochs.groupby(columns) \
.apply(lambda x: x.iloc[:n])
epoch_reject_ratio = 1-m.groupby(columns[:-1]).mean()
epoch_mean = epochs.groupby(columns).mean() \
.groupby(columns[:-1]).mean()
# Write the data to CSV files
epoch_reject_ratio.name = 'epoch_reject_ratio'
epoch_reject_ratio.to_csv(reject_ratio_file, header=True)
    epoch_n = epochs.groupby(columns[:-1]).size()
    epoch_n.name = 'epoch_n'
    epoch_n.to_csv(n_epoch_file, header=True)
epoch_mean.columns.name = 'time'
epoch_mean.T.to_csv(mean_epoch_file)
epochs.columns.name = 'time'
epochs.T.to_csv(raw_epoch_file)
return True
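# Example usage (sketch; the folder name and filter values are illustrative):
#
#   process_file('20190101-1234 abr_io', offset=-0.001, duration=0.01,
#                filter_settings={'lb': 300, 'ub': 3000, 'order': 1})
#
# writes "ABR -1.0ms to 9.0ms with 300Hz to 3000Hz filter ..." CSV files
# (individual waveforms, average waveforms, number of epochs, reject ratio)
# into the experiment folder.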
def main_auto():
parser = argparse.ArgumentParser('Filter and summarize ABR files in folder')
parser.add_argument('folder', type=str, help='Folder containing ABR data')
args = parser.parse_args()
process_folder(args.folder, filter_settings='saved')
def main():
parser = argparse.ArgumentParser('Filter and summarize ABR data')
parser.add_argument('filenames', type=str,
help='Filename', nargs='+')
parser.add_argument('--offset', type=float,
help='Epoch offset',
default=-0.001)
parser.add_argument('--duration', type=float,
help='Epoch duration',
default=0.01)
parser.add_argument('--filter-lb', type=float,
help='Highpass filter cutoff',
default=None)
parser.add_argument('--filter-ub', type=float,
help='Lowpass filter cutoff',
default=None)
parser.add_argument('--order', type=float,
help='Filter order',
default=None)
parser.add_argument('--reprocess',
help='Redo existing results',
action='store_true')
args = parser.parse_args()
if args.filter_lb is not None or args.filter_ub is not None:
filter_settings = {
'lb': args.filter_lb,
'ub': args.filter_ub,
'order': args.order,
}
else:
filter_settings = None
process_files(args.filenames, args.offset, args.duration, filter_settings,
args.reprocess)
def main_gui():
import enaml
from enaml.qt.qt_application import QtApplication
with enaml.imports():
from .summarize_abr_gui import SummarizeABRGui
app = QtApplication()
view = SummarizeABRGui()
view.show()
app.start()
if __name__ == '__main__':
main_gui()
| mit | -145,646,216,243,829,300 | 36.140575 | 80 | 0.608516 | false |
shootsoft/practice | lintcode/NineChapters/06/add-two-numbers.py | 1 | 1272 | __author__ = 'yinjun'
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param l1: the first list
# @param l2: the second list
# @return: the sum list of l1 and l2
def addLists(self, l1, l2):
# write your code here
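        # Digit-by-digit addition with carry: consume both lists in lockstep,
        # then drain whichever list is longer, and append one extra node if a
        # carry remains at the end.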
h = ListNode(0)
l = h
add = 0
while l1!=None and l2!=None:
l.next = ListNode(l1.val + l2.val + add)
if l.next.val >= 10:
add = 1
l.next.val -=10
else:
add = 0
l = l.next
l1 = l1.next
l2 = l2.next
while l1 != None:
l.next = ListNode(l1.val + add)
if l.next.val >= 10:
add = 1
l.next.val -= 10
else:
add = 0
l = l.next
l1 = l1.next
while l2 != None:
l.next = ListNode(l2.val + add)
if l.next.val >= 10:
add = 1
l.next.val -= 10
else:
add = 0
l = l.next
l2 = l2.next
if add > 0:
l.next = ListNode(add)
return h.next
| apache-2.0 | -7,880,751,064,825,004,000 | 23.461538 | 52 | 0.400943 | false |
GoogleCloudPlatform/cloud-ops-sandbox | src/loadgenerator/sre_recipe_utils.py | 1 | 6473 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""
This module contains code for intergrating SRE Recipes with LoadGen
"""
import time
import gevent
from flask import request
from flask import jsonify
from flask import make_response
from functools import wraps
from locust.env import Environment
from locust_tasks import get_sre_recipe_user_class
def return_as_json_response(fn):
"""
Python helper decorator for returning status code and JSON responses from
a Flask request handler.
"""
@wraps(fn)
def wrapper(*args, **kwargs):
try:
body = fn(*args, **kwargs)
resp = make_response(jsonify(body), 200)
resp.headers["Content-Type"] = 'application/json'
return resp
except LookupError as e:
resp = make_response(jsonify({"err": str(e)}), 404)
resp.headers["Content-Type"] = 'application/json'
return resp
except ValueError as e:
resp = make_response(jsonify({"err": str(e)}), 400)
resp.headers["Content-Type"] = 'application/json'
return resp
except Exception as e:
resp = make_response(jsonify({"err": str(e)}), 500)
resp.headers["Content-Type"] = 'application/json'
return resp
return wrapper
def init_sre_recipe_api(env):
"""
Attach custom Flask request handlers to a locust environment's flask app
"""
if env and env.web_ui:
@env.web_ui.app.route("/api/ping")
@return_as_json_response
def ping():
return {"msg": "pong"}
@env.web_ui.app.route("/api/user_count")
@return_as_json_response
def user_count():
"""
Return the number of total users spawend for load generation.
Response:
- user_count: int
"""
return {"user_count": env.runner.user_count}
@env.web_ui.app.route("/api/spawn/<user_identifier>", methods=['POST'])
@return_as_json_response
def spawn_by_user_identifier(user_identifier=None):
"""
            Spawn a number of users with the SRE Recipe user identifier.
            Form Parameters:
- user_count: Required. The total number of users to spawn
- spawn_rate: Required. The spawn rate for the users.
- stop_after: Optional. If specified, run the load generation only
for the given number of seconds.
Response:
On success, returns status code 200 and an acknowledgement 'msg'
On error, returns status code 400 for invalid arguments, and 404
if load pattern for 'user_identifier' is not found, as well as an
'err' message.
"""
# Required Query Parameters
user_count = request.form.get("user_count", default=None, type=int)
spawn_rate = request.form.get("spawn_rate", default=None, type=int)
# The function returns None, if user_identifier is not found
user_class = get_sre_recipe_user_class(user_identifier)
if user_count is None:
raise ValueError(f"Must specify a valid, non-empty, integer value for query parameter 'user_count': {request.form.get('user_count', default=None)}")
elif spawn_rate is None:
raise ValueError(f"Must specify a valid, non-empty, integer value for query parameter 'spawn_rate': {request.form.get('spawn_rate', default=None)}")
elif user_count <= 0:
raise ValueError(f"Query parameter 'user_count' must be positive: {user_count}")
elif spawn_rate <= 0:
raise ValueError(f"Query parameter 'spawn_rate' must be positive: {spawn_rate}")
elif user_class is None:
raise LookupError(f"Cannot find SRE Recipe Load for: {user_identifier}")
# Optional Query Parameters
stop_after = request.form.get("stop_after", default=None, type=int)
if stop_after is not None and stop_after <= 0:
raise ValueError(f"Query parameter 'stop_after' must be positive: {stop_after}")
elif stop_after is None and "stop_after" in request.form:
raise ValueError(f"stop_after must be valid integer value: {request.form['stop_after']}")
# We currently only support running one SRE Recipe load each time
# for implementation simplicity.
if env.runner.user_count > 0:
env.runner.quit() # stop existing load generating users, if any
env.user_classes = [user_class] # replace with the new users
def spawn_when_all_users_stopped():
# Wait at most 10 seconds until all existing users are stopped, then
# start generating new load with the new user types
tries = 0
while tries < 10:
if env.runner.user_count == 0:
env.runner.start(user_count, spawn_rate)
break
tries += 1
time.sleep(1)
# Start anyway.
if tries == 10:
env.runner.start(user_count, spawn_rate)
# Stop later if applicable
if stop_after:
gevent.spawn_later(stop_after,
lambda: env.runner.quit())
gevent.spawn(spawn_when_all_users_stopped);
return {"msg": f"Spawn Request Received: spawning {user_count} users at {spawn_rate} users/second"}
@env.web_ui.app.route("/api/stop", methods=['POST'])
@return_as_json_response
def stop_all():
"""Stop all currently running users"""
env.runner.quit()
return {"msg": "All users stopped"}
| apache-2.0 | 5,255,987,713,217,035,000 | 41.032468 | 164 | 0.591534 | false |
suutari-ai/shoop | shuup/front/__init__.py | 2 | 2946 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from shuup.apps import AppConfig
from shuup.apps.settings import validate_templates_configuration
class ShuupFrontAppConfig(AppConfig):
name = "shuup.front"
verbose_name = "Shuup Frontend"
label = "shuup_front"
provides = {
"admin_category_form_part": [
"shuup.front.admin_module.sorts_and_filters.form_parts.ConfigurationCategoryFormPart"
],
"admin_module": [
"shuup.front.admin_module.CartAdminModule",
],
"admin_shop_form_part": [
"shuup.front.admin_module.sorts_and_filters.form_parts.ConfigurationShopFormPart"
],
"notify_event": [
"shuup.front.notify_events:OrderReceived",
"shuup.front.notify_events:ShipmentCreated",
"shuup.front.notify_events:ShipmentDeleted",
"shuup.front.notify_events:PaymentCreated",
"shuup.front.notify_events:RefundCreated",
],
"notify_script_template": [
"shuup.front.notify_script_templates:PaymentCreatedEmailScriptTemplate",
"shuup.front.notify_script_templates:RefundCreatedEmailScriptTemplate",
"shuup.front.notify_script_templates:ShipmentDeletedEmailScriptTemplate",
"shuup.front.notify_script_templates:OrderConfirmationEmailScriptTemplate",
"shuup.front.notify_script_templates:ShipmentCreatedEmailScriptTemplate",
],
"front_extend_product_list_form": [
"shuup.front.forms.product_list_modifiers.CategoryProductListFilter",
"shuup.front.forms.product_list_modifiers.LimitProductListPageSize",
"shuup.front.forms.product_list_modifiers.ProductPriceFilter",
"shuup.front.forms.product_list_modifiers.ProductVariationFilter",
"shuup.front.forms.product_list_modifiers.SortProductListByCreatedDate",
"shuup.front.forms.product_list_modifiers.SortProductListByAscendingCreatedDate",
"shuup.front.forms.product_list_modifiers.SortProductListByName",
"shuup.front.forms.product_list_modifiers.SortProductListByPrice",
"shuup.front.forms.product_list_modifiers.ManufacturerProductListFilter",
],
"front_product_order_form": [
"shuup.front.forms.order_forms:VariableVariationProductOrderForm",
"shuup.front.forms.order_forms:SimpleVariationProductOrderForm",
"shuup.front.forms.order_forms:SimpleProductOrderForm",
],
}
def ready(self):
# connect signals
import shuup.front.notify_events # noqa: F401
validate_templates_configuration()
default_app_config = "shuup.front.ShuupFrontAppConfig"
| agpl-3.0 | 1,166,574,447,828,370,000 | 43.636364 | 97 | 0.682281 | false |
syl20bnr/i3ci | _deprecated/scripts/menu/i3_actions.py | 1 | 17882 | #!/usr/bin/env python
# author: syl20bnr (2013)
# goal: i3 actions module.
import os
from subprocess import Popen, PIPE
import i3
from Xlib import display
import i3ci_menu
from constants import DMENU_MAX_ROW, DMENU_FONT, DMENU_HEIGHT
from feeders import (cur_workspace,
cur_workspaces,
free_workspaces,
cur_output)
MODULE_PATH = os.path.dirname(os.path.abspath(__file__))
# DMENU = os.path.normpath(os.path.join(MODULE_PATH, '../../bin/i3ci_menu'))
class Action(object):
''' Define an i3-msg action. '''
def __init__(self):
self._actions = []
def add_action(self, action, args=None):
if args:
action = action.__call__(self, *args)
else:
action = action.__call__(self)
self._actions.append(action)
def get_command(self):
return 'i3-msg ' + ','.join(self._actions)
def process(self):
Popen(self.get_command(), shell=True)
def exec_(self, app):
return 'exec {0}'.format(app)
def set_mode(self, mode):
''' Set the specified mode '''
return 'mode {0}'.format(mode)
def set_default_mode(self):
''' Set the default mode '''
return self.set_mode('default')
def jump_to_window(self, window):
''' Jump to the specified window. '''
return '[con_id={0}] focus'.format(window)
def jump_to_workspace(self, workspace):
''' Jump to the given workspace.
        Currently used workspaces are prefixed with a dot '.'
Workspace '`' means "back_and_forth" command.
Workspace '=' is the scratch pad
'''
if workspace == '`':
return "workspace back_and_forth"
elif workspace == '=':
return "scratchpad show"
else:
return "workspace {0}".format(workspace)
def send_window_to_output(self, output):
''' Send the current window to the specified output. '''
return "move to output {0}".format(output)
def send_workspace_to_output(self, output):
''' Send the current workspace to the specified output. '''
return "move workspace to output {0}".format(output)
def send_window_to_workspace(self, workspace):
''' Send the current window to the passed workspace. '''
if workspace == '`':
return "move workspace back_and_forth"
elif workspace == '=':
return "move scratchpad"
else:
return "move workspace {0}".format(workspace)
def focus_output(self, output):
''' Focus the specified output. '''
return "focus output {0}".format(output)
def focus_window(self, id_):
''' Focus the specified output. '''
return "[con_id={0}] focus".format(id_)
def mark_window(self, id_, mark):
''' Set the passed mark to the window with the passed id_. '''
return '[con_id={0}] mark {1}'.format(id_, mark)
def unmark_window(self, mark):
''' Disable the passed mark. '''
return 'unmark {0}'.format(mark)
def rename_workspace(self, from_, to):
''' Rename the workspace '''
return '\'rename workspace "{0}" to "{1}"\''.format(from_, to)
def cmd(self, cmd):
# wonderful method :-)
return cmd
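# Example (sketch): chained sub-commands are sent as one i3-msg invocation.
#
#   action = Action()
#   action.add_action(Action.jump_to_workspace, ('3',))
#   action.add_action(Action.exec_, ('urxvt',))
#   action.get_command()  # -> 'i3-msg workspace 3,exec urxvt'
#   action.process()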
# ----------------------------------------------------------------------------
# Action groups
# ----------------------------------------------------------------------------
def default_mode(action=None):
''' Add or perform an action to set the default mode. '''
if action:
action.add_action(Action.set_default_mode)
else:
action = Action()
action.add_action(Action.set_default_mode)
action.process()
def get_max_row(rcount):
return max([0, min([DMENU_MAX_ROW, rcount])])
def launch_app(feeder, app=None, output='all', free=False):
''' Launch an application on the specified monitor.
output='all' means the current workspace on the current monitor.
If free is true then the application is opened in a new workspace.
'''
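    # The command may be given directly via `app` or picked interactively with
    # i3ci_menu; an entry containing '-cd' (e.g. urxvt) is started in the
    # working directory of the focused window, obtained through xcwd.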
reply = app
if not reply:
input_ = feeder.feed().encode('utf-8')
size = get_max_row(len(input_))
proc = i3ci_menu.call(lmax=size,
f=DMENU_FONT,
h=DMENU_HEIGHT)
reply = proc.communicate(input_)[0]
if reply:
reply = reply.decode('utf-8')
if '-cd' in reply:
# MODULE_PATH = os.path.dirname(os.path.abspath(__file__))
# DMENU = os.path.normpath(os.path.join(MODULE_PATH,
# '../../bin/i3ci_menu'))
xcwd = Popen('xcwd', stdin=PIPE, stdout=PIPE).communicate()[0]
reply = '"' + reply + ' ' + xcwd + '"'
if not free and (output == 'all' or
output == cur_output.get_current_output()):
# open on the current workspace
action = Action()
action.add_action(Action.exec_, (reply,))
default_mode(action)
action.process()
if not free and (output != 'all' and
output != cur_output.get_current_output()):
# open on the visible workspace on another output
otherw = cur_workspace.feed(output)
action = Action()
action.add_action(Action.jump_to_workspace, (otherw,))
action.add_action(Action.exec_, (reply,))
default_mode(action)
action.process()
elif free and (output == 'all' or
output == cur_output.get_current_output()):
# free workspace on the current output
freew = free_workspaces.get_free_workspaces()[0]
action = Action()
action.add_action(Action.jump_to_workspace, (freew,))
action.add_action(Action.exec_, (reply,))
default_mode(action)
action.process()
elif free and (output != 'all' and
output != cur_output.get_current_output()):
# free workspace on another output
freew = free_workspaces.get_free_workspaces()[0]
action = Action()
action.add_action(Action.focus_output, (output,))
action.add_action(Action.jump_to_workspace, (freew,))
action.add_action(Action.exec_, (reply,))
default_mode(action)
action.process()
else:
default_mode()
def clone_window(output='all', free=False):
from feeders import cur_window
win = cur_window.get_current_window()[0]
dpy = display.Display()
xwin = dpy.create_resource_object('window', win['window'])
inst, _ = xwin.get_wm_class()
if inst:
if inst == 'urxvt':
inst += ' -cd'
launch_app(None, inst, output, free)
def jump_to_window(feeder, inst, output='all'):
''' Jump to the window chosen by the user using i3ci_menu. '''
(wins, d) = feeder.feed(inst, output)
size = get_max_row(len(wins))
proc = i3ci_menu.call(f=DMENU_FONT,
lmax=size,
sb='#b58900')
reply = proc.communicate('\n'.join(wins).encode('utf-8'))[0]
if reply:
reply = reply.decode('utf-8')
action = Action()
action.add_action(Action.jump_to_window, (d.get(reply),))
default_mode(action)
action.process()
else:
default_mode()
def jump_to_workspace(feeder):
''' Jump to the workspace chosen by the user using i3ci_menu. '''
input_ = '\n'.join(feeder.feed()).encode('utf-8')
size = get_max_row(len(input_))
proc = i3ci_menu.call(h=DMENU_HEIGHT,
lmax=size,
r=True,
sb='#d33682')
reply = proc.communicate(input_)[0]
if reply:
reply = reply.decode('utf-8')
action = Action()
action.add_action(Action.jump_to_workspace, (reply,))
default_mode(action)
action.process()
else:
default_mode()
def jump_to_currently_used_workspace(feeder, output='all'):
    ''' Jump to a currently used workspace on the specified outputs,
    chosen by the user using i3ci_menu.
    '''
input_ = '\n'.join(feeder.feed(output)).encode('utf-8')
size = get_max_row(len(input_))
proc = i3ci_menu.call(f=DMENU_FONT,
h=DMENU_HEIGHT,
lmax=size,
r=True,
sb='#268bd2')
reply = proc.communicate(input_)[0]
if reply:
reply = reply.decode('utf-8')
action = Action()
action.add_action(Action.jump_to_workspace, (reply,))
default_mode(action)
action.process()
else:
default_mode()
def send_workspace_to_output(feeder, output='all'):
''' Send the current workspace to the selected output. '''
if output == 'all':
# be sure that the workspace exists
cur_wks = cur_workspace.get_current_workspace()
if not cur_wks:
return
outs = feeder.get_outputs_dictionary()
# remove the current output
coutput = cur_output.get_current_output()
fouts = [k for k, v in outs.iteritems() if v != coutput]
input_ = '\n'.join(sorted(fouts)).encode('utf-8')
size = get_max_row(len(input_))
proc = i3ci_menu.call(f=DMENU_FONT,
h=DMENU_HEIGHT,
lmax=size,
r=False,
sb='#268bd2')
reply = proc.communicate(input_)[0]
if reply:
reply = reply.decode('utf-8')
output = outs[reply]
action = Action()
action.add_action(Action.send_workspace_to_output, (output,))
default_mode(action)
action.process()
def send_window_to_output(feeder, output='all'):
''' Send the current window to the selected output. '''
if output == 'all':
outs = feeder.get_outputs_dictionary()
# remove the current output
coutput = cur_output.get_current_output()
fouts = [k for k, v in outs.iteritems() if v != coutput]
input_ = '\n'.join(sorted(fouts)).encode('utf-8')
size = get_max_row(len(input_))
proc = i3ci_menu.call(f=DMENU_FONT,
h=DMENU_HEIGHT,
lmax=size,
r=False,
sb='#268bd2')
reply = proc.communicate(input_)[0]
if reply:
reply = reply.decode('utf-8')
output = outs[reply]
action = Action()
action.add_action(Action.send_window_to_output, (output,))
action.add_action(Action.focus_output, (output,))
default_mode(action)
action.process()
def send_window_to_workspace(feeder):
''' Send the current window to the selected workspace. '''
input_ = '\n'.join(feeder.feed()).encode('utf-8')
size = get_max_row(len(input_))
proc = i3ci_menu.call(f=DMENU_FONT,
h=DMENU_HEIGHT,
lmax=size,
r=True,
sb='#6c71c4')
reply = proc.communicate(input_)[0]
if reply:
reply = reply.decode('utf-8')
action = Action()
action.add_action(Action.send_window_to_workspace, (reply,))
default_mode(action)
action.process()
else:
default_mode()
def send_window_to_free_workspace(feeder, output):
''' Send the current window to a free workspace on the given output. '''
freew = feeder.feed()
if freew:
from feeders import cur_output
w = freew[0]
action = Action()
action.add_action(Action.send_window_to_workspace, (w,))
action.add_action(Action.jump_to_workspace, (w,))
if output != 'all' and output != cur_output.feed():
action.add_action(Action.send_workspace_to_output, (output,))
default_mode(action)
action.process()
else:
default_mode()
def send_window_to_used_workspace(feeder, output):
''' Send the current window to a used workspace on the given output. '''
input_ = '\n'.join(feeder.feed(output)).encode('utf-8')
size = get_max_row(len(input_))
proc = i3ci_menu.call(f=DMENU_FONT,
h=DMENU_HEIGHT,
lmax=size,
r=True,
sb='#6c71c4')
reply = proc.communicate(input_)[0]
if reply:
reply = reply.decode('utf-8')
action = Action()
action.add_action(Action.send_window_to_workspace, (reply,))
action.add_action(Action.jump_to_workspace, (reply,))
default_mode(action)
action.process()
else:
default_mode()
def _choose_other_windows(feeder, output):
'''
    Launch an i3ci_menu instance to select a window which is not on the current
    workspace.
    Return a tuple composed of the window name and the window id.
    Return (None, None) if nothing has been selected.
'''
(wins, d) = feeder.feed(output=output)
size = get_max_row(len(wins))
proc = i3ci_menu.call(f=DMENU_FONT,
lmax=size,
sb='#6c71c4')
ws = cur_workspace.feed()
excluded_wins = _get_window_ids_of_workspace(ws)
if excluded_wins:
        # remove the windows of the current workspace from the list
wins = [k for k, v in d.iteritems() if v not in excluded_wins]
reply = proc.communicate('\n'.join(wins).encode('utf-8'))[0]
if reply:
reply = reply.decode('utf-8')
return reply, d.get(reply)
else:
return None, None
def send_window_to_win_workspace(feeder, output='all'):
''' Send the current window to the workspace of the selected window. '''
win, win_id = _choose_other_windows(feeder, output)
if win:
ws = _get_window_workspace(win_id)
action = Action()
action.add_action(Action.send_window_to_workspace, (ws,))
action.add_action(Action.jump_to_workspace, (ws,))
default_mode(action)
action.process()
else:
default_mode()
def bring_window(feeder, output='all'):
''' Bring the chosen window to the current workspace. '''
win, win_id = _choose_other_windows(feeder, output)
if win:
# TODO
ws = cur_workspace.feed()
other_ws = _get_window_workspace(win_id)
action = Action()
# switch focus to the window to bring
action.add_action(Action.jump_to_workspace, (other_ws,))
action.focus_window(win_id)
# send the window to the original workspace
action.add_action(Action.send_window_to_workspace, (ws,))
action.add_action(Action.jump_to_workspace, (ws,))
# make sure the new window is focused at the end
action.focus_window(win_id)
# print action.get_command()
default_mode(action)
action.process()
else:
default_mode()
def focus_workspace(mon):
wks = cur_workspace.feed(mon)
action = Action()
action.add_action(Action.jump_to_workspace, (wks,))
default_mode(action)
action.process()
def focus_nth_window(nth, ws=None):
    ''' Roughly focus the nth window in the hierarchy (limited to the first 10). '''
wins = _get_windows_from_workspace(ws)
action = Action()
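    # nth == 0 is treated as the tenth window (presumably so a 0 keybinding can
    # still reach window number 10).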
if nth == 0:
nth = 10
action.add_action(Action.focus_window, (wins[nth-1],))
action.process()
def logout():
from feeders import logout as logout_feeder
from feeders import confirm
proc = i3ci_menu.call(f=DMENU_FONT,
lmax=4,
nb='#002b36',
nf='#eee8dc',
sb='#cb4b16',
sf='#eee8d5')
reply = proc.communicate(
'\n'.join(logout_feeder.feed()).encode('utf-8'))[0]
if reply:
action = Action()
action.add_action(Action.set_mode, ("confirm {0} ?".format(reply),))
action.process()
proc = i3ci_menu.call(f=DMENU_FONT,
lmax=4,
nb='#002b36',
nf='#eee8dc',
sb='#cb4b16',
sf='#eee8d5')
conf = proc.communicate('\n'.join(confirm.feed()).encode('utf-8'))[0]
if conf == 'OK':
action = Action()
default_mode(action)
action.process()
exec_ = os.path.join(MODULE_PATH, 'i3-exit')
cmd = '{0} --{1}'.format(exec_, reply)
Popen(cmd, shell=True)
return
default_mode()
def execute_cmd(feeder, prefix):
''' Execute: i3-msg prefix *user_choice* '''
proc = i3ci_menu.call(p=feeder.get_prompt(prefix),
f=DMENU_FONT,
h=DMENU_HEIGHT,
sb='#cb4b16')
reply = proc.communicate('\n'.join(feeder.feed(prefix)).encode('utf-8'))[0]
if reply:
reply = reply.decode('utf-8')
cmd = reply
if prefix:
cmd = prefix + ' ' + cmd
action = Action()
action.add_action(Action.cmd, (cmd,))
action.process()
def _get_window_workspace(win_id):
cworkspaces = cur_workspaces.get_cur_workspaces()
for ws in cworkspaces:
ws_tree = i3.filter(name=ws)
if i3.filter(tree=ws_tree, id=win_id):
return ws
return None
def _get_window_ids_of_workspace(ws):
res = []
wks = i3.filter(name=ws)
wins = i3.filter(tree=wks, nodes=[])
for w in wins:
res.append(w['id'])
return res
def _get_windows_from_workspace(ws):
res = []
if ws is None:
ws = cur_workspace.feed()
workspace = i3.filter(name=ws)
if workspace:
workspace = workspace[0]
windows = i3.filter(workspace, nodes=[])
for window in windows:
res.append(window['id'])
return res
| mit | 1,454,420,463,426,443,000 | 32.612782 | 79 | 0.552455 | false |
ryanpetrello/draughtcraft | draughtcraft/tests/selenium/recipes/test_builder.py | 1 | 23046 | import time
from selenium.webdriver.support.ui import Select
from draughtcraft import model
from draughtcraft.tests.selenium import TestSeleniumApp
class TestAllGrainBuilder(TestSeleniumApp):
def setUp(self):
super(TestAllGrainBuilder, self).setUp()
model.Style(
name='American IPA',
min_og=1.056,
max_og=1.075,
min_fg=1.01,
max_fg=1.018,
min_ibu=40,
max_ibu=70,
min_srm=6,
max_srm=15,
min_abv=.055,
max_abv=.075,
category_number=14,
style_letter='B'
)
model.Style(
name='Spice, Herb, or Vegetable Beer',
category_number=21,
style_letter='A'
)
model.commit()
self.get("/")
self.b.find_element_by_link_text("Create Your Own Recipe").click()
time.sleep(.1)
self.b.find_element_by_id("name").clear()
self.b.find_element_by_id("name").send_keys("Rocky Mountain River IPA")
Select(
self.b.find_element_by_id("type")
).select_by_visible_text("All Grain")
self.b.find_element_by_css_selector("button.ribbon").click()
@property
def b(self):
return self.browser
def blur(self):
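        # Click a neutral element (the site logo) so the focused field loses
        # focus, its change handler fires, and the builder saves the edit.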
self.b.find_element_by_css_selector(".logo").click()
def test_defaults(self):
self.wait.until(
lambda driver:
self.b.find_element_by_name("name").get_attribute("value") ==
"Rocky Mountain River IPA"
)
self.assertEqual(
"DraughtCraft - Rocky Mountain River IPA",
self.b.title
)
self.assertEqual(
"5",
self.b.find_element_by_name("volume").get_attribute("value")
)
assert self.b.find_element_by_css_selector('.step.mash') is not None
assert self.b.find_element_by_css_selector('.step.boil') is not None
assert self.b.find_element_by_css_selector('.step.ferment') \
is not None
def test_name_change_save(self):
self.b.find_element_by_name("name").send_keys("!")
self.blur()
time.sleep(2)
self.b.refresh()
self.wait.until(
lambda driver:
self.b.find_element_by_name("name").get_attribute("value") ==
"Rocky Mountain River IPA!"
)
def test_name_change_updates_page_title(self):
self.b.find_element_by_name("name").send_keys("!")
self.blur()
assert self.b.title == 'DraughtCraft - Rocky Mountain River IPA!'
def test_style_choose(self):
self.b.find_element_by_link_text("No Style Specified").click()
self.b.find_element_by_link_text("American IPA").click()
self.blur()
time.sleep(2)
self.b.refresh()
self.wait.until(
lambda driver:
self.b.find_element_by_css_selector(".selectBox-label").text ==
"American IPA"
)
self.b.find_element_by_link_text("American IPA").click()
self.b.find_element_by_link_text("No Style Specified").click()
self.blur()
time.sleep(2)
self.b.refresh()
self.wait.until(
lambda driver:
self.b.find_element_by_css_selector(".selectBox-label").text ==
"No Style Specified"
)
def test_volume_change_save(self):
self.b.find_element_by_name("volume").clear()
self.b.find_element_by_name("volume").send_keys("10")
self.blur()
time.sleep(2)
self.b.refresh()
self.wait.until(
lambda driver:
self.b.find_element_by_name("volume").get_attribute("value") ==
"10"
)
def test_notes_change_save(self):
self.b.find_element_by_css_selector('.notes textarea').send_keys("ABC")
self.blur()
time.sleep(2)
self.b.refresh()
self.wait.until(
lambda driver:
self.b.find_element_by_css_selector('.notes textarea')
.get_attribute("value") == "ABC"
)
def test_remove_addition(self):
model.Hop(
name="Simcoe",
origin='US',
alpha_acid=13,
description='Sample Description'
)
model.commit()
self.b.refresh()
for step in ('Mash', 'Boil', 'Ferment'):
self.b.find_element_by_link_text(step).click()
assert len(self.b.find_elements_by_css_selector(
'.%s .ingredient-list .addition' % step.lower()
)) == 0
label = 'Add Dry Hops...' if step == 'Ferment' else 'Add Hops...'
self.b.find_element_by_link_text(label).click()
self.b.find_element_by_link_text("Simcoe (US)").click()
time.sleep(2)
assert len(self.b.find_elements_by_css_selector(
'.%s .ingredient-list .addition:not(:empty)' % step.lower()
)) == 1
self.b.find_element_by_css_selector(
'.%s .ingredient-list .addition .close a' % step.lower()
).click()
time.sleep(2)
self.b.refresh()
assert len(self.b.find_elements_by_css_selector(
'.%s .ingredient-list .addition' % step.lower()
)) == 0
def test_add_malt(self):
model.Fermentable(
name='2-Row',
type='MALT',
origin='US',
ppg=36,
lovibond=2,
description='Sample Description'
)
model.commit()
self.b.refresh()
for step in ('Mash', 'Boil'):
self.b.find_element_by_link_text(step).click()
assert len(self.b.find_elements_by_css_selector(
'.%s .ingredient-list .addition' % step.lower()
)) == 0
self.b.find_element_by_link_text(
"Add Malt/Fermentables..."
).click()
self.b.find_element_by_link_text("2-Row (US)").click()
assert len(self.b.find_elements_by_css_selector(
'.%s .ingredient-list .addition:not(:empty)' % step.lower()
)) == 1
def test_add_extract(self):
model.Fermentable(
name="Cooper's Amber LME",
type='EXTRACT',
origin='AUSTRALIAN',
ppg=36,
lovibond=13.3,
description='Sample Description'
)
model.commit()
self.b.refresh()
for step in ('Mash', 'Boil'):
self.b.find_element_by_link_text(step).click()
assert len(self.b.find_elements_by_css_selector(
'.%s .ingredient-list .addition' % step.lower()
)) == 0
self.b.find_element_by_link_text("Add Malt Extract...").click()
self.b.find_element_by_link_text(
"Cooper's Amber LME (Australian)"
).click()
assert len(self.b.find_elements_by_css_selector(
'.%s .ingredient-list .addition:not(:empty)' % step.lower()
)) == 1
def test_add_hop(self):
model.Hop(
name="Simcoe",
origin='US',
alpha_acid=13,
description='Sample Description'
)
model.commit()
self.b.refresh()
for step in ('Mash', 'Boil', 'Ferment'):
self.b.find_element_by_link_text(step).click()
assert len(self.b.find_elements_by_css_selector(
'.%s .ingredient-list .addition' % step.lower()
)) == 0
label = 'Add Dry Hops...' if step == 'Ferment' else 'Add Hops...'
self.b.find_element_by_link_text(label).click()
self.b.find_element_by_link_text("Simcoe (US)").click()
assert len(self.b.find_elements_by_css_selector(
'.%s .ingredient-list .addition:not(:empty)' % step.lower()
)) == 1
def test_add_extra(self):
model.Extra(
name="Whirlfloc Tablet",
description='Sample Description'
)
model.commit()
self.b.refresh()
for step in ('Mash', 'Boil', 'Ferment'):
self.b.find_element_by_link_text(step).click()
assert len(self.b.find_elements_by_css_selector(
'.%s .ingredient-list .addition' % step.lower()
)) == 0
self.b.find_element_by_link_text("Add Misc...").click()
self.b.find_element_by_link_text("Whirlfloc Tablet").click()
assert len(self.b.find_elements_by_css_selector(
'.%s .ingredient-list .addition:not(:empty)' % step.lower()
)) == 1
def test_mash_method_change(self):
Select(
self.b.find_element_by_name('mash_method')
).select_by_visible_text("Multi-Step")
self.blur()
time.sleep(2)
self.b.refresh()
self.wait.until(
lambda driver:
self.b.find_element_by_name("mash_method").
get_attribute("value") == "MULTISTEP"
)
def test_mash_instructions_change(self):
self.b.find_element_by_name('mash_instructions').clear()
self.b.find_element_by_name('mash_instructions').send_keys(
'Testing 1 2 3'
)
self.blur()
time.sleep(2)
self.b.refresh()
self.wait.until(
lambda driver:
self.b.find_element_by_name("mash_instructions").
get_attribute("value") == "Testing 1 2 3"
)
def test_boil_minutes(self):
self.b.find_element_by_link_text('Boil').click()
self.b.find_element_by_name('boil_minutes').clear()
self.b.find_element_by_name('boil_minutes').send_keys('90')
self.blur()
time.sleep(2)
self.b.refresh()
self.wait.until(
lambda driver:
self.b.find_element_by_name("boil_minutes").
get_attribute("value") == "90"
)
def test_fermentation_schedule_change(self):
self.b.find_element_by_link_text('Ferment').click()
self.b.find_element_by_link_text("Add...").click()
self.b.find_element_by_link_text("Add...").click()
days = self.b.find_elements_by_css_selector('.process select.days')
temps = self.b.find_elements_by_css_selector(
'.process select.fahrenheit'
)
assert len(days) == 3
assert len(temps) == 3
for i, el in enumerate(days):
Select(el).select_by_visible_text(str(14 + (7 * i)))
for j, el in enumerate(temps):
Select(el).select_by_visible_text(str(68 + (2 * j)))
self.blur()
time.sleep(2)
self.b.refresh()
time.sleep(1)
days = self.b.find_elements_by_css_selector('.process select.days')
temps = self.b.find_elements_by_css_selector(
'.process select.fahrenheit'
)
assert len(days) == 3
assert len(temps) == 3
for i, d in enumerate(days):
assert d.get_attribute('value') == str(14 + (7 * i))
for j, t in enumerate(temps):
assert t.get_attribute('value') == str(68 + (2 * j))
def test_change_fermentable_amount(self):
model.Fermentable(
name='2-Row',
type='MALT',
origin='US',
ppg=36,
lovibond=2,
description='Sample Description'
)
model.commit()
self.b.refresh()
for step in ('Mash', 'Boil'):
self.b.find_element_by_link_text(step).click()
self.b.find_element_by_link_text(
"Add Malt/Fermentables..."
).click()
self.b.find_element_by_link_text("2-Row (US)").click()
i = self.b.find_element_by_css_selector(
'.%s .addition .amount input' % step.lower()
)
i.clear()
i.send_keys('10 lb')
self.blur()
time.sleep(2)
self.b.refresh()
i = self.b.find_element_by_css_selector(
'.%s .addition .amount input' % step.lower()
)
assert i.get_attribute('value') == '10 lb'
def test_metric_entry(self):
model.Fermentable(
name='2-Row',
type='MALT',
origin='US',
ppg=36,
lovibond=2,
description='Sample Description'
)
model.commit()
self.b.refresh()
for step in ('Mash', 'Boil'):
self.b.find_element_by_link_text(step).click()
self.b.find_element_by_link_text(
"Add Malt/Fermentables..."
).click()
self.b.find_element_by_link_text("2-Row (US)").click()
i = self.b.find_element_by_css_selector(
'.%s .addition .amount input' % step.lower()
)
i.clear()
i.send_keys('1 kg')
self.blur()
time.sleep(2)
self.b.refresh()
i = self.b.find_element_by_css_selector(
'.%s .addition .amount input' % step.lower()
)
assert i.get_attribute('value') == '2.204 lb'
def test_change_hop_form(self):
model.Hop(
name="Simcoe",
origin='US',
alpha_acid=13,
description='Sample Description'
)
model.commit()
self.b.refresh()
for step in ('Mash', 'Boil', 'Ferment'):
self.b.find_element_by_link_text(step).click()
label = 'Add Dry Hops...' if step == 'Ferment' else 'Add Hops...'
self.b.find_element_by_link_text(label).click()
self.b.find_element_by_link_text("Simcoe (US)").click()
s = Select(self.b.find_element_by_css_selector(
'.%s .addition .form select' % step.lower()
))
s.select_by_visible_text('Pellet')
self.blur()
time.sleep(2)
self.b.refresh()
s = self.b.find_element_by_css_selector(
'.%s .addition .form select' % step.lower()
)
assert s.get_attribute('value') == 'PELLET'
def test_change_hop_aa(self):
model.Hop(
name="Simcoe",
origin='US',
alpha_acid=13,
description='Sample Description'
)
model.commit()
self.b.refresh()
for step in ('Mash', 'Boil', 'Ferment'):
self.b.find_element_by_link_text(step).click()
label = 'Add Dry Hops...' if step == 'Ferment' else 'Add Hops...'
self.b.find_element_by_link_text(label).click()
self.b.find_element_by_link_text("Simcoe (US)").click()
i = self.b.find_element_by_css_selector(
'.%s .addition .unit input' % step.lower()
)
i.clear()
i.send_keys('12')
self.blur()
time.sleep(2)
self.b.refresh()
i = self.b.find_element_by_css_selector(
'.%s .addition .unit input' % step.lower()
)
assert i.get_attribute('value') == '12'
def test_change_hop_boil_time(self):
model.Hop(
name="Simcoe",
origin='US',
alpha_acid=13,
description='Sample Description'
)
model.commit()
self.b.refresh()
self.b.find_element_by_link_text('Boil').click()
self.b.find_element_by_link_text('Add Hops...').click()
self.b.find_element_by_link_text("Simcoe (US)").click()
selects = self.b.find_elements_by_css_selector(
'.boil .addition .time select'
)
Select(selects[1]).select_by_visible_text('45 min')
self.blur()
time.sleep(2)
self.b.refresh()
selects = self.b.find_elements_by_css_selector(
'.boil .addition .time select'
)
assert selects[1].get_attribute('value') == '45'
def test_change_hop_first_wort(self):
model.Hop(
name="Simcoe",
origin='US',
alpha_acid=13,
description='Sample Description'
)
model.commit()
self.b.refresh()
self.b.find_element_by_link_text('Boil').click()
self.b.find_element_by_link_text('Add Hops...').click()
self.b.find_element_by_link_text("Simcoe (US)").click()
selects = self.b.find_elements_by_css_selector(
'.boil .addition .time select'
)
Select(selects[0]).select_by_visible_text('First Wort')
assert not selects[1].is_displayed()
def test_change_hop_flameout(self):
model.Hop(
name="Simcoe",
origin='US',
alpha_acid=13,
description='Sample Description'
)
model.commit()
self.b.refresh()
self.b.find_element_by_link_text('Boil').click()
self.b.find_element_by_link_text('Add Hops...').click()
self.b.find_element_by_link_text("Simcoe (US)").click()
selects = self.b.find_elements_by_css_selector(
'.boil .addition .time select'
)
Select(selects[0]).select_by_visible_text('Flame Out')
assert not selects[1].is_displayed()
def test_yeast_step(self):
model.Yeast(
name='Wyeast 1056 - American Ale',
type='ALE',
form='LIQUID',
attenuation=.75,
flocculation='MEDIUM/HIGH'
)
model.commit()
self.b.refresh()
self.b.find_element_by_link_text('Ferment').click()
self.b.find_element_by_link_text('Add Yeast...').click()
self.b.find_element_by_link_text('Wyeast 1056 - American Ale').click()
Select(self.b.find_element_by_css_selector(
'.ferment .addition select'
)).select_by_visible_text('Secondary')
time.sleep(2)
self.b.refresh()
assert self.b.find_element_by_css_selector(
'.ferment .addition select'
).get_attribute('value') == 'SECONDARY'
class TestExtractBuilder(TestSeleniumApp):
def setUp(self):
super(TestExtractBuilder, self).setUp()
self.get("/")
self.b.find_element_by_link_text("Create Your Own Recipe").click()
time.sleep(.1)
self.b.find_element_by_id("name").clear()
self.b.find_element_by_id("name").send_keys("Rocky Mountain River IPA")
Select(
self.b.find_element_by_id("type")
).select_by_visible_text("Extract")
self.b.find_element_by_css_selector("button.ribbon").click()
@property
def b(self):
return self.browser
def test_mash_missing(self):
assert len(
self.b.find_elements_by_css_selector('.step.boil h2 li a')
) == 2
class TestMetricBuilder(TestSeleniumApp):
def setUp(self):
super(TestMetricBuilder, self).setUp()
self.get("/")
self.b.find_element_by_link_text("Create Your Own Recipe").click()
self.b.find_element_by_link_text("Want Metric Units?").click()
time.sleep(.1)
self.b.find_element_by_id("name").clear()
self.b.find_element_by_id("name").send_keys("Rocky Mountain River IPA")
Select(
self.b.find_element_by_id("type")
).select_by_visible_text("All Grain")
self.b.find_element_by_css_selector("button.ribbon").click()
@property
def b(self):
return self.browser
def blur(self):
self.b.find_element_by_css_selector(".logo").click()
def test_defaults(self):
self.wait.until(
lambda driver:
self.b.find_element_by_name("name").get_attribute("value") ==
"Rocky Mountain River IPA"
)
self.assertEqual(
"DraughtCraft - Rocky Mountain River IPA",
self.b.title
)
self.assertEqual(
"20",
self.b.find_element_by_name("volume").get_attribute("value")
)
assert self.b.find_element_by_css_selector('.step.mash') is not None
assert self.b.find_element_by_css_selector('.step.boil') is not None
assert self.b.find_element_by_css_selector('.step.ferment') \
is not None
def test_volume_change_save(self):
self.b.find_element_by_name("volume").clear()
self.b.find_element_by_name("volume").send_keys("10")
self.blur()
time.sleep(2)
self.b.refresh()
self.wait.until(
lambda driver:
self.b.find_element_by_name("volume").get_attribute("value") ==
"10"
)
def test_metric_ingredient_amount(self):
model.Fermentable(
name='2-Row',
type='MALT',
origin='US',
ppg=36,
lovibond=2,
description='Sample Description'
)
model.commit()
self.b.refresh()
for step in ('Mash', 'Boil'):
self.b.find_element_by_link_text(step).click()
self.b.find_element_by_link_text(
"Add Malt/Fermentables..."
).click()
self.b.find_element_by_link_text("2-Row (US)").click()
i = self.b.find_element_by_css_selector(
'.%s .addition .amount input' % step.lower()
)
i.clear()
i.send_keys('1 kg')
self.blur()
time.sleep(2)
self.b.refresh()
i = self.b.find_element_by_css_selector(
'.%s .addition .amount input' % step.lower()
)
assert i.get_attribute('value') == '1 kg'
def test_fermentation_schedule_change(self):
self.b.find_element_by_link_text('Ferment').click()
self.b.find_element_by_link_text("Add...").click()
self.b.find_element_by_link_text("Add...").click()
days = self.b.find_elements_by_css_selector('.process select.days')
temps = self.b.find_elements_by_css_selector(
'.process select.fahrenheit'
)
assert len(days) == 3
assert len(temps) == 3
for i, el in enumerate(days):
Select(el).select_by_visible_text(str(14 + (7 * i)))
for j, el in enumerate(temps):
Select(el).select_by_visible_text(str(20 + (2 * j)))
self.blur()
time.sleep(2)
self.b.refresh()
time.sleep(1)
days = self.b.find_elements_by_css_selector('.process select.days')
temps = self.b.find_elements_by_css_selector(
'.process select.fahrenheit'
)
assert len(days) == 3
assert len(temps) == 3
for i, d in enumerate(days):
assert d.get_attribute('value') == str(14 + (7 * i))
for j, t in enumerate(temps):
assert t.get_attribute('value') == str(20 + (2 * j))
| bsd-3-clause | 5,432,450,340,929,008,000 | 30.017497 | 79 | 0.525037 | false |
DarkRedman/PyGet | pyget.py | 1 | 6109 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Filename: pydl.py #
# Authors: Brian Tomlinson <[email protected]> #
# Manuel Debaux <[email protected]> #
# Brian Turner <[email protected]> #
# URL: [email protected]:darthlukan/piddle.git #
# Description: A simple CLI download manager written in Python. #
# Warning: If you received this program from any source other than #
# the above noted URL, please check the source code! You may have #
# downloaded a file with malicious code injected. #
# License: GPLv2, Please see the included LICENSE file. #
# Note: This software should be considered experimental! #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Explanation of import list:
# os and sys are needed to make sure that files and system level stuff
# are handled properly. urllib(2) for communications (we are downloading)
# fileinput handles looping over links in a file (txt for now, csv later)
# progressbar adds some bling for the user to look at while we work. To get
# progressbar to work, pip2 install progressbar.
import os
import sys
import urllib
import urllib2
import fileinput
from progressbar import *
#Introduce ourselves
print("""Hello! I am going to ensure that downloading your files, renaming them,
and specifying where to save them, are as simple as possible. Let's get to it!""")
# Warn the user about the experimental file looping feature
print('Be warned! File Looping has been implemented but is experimental.')
print('Downloading large groups of files could lead to RAM abuse.')
# The function that actually gets stuff
def getDownload(urlToGetFile, fileNameToSave): # Grab the file(s)
filelen=0
data=""
retry=False
error=False
try:
data=str(urllib2.urlopen(urlToGetFile).info())
index=data.find("Content-Length")
        assert(index != -1), "Cannot determine the file size"
data=data[index:]
data=data[16:data.find("\r")]
filelen+=int(data)
except Exception as err:
print(err)
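    # Fall back to rough placeholder sizes when Content-Length could not be
    # determined, so the progress bar still has a non-zero maximum to work with.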
if filelen == 0:
filelen=10.5
if ".flv" in urlToGetFile:
filelen=300000
# Placeholder for progressbar:
widgets = ['Download Progress: ', Percentage(), ' ',
Bar(marker=RotatingMarker(),left='[',right=']'),
' ', ETA(), ' ', FileTransferSpeed()]
pbar = ProgressBar(widgets=widgets, maxval=filelen)
pbar.start()
try:
webfile=urllib.urlopen(urlToGetFile)
byte = webfile.read(1)
data=byte
cur=0
while byte:
byte = webfile.read(1)
data+=byte
cur+=1
pbar.update(cur)
with open(fileNameToSave,'wb') as f:
f.write(data)
except IOError:
print("%s is an incorrect filename, cannot save the file" % fileNameToSave)
error=True
finally:
pbar.finish()
if error:
if raw_input('Do you want to retry with a new filename ? (y/n): ') == "y":
fileNameToSave=raw_input('Enter the desired path and filename: ')
getDownload(urlToGetFile, fileNameToSave)
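# A minimal chunked-read sketch, assuming the same progressbar widgets as above:
# it streams fixed-size chunks straight to disk instead of reading one byte at a
# time and buffering the whole file in memory (the source of the RAM warning
# printed at startup). The function name and the 8192-byte chunk size are
# illustrative choices.
def getDownloadChunked(urlToGetFile, fileNameToSave, chunkSize=8192):
    response = urllib2.urlopen(urlToGetFile)
    total = int(response.info().getheader('Content-Length', 0) or 0)
    widgets = ['Download Progress: ', Percentage(), ' ',
               Bar(marker=RotatingMarker(), left='[', right=']'),
               ' ', ETA(), ' ', FileTransferSpeed()]
    maxval = total or 1  # avoid a zero maximum when the size is unknown
    pbar = ProgressBar(widgets=widgets, maxval=maxval)
    pbar.start()
    written = 0
    with open(fileNameToSave, 'wb') as f:
        while True:
            chunk = response.read(chunkSize)
            if not chunk:
                break
            f.write(chunk)
            written += len(chunk)
            pbar.update(min(written, maxval))
    pbar.finish()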
# This looks redundant now, but just wait... :)
def getSpecialDownload(urlToGetFile, fileNameToSave):
urllib.urlretrieve(urlToGetFile, fileNameToSave)
# Placeholder for progressbar:
#widgets = ['Overall Progress: ', Percentage(), ' ',
# Bar(marker='#',left='[',right=']'),
# ' ', ETA(), ' ', FileTransferSpeed()]
#pbar = ProgressBar(widgets=widgets, maxval=nl)
#pbar.start()
# The function that sums the lengths of all files to download
# It avoids downloading the files just to learn their sizes, but issuing one request per URL can still take a while
def getOverallLength(fileNameUrls):
fi = fileinput.input(fileNameUrls)
overallLength=0
for line in fi:
data=str(urllib2.urlopen(line[:-1]).info())
data=data[data.find("Content-Length"):]
data=data[16:data.find("\r")]
overallLength+=int(data)
return overallLength
def fileLoopCheck():
specialDownload = raw_input('Do you need to import a file with links?(y/n): ')
if specialDownload == 'n':
urlToGetFile = raw_input('Please enter the download URL: ')
fileNameToSave = raw_input('Enter the desired path and filename: ')
getDownload(urlToGetFile,fileNameToSave)
elif specialDownload == 'y':
fileNameUrls = raw_input('Enter the filename (with path) that contains URLs: ')
baseDir = raw_input('Enter the directory where you want the files saved: ')
        # Define how to handle pathing; default to a trailing '/'
if not baseDir.endswith("/") and baseDir != '':
baseDir+="/"
# Grab the file and iterate over each line, this is not yet smart enough
# to discern between an actual url and erroneous text, so don't have anything
# other than links in your input file!
fi = fileinput.input(fileNameUrls)
        nl=0  # number of lines
for line in fi:
            nl+=1  # count this line
# Re-read, this will be cleaned up later
fi = fileinput.input(fileNameUrls) # reset the fileinput : can't reuse it
        cl=0  # current line
        # Sum the remote file sizes first so the overall progress bar has a real maximum.
        overallLength = getOverallLength(fileNameUrls)
        # Progressbar() stuff, wheee!
        widgets = ['Overall Progress: ', Percentage(), ' ',
                   Bar(marker='>',left='[',right=']'),
                   ' ', ETA(), ' ', FileTransferSpeed()]
        pbar = ProgressBar(widgets=widgets, maxval=overallLength)
pbar.start()
# Done with the prep work, time to do what the user wants
for line in fi:
urlToGetFile=line[:-1]
fileNameToSave=baseDir+urlToGetFile[urlToGetFile.rfind('/')+1:]
getSpecialDownload(urlToGetFile, fileNameToSave)
cl+=1
pbar.update(overallLength/nl*cl)
pbar.finish()
print('All done!')
else:
print('There was an error in your response, let\'s try again...')
fileLoopCheck()
# Call start function
fileLoopCheck()
| gpl-2.0 | 544,684,471,682,824,060 | 38.921569 | 107 | 0.628193 | false |
ellisonleao/vl | tests/test_cli.py | 1 | 9246 | import pytest
import responses
from requests.exceptions import HTTPError
from click.testing import CliRunner
from vl import cli
def reset_globals():
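    # Clear vl.cli's module-level result lists so state from one CLI run does
    # not leak into the next test.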
cli.ERRORS = []
cli.DUPES = []
cli.EXCEPTIONS = []
cli.WHITELISTED = []
cli.STATICS = []
@pytest.fixture
def runner():
return CliRunner()
@pytest.fixture
def valid_urls():
return """
Valid Urls
==========
* [Test Link1](http://www.test1.com)
* [Test Link2](http://www.test2.com)
* [Test Link3](http://www.test3.com)
"""
@pytest.fixture
def valid_urls_with_static():
return """
Valid Urls
==========
* [Test Link1](http://www.test1.com)
* [Test Link2](http://www.test2.com)
* [Test Link3](http://www.test3.com)
* [Test Link4](http://www.test3.com/1.gif)
"""
@pytest.fixture
def some_errors():
return """
Valid Urls and Some Errors
==========================
* [Test Link1](http://www.test1.com)
* [Bad Link](http://www.badlink1.com)
* [Bad Link2](http://www.badlink2.com)
* [Bad Link3](http://www.badlink3.com)
* [Bad Link3](http://www.badlink4.com)
* [Bad Link5](http://www.badlink5.com)
* [Bad Link6](http://www.badlink6.com)
* [Bad Link7](http://www.badlink7.com)
* [Bad Link8](http://www.badlink8.com)
* [Bad Link9](http://www.badlink9.com)
* [Exception](http://www.exception.com)
* [Test Link2](http://www.test2.com)
* [No Scheme](www.test2.com)
"""
@pytest.fixture
def dupes():
return """
Valid Urls With Some Dupes
==========================
* [Dupe1](http://www.dupe1.com)
* [Dupe1](http://www.dupe1.com)
* [Dupe1](http://www.dupe1.com)
* [Test Link2](http://www.test2.com)
"""
@pytest.fixture
def whitelists():
return """
Valid Urls With Some Dupes
==========================
* [link1](http://www.test.com)
* [link2](http://www.whitelisted.com)
* [link3](http://whitelisted.com)
* [link4](http://whitelisted.com/test.html)
* [link5](http://test.whitelisted.com/?arg=1&arg=2)
* [link6](http://white-listed.com/)
* [link7](http://www.test2.com)
"""
def test_cli_no_args(runner):
reset_globals()
result = runner.invoke(cli.main)
assert result.exit_code == 2
def test_cli_bad_allow_codes(runner, valid_urls):
reset_globals()
urls = (
('http://www.test1.com', 200),
('http://www.test2.com', 200),
('http://www.test3.com', 200),
)
for url, code in urls:
responses.add(responses.HEAD, url, status=code)
with runner.isolated_filesystem():
with open('valid_urls.md', 'w') as f:
f.write(valid_urls)
result = runner.invoke(cli.main, ['valid_urls.md', '--debug',
'--allow-codes', '123-456'])
assert result.exit_code == 2
@responses.activate
def test_cli_with_valid_urls(runner, valid_urls):
reset_globals()
urls = (
('http://www.test1.com', 200),
('http://www.test2.com', 200),
('http://www.test3.com', 200),
)
for url, code in urls:
responses.add(responses.HEAD, url, status=code)
with runner.isolated_filesystem():
with open('valid_urls.md', 'w') as f:
f.write(valid_urls)
result = runner.invoke(cli.main, ['valid_urls.md', '--debug'])
assert result.exit_code == 0
assert len(cli.ERRORS) == 0
assert len(cli.EXCEPTIONS) == 0
assert len(cli.DUPES) == 0
@responses.activate
def test_cli_with_valid_and_bad_urls(runner, some_errors):
reset_globals()
urls = (
('http://www.test1.com', 200),
('http://www.test2.com', 200),
('http://www.badlink1.com', 500),
('http://www.badlink2.com', 501),
('http://www.badlink3.com', 502),
('http://www.badlink4.com', 503),
('http://www.badlink5.com', 504),
('http://www.badlink6.com', 401),
('http://www.badlink7.com', 402),
('http://www.badlink8.com', 404),
('http://www.badlink9.com', 409),
)
for url, code in urls:
responses.add(responses.HEAD, url, status=code)
exception = HTTPError('BAD!')
responses.add(responses.HEAD, 'http://www.exception.com',
body=exception)
with runner.isolated_filesystem():
with open('some_errors.md', 'w') as f:
f.write(some_errors)
result = runner.invoke(cli.main, ['some_errors.md', '--debug'])
assert result.exit_code == 1
assert len(cli.ERRORS) == 9
assert len(cli.EXCEPTIONS) == 1
assert len(cli.DUPES) == 0
@responses.activate
def test_cli_with_dupes(runner, dupes):
reset_globals()
urls = (
('http://www.dupe1.com', 200),
('http://www.test2.com', 200),
)
for url, code in urls:
responses.add(responses.HEAD, url, status=code)
with runner.isolated_filesystem():
with open('dupes.md', 'w') as f:
f.write(dupes)
result = runner.invoke(cli.main, ['dupes.md', '--debug'])
assert result.exit_code == 0
assert len(cli.ERRORS) == 0
assert len(cli.EXCEPTIONS) == 0
assert len(cli.DUPES) == 1
@responses.activate
def test_cli_with_allow_codes(runner, valid_urls):
reset_globals()
urls = (
('http://www.test1.com', 200),
('http://www.test3.com', 500),
('http://www.test2.com', 404),
)
for url, code in urls:
responses.add(responses.HEAD, url, status=code)
with runner.isolated_filesystem():
with open('valid.md', 'w') as f:
f.write(valid_urls)
result = runner.invoke(cli.main, ['valid.md', '-a 404,500',
'--debug'])
assert result.exit_code == 0
assert len(cli.ERRORS) == 0
assert len(cli.EXCEPTIONS) == 0
assert len(cli.DUPES) == 0
assert len(cli.WHITELISTED) == 2
@responses.activate
def test_cli_with_whitelist(runner, whitelists):
reset_globals()
urls = (
('http://www.test.com', 200),
('http://www.whitelisted.com', 200),
('http://whitelisted.com', 200),
('http://whitelisted.com/test.html', 200),
('http://test.whitelisted.com/', 200),
('http://white-listed.com/', 200),
('http://www.test2.com', 200),
)
for url, code in urls:
responses.add(responses.HEAD, url, status=code)
with runner.isolated_filesystem():
with open('whitelist.md', 'w') as f:
f.write(whitelists)
result = runner.invoke(cli.main, ['whitelist.md', '-w whitelisted.com',
'--debug'])
assert result.exit_code == 0
assert len(cli.ERRORS) == 0
assert len(cli.EXCEPTIONS) == 0
assert len(cli.DUPES) == 0
assert len(cli.WHITELISTED) == 4
@responses.activate
def test_cli_with_bad_whitelist(runner, whitelists):
reset_globals()
urls = (
('http://www.test.com', 200),
('http://www.whitelisted.com', 200),
('http://whitelisted.com', 200),
('http://whitelisted.com/test.html', 200),
('http://test.whitelisted.com/', 200),
('http://white-listed.com/', 200),
('http://www.test2.com', 200),
)
for url, code in urls:
responses.add(responses.HEAD, url, status=code)
with runner.isolated_filesystem():
with open('whitelist.md', 'w') as f:
f.write(whitelists)
result = runner.invoke(cli.main, ['whitelist.md', '--whitelist ',
'--debug'])
assert result.exit_code == 2
@responses.activate
def test_cli_with_static(runner, valid_urls_with_static):
reset_globals()
urls = (
('http://www.test1.com', 200),
('http://www.test2.com', 200),
('http://www.test3.com', 200),
('http://www.test3.com/1.gif', 200),
)
for url, code in urls:
responses.add(responses.HEAD, url, status=code)
with runner.isolated_filesystem():
with open('with_static.md', 'w') as f:
f.write(valid_urls_with_static)
result = runner.invoke(cli.main, ['with_static.md', '--debug'])
assert result.exit_code == 0
assert len(cli.ERRORS) == 0
assert len(cli.EXCEPTIONS) == 0
assert len(cli.STATICS) == 1
@responses.activate
def test_cli_with_errors_only(runner, valid_urls):
reset_globals()
urls = (
('http://www.test1.com', 400),
('http://www.test2.com', 500),
('http://www.test3.com', 103),
)
for url, code in urls:
responses.add(responses.HEAD, url, status=code)
with runner.isolated_filesystem():
with open('errors.md', 'w') as f:
f.write(valid_urls)
result = runner.invoke(cli.main, ['errors.md', '--debug'])
assert result.exit_code == 1
assert len(cli.ERRORS) == 3
assert len(cli.EXCEPTIONS) == 0
assert len(cli.STATICS) == 0
@responses.activate
def test_cli_with_good_codes_on_allow_codes(runner, valid_urls):
reset_globals()
with runner.isolated_filesystem():
with open('errors.md', 'w') as f:
f.write(valid_urls)
result = runner.invoke(cli.main, ['errors.md', '-a 200,301',
'--debug'])
assert result.exit_code == 2
| gpl-3.0 | 3,751,028,425,636,338,000 | 26.933535 | 79 | 0.566299 | false |
gurneyalex/odoo | addons/stock_account/tests/test_stockvaluationlayer.py | 3 | 42528 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
""" Implementation of "INVENTORY VALUATION TESTS (With valuation layers)" spreadsheet. """
from odoo.addons.stock_account.tests.test_stockvaluation import _create_accounting_data
from odoo.tests import Form, tagged
from odoo.tests.common import SavepointCase, TransactionCase
class TestStockValuationCommon(SavepointCase):
@classmethod
def setUpClass(cls):
super(TestStockValuationCommon, cls).setUpClass()
cls.stock_location = cls.env.ref('stock.stock_location_stock')
cls.customer_location = cls.env.ref('stock.stock_location_customers')
cls.supplier_location = cls.env.ref('stock.stock_location_suppliers')
cls.uom_unit = cls.env.ref('uom.product_uom_unit')
cls.product1 = cls.env['product.product'].create({
'name': 'product1',
'type': 'product',
'categ_id': cls.env.ref('product.product_category_all').id,
})
cls.picking_type_in = cls.env.ref('stock.picking_type_in')
cls.picking_type_out = cls.env.ref('stock.picking_type_out')
def setUp(self):
super(TestStockValuationCommon, self).setUp()
# Counter automatically incremented by `_make_in_move` and `_make_out_move`.
self.days = 0
def _make_in_move(self, product, quantity, unit_cost=None, create_picking=False):
""" Helper to create and validate a receipt move.
"""
unit_cost = unit_cost or product.standard_price
in_move = self.env['stock.move'].create({
'name': 'in %s units @ %s per unit' % (str(quantity), str(unit_cost)),
'product_id': product.id,
'location_id': self.supplier_location.id,
'location_dest_id': self.stock_location.id,
'product_uom': self.uom_unit.id,
'product_uom_qty': quantity,
'price_unit': unit_cost,
'picking_type_id': self.picking_type_in.id,
})
if create_picking:
picking = self.env['stock.picking'].create({
'picking_type_id': in_move.picking_type_id.id,
'location_id': in_move.location_id.id,
'location_dest_id': in_move.location_dest_id.id,
})
in_move.write({'picking_id': picking.id})
in_move._action_confirm()
in_move._action_assign()
in_move.move_line_ids.qty_done = quantity
in_move._action_done()
self.days += 1
return in_move.with_context(svl=True)
def _make_out_move(self, product, quantity, force_assign=None, create_picking=False):
""" Helper to create and validate a delivery move.
"""
out_move = self.env['stock.move'].create({
'name': 'out %s units' % str(quantity),
'product_id': product.id,
'location_id': self.stock_location.id,
'location_dest_id': self.customer_location.id,
'product_uom': self.uom_unit.id,
'product_uom_qty': quantity,
'picking_type_id': self.picking_type_out.id,
})
if create_picking:
picking = self.env['stock.picking'].create({
'picking_type_id': out_move.picking_type_id.id,
'location_id': out_move.location_id.id,
'location_dest_id': out_move.location_dest_id.id,
})
out_move.write({'picking_id': picking.id})
out_move._action_confirm()
out_move._action_assign()
if force_assign:
self.env['stock.move.line'].create({
'move_id': out_move.id,
'product_id': out_move.product_id.id,
'product_uom_id': out_move.product_uom.id,
'location_id': out_move.location_id.id,
'location_dest_id': out_move.location_dest_id.id,
})
out_move.move_line_ids.qty_done = quantity
out_move._action_done()
self.days += 1
return out_move.with_context(svl=True)
def _make_dropship_move(self, product, quantity, unit_cost=None):
dropshipped = self.env['stock.move'].create({
'name': 'dropship %s units' % str(quantity),
'product_id': product.id,
'location_id': self.supplier_location.id,
'location_dest_id': self.customer_location.id,
'product_uom': self.uom_unit.id,
'product_uom_qty': quantity,
'picking_type_id': self.picking_type_out.id,
})
if unit_cost:
dropshipped.unit_cost = unit_cost
dropshipped._action_confirm()
dropshipped._action_assign()
dropshipped.move_line_ids.qty_done = quantity
dropshipped._action_done()
return dropshipped
def _make_return(self, move, quantity_to_return):
stock_return_picking = Form(self.env['stock.return.picking']\
.with_context(active_ids=[move.picking_id.id], active_id=move.picking_id.id, active_model='stock.picking'))
stock_return_picking = stock_return_picking.save()
stock_return_picking.product_return_moves.quantity = quantity_to_return
stock_return_picking_action = stock_return_picking.create_returns()
return_pick = self.env['stock.picking'].browse(stock_return_picking_action['res_id'])
return_pick.move_lines[0].move_line_ids[0].qty_done = quantity_to_return
return_pick.action_done()
return return_pick.move_lines
class TestStockValuationStandard(TestStockValuationCommon):
def setUp(self):
super(TestStockValuationStandard, self).setUp()
self.product1.product_tmpl_id.categ_id.property_cost_method = 'standard'
self.product1.product_tmpl_id.standard_price = 10
def test_normal_1(self):
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
move1 = self._make_in_move(self.product1, 10)
move2 = self._make_in_move(self.product1, 10)
move3 = self._make_out_move(self.product1, 15)
self.assertEqual(self.product1.value_svl, 50)
self.assertEqual(self.product1.quantity_svl, 5)
def test_change_in_past_increase_in_1(self):
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
move1 = self._make_in_move(self.product1, 10)
move2 = self._make_in_move(self.product1, 10)
move3 = self._make_out_move(self.product1, 15)
move1.move_line_ids.qty_done = 15
self.assertEqual(self.product1.value_svl, 100)
self.assertEqual(self.product1.quantity_svl, 10)
def test_change_in_past_decrease_in_1(self):
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
move1 = self._make_in_move(self.product1, 10)
move2 = self._make_in_move(self.product1, 10)
move3 = self._make_out_move(self.product1, 15)
move1.move_line_ids.qty_done = 5
self.assertEqual(self.product1.value_svl, 0)
self.assertEqual(self.product1.quantity_svl, 0)
def test_change_in_past_add_ml_in_1(self):
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
move1 = self._make_in_move(self.product1, 10)
move2 = self._make_in_move(self.product1, 10)
move3 = self._make_out_move(self.product1, 15)
self.env['stock.move.line'].create({
'move_id': move1.id,
'product_id': move1.product_id.id,
'qty_done': 5,
'product_uom_id': move1.product_uom.id,
'location_id': move1.location_id.id,
'location_dest_id': move1.location_dest_id.id,
})
self.assertEqual(self.product1.value_svl, 100)
self.assertEqual(self.product1.quantity_svl, 10)
def test_change_in_past_increase_out_1(self):
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
move1 = self._make_in_move(self.product1, 10)
move2 = self._make_out_move(self.product1, 1)
move2.move_line_ids.qty_done = 5
self.assertEqual(self.product1.value_svl, 50)
self.assertEqual(self.product1.quantity_svl, 5)
def test_change_in_past_decrease_out_1(self):
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
move1 = self._make_in_move(self.product1, 10)
move2 = self._make_out_move(self.product1, 5)
move2.move_line_ids.qty_done = 1
self.assertEqual(self.product1.value_svl, 90)
self.assertEqual(self.product1.quantity_svl, 9)
def test_change_standard_price_1(self):
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
move1 = self._make_in_move(self.product1, 10)
move2 = self._make_in_move(self.product1, 10)
move3 = self._make_out_move(self.product1, 15)
# change cost from 10 to 15
self.product1._change_standard_price(15.0)
self.assertEqual(self.product1.value_svl, 75)
self.assertEqual(self.product1.quantity_svl, 5)
self.assertEqual(self.product1.stock_valuation_layer_ids.sorted()[-1].description, 'Product value manually modified (from 10.0 to 15.0)')
def test_negative_1(self):
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
move1 = self._make_in_move(self.product1, 10)
move2 = self._make_out_move(self.product1, 15)
self.env['stock.move.line'].create({
'move_id': move1.id,
'product_id': move1.product_id.id,
'qty_done': 10,
'product_uom_id': move1.product_uom.id,
'location_id': move1.location_id.id,
'location_dest_id': move1.location_dest_id.id,
})
self.assertEqual(self.product1.value_svl, 50)
self.assertEqual(self.product1.quantity_svl, 5)
def test_dropship_1(self):
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
move1 = self._make_dropship_move(self.product1, 10)
valuation_layers = self.product1.stock_valuation_layer_ids
self.assertEqual(len(valuation_layers), 2)
self.assertEqual(valuation_layers[0].value, 100)
self.assertEqual(valuation_layers[1].value, -100)
self.assertEqual(self.product1.value_svl, 0)
self.assertEqual(self.product1.quantity_svl, 0)
def test_change_in_past_increase_dropship_1(self):
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
move1 = self._make_dropship_move(self.product1, 10)
move1.move_line_ids.qty_done = 15
valuation_layers = self.product1.stock_valuation_layer_ids
self.assertEqual(len(valuation_layers), 4)
self.assertEqual(valuation_layers[0].value, 100)
self.assertEqual(valuation_layers[1].value, -100)
self.assertEqual(valuation_layers[2].value, 50)
self.assertEqual(valuation_layers[3].value, -50)
self.assertEqual(self.product1.value_svl, 0)
self.assertEqual(self.product1.quantity_svl, 0)
def test_empty_stock_move_valorisation(self):
product1 = self.env['product.product'].create({
'name': 'p1',
'type': 'product',
})
product2 = self.env['product.product'].create({
'name': 'p2',
'type': 'product',
})
picking = self.env['stock.picking'].create({
'picking_type_id': self.picking_type_in.id,
'location_id': self.supplier_location.id,
'location_dest_id': self.stock_location.id,
})
for product in (product1, product2):
product.unit_cost = 10
in_move = self.env['stock.move'].create({
'name': 'in %s units @ %s per unit' % (2, str(10)),
'product_id': product.id,
'location_id': self.supplier_location.id,
'location_dest_id': self.stock_location.id,
'product_uom': self.uom_unit.id,
'product_uom_qty': 2,
'price_unit': 10,
'picking_type_id': self.picking_type_in.id,
'picking_id': picking.id
})
picking.action_confirm()
# set quantity done only on one move
in_move.move_line_ids.qty_done = 2
res_dict = picking.button_validate()
wizard = self.env[(res_dict.get('res_model'))].browse(res_dict.get('res_id'))
res_dict_for_back_order = wizard.process()
self.assertTrue(product2.stock_valuation_layer_ids)
self.assertFalse(product1.stock_valuation_layer_ids)
class TestStockValuationAVCO(TestStockValuationCommon):
def setUp(self):
super(TestStockValuationAVCO, self).setUp()
self.product1.product_tmpl_id.categ_id.property_cost_method = 'average'
def test_normal_1(self):
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
move1 = self._make_in_move(self.product1, 10, unit_cost=10)
self.assertEqual(self.product1.standard_price, 10)
self.assertEqual(move1.stock_valuation_layer_ids.value, 100)
move2 = self._make_in_move(self.product1, 10, unit_cost=20)
self.assertEqual(self.product1.standard_price, 15)
self.assertEqual(move2.stock_valuation_layer_ids.value, 200)
move3 = self._make_out_move(self.product1, 15)
self.assertEqual(self.product1.standard_price, 15)
self.assertEqual(move3.stock_valuation_layer_ids.value, -225)
self.assertEqual(self.product1.value_svl, 75)
self.assertEqual(self.product1.quantity_svl, 5)
def test_change_in_past_increase_in_1(self):
move1 = self._make_in_move(self.product1, 10, unit_cost=10)
move2 = self._make_in_move(self.product1, 10, unit_cost=20)
move3 = self._make_out_move(self.product1, 15)
move1.move_line_ids.qty_done = 15
self.assertEqual(self.product1.value_svl, 125)
self.assertEqual(self.product1.quantity_svl, 10)
def test_change_in_past_decrease_in_1(self):
move1 = self._make_in_move(self.product1, 10, unit_cost=10)
move2 = self._make_in_move(self.product1, 10, unit_cost=20)
move3 = self._make_out_move(self.product1, 15)
move1.move_line_ids.qty_done = 5
self.assertEqual(self.product1.value_svl, 0)
self.assertEqual(self.product1.quantity_svl, 0)
def test_change_in_past_add_ml_in_1(self):
move1 = self._make_in_move(self.product1, 10, unit_cost=10)
move2 = self._make_in_move(self.product1, 10, unit_cost=20)
move3 = self._make_out_move(self.product1, 15)
self.env['stock.move.line'].create({
'move_id': move1.id,
'product_id': move1.product_id.id,
'qty_done': 5,
'product_uom_id': move1.product_uom.id,
'location_id': move1.location_id.id,
'location_dest_id': move1.location_dest_id.id,
})
self.assertEqual(self.product1.value_svl, 125)
self.assertEqual(self.product1.quantity_svl, 10)
self.assertEqual(self.product1.standard_price, 12.5)
def test_change_in_past_add_move_in_1(self):
move1 = self._make_in_move(self.product1, 10, unit_cost=10, create_picking=True)
move2 = self._make_in_move(self.product1, 10, unit_cost=20)
move3 = self._make_out_move(self.product1, 15)
self.env['stock.move.line'].create({
'product_id': move1.product_id.id,
'qty_done': 5,
'product_uom_id': move1.product_uom.id,
'location_id': move1.location_id.id,
'location_dest_id': move1.location_dest_id.id,
'state': 'done',
'picking_id': move1.picking_id.id,
})
self.assertEqual(self.product1.value_svl, 150)
self.assertEqual(self.product1.quantity_svl, 10)
self.assertEqual(self.product1.standard_price, 15)
def test_change_in_past_increase_out_1(self):
move1 = self._make_in_move(self.product1, 10, unit_cost=10)
move2 = self._make_in_move(self.product1, 10, unit_cost=20)
move3 = self._make_out_move(self.product1, 15)
move3.move_line_ids.qty_done = 20
self.assertEqual(self.product1.value_svl, 0)
self.assertEqual(self.product1.quantity_svl, 0)
self.assertEqual(self.product1.standard_price, 15)
def test_change_in_past_decrease_out_1(self):
move1 = self._make_in_move(self.product1, 10, unit_cost=10)
move2 = self._make_in_move(self.product1, 10, unit_cost=20)
move3 = self._make_out_move(self.product1, 15)
move3.move_line_ids.qty_done = 10
self.assertEqual(sum(self.product1.stock_valuation_layer_ids.mapped('remaining_qty')), 10)
self.assertEqual(self.product1.value_svl, 150)
self.assertEqual(self.product1.quantity_svl, 10)
self.assertEqual(self.product1.standard_price, 15)
def test_negative_1(self):
""" Ensures that, in AVCO, the `remaining_qty` field is computed and the vacuum is ran
when necessary.
"""
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
move1 = self._make_in_move(self.product1, 10, unit_cost=10)
move2 = self._make_in_move(self.product1, 10, unit_cost=20)
move3 = self._make_out_move(self.product1, 30)
self.assertEqual(move3.stock_valuation_layer_ids.remaining_qty, -10)
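        # Receiving new stock should trigger the vacuum and compensate the
        # negative layer left by the oversized delivery.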
move4 = self._make_in_move(self.product1, 10, unit_cost=30)
self.assertEqual(sum(self.product1.stock_valuation_layer_ids.mapped('remaining_qty')), 0)
move5 = self._make_in_move(self.product1, 10, unit_cost=40)
self.assertEqual(self.product1.value_svl, 400)
self.assertEqual(self.product1.quantity_svl, 10)
def test_negative_2(self):
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
self.product1.standard_price = 10
move1 = self._make_out_move(self.product1, 1, force_assign=True)
move2 = self._make_in_move(self.product1, 1, unit_cost=15)
self.assertEqual(self.product1.value_svl, 0)
self.assertEqual(self.product1.quantity_svl, 0)
def test_negative_3(self):
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
move1 = self._make_out_move(self.product1, 2, force_assign=True)
self.assertEqual(move1.stock_valuation_layer_ids.value, 0)
move2 = self._make_in_move(self.product1, 20, unit_cost=3.33)
self.assertEqual(move1.stock_valuation_layer_ids[1].value, -6.66)
self.assertEqual(self.product1.standard_price, 3.33)
self.assertEqual(self.product1.value_svl, 59.94)
self.assertEqual(self.product1.quantity_svl, 18)
def test_return_receipt_1(self):
move1 = self._make_in_move(self.product1, 1, unit_cost=10, create_picking=True)
move2 = self._make_in_move(self.product1, 1, unit_cost=20)
move3 = self._make_out_move(self.product1, 1)
move4 = self._make_return(move1, 1)
self.assertEqual(self.product1.value_svl, 0)
self.assertEqual(self.product1.quantity_svl, 0)
self.assertEqual(self.product1.standard_price, 15)
def test_return_delivery_1(self):
move1 = self._make_in_move(self.product1, 1, unit_cost=10)
move2 = self._make_in_move(self.product1, 1, unit_cost=20)
move3 = self._make_out_move(self.product1, 1, create_picking=True)
move4 = self._make_return(move3, 1)
self.assertEqual(self.product1.value_svl, 30)
self.assertEqual(self.product1.quantity_svl, 2)
self.assertEqual(self.product1.standard_price, 15)
self.assertEqual(sum(self.product1.stock_valuation_layer_ids.mapped('remaining_qty')), 2)
def test_rereturn_receipt_1(self):
move1 = self._make_in_move(self.product1, 1, unit_cost=10, create_picking=True)
move2 = self._make_in_move(self.product1, 1, unit_cost=20)
move3 = self._make_out_move(self.product1, 1)
move4 = self._make_return(move1, 1) # -15, current avco
move5 = self._make_return(move4, 1) # +10, original move's price unit
self.assertEqual(self.product1.value_svl, 15)
self.assertEqual(self.product1.quantity_svl, 1)
self.assertEqual(self.product1.standard_price, 15)
self.assertEqual(sum(self.product1.stock_valuation_layer_ids.mapped('remaining_qty')), 1)
def test_rereturn_delivery_1(self):
move1 = self._make_in_move(self.product1, 1, unit_cost=10)
move2 = self._make_in_move(self.product1, 1, unit_cost=20)
move3 = self._make_out_move(self.product1, 1, create_picking=True)
move4 = self._make_return(move3, 1)
move5 = self._make_return(move4, 1)
self.assertEqual(self.product1.value_svl, 15)
self.assertEqual(self.product1.quantity_svl, 1)
self.assertEqual(self.product1.standard_price, 15)
self.assertEqual(sum(self.product1.stock_valuation_layer_ids.mapped('remaining_qty')), 1)
def test_dropship_1(self):
move1 = self._make_in_move(self.product1, 1, unit_cost=10)
move2 = self._make_in_move(self.product1, 1, unit_cost=20)
move3 = self._make_dropship_move(self.product1, 1, unit_cost=10)
self.assertEqual(self.product1.value_svl, 30)
self.assertEqual(self.product1.quantity_svl, 2)
self.assertEqual(self.product1.standard_price, 15)
def test_rounding_slv_1(self):
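        # Three receipts at 1.00, 1.00 and 1.01 value the stock at 3.01; shipping
        # all three at the rounded average cost leaves a 0.01 residue that is
        # written off as a rounding adjustment.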
move1 = self._make_in_move(self.product1, 1, unit_cost=1.00)
move2 = self._make_in_move(self.product1, 1, unit_cost=1.00)
move3 = self._make_in_move(self.product1, 1, unit_cost=1.01)
self.assertAlmostEqual(self.product1.value_svl, 3.01)
move_out = self._make_out_move(self.product1, 3, create_picking=True)
self.assertIn('Rounding Adjustment: -0.01', move_out.stock_valuation_layer_ids.description)
self.assertEqual(self.product1.value_svl, 0)
self.assertEqual(self.product1.quantity_svl, 0)
self.assertEqual(self.product1.standard_price, 1.00)
def test_rounding_slv_2(self):
self._make_in_move(self.product1, 1, unit_cost=1.02)
self._make_in_move(self.product1, 1, unit_cost=1.00)
self._make_in_move(self.product1, 1, unit_cost=1.00)
self.assertAlmostEqual(self.product1.value_svl, 3.02)
move_out = self._make_out_move(self.product1, 3, create_picking=True)
self.assertIn('Rounding Adjustment: +0.01', move_out.stock_valuation_layer_ids.description)
self.assertEqual(self.product1.value_svl, 0)
self.assertEqual(self.product1.quantity_svl, 0)
self.assertEqual(self.product1.standard_price, 1.01)
class TestStockValuationFIFO(TestStockValuationCommon):
def setUp(self):
super(TestStockValuationFIFO, self).setUp()
self.product1.product_tmpl_id.categ_id.property_cost_method = 'fifo'
def test_normal_1(self):
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
move1 = self._make_in_move(self.product1, 10, unit_cost=10)
move2 = self._make_in_move(self.product1, 10, unit_cost=20)
move3 = self._make_out_move(self.product1, 15)
self.assertEqual(self.product1.value_svl, 100)
self.assertEqual(self.product1.quantity_svl, 5)
self.assertEqual(sum(self.product1.stock_valuation_layer_ids.mapped('remaining_qty')), 5)
def test_negative_1(self):
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
move1 = self._make_in_move(self.product1, 10, unit_cost=10)
move2 = self._make_in_move(self.product1, 10, unit_cost=20)
move3 = self._make_out_move(self.product1, 30)
self.assertEqual(move3.stock_valuation_layer_ids.remaining_qty, -10)
move4 = self._make_in_move(self.product1, 10, unit_cost=30)
self.assertEqual(sum(self.product1.stock_valuation_layer_ids.mapped('remaining_qty')), 0)
move5 = self._make_in_move(self.product1, 10, unit_cost=40)
self.assertEqual(self.product1.value_svl, 400)
self.assertEqual(self.product1.quantity_svl, 10)
def test_change_in_past_decrease_in_1(self):
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
move1 = self._make_in_move(self.product1, 20, unit_cost=10)
move2 = self._make_out_move(self.product1, 10)
move1.move_line_ids.qty_done = 10
self.assertEqual(self.product1.value_svl, 0)
self.assertEqual(self.product1.quantity_svl, 0)
def test_change_in_past_decrease_in_2(self):
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
move1 = self._make_in_move(self.product1, 20, unit_cost=10)
move2 = self._make_out_move(self.product1, 10)
move3 = self._make_out_move(self.product1, 10)
move1.move_line_ids.qty_done = 10
move4 = self._make_in_move(self.product1, 20, unit_cost=15)
self.assertEqual(self.product1.value_svl, 150)
self.assertEqual(self.product1.quantity_svl, 10)
def test_change_in_past_increase_in_1(self):
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
move1 = self._make_in_move(self.product1, 10, unit_cost=10)
move2 = self._make_in_move(self.product1, 10, unit_cost=15)
move3 = self._make_out_move(self.product1, 20)
move1.move_line_ids.qty_done = 20
self.assertEqual(self.product1.value_svl, 100)
self.assertEqual(self.product1.quantity_svl, 10)
def test_change_in_past_increase_in_2(self):
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
move1 = self._make_in_move(self.product1, 10, unit_cost=10)
move2 = self._make_in_move(self.product1, 10, unit_cost=12)
move3 = self._make_out_move(self.product1, 15)
move4 = self._make_out_move(self.product1, 20)
move5 = self._make_in_move(self.product1, 100, unit_cost=15)
move1.move_line_ids.qty_done = 20
self.assertEqual(self.product1.value_svl, 1375)
self.assertEqual(self.product1.quantity_svl, 95)
def test_change_in_past_increase_out_1(self):
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
move1 = self._make_in_move(self.product1, 20, unit_cost=10)
move2 = self._make_out_move(self.product1, 10)
move3 = self._make_in_move(self.product1, 20, unit_cost=15)
move2.move_line_ids.qty_done = 25
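        # The delivery now consumes the full 20 units @10 plus 5 units @15,
        # leaving 15 units @15 = 225 in stock.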
self.assertEqual(self.product1.value_svl, 225)
self.assertEqual(self.product1.quantity_svl, 15)
self.assertEqual(sum(self.product1.stock_valuation_layer_ids.mapped('remaining_qty')), 15)
def test_change_in_past_decrease_out_1(self):
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
move1 = self._make_in_move(self.product1, 20, unit_cost=10)
move2 = self._make_out_move(self.product1, 15)
move3 = self._make_in_move(self.product1, 20, unit_cost=15)
move2.move_line_ids.qty_done = 5
self.assertEqual(self.product1.value_svl, 450)
self.assertEqual(self.product1.quantity_svl, 35)
self.assertEqual(sum(self.product1.stock_valuation_layer_ids.mapped('remaining_qty')), 35)
def test_change_in_past_add_ml_out_1(self):
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
move1 = self._make_in_move(self.product1, 20, unit_cost=10)
move2 = self._make_out_move(self.product1, 10)
move3 = self._make_in_move(self.product1, 20, unit_cost=15)
self.env['stock.move.line'].create({
'move_id': move2.id,
'product_id': move2.product_id.id,
'qty_done': 5,
'product_uom_id': move2.product_uom.id,
'location_id': move2.location_id.id,
'location_dest_id': move2.location_dest_id.id,
})
self.assertEqual(self.product1.value_svl, 350)
self.assertEqual(self.product1.quantity_svl, 25)
self.assertEqual(sum(self.product1.stock_valuation_layer_ids.mapped('remaining_qty')), 25)
def test_return_delivery_1(self):
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
move1 = self._make_in_move(self.product1, 10, unit_cost=10)
move2 = self._make_out_move(self.product1, 10, create_picking=True)
move3 = self._make_in_move(self.product1, 10, unit_cost=20)
move4 = self._make_return(move2, 10)
self.assertEqual(self.product1.value_svl, 300)
self.assertEqual(self.product1.quantity_svl, 20)
def test_return_receipt_1(self):
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
move1 = self._make_in_move(self.product1, 10, unit_cost=10, create_picking=True)
move2 = self._make_in_move(self.product1, 10, unit_cost=20)
move3 = self._make_return(move1, 2)
self.assertEqual(self.product1.value_svl, 280)
self.assertEqual(self.product1.quantity_svl, 18)
def test_rereturn_receipt_1(self):
move1 = self._make_in_move(self.product1, 1, unit_cost=10, create_picking=True)
move2 = self._make_in_move(self.product1, 1, unit_cost=20)
move3 = self._make_out_move(self.product1, 1)
move4 = self._make_return(move1, 1)
move5 = self._make_return(move4, 1)
self.assertEqual(self.product1.value_svl, 20)
self.assertEqual(self.product1.quantity_svl, 1)
def test_rereturn_delivery_1(self):
move1 = self._make_in_move(self.product1, 1, unit_cost=10)
move2 = self._make_in_move(self.product1, 1, unit_cost=20)
move3 = self._make_out_move(self.product1, 1, create_picking=True)
move4 = self._make_return(move3, 1)
move5 = self._make_return(move4, 1)
self.assertEqual(self.product1.value_svl, 10)
self.assertEqual(self.product1.quantity_svl, 1)
def test_dropship_1(self):
orig_standard_price = self.product1.standard_price
move1 = self._make_in_move(self.product1, 1, unit_cost=10)
move2 = self._make_in_move(self.product1, 1, unit_cost=20)
move3 = self._make_dropship_move(self.product1, 1, unit_cost=10)
self.assertEqual(self.product1.value_svl, 30)
self.assertEqual(self.product1.quantity_svl, 2)
self.assertEqual(orig_standard_price, self.product1.standard_price)
class TestStockValuationChangeCostMethod(TestStockValuationCommon):
def test_standard_to_fifo_1(self):
""" The accounting impact of this cost method change is neutral.
"""
self.product1.product_tmpl_id.categ_id.property_cost_method = 'standard'
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
self.product1.product_tmpl_id.standard_price = 10
move1 = self._make_in_move(self.product1, 10)
move2 = self._make_in_move(self.product1, 10)
move3 = self._make_out_move(self.product1, 1)
self.product1.product_tmpl_id.categ_id.property_cost_method = 'fifo'
self.assertEqual(self.product1.value_svl, 190)
self.assertEqual(self.product1.quantity_svl, 19)
self.assertEqual(len(self.product1.stock_valuation_layer_ids), 5)
for svl in self.product1.stock_valuation_layer_ids.sorted()[-2:]:
self.assertEqual(svl.description, 'Costing method change for product category All: from standard to fifo.')
def test_standard_to_fifo_2(self):
""" We want the same result as `test_standard_to_fifo_1` but by changing the category of
`self.product1` to another one, not changing the current one.
"""
self.product1.product_tmpl_id.categ_id.property_cost_method = 'standard'
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
self.product1.product_tmpl_id.standard_price = 10
move1 = self._make_in_move(self.product1, 10)
move2 = self._make_in_move(self.product1, 10)
move3 = self._make_out_move(self.product1, 1)
cat2 = self.env['product.category'].create({'name': 'fifo'})
cat2.property_cost_method = 'fifo'
self.product1.product_tmpl_id.categ_id = cat2
self.assertEqual(self.product1.value_svl, 190)
self.assertEqual(self.product1.quantity_svl, 19)
self.assertEqual(len(self.product1.stock_valuation_layer_ids), 5)
def test_avco_to_fifo(self):
""" The accounting impact of this cost method change is neutral.
"""
self.product1.product_tmpl_id.categ_id.property_cost_method = 'average'
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
move1 = self._make_in_move(self.product1, 10, unit_cost=10)
move2 = self._make_in_move(self.product1, 10, unit_cost=20)
move3 = self._make_out_move(self.product1, 1)
self.product1.product_tmpl_id.categ_id.property_cost_method = 'fifo'
self.assertEqual(self.product1.value_svl, 285)
self.assertEqual(self.product1.quantity_svl, 19)
def test_fifo_to_standard(self):
""" The accounting impact of this cost method change is not neutral as we will use the last
fifo price as the new standard price.
"""
self.product1.product_tmpl_id.categ_id.property_cost_method = 'fifo'
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
move1 = self._make_in_move(self.product1, 10, unit_cost=10)
move2 = self._make_in_move(self.product1, 10, unit_cost=20)
move3 = self._make_out_move(self.product1, 1)
self.product1.product_tmpl_id.categ_id.property_cost_method = 'standard'
self.assertEqual(self.product1.value_svl, 380)
self.assertEqual(self.product1.quantity_svl, 19)
def test_fifo_to_avco(self):
""" The accounting impact of this cost method change is not neutral as we will use the last
fifo price as the new AVCO.
"""
self.product1.product_tmpl_id.categ_id.property_cost_method = 'fifo'
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
move1 = self._make_in_move(self.product1, 10, unit_cost=10)
move2 = self._make_in_move(self.product1, 10, unit_cost=20)
move3 = self._make_out_move(self.product1, 1)
self.product1.product_tmpl_id.categ_id.property_cost_method = 'average'
self.assertEqual(self.product1.value_svl, 380)
self.assertEqual(self.product1.quantity_svl, 19)
def test_avco_to_standard(self):
""" The accounting impact of this cost method change is neutral.
"""
self.product1.product_tmpl_id.categ_id.property_cost_method = 'average'
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
move1 = self._make_in_move(self.product1, 10, unit_cost=10)
move2 = self._make_in_move(self.product1, 10, unit_cost=20)
move3 = self._make_out_move(self.product1, 1)
self.product1.product_tmpl_id.categ_id.property_cost_method = 'standard'
self.assertEqual(self.product1.value_svl, 285)
self.assertEqual(self.product1.quantity_svl, 19)
def test_standard_to_avco(self):
""" The accounting impact of this cost method change is neutral.
"""
self.product1.product_tmpl_id.categ_id.property_cost_method = 'standard'
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
self.product1.product_tmpl_id.standard_price = 10
move1 = self._make_in_move(self.product1, 10)
move2 = self._make_in_move(self.product1, 10)
move3 = self._make_out_move(self.product1, 1)
self.product1.product_tmpl_id.categ_id.property_cost_method = 'average'
self.assertEqual(self.product1.value_svl, 190)
self.assertEqual(self.product1.quantity_svl, 19)
class TestStockValuationChangeValuation(TestStockValuationCommon):
@classmethod
def setUpClass(cls):
super(TestStockValuationChangeValuation, cls).setUpClass()
cls.stock_input_account, cls.stock_output_account, cls.stock_valuation_account, cls.expense_account, cls.stock_journal = _create_accounting_data(cls.env)
cls.product1.categ_id.property_valuation = 'real_time'
cls.product1.write({
'property_account_expense_id': cls.expense_account.id,
})
cls.product1.categ_id.write({
'property_stock_account_input_categ_id': cls.stock_input_account.id,
'property_stock_account_output_categ_id': cls.stock_output_account.id,
'property_stock_valuation_account_id': cls.stock_valuation_account.id,
'property_stock_journal': cls.stock_journal.id,
})
def test_standard_manual_to_auto_1(self):
self.product1.product_tmpl_id.categ_id.property_cost_method = 'standard'
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
self.product1.product_tmpl_id.standard_price = 10
move1 = self._make_in_move(self.product1, 10)
self.assertEqual(self.product1.value_svl, 100)
self.assertEqual(self.product1.quantity_svl, 10)
self.assertEqual(len(self.product1.stock_valuation_layer_ids.mapped('account_move_id')), 0)
self.assertEqual(len(self.product1.stock_valuation_layer_ids), 1)
self.product1.product_tmpl_id.categ_id.property_valuation = 'real_time'
self.assertEqual(self.product1.value_svl, 100)
self.assertEqual(self.product1.quantity_svl, 10)
        # An accounting entry should only be created for the replenishment now that the category is perpetual.
self.assertEqual(len(self.product1.stock_valuation_layer_ids.mapped('account_move_id')), 1)
self.assertEqual(len(self.product1.stock_valuation_layer_ids), 3)
for svl in self.product1.stock_valuation_layer_ids.sorted()[-2:]:
self.assertEqual(svl.description, 'Valuation method change for product category All: from manual_periodic to real_time.')
def test_standard_manual_to_auto_2(self):
self.product1.product_tmpl_id.categ_id.property_cost_method = 'standard'
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
self.product1.product_tmpl_id.standard_price = 10
move1 = self._make_in_move(self.product1, 10)
self.assertEqual(self.product1.value_svl, 100)
self.assertEqual(self.product1.quantity_svl, 10)
self.assertEqual(len(self.product1.stock_valuation_layer_ids.mapped('account_move_id')), 0)
self.assertEqual(len(self.product1.stock_valuation_layer_ids), 1)
cat2 = self.env['product.category'].create({'name': 'standard auto'})
cat2.property_cost_method = 'standard'
cat2.property_valuation = 'real_time'
cat2.write({
'property_stock_account_input_categ_id': self.stock_input_account.id,
'property_stock_account_output_categ_id': self.stock_output_account.id,
'property_stock_valuation_account_id': self.stock_valuation_account.id,
'property_stock_journal': self.stock_journal.id,
})
# Try to change the product category with a `default_type` key in the context and
# check it doesn't break the account move generation.
self.product1.with_context(default_type='product').categ_id = cat2
self.assertEqual(self.product1.categ_id, cat2)
self.assertEqual(self.product1.value_svl, 100)
self.assertEqual(self.product1.quantity_svl, 10)
        # An accounting entry should only be created for the replenishment now that the category is perpetual.
self.assertEqual(len(self.product1.stock_valuation_layer_ids.mapped('account_move_id')), 1)
self.assertEqual(len(self.product1.stock_valuation_layer_ids), 3)
def test_standard_auto_to_manual_1(self):
self.product1.product_tmpl_id.categ_id.property_cost_method = 'standard'
self.product1.product_tmpl_id.categ_id.property_valuation = 'real_time'
self.product1.product_tmpl_id.standard_price = 10
move1 = self._make_in_move(self.product1, 10)
self.assertEqual(self.product1.value_svl, 100)
self.assertEqual(self.product1.quantity_svl, 10)
self.assertEqual(len(self.product1.stock_valuation_layer_ids.mapped('account_move_id')), 1)
self.assertEqual(len(self.product1.stock_valuation_layer_ids), 1)
self.product1.product_tmpl_id.categ_id.property_valuation = 'manual_periodic'
self.assertEqual(self.product1.value_svl, 100)
self.assertEqual(self.product1.quantity_svl, 10)
# An accounting entry should only be created for the emptying now that the category is manual.
self.assertEqual(len(self.product1.stock_valuation_layer_ids.mapped('account_move_id')), 2)
self.assertEqual(len(self.product1.stock_valuation_layer_ids), 3)
def test_standard_auto_to_manual_2(self):
self.product1.product_tmpl_id.categ_id.property_cost_method = 'standard'
self.product1.product_tmpl_id.categ_id.property_valuation = 'real_time'
self.product1.product_tmpl_id.standard_price = 10
move1 = self._make_in_move(self.product1, 10)
self.assertEqual(self.product1.value_svl, 100)
self.assertEqual(self.product1.quantity_svl, 10)
self.assertEqual(len(self.product1.stock_valuation_layer_ids.mapped('account_move_id')), 1)
self.assertEqual(len(self.product1.stock_valuation_layer_ids), 1)
cat2 = self.env['product.category'].create({'name': 'fifo'})
cat2.property_cost_method = 'standard'
cat2.property_valuation = 'manual_periodic'
self.product1.with_context(debug=True).categ_id = cat2
self.assertEqual(self.product1.value_svl, 100)
self.assertEqual(self.product1.quantity_svl, 10)
# An accounting entry should only be created for the emptying now that the category is manual.
self.assertEqual(len(self.product1.stock_valuation_layer_ids.mapped('account_move_id')), 2)
self.assertEqual(len(self.product1.stock_valuation_layer_ids), 3)
| agpl-3.0 | 3,227,380,840,484,385,300 | 46.200888 | 161 | 0.652041 | false |
hassanibi/erpnext | erpnext/config/stock.py | 1 | 7150 | from __future__ import unicode_literals
from frappe import _
def get_data():
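    # Each entry below describes one section of the Stock module desk page:
    # a label plus the doctypes, reports and pages listed under it.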
return [
{
"label": _("Stock Transactions"),
"items": [
{
"type": "doctype",
"name": "Stock Entry",
"description": _("Record item movement."),
},
{
"type": "doctype",
"name": "Delivery Note",
"description": _("Shipments to customers."),
},
{
"type": "doctype",
"name": "Purchase Receipt",
"description": _("Goods received from Suppliers."),
},
{
"type": "doctype",
"name": "Material Request",
"description": _("Requests for items."),
},
]
},
{
"label": _("Stock Reports"),
"items": [
{
"type": "report",
"is_query_report": True,
"name": "Stock Ledger",
"doctype": "Stock Ledger Entry",
},
{
"type": "report",
"is_query_report": True,
"name": "Stock Balance",
"doctype": "Stock Ledger Entry"
},
{
"type": "report",
"is_query_report": True,
"name": "Stock Projected Qty",
"doctype": "Item",
},
{
"type": "page",
"name": "stock-balance",
"label": _("Stock Summary")
},
{
"type": "report",
"is_query_report": True,
"name": "Stock Ageing",
"doctype": "Item",
},
]
},
{
"label": _("Items and Pricing"),
"items": [
{
"type": "doctype",
"name": "Item",
"description": _("All Products or Services."),
},
{
"type": "doctype",
"name": "Product Bundle",
"description": _("Bundle items at time of sale."),
},
{
"type": "doctype",
"name": "Price List",
"description": _("Price List master.")
},
{
"type": "doctype",
"name": "Item Group",
"icon": "fa fa-sitemap",
"label": _("Item Group"),
"link": "Tree/Item Group",
"description": _("Tree of Item Groups."),
},
{
"type": "doctype",
"name": "Item Price",
"description": _("Multiple Item prices."),
"route": "Report/Item Price"
},
{
"type": "doctype",
"name": "Shipping Rule",
"description": _("Rules for adding shipping costs.")
},
{
"type": "doctype",
"name": "Pricing Rule",
"description": _("Rules for applying pricing and discount.")
},
]
},
{
"label": _("Serial No and Batch"),
"items": [
{
"type": "doctype",
"name": "Serial No",
"description": _("Single unit of an Item."),
},
{
"type": "doctype",
"name": "Batch",
"description": _("Batch (lot) of an Item."),
},
{
"type": "doctype",
"name": "Installation Note",
"description": _("Installation record for a Serial No.")
},
{
"type": "report",
"name": "Serial No Service Contract Expiry",
"doctype": "Serial No"
},
{
"type": "report",
"name": "Serial No Status",
"doctype": "Serial No"
},
{
"type": "report",
"name": "Serial No Warranty Expiry",
"doctype": "Serial No"
},
]
},
{
"label": _("Tools"),
"icon": "fa fa-wrench",
"items": [
{
"type": "doctype",
"name": "Stock Reconciliation",
"description": _("Upload stock balance via csv.")
},
{
"type": "doctype",
"name": "Packing Slip",
"description": _("Split Delivery Note into packages.")
},
{
"type": "doctype",
"name": "Quality Inspection",
"description": _("Incoming quality inspection.")
},
{
"type": "doctype",
"name": "Landed Cost Voucher",
"description": _("Update additional costs to calculate landed cost of items"),
}
]
},
{
"label": _("Setup"),
"icon": "fa fa-cog",
"items": [
{
"type": "doctype",
"name": "Stock Settings",
"description": _("Default settings for stock transactions.")
},
{
"type": "doctype",
"name": "Warehouse",
"description": _("Where items are stored."),
},
{
"type": "doctype",
"name": "UOM",
"label": _("Unit of Measure") + " (UOM)",
"description": _("e.g. Kg, Unit, Nos, m")
},
{
"type": "doctype",
"name": "Item Attribute",
"description": _("Attributes for Item Variants. e.g Size, Color etc."),
},
{
"type": "doctype",
"name": "Brand",
"description": _("Brand master.")
},
]
},
{
"label": _("Analytics"),
"icon": "fa fa-table",
"items": [
{
"type": "report",
"is_query_report": False,
"name": "Item-wise Price List Rate",
"doctype": "Item Price",
},
{
"type": "page",
"name": "stock-analytics",
"label": _("Stock Analytics"),
"icon": "fa fa-bar-chart"
},
{
"type": "report",
"is_query_report": True,
"name": "Delivery Note Trends",
"doctype": "Delivery Note"
},
{
"type": "report",
"is_query_report": True,
"name": "Purchase Receipt Trends",
"doctype": "Purchase Receipt"
},
]
},
{
"label": _("Reports"),
"icon": "fa fa-list",
"items": [
{
"type": "report",
"is_query_report": True,
"name": "Ordered Items To Be Delivered",
"doctype": "Delivery Note"
},
{
"type": "report",
"is_query_report": True,
"name": "Purchase Order Items To Be Received",
"doctype": "Purchase Receipt"
},
{
"type": "report",
"name": "Item Shortage Report",
"route": "Report/Bin/Item Shortage Report",
"doctype": "Purchase Receipt"
},
{
"type": "report",
"is_query_report": True,
"name": "Requested Items To Be Transferred",
"doctype": "Material Request"
},
{
"type": "report",
"is_query_report": True,
"name": "Batch-Wise Balance History",
"doctype": "Batch"
},
{
"type": "report",
"is_query_report": True,
"name": "Batch Item Expiry Status",
"doctype": "Stock Ledger Entry"
},
{
"type": "report",
"is_query_report": True,
"name": "Item Prices",
"doctype": "Price List"
},
{
"type": "report",
"is_query_report": True,
"name": "Itemwise Recommended Reorder Level",
"doctype": "Item"
},
]
},
# {
# "label": _("Help"),
# "icon": "fa fa-facetime-video",
# "items": [
# {
# "type": "help",
# "label": _("Items and Pricing"),
# "youtube_id": "qXaEwld4_Ps"
# },
# {
# "type": "help",
# "label": _("Item Variants"),
# "youtube_id": "OGBETlCzU5o"
# },
# {
# "type": "help",
# "label": _("Opening Stock Balance"),
# "youtube_id": "0yPgrtfeCTs"
# },
# {
# "type": "help",
# "label": _("Making Stock Entries"),
# "youtube_id": "Njt107hlY3I"
# },
# {
# "type": "help",
# "label": _("Serialized Inventory"),
# "youtube_id": "gvOVlEwFDAk"
# },
# {
# "type": "help",
# "label": _("Batch Inventory"),
# "youtube_id": "J0QKl7ABPKM"
# },
# {
# "type": "help",
# "label": _("Managing Subcontracting"),
# "youtube_id": "ThiMCC2DtKo"
# },
# ]
# }
]
| gpl-3.0 | 6,314,041,835,165,396,000 | 20.666667 | 83 | 0.482378 | false |
prheenan/BioModel | WormLikeChain/Python/TestExamples/util.py | 1 | 5189 | # force floating point division. Can still use integer with //
from __future__ import division
# other good compatibility recquirements for python3
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
import sys
import FitUtil.WormLikeChain.Python.Code.WLC as WLC
_kbT = 4.11e-21
_K0 = 1200e-12
class test_object:
def __init__(self,name,max_force_N,ext=None,**kwargs):
if (ext is None):
L0 = dict(**kwargs)["L0"]
ext = np.linspace(0,L0,500)
self.name = name
self.max_force_N = max_force_N
self.ext = ext
self.param_values = dict(**kwargs)
def GetBullData(StepInNm=0.05):
"""
Returns samples from first unfold of Figure S2.a
http://pubs.acs.org/doi/suppl/10.1021/nn5010588
Bull, Matthew S., Ruby May A. Sullan, Hongbin Li, and Thomas T. Perkins.
"Improved Single Molecule Force Spectroscopy Using Micromachined Cantilevers"
"""
# get the extensions used
maxXnm = 19
    nSteps = int(np.ceil(maxXnm/StepInNm))
x = np.linspace(0,maxXnm,num=nSteps) * 1e-9
L0 = 0.34e-9 * 64
"""
# note: from Supplemental, pp 14 of
Edwards, Devin T., Jaevyn K. Faulk et al
"Optimizing 1-mus-Resolution Single-Molecule Force Spectroscopy..."
"""
Lp = 0.4e-9
ParamValues = dict(kbT = 4.11e-21,L0 = L0,
Lp = Lp,K0 = 1318.e-12)
Name = "Bull_2014_FigureS2"
noiseN = 6.8e-12
expectedMax=80e-12
return test_object(name="Bull",max_force_N=expectedMax,ext=x,**ParamValues)
def GetBouichatData(StepInNm=1):
"""
Returns samples from data from Figure 1
From "Estimating the Persistence Length of a Worm-Like Chain Molecule ..."
C. Bouchiat, M.D. Wang, et al.
Biophysical Journal Volume 76, Issue 1, January 1999, Pages 409-413
web.mit.edu/cortiz/www/3.052/3.052CourseReader/38_BouchiatBiophysicalJ1999.pdf
Returns:
tuple of <z,F> in SI units
"""
# upper and lower bound is taken from Figure 1, note nm scale
maxExtNm = 1335
# figure out the number of steps at this interpolation
nSteps = int(np.ceil(maxExtNm/StepInNm))
# get all the extension values
x = np.linspace(start=0,stop=maxExtNm,num=nSteps,endpoint=True) * 1e-9
# write down their parameter values, figure 1 inset
ParamValues = dict(kbT = 4.11e-21,L0 = 1317.52e-9,
Lp = 40.6e-9,K0 = 1318.e-12)
# the expected maximum fitted force is also from figure 1
expectedMax = 48e-12
Name = "Bouchiat_1999_Figure1"
return test_object(name=Name,max_force_N=expectedMax,ext=x,**ParamValues)
def get_fitting_parameters_with_noise(ext_pred,force_grid,params_fit,
noise_amplitude_N,ranges,Ns=10,
**brute_kwargs):
"""
Gets the fitting parameters after corrupting a known signal with noise
Args:
ext_pred: the x values for the fitting
force_grid: the y values for the fitting
        params_fit: extra keyword arguments passed through to WLC.fit
        noise_amplitude_N: amplitude of the (assumed Gaussian) noise, in Newtons
ranges: for parameters to fit, passed to brute
Ns: number of points to use, for brute
**brute_kwargs: passed to scipy.optimize.brute
Returns:
tuple of <fit parameters, predicted y, noisy y>
"""
noise_unitless = (np.random.normal(size=force_grid.size))
noise_N = noise_amplitude_N * noise_unitless
force_noise = force_grid + noise_N
brute_dict = dict(ranges=ranges,Ns=Ns,**brute_kwargs)
x0,y = WLC.fit(separation=ext_pred,force=force_noise,
brute_dict=brute_dict,
**params_fit)
return x0,y,force_noise
def ssDNA_example_data():
params_ssDNA = dict(kbT=_kbT,K0=_K0,L0=60e-9,Lp=0.7e-9)
to_ret = test_object(name="ssDNA",
max_force_N=65e-12,
**params_ssDNA)
return to_ret
def dsDNA_example_data():
params_dsDNA = dict(kbT=_kbT,K0=_K0,L0=500e-9,Lp=50e-9)
to_ret = test_object(name="dsDNA",
max_force_N=65e-12,
**params_dsDNA)
return to_ret
def protein_example_data():
params_protein = dict(kbT=_kbT,K0=_K0,L0=40e-9,Lp=0.3e-9)
to_ret = test_object(name="protein",
max_force_N=100e-12,
**params_protein)
return to_ret
def get_ext_and_force(test_obj):
ext = test_obj.ext
param_values = test_obj.param_values
force = np.linspace(0,test_obj.max_force_N,ext.size)
ext_pred,force_grid = WLC.SeventhOrderExtAndForceGrid(F=force,
**param_values)
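    # drop non-finite predictions and keep only points inside the requested
    # extension range before returning the grid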
idx_finite = np.where(np.isfinite(ext_pred))[0]
idx_good = np.where( (ext_pred[idx_finite] >= min(ext)) &
(ext_pred[idx_finite] <= max(ext)) )[0]
ext_pred = ext_pred[idx_finite[idx_good]]
force_grid = force_grid[idx_finite[idx_good]]
return ext_pred,force_grid
| gpl-2.0 | -4,838,821,080,969,498,000 | 36.601449 | 79 | 0.619002 | false |
algorhythms/LeetCode | 934 Shortest Bridge.py | 1 | 2622 | #!/usr/bin/python3
"""
In a given 2D binary array A, there are two islands. (An island is a
4-directionally connected group of 1s not connected to any other 1s.)
Now, we may change 0s to 1s so as to connect the two islands together to form 1
island.
Return the smallest number of 0s that must be flipped. (It is guaranteed that
the answer is at least 1.)
Example 1:
Input: [[0,1],[1,0]]
Output: 1
Example 2:
Input: [[0,1,0],[0,0,0],[0,0,1]]
Output: 2
Example 3:
Input: [[1,1,1,1,1],[1,0,0,0,1],[1,0,1,0,1],[1,0,0,0,1],[1,1,1,1,1]]
Output: 1
Note:
1 <= A.length = A[0].length <= 100
A[i][j] == 0 or A[i][j] == 1
"""
from typing import List
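# 4-directional neighbor offsets as (row, col) deltas, shared by DFS and BFS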
dirs = ((0, -1), (0, 1), (-1, 0), (1, 0))
class Solution:
def shortestBridge(self, A: List[List[int]]) -> int:
"""
        mark component 1 and component 2 (DFS coloring)
        naive idea: iterate every 0 and BFS, min(dist1 + dist2 - 1)
        -> O(N * N) BFS runs, too high complexity
        instead, BFS grow from 1 component until it reaches the other
"""
m, n = len(A), len(A[0])
# coloring
colors = [[None for _ in range(n)] for _ in range(m)]
color = 0
for i in range(m):
for j in range(n):
if A[i][j] == 1 and colors[i][j] is None:
self.dfs(A, i, j, colors, color)
color += 1
assert color == 2
# BFS
step = 0
q = []
visited = [[False for _ in range(n)] for _ in range(m)]
for i in range(m):
for j in range(n):
if colors[i][j] == 0:
visited[i][j] = True
q.append((i, j))
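        # grow the colored island level by level; `step` counts the 0-cells
        # flipped before the other island (color 1) is reached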
while q:
cur_q = []
for i, j in q:
for I, J in self.nbr(A, i, j):
if not visited[I][J]:
if colors[I][J] == None:
visited[I][J] = True # pre-check, dedup
cur_q.append((I, J))
elif colors[I][J] == 1:
return step
step += 1
q = cur_q
raise
def nbr(self, A, i, j):
m, n = len(A), len(A[0])
for di, dj in dirs:
I = i + di
J = j + dj
if 0 <= I < m and 0 <= J < n:
yield I, J
def dfs(self, A, i, j, colors, color):
colors[i][j] = color
for I, J in self.nbr(A, i, j):
if colors[I][J] is None and A[I][J] == 1:
self.dfs(A, I, J, colors, color)
if __name__ == "__main__":
assert Solution().shortestBridge([[1,1,1,1,1],[1,0,0,0,1],[1,0,1,0,1],[1,0,0,0,1],[1,1,1,1,1]]) == 1
| mit | -2,098,768,741,559,814,000 | 26.3125 | 104 | 0.450801 | false |
badele/home-assistant | homeassistant/components/zone.py | 1 | 4147 | """
homeassistant.components.zone
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Allows definition of zones in Home Assistant.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zone/
"""
import logging
from homeassistant.const import (
ATTR_HIDDEN, ATTR_ICON, ATTR_LATITUDE, ATTR_LONGITUDE, CONF_NAME)
from homeassistant.helpers import extract_domain_configs, generate_entity_id
from homeassistant.helpers.entity import Entity
from homeassistant.util.location import distance
DOMAIN = "zone"
DEPENDENCIES = []
ENTITY_ID_FORMAT = 'zone.{}'
ENTITY_ID_HOME = ENTITY_ID_FORMAT.format('home')
STATE = 'zoning'
DEFAULT_NAME = 'Unnamed zone'
ATTR_RADIUS = 'radius'
DEFAULT_RADIUS = 100
ICON_HOME = 'mdi:home'
def active_zone(hass, latitude, longitude, radius=0):
""" Find the active zone for given latitude, longitude. """
# Sort entity IDs so that we are deterministic if equal distance to 2 zones
zones = (hass.states.get(entity_id) for entity_id
in sorted(hass.states.entity_ids(DOMAIN)))
min_dist = None
closest = None
for zone in zones:
zone_dist = distance(
latitude, longitude,
zone.attributes[ATTR_LATITUDE], zone.attributes[ATTR_LONGITUDE])
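        # prefer the closest zone; on equal distance prefer the smaller radius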
within_zone = zone_dist - radius < zone.attributes[ATTR_RADIUS]
closer_zone = closest is None or zone_dist < min_dist
smaller_zone = (zone_dist == min_dist and
zone.attributes[ATTR_RADIUS] <
closest.attributes[ATTR_RADIUS])
if within_zone and (closer_zone or smaller_zone):
min_dist = zone_dist
closest = zone
return closest
def in_zone(zone, latitude, longitude, radius=0):
""" Test if given latitude, longitude is in given zone. """
zone_dist = distance(
latitude, longitude,
zone.attributes[ATTR_LATITUDE], zone.attributes[ATTR_LONGITUDE])
return zone_dist - radius < zone.attributes[ATTR_RADIUS]
def setup(hass, config):
""" Setup zone. """
entities = set()
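    # create one zone entity per configured entry; a default home zone is
    # added afterwards if none of them claims zone.home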
for key in extract_domain_configs(config, DOMAIN):
entries = config[key]
if not isinstance(entries, list):
entries = entries,
for entry in entries:
name = entry.get(CONF_NAME, DEFAULT_NAME)
latitude = entry.get(ATTR_LATITUDE)
longitude = entry.get(ATTR_LONGITUDE)
radius = entry.get(ATTR_RADIUS, DEFAULT_RADIUS)
icon = entry.get(ATTR_ICON)
if None in (latitude, longitude):
logging.getLogger(__name__).error(
'Each zone needs a latitude and longitude.')
continue
zone = Zone(hass, name, latitude, longitude, radius, icon)
zone.entity_id = generate_entity_id(ENTITY_ID_FORMAT, name,
entities)
zone.update_ha_state()
entities.add(zone.entity_id)
if ENTITY_ID_HOME not in entities:
zone = Zone(hass, hass.config.location_name, hass.config.latitude,
hass.config.longitude, DEFAULT_RADIUS, ICON_HOME)
zone.entity_id = ENTITY_ID_HOME
zone.update_ha_state()
return True
class Zone(Entity):
""" Represents a Zone in Home Assistant. """
# pylint: disable=too-many-arguments
def __init__(self, hass, name, latitude, longitude, radius, icon):
self.hass = hass
self._name = name
self.latitude = latitude
self.longitude = longitude
self.radius = radius
self._icon = icon
def should_poll(self):
return False
@property
def name(self):
return self._name
@property
def state(self):
""" The state property really does nothing for a zone. """
return STATE
@property
def icon(self):
return self._icon
@property
def state_attributes(self):
return {
ATTR_HIDDEN: True,
ATTR_LATITUDE: self.latitude,
ATTR_LONGITUDE: self.longitude,
ATTR_RADIUS: self.radius,
}
| mit | -537,197,058,929,668,000 | 29.270073 | 79 | 0.613456 | false |
tkensiski/rexus | Rexus/device_poller/devices/device_loader.py | 1 | 1483 | import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class DeviceLoader(object):
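    """Dynamically imports and memoizes device classes by device type.
    `device_class` objects are expected to expose a `file` attribute (module
    name under `rexus.devices`) and a `klass` attribute (class name)."""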
device_classes = {
# device_type : getattr(module, class_name)
}
def load_device_class(self, device_type, device_class):
logger.info('Loading class for device type: {device_type}'.format(
device_type=device_type.name
))
return self._load_device_class(device_type=device_type, device_class=device_class)
def _load_device_class(self, device_type, device_class):
if device_type in self.device_classes:
logger.debug('Class already loaded for: {device_type}'.format(
device_type=device_type.name
))
return self.device_classes[device_type]
# Build the module name
module_name = 'rexus.devices.{name}'.format(name=device_class.file)
# Import the module
module = __import__(module_name, fromlist=[device_class.klass])
# Get the class reference so we can use it later
loaded_class = getattr(module, device_class.klass)
# Memoize it for later
self._memoize_device_class(device_type=device_type, loaded_class=loaded_class)
# Pass it back so we can use it
return loaded_class
def _memoize_device_class(self, device_type, loaded_class):
self.device_classes[device_type] = loaded_class
| gpl-3.0 | 7,591,930,469,421,827,000 | 31.23913 | 90 | 0.641268 | false |
t11e/werkzeug | werkzeug/serving.py | 1 | 18996 | # -*- coding: utf-8 -*-
"""
werkzeug.serving
~~~~~~~~~~~~~~~~
There are many ways to serve a WSGI application. While you're developing
it you usually don't want a full blown webserver like Apache but a simple
standalone one. From Python 2.5 onwards there is the `wsgiref`_ server in
the standard library. If you're using older versions of Python you can
download the package from the cheeseshop.
However there are some caveats. Sourcecode won't reload itself when
changed and each time you kill the server using ``^C`` you get an
`KeyboardInterrupt` error. While the latter is easy to solve the first
one can be a pain in the ass in some situations.
The easiest way is creating a small ``start-myproject.py`` that runs the
application::
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from myproject import make_app
from werkzeug import run_simple
app = make_app(...)
run_simple('localhost', 8080, app, use_reloader=True)
You can also pass it a `extra_files` keyword argument with a list of
additional files (like configuration files) you want to observe.
For bigger applications you should consider using `werkzeug.script`
instead of a simple start file.
:copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import socket
import sys
import time
import thread
import subprocess
from urllib import unquote
from urlparse import urlparse
from itertools import chain
from SocketServer import ThreadingMixIn, ForkingMixIn
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
import werkzeug
from werkzeug._internal import _log
from werkzeug.exceptions import InternalServerError
class WSGIRequestHandler(BaseHTTPRequestHandler, object):
"""A request handler that implements WSGI dispatching."""
@property
def server_version(self):
return 'Werkzeug/' + werkzeug.__version__
def make_environ(self):
path_info, query = urlparse(self.path)[2::2]
url_scheme = self.server.ssl_context is None and 'http' or 'https'
environ = {
'wsgi.version': (1, 0),
'wsgi.url_scheme': url_scheme,
'wsgi.input': self.rfile,
'wsgi.errors': sys.stderr,
'wsgi.multithread': self.server.multithread,
'wsgi.multiprocess': self.server.multiprocess,
'wsgi.run_once': False,
'SERVER_SOFTWARE': self.server_version,
'REQUEST_METHOD': self.command,
'SCRIPT_NAME': '',
'PATH_INFO': unquote(path_info),
'QUERY_STRING': query,
'CONTENT_TYPE': self.headers.get('Content-Type', ''),
'CONTENT_LENGTH': self.headers.get('Content-Length', ''),
'REMOTE_ADDR': self.client_address[0],
'REMOTE_PORT': self.client_address[1],
'SERVER_NAME': self.server.server_address[0],
'SERVER_PORT': str(self.server.server_address[1]),
'SERVER_PROTOCOL': self.request_version
}
for key, value in self.headers.items():
key = 'HTTP_' + key.upper().replace('-', '_')
if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
environ[key] = value
return environ
def run_wsgi(self):
app = self.server.app
environ = self.make_environ()
headers_set = []
headers_sent = []
def write(data):
assert headers_set, 'write() before start_response'
if not headers_sent:
status, response_headers = headers_sent[:] = headers_set
code, msg = status.split(None, 1)
self.send_response(int(code), msg)
header_keys = set()
for key, value in response_headers:
self.send_header(key, value)
key = key.lower()
header_keys.add(key)
if 'content-length' not in header_keys:
self.close_connection = True
self.send_header('Connection', 'close')
if 'server' not in header_keys:
self.send_header('Server', self.version_string())
if 'date' not in header_keys:
self.send_header('Date', self.date_time_string())
self.end_headers()
assert type(data) is str, 'applications must write bytes'
self.wfile.write(data)
self.wfile.flush()
def start_response(status, response_headers, exc_info=None):
if exc_info:
try:
if headers_sent:
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None
elif headers_set:
raise AssertionError('Headers already set')
headers_set[:] = [status, response_headers]
return write
def execute(app):
application_iter = app(environ, start_response)
try:
for data in application_iter:
write(data)
# make sure the headers are sent
if not headers_sent:
write('')
finally:
if hasattr(application_iter, 'close'):
application_iter.close()
application_iter = None
try:
execute(app)
except (socket.error, socket.timeout), e:
self.connection_dropped(e, environ)
except:
if self.server.passthrough_errors:
raise
from werkzeug.debug.tbtools import get_current_traceback
traceback = get_current_traceback(ignore_system_exceptions=True)
try:
# if we haven't yet sent the headers but they are set
# we roll back to be able to set them again.
if not headers_sent:
del headers_set[:]
execute(InternalServerError())
except:
pass
self.server.log('error', 'Error on request:\n%s',
traceback.plaintext)
def handle(self):
"""Handles a request ignoring dropped connections."""
try:
return BaseHTTPRequestHandler.handle(self)
except (socket.error, socket.timeout), e:
self.connection_dropped(e)
except:
if self.server.ssl_context is None or not is_ssl_error():
raise
def connection_dropped(self, error, environ=None):
"""Called if the connection was closed by the client. By default
nothing happens.
"""
def handle_one_request(self):
"""Handle a single HTTP request."""
self.raw_requestline = self.rfile.readline()
if not self.raw_requestline:
self.close_connection = 1
elif self.parse_request():
return self.run_wsgi()
def send_response(self, code, message=None):
"""Send the response header and log the response code."""
self.log_request(code)
if message is None:
message = code in self.responses and self.responses[code][0] or ''
if self.request_version != 'HTTP/0.9':
self.wfile.write("%s %d %s\r\n" %
(self.protocol_version, code, message))
def version_string(self):
return BaseHTTPRequestHandler.version_string(self).strip()
def address_string(self):
return self.client_address[0]
def log_request(self, code='-', size='-'):
self.log('info', '"%s" %s %s', self.requestline, code, size)
def log_error(self, *args):
self.log('error', *args)
def log_message(self, format, *args):
self.log('info', format, *args)
def log(self, type, message, *args):
_log(type, '%s - - [%s] %s\n' % (self.address_string(),
self.log_date_time_string(),
message % args))
#: backwards compatible name if someone is subclassing it
BaseRequestHandler = WSGIRequestHandler
def generate_adhoc_ssl_context():
"""Generates an adhoc SSL context for the development server."""
from random import random
from OpenSSL import crypto, SSL
cert = crypto.X509()
cert.set_serial_number(int(random() * sys.maxint))
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(60 * 60 * 24 * 365)
subject = cert.get_subject()
subject.CN = '*'
subject.O = 'Dummy Certificate'
issuer = cert.get_issuer()
issuer.CN = 'Untrusted Authority'
issuer.O = 'Self-Signed'
pkey = crypto.PKey()
pkey.generate_key(crypto.TYPE_RSA, 768)
cert.set_pubkey(pkey)
cert.sign(pkey, 'md5')
ctx = SSL.Context(SSL.SSLv23_METHOD)
ctx.use_privatekey(pkey)
ctx.use_certificate(cert)
return ctx
def is_ssl_error(error=None):
"""Checks if the given error (or the current one) is an SSL error."""
if error is None:
error = sys.exc_info()[1]
from OpenSSL import SSL
return isinstance(error, SSL.Error)
class _SSLConnectionFix(object):
"""Wrapper around SSL connection to provide a working makefile()."""
def __init__(self, con):
self._con = con
def makefile(self, mode, bufsize):
return socket._fileobject(self._con, mode, bufsize)
def __getattr__(self, attrib):
return getattr(self._con, attrib)
class BaseWSGIServer(HTTPServer, object):
"""Simple single-threaded, single-process WSGI server."""
multithread = False
multiprocess = False
def __init__(self, host, port, app, handler=None,
passthrough_errors=False, ssl_context=None):
if handler is None:
handler = WSGIRequestHandler
HTTPServer.__init__(self, (host, int(port)), handler)
self.app = app
self.passthrough_errors = passthrough_errors
if ssl_context is not None:
try:
from OpenSSL import tsafe
except ImportError:
raise TypeError('SSL is not available if the OpenSSL '
'library is not installed.')
if ssl_context == 'adhoc':
ssl_context = generate_adhoc_ssl_context()
self.socket = tsafe.Connection(ssl_context, self.socket)
self.ssl_context = ssl_context
else:
self.ssl_context = None
def log(self, type, message, *args):
_log(type, message, *args)
def serve_forever(self):
try:
HTTPServer.serve_forever(self)
except KeyboardInterrupt:
pass
def handle_error(self, request, client_address):
if self.passthrough_errors:
raise
else:
return HTTPServer.handle_error(self, request, client_address)
def get_request(self):
con, info = self.socket.accept()
if self.ssl_context is not None:
con = _SSLConnectionFix(con)
return con, info
class ThreadedWSGIServer(ThreadingMixIn, BaseWSGIServer):
"""A WSGI server that does threading."""
multithread = True
class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer):
"""A WSGI server that does forking."""
multiprocess = True
def __init__(self, host, port, app, processes=40, handler=None,
passthrough_errors=False, ssl_context=None):
BaseWSGIServer.__init__(self, host, port, app, handler,
passthrough_errors, ssl_context)
self.max_children = processes
def make_server(host, port, app=None, threaded=False, processes=1,
request_handler=None, passthrough_errors=False,
ssl_context=None):
"""Create a new server instance that is either threaded, or forks
or just processes one request after another.
"""
if threaded and processes > 1:
raise ValueError("cannot have a multithreaded and "
"multi process server.")
elif threaded:
return ThreadedWSGIServer(host, port, app, request_handler,
passthrough_errors, ssl_context)
elif processes > 1:
return ForkingWSGIServer(host, port, app, processes, request_handler,
passthrough_errors, ssl_context)
else:
return BaseWSGIServer(host, port, app, request_handler,
passthrough_errors, ssl_context)
def reloader_loop(extra_files=None, interval=1):
"""When this function is run from the main thread, it will force other
threads to exit when any modules currently loaded change.
Copyright notice. This function is based on the autoreload.py from
the CherryPy trac which originated from WSGIKit which is now dead.
:param extra_files: a list of additional files it should watch.
"""
def iter_module_files():
for module in sys.modules.values():
filename = getattr(module, '__file__', None)
if filename:
old = None
while not os.path.isfile(filename):
old = filename
filename = os.path.dirname(filename)
if filename == old:
break
else:
if filename[-4:] in ('.pyc', '.pyo'):
filename = filename[:-1]
yield filename
mtimes = {}
while 1:
for filename in chain(iter_module_files(), extra_files or ()):
try:
mtime = os.stat(filename).st_mtime
except OSError:
continue
old_time = mtimes.get(filename)
if old_time is None:
mtimes[filename] = mtime
continue
elif mtime > old_time:
_log('info', ' * Detected change in %r, reloading' % filename)
sys.exit(3)
time.sleep(interval)
def restart_with_reloader():
"""Spawn a new Python interpreter with the same arguments as this one,
but running the reloader thread.
"""
while 1:
_log('info', ' * Restarting with reloader...')
args = [sys.executable] + sys.argv
new_environ = os.environ.copy()
new_environ['WERKZEUG_RUN_MAIN'] = 'true'
exit_code = subprocess.call(args, env=new_environ)
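        # exit code 3 is the reloader's "please restart" signal (reloader_loop
        # calls sys.exit(3) when it detects a changed file)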
if exit_code != 3:
return exit_code
def run_with_reloader(main_func, extra_files=None, interval=1):
"""Run the given function in an independent python interpreter."""
if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
thread.start_new_thread(main_func, ())
try:
reloader_loop(extra_files, interval)
except KeyboardInterrupt:
return
try:
sys.exit(restart_with_reloader())
except KeyboardInterrupt:
pass
def run_simple(hostname, port, application, use_reloader=False,
use_debugger=False, use_evalex=True,
extra_files=None, reloader_interval=1, threaded=False,
processes=1, request_handler=None, static_files=None,
passthrough_errors=False, ssl_context=None):
"""Start an application using wsgiref and with an optional reloader. This
wraps `wsgiref` to fix the wrong default reporting of the multithreaded
WSGI variable and adds optional multithreading and fork support.
.. versionadded:: 0.5
`static_files` was added to simplify serving of static files as well
as `passthrough_errors`.
.. versionadded:: 0.6
support for SSL was added.
:param hostname: The host for the application. eg: ``'localhost'``
:param port: The port for the server. eg: ``8080``
:param application: the WSGI application to execute
:param use_reloader: should the server automatically restart the python
process if modules were changed?
:param use_debugger: should the werkzeug debugging system be used?
:param use_evalex: should the exception evaluation feature be enabled?
:param extra_files: a list of files the reloader should watch
additionally to the modules. For example configuration
files.
:param reloader_interval: the interval for the reloader in seconds.
:param threaded: should the process handle each request in a separate
thread?
:param processes: number of processes to spawn.
:param request_handler: optional parameter that can be used to replace
the default one. You can use this to replace it
with a different
:class:`~BaseHTTPServer.BaseHTTPRequestHandler`
subclass.
:param static_files: a dict of paths for static files. This works exactly
like :class:`SharedDataMiddleware`, it's actually
just wrapping the application in that middleware before
serving.
:param passthrough_errors: set this to `True` to disable the error catching.
This means that the server will die on errors but
it can be useful to hook debuggers in (pdb etc.)
    :param ssl_context: an SSL context for the connection, 'adhoc' if the server
should automatically create one, or `None` to disable
SSL (which is the default).
"""
if use_debugger:
from werkzeug.debug import DebuggedApplication
application = DebuggedApplication(application, use_evalex)
if static_files:
from werkzeug.utils import SharedDataMiddleware
application = SharedDataMiddleware(application, static_files)
def inner():
make_server(hostname, port, application, threaded,
processes, request_handler,
passthrough_errors, ssl_context).serve_forever()
if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
display_hostname = hostname or '127.0.0.1'
_log('info', ' * Running on http://%s:%d/', display_hostname, port)
if use_reloader:
# Create and destroy a socket so that any exceptions are raised before
# we spawn a separate Python interpreter and lose this ability.
test_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
test_socket.bind((hostname, port))
test_socket.close()
run_with_reloader(inner, extra_files, reloader_interval)
else:
inner()
| bsd-3-clause | -766,306,627,783,075,700 | 36.916168 | 80 | 0.58686 | false |
artoonie/transcroobie | transcroobie/settings.py | 1 | 4966 | """
Django settings for transcroobie project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['DJANGO_SECRET_KEY']
ALLOWED_HOSTS = ['transcroobie.herokuapp.com', 'localhost']
# Application definition
INSTALLED_APPS = [
'transcroobie',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'hit',
'hitrequest',
'storages',
'social.apps.django_app.default',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'transcroobie.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'transcroobie.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
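# dj_database_url reads the connection settings from the DATABASE_URL
# environment variable (standard on Heroku).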
DATABASES = {'default': dj_database_url.config()}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'PST8PDT'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
STATIC_ROOT = os.path.join(SITE_ROOT, 'static/')
AWS_QUERYSTRING_AUTH = False
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
AWS_STORAGE_BUCKET_NAME = os.environ['AWS_STORAGE_BUCKET_NAME']
AWS_S3_HOST = os.environ['AWS_S3_HOST']
GS_ACCESS_KEY_ID = os.environ['GS_ACCESS_KEY_ID']
GS_SECRET_ACCESS_KEY = os.environ['GS_SECRET_ACCESS_KEY']
GS_BUCKET_NAME = os.environ['GS_BUCKET_NAME']
DEFAULT_FILE_STORAGE = "storages.backends.gs.GSBotoStorage"
#DEFAULT_FILE_STORAGE = "storages.backends.s3boto.S3BotoStorage"
MEDIA_URL = '/media/'
MEDIA_ROOT = '/tmp/'
IS_DEV_ENV = str(os.environ.get('I_AM_IN_DEV_ENV')) != "0"
USE_AMT_SANDBOX = str(os.environ.get('USE_AMT_SANDBOX')) != "0"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = IS_DEV_ENV
# Celery
BROKER_URL = os.environ.get('REDIS_URL')
CELERY_RESULT_BACKEND = os.environ.get('REDIS_URL')
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = 'US/Pacific'
# Google oauth
AUTHENTICATION_BACKENDS = (
#'social.backends.open_id.OpenIdAuth',
#'social.backends.google.GoogleOpenId',
'social.backends.google.GoogleOAuth2',
#'social.backends.google.GoogleOAuth',
#'social.backends.twitter.TwitterOAuth',
#'social.backends.yahoo.YahooOpenId',
'django.contrib.auth.backends.ModelBackend',
)
LOGIN_URL = '/login/'
LOGOUT_URL = '/logout/'
LOGIN_REDIRECT_URL = '/hitrequest/index.html'
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = os.environ['SOCIAL_AUTH_GOOGLE_OAUTH2_KEY']
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = os.environ['SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET']
SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS = ['avametric.com'] # for safety, for now
SOCIAL_AUTH_USER_MODEL = 'auth.User'
| gpl-3.0 | -2,759,039,616,634,398,700 | 28.384615 | 91 | 0.706404 | false |
Dwii/Master-Thesis | implementation/Python/lbm_2d_3d_example/cylinder3d.py | 1 | 1909 | # Copyright (C) 2013 FlowKit Ltd
from numpy import *
from pylb import multi
from pylb import lbio
#def inivelfun(x, y, z, d):
# """ v_x(x,y) = uMax*(1+.2*sin(y/ly*2pi)+.2*sin(z/lz*2pi)). v_y(x,y) = v_y(x,y)= 0 """
# return (d==0) * uLB * (1.0 + 1e-2 * sin(y/ly *2*pi) +
# 1e-2 * sin(z/lz *2*pi))
class InivelFun(object):
def __init__(self, uLB, ly, lz):
self.uLB, self.ly, self.lz = uLB, ly, lz
def __call__(self, x, y, z, d):
""" v_x(x,y) = uMax*(1+.2*sin(y/ly*2pi)+.2*sin(z/lz*2pi)). v_y(x,y) = v_y(x,y)= 0 """
return (d==0) * self.uLB * (1.0 + 1e-2 * sin(y/self.ly *2*pi) +
1e-2 * sin(z/self.lz *2*pi))
def cylinder(nx=160, ny=60, nz=60, Re=220.0, maxIter=10000, plotImages=True):
ly=ny-1.0
lz=nz-1.0
    cx, cy, cz = nx/4, ny/2, nz/2  # Cylinder center coordinates.
    r = ny/9                       # Cylinder radius.
uLB = 0.04 # Velocity in lattice units.
nulb = uLB * r / Re
omega = 1.0 / (3. * nulb + 0.5); # Relaxation parameter.
with multi.GenerateBlock((nx, ny, nz), omega) as block:
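        # Obstacle mask: a solid circular cylinder whose axis runs along y.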
block.wall = fromfunction(lambda x, y, z: (x-cx)**2 + (z-cz)**2 < r**2, (nx, ny, nz))
inivelfun = InivelFun(uLB, ly, lz)
inivel = fromfunction(inivelfun, (nx, ny, nz, 3))
block.inipopulations(inivelfun)
block.setboundaryvel(inivelfun)
if plotImages:
plot = lbio.Plot(block.velnorm()[:,ny//2,:])
for time in range(maxIter):
block.collide_and_stream()
if (plotImages and time%10==0):
lbio.writelog(sum(sum(sum(block.wallforce()[:,:,:,0]))))
plot.draw(block.velnorm()[:,:,nz//2])
#print(block.fin[10,10,10,3])
#plot.savefig("vel."+str(time/100).zfill(4)+".png")
if __name__ == "__main__":
cylinder(maxIter=10000, plotImages=True)
| mit | 5,910,386,185,579,618,000 | 36.431373 | 94 | 0.510739 | false |
SymbiFlow/symbiflow-arch-defs | utils/update_arch_timings.py | 1 | 9248 | #!/usr/bin/env python3
import lxml.etree as ET
import argparse
from sdf_timing import sdfparse
from sdf_timing.utils import get_scale_seconds
from lib.pb_type import get_pb_type_chain
import re
import os
import sys
# Adds output to stderr to track if timing data for a particular BEL was found
# in bels.json
DEBUG = False
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def mergedicts(source, destination):
"""This function recursively merges two dictionaries:
    `source` into `destination`."""
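    # e.g. mergedicts({'a': {'x': 1}}, {'a': {'y': 2}}) leaves destination as
    # {'a': {'y': 2, 'x': 1}}; on conflicting keys the source value wins
    # (illustrative values)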
for key, value in source.items():
if isinstance(value, dict):
# get node or create one
node = destination.setdefault(key, {})
mergedicts(value, node)
else:
destination[key] = value
return destination
def remove_site_number(site):
"""Some sites are numbered in the VPR arch definitions.
This happens for e.g. SLICE0. This function removes
trailing numbers from the name"""
number = re.search(r'\d+$', site)
if number is not None:
site = site[:-len(str(number.group()))]
return site
def gen_all_possibilities(pattern):
"""
Generates all possible combinations of a pattern if it contains a
    wildcard string in brackets, e.g. "LUT[ABCD]" will yield "LUTA", "LUTB"
and so on.
>>> list(gen_all_possibilities("LUT"))
['LUT']
>>> list(gen_all_possibilities("LUT[ABCD]"))
['LUTA', 'LUTB', 'LUTC', 'LUTD']
"""
# Match the regex
match = re.match(r"(.*)\[([A-Za-z0-9]+)\](.*)", pattern)
# Generate combinations
if match is not None:
for c in match.group(2):
yield match.group(1) + c + match.group(3)
# Not a regex
else:
yield pattern
def get_cell_types_and_instances(bel, location, site, bels):
"""This function searches for a bel type and instance
translation between VPR and Vivado. The translation
is defined in the `bels` dictionary. If translation
is found a list of celltypes and bel instances is returned,
None otherwise"""
if site not in bels:
if DEBUG:
eprint(
"Site '{}' not found among '{}'".format(
site, ", ".join(bels.keys())
)
)
return None
if bel not in bels[site]:
if DEBUG:
eprint(
"Bel '{}' not found among '{}'".format(
bel, ", ".join(bels[site].keys())
)
)
return None
if location not in bels[site][bel]:
if DEBUG:
eprint(
"Location '{}' not found among '{}'".format(
location, ", ".join(bels[site][bel].keys())
)
)
return None
# Generate a list of tuples (celltype, instance)
cells = []
for pattern in bels[site][bel][location]:
for names in gen_all_possibilities(pattern):
cells.append(tuple(names.split(".")))
return cells
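# Illustrative sketch (the bels.json layout below is an assumption, not copied
# from the real file):
#   >>> bels = {"SLICEL": {"AFF": {"SLICEL_X0": ["FDRE.AFF"]}}}
#   >>> get_cell_types_and_instances("AFF", "SLICEL_X0", "SLICEL", bels)
#   [('FDRE', 'AFF')]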
def find_timings(timings, bel, location, site, bels, corner, speed_type):
"""This function returns all the timings associated with
the selected `bel` in `location` and `site`. If timings
are not found, empty dict is returned"""
def get_timing(cell, delay, corner, speed_type):
"""
        Gets the timing for a particular corner. If it is not found, it chooses
the next best one.
"""
entries = cell[delay]['delay_paths'][corner.lower()]
entry = entries.get(speed_type, None)
if speed_type == 'min':
if entry is None:
entry = entries.get('avg', None)
if entry is None:
entry = entries.get('max', None)
elif speed_type == 'avg':
if entry is None:
entry = entries.get('max', None)
if entry is None:
entry = entries.get('min', None)
elif speed_type == 'max':
if entry is None:
entry = entries.get('avg', None)
if entry is None:
entry = entries.get('min', None)
if entry is None:
# if we failed with desired corner, try the opposite
newcorner = 'FAST' if corner == 'SLOW' else 'SLOW'
entry = get_timing(cell, delay, newcorner, speed_type)
assert entry is not None, (delay, corner, speed_type)
return entry
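    # For example (illustrative): get_timing(cell, delay, 'slow', 'min') first
    # looks at cell[delay]['delay_paths']['slow']['min'], then falls back to
    # 'avg', then 'max', and finally falls back to the other timing corner.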
# Get cells, reverse the list so former timings will be overwritten by
# latter ones.
cells = get_cell_types_and_instances(bel, location, site, bels)
if cells is None:
return None
cells.reverse()
# Gather CELLs
cell = dict()
for ct, inst in cells:
cell = mergedicts(timings['cells'][ct][inst], cell)
# Gather timings
bel_timings = dict()
for delay in cell:
if cell[delay]['is_absolute']:
entry = get_timing(cell, delay, corner.lower(), speed_type)
elif cell[delay]['is_timing_check']:
if cell[delay]['type'] == "setuphold":
# 'setup' and 'hold' are identical
entry = get_timing(cell, delay, 'setup', speed_type)
else:
entry = get_timing(cell, delay, 'nominal', speed_type)
bel_timings[delay] = float(entry) * get_scale_seconds('1 ns')
return bel_timings
def get_bel_timings(element, timings, bels, corner, speed_type):
"""This function returns all the timings for an arch.xml
`element`. It determines the bel location by traversing
the pb_type chain"""
pb_chain = get_pb_type_chain(element)
if len(pb_chain) == 1:
return None
if 'max' in element.attrib and element.attrib['max'].startswith(
'{interconnect'):
bel = 'ROUTING_BEL'
else:
bel = pb_chain[-1]
location = pb_chain[-2]
site = remove_site_number(pb_chain[1])
result = find_timings(
timings, bel, location, site, bels, corner, speed_type
)
if DEBUG:
print(site, bel, location, result is not None, file=sys.stderr)
return result
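# Illustrative example (pb_type names are made up): for a pb_type chain such as
# ['CLBLL_L', 'SLICEL0', 'ALUT', 'AFF'], the site is 'SLICEL' (trailing digits
# stripped from pb_chain[1]), the location is 'ALUT' and the bel is 'AFF'.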
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--input_arch', required=True, help="Input arch.xml file"
)
parser.add_argument('--sdf_dir', required=True, help="SDF files directory")
parser.add_argument(
'--out_arch', required=True, help="Output arch.xml file"
)
parser.add_argument(
'--bels_map',
required=True,
help="VPR <-> timing info bels mapping json file"
)
args = parser.parse_args()
arch_xml = ET.ElementTree()
root_element = arch_xml.parse(args.input_arch)
# read bels json
import json
with open(args.bels_map, 'r') as fp:
bels = json.load(fp)
timings = dict()
files = os.listdir(args.sdf_dir)
for f in files:
if not f.endswith('.sdf'):
continue
with open(args.sdf_dir + '/' + f, 'r') as fp:
try:
tmp = sdfparse.parse(fp.read())
except Exception as ex:
print("{}:".format(args.sdf_dir + '/' + f), file=sys.stderr)
print(repr(ex), file=sys.stderr)
raise
mergedicts(tmp, timings)
if DEBUG:
with open("/tmp/dump.json", 'w') as fp:
json.dump(timings, fp, indent=4)
for dm in root_element.iter('delay_matrix'):
if dm.attrib['type'] == 'max':
bel_timings = get_bel_timings(dm, timings, bels, 'SLOW', 'max')
elif dm.attrib['type'] == 'min':
bel_timings = get_bel_timings(dm, timings, bels, 'FAST', 'min')
else:
assert dm.attrib['type']
if bel_timings is None:
continue
dm.text = dm.text.format(**bel_timings)
for dc in root_element.iter('delay_constant'):
format_s = dc.attrib['max']
max_tim = get_bel_timings(dc, timings, bels, 'SLOW', 'max')
if max_tim is not None:
dc.attrib['max'] = format_s.format(**max_tim)
min_tim = get_bel_timings(dc, timings, bels, 'FAST', 'min')
if min_tim is not None:
dc.attrib['min'] = format_s.format(**min_tim)
for tq in root_element.iter('T_clock_to_Q'):
format_s = tq.attrib['max']
max_tim = get_bel_timings(tq, timings, bels, 'SLOW', 'max')
if max_tim is not None:
tq.attrib['max'] = format_s.format(**max_tim)
min_tim = get_bel_timings(tq, timings, bels, 'FAST', 'min')
if min_tim is not None:
tq.attrib['min'] = format_s.format(**min_tim)
for ts in root_element.iter('T_setup'):
bel_timings = get_bel_timings(ts, timings, bels, 'SLOW', 'max')
if bel_timings is None:
continue
ts.attrib['value'] = ts.attrib['value'].format(**bel_timings)
for th in root_element.iter('T_hold'):
bel_timings = get_bel_timings(th, timings, bels, 'FAST', 'min')
if bel_timings is None:
continue
th.attrib['value'] = th.attrib['value'].format(**bel_timings)
with open(args.out_arch, 'wb') as fp:
fp.write(ET.tostring(arch_xml))
if __name__ == "__main__":
main()
| isc | -8,196,329,145,353,724,000 | 30.138047 | 79 | 0.566393 | false |
esi-mineset/spark | python/pyspark/rdd.py | 1 | 96405 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import sys
import os
import re
import operator
import shlex
import warnings
import heapq
import bisect
import random
import socket
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
if sys.version > '3':
basestring = unicode = str
else:
from itertools import imap as map, ifilter as filter
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, PairDeserializer, \
PickleSerializer, pack_long, AutoBatchedSerializer
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_full_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import Aggregator, ExternalMerger, \
get_used_memory, ExternalSorter, ExternalGroupBy
from pyspark.traceback_utils import SCCallSiteSync
__all__ = ["RDD"]
class PythonEvalType(object):
"""
Evaluation type of python rdd.
These values are internal to PySpark.
These values should match values in org.apache.spark.api.python.PythonEvalType.
"""
NON_UDF = 0
SQL_BATCHED_UDF = 100
SQL_SCALAR_PANDAS_UDF = 200
SQL_GROUPED_MAP_PANDAS_UDF = 201
SQL_GROUPED_AGG_PANDAS_UDF = 202
def portable_hash(x):
"""
This function returns consistent hash code for builtin types, especially
for None and tuple with None.
The algorithm is similar to that one used by CPython 2.7
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if sys.version_info >= (3, 2, 3) and 'PYTHONHASHSEED' not in os.environ:
raise Exception("Randomness of hash of string should be disabled via PYTHONHASHSEED")
if x is None:
return 0
if isinstance(x, tuple):
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= sys.maxsize
h ^= len(x)
if h == -1:
h = -2
return int(h)
return hash(x)
class BoundedFloat(float):
"""
    A bounded value is generated by an approximate job, with a confidence
    level and low and high bounds.
>>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
100.0
"""
def __new__(cls, mean, confidence, low, high):
obj = float.__new__(cls, mean)
obj.confidence = confidence
obj.low = low
obj.high = high
return obj
def _parse_memory(s):
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MB
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024}
if s[-1].lower() not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
def _load_from_socket(port, serializer):
sock = None
# Support for both IPv4 and IPv6.
# On most of IPv6-ready systems, IPv6 will take precedence.
for res in socket.getaddrinfo("localhost", port, socket.AF_UNSPEC, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = socket.socket(af, socktype, proto)
try:
sock.settimeout(15)
sock.connect(sa)
except socket.error:
sock.close()
sock = None
continue
break
if not sock:
raise Exception("could not open socket")
    # The RDD materialization time is unpredictable; if we set a timeout for the socket reading
# operation, it will very possibly fail. See SPARK-18281.
sock.settimeout(None)
# The socket will be automatically closed when garbage-collected.
return serializer.load_stream(sock.makefile("rb", 65536))
def ignore_unicode_prefix(f):
"""
    Ignore the 'u' prefix of strings in doctests, so that they work
    in both Python 2 and 3.
"""
if sys.version >= '3':
# the representation of unicode string in Python 3 does not have prefix 'u',
# so remove the prefix 'u' for doc tests
literal_re = re.compile(r"(\W|^)[uU](['])", re.UNICODE)
f.__doc__ = literal_re.sub(r'\1\2', f.__doc__)
return f
class Partitioner(object):
def __init__(self, numPartitions, partitionFunc):
self.numPartitions = numPartitions
self.partitionFunc = partitionFunc
def __eq__(self, other):
return (isinstance(other, Partitioner) and self.numPartitions == other.numPartitions
and self.partitionFunc == other.partitionFunc)
def __call__(self, k):
return self.partitionFunc(k) % self.numPartitions
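# Example (illustrative): Partitioner(8, portable_hash)("spark") maps the key
# to a bucket in range(8); two Partitioner objects compare equal only when both
# the partition count and the partition function match, which lets an RDD reuse
# an existing partitioning instead of reshuffling.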
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(PickleSerializer())):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
self.partitioner = None
def _pickled(self):
return self._reserialize(AutoBatchedSerializer(PickleSerializer()))
def id(self):
"""
A unique ID for this RDD (within its SparkContext).
"""
return self._id
def __repr__(self):
return self._jrdd.toString()
def __getnewargs__(self):
# This method is called when attempting to pickle an RDD, which is always an error:
raise Exception(
"It appears that you are attempting to broadcast an RDD or reference an RDD from an "
"action or transformation. RDD transformations and actions can only be invoked by the "
"driver, not inside of other transformations; for example, "
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
"transformation and count action cannot be performed inside of the rdd1.map "
"transformation. For more information, see SPARK-5063."
)
@property
def context(self):
"""
The L{SparkContext} that this RDD was created on.
"""
return self.ctx
def cache(self):
"""
Persist this RDD with the default storage level (C{MEMORY_ONLY}).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY)
return self
def persist(self, storageLevel=StorageLevel.MEMORY_ONLY):
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
If no storage level is specified defaults to (C{MEMORY_ONLY}).
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> rdd.persist().is_cached
True
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self):
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
"""
self.is_cached = False
self._jrdd.unpersist()
return self
def checkpoint(self):
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with L{SparkContext.setCheckpointDir()} and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory, otherwise saving it
on a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
"""
Return whether this RDD is checkpointed and materialized, either reliably or locally.
"""
return self._jrdd.rdd().isCheckpointed()
def localCheckpoint(self):
"""
Mark this RDD for local checkpointing using Spark's existing caching layer.
This method is for users who wish to truncate RDD lineages while skipping the expensive
step of replicating the materialized data in a reliable distributed file system. This is
useful for RDDs with long lineages that need to be truncated periodically (e.g. GraphX).
Local checkpointing sacrifices fault-tolerance for performance. In particular, checkpointed
data is written to ephemeral local storage in the executors instead of to a reliable,
fault-tolerant storage. The effect is that if an executor fails during the computation,
the checkpointed data may no longer be accessible, causing an irrecoverable job failure.
This is NOT safe to use with dynamic allocation, which removes executors along
with their cached blocks. If you must use both features, you are advised to set
L{spark.dynamicAllocation.cachedExecutorIdleTimeout} to a high value.
The checkpoint directory set through L{SparkContext.setCheckpointDir()} is not used.
"""
self._jrdd.rdd().localCheckpoint()
def isLocallyCheckpointed(self):
"""
Return whether this RDD is marked for local checkpointing.
Exposed for testing.
"""
return self._jrdd.rdd().isLocallyCheckpointed()
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed
Not defined if RDD is checkpointed locally.
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_, iterator):
return map(f, iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(map(f, iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
"""
Deprecated: use mapPartitionsWithIndex instead.
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn("mapPartitionsWithSplit is deprecated; "
"use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self):
"""
Returns the number of partitions in RDD
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.partitions().size()
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator):
return filter(f, iterator)
return self.mapPartitions(func, True)
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x, numPartitions) \
.map(lambda x: x[0])
def sample(self, withReplacement, fraction, seed=None):
"""
Return a sampled subset of this RDD.
:param withReplacement: can elements be sampled multiple times (replaced when sampled out)
:param fraction: expected size of the sample as a fraction of this RDD's size
without replacement: probability that each element is chosen; fraction must be [0, 1]
with replacement: expected number of times each element is chosen; fraction must be >= 0
:param seed: seed for the random number generator
.. note:: This is not guaranteed to provide exactly the fraction specified of the total
            count of the given :class:`RDD`.
>>> rdd = sc.parallelize(range(100), 4)
>>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
True
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(self, weights, seed=None):
"""
Randomly splits this RDD with the provided weights.
:param weights: weights for splits, will be normalized if they don't sum to 1
:param seed: random seed
:return: split RDDs in a list
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])]
# this is ported from scala/spark/RDD.scala
def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
if num > maxSampleSize:
raise ValueError(
"Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(
num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxsize)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if (sampleSizeLowerBound < 12):
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = - log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
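    # Illustrative check (added; not in the original source): for
    # sampleSizeLowerBound=100 and total=10000 without replacement,
    # fraction = 0.01, gamma = -log(5e-5)/10000 ~ 9.9e-4, and the returned
    # rate is ~0.0156, i.e. we oversample by roughly 56% to get >=100 rows
    # 99.99% of the time.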
def union(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
self._jrdd_deserializer)
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
self.ctx.serializer)
if (self.partitioner == other.partitioner and
self.getNumPartitions() == rdd.getNumPartitions()):
rdd.partitioner = self.partitioner
return rdd
def intersection(self, other):
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
.. note:: This method performs a shuffle internally.
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return self.map(lambda v: (v, None)) \
.cogroup(other.map(lambda v: (v, None))) \
.filter(lambda k_vs: all(k_vs[1])) \
.keys()
def _reserialize(self, serializer=None):
serializer = serializer or self.ctx.serializer
if self._jrdd_deserializer != serializer:
self = self.map(lambda x: x, preservesPartitioning=True)
self._jrdd_deserializer = serializer
return self
def __add__(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash,
ascending=True, keyfunc=lambda x: x):
"""
Repartition the RDD according to the given partitioner and, within each resulting partition,
sort records by their keys.
>>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
>>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, True)
>>> rdd2.glom().collect()
[[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))
return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1)
return self.mapPartitions(sortPartition, True)
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
samples = sorted(samples, key=keyfunc)
        # we have numPartitions many parts but one of them has
# an implicit boundary
bounds = [samples[int(len(samples) * (i + 1) / numPartitions)]
for i in range(0, numPartitions - 1)]
def rangePartitioner(k):
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p
else:
return numPartitions - 1 - p
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
"""
Sorts this RDD by the given keyfunc
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator):
yield list(iterator)
return self.mapPartitions(func)
def cartesian(self, other):
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
C{b} is in C{other}.
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash):
"""
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc)
@ignore_unicode_prefix
def pipe(self, command, env=None, checkCode=False):
"""
Return an RDD created by piping elements to a forked external process.
>>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
[u'1', u'2', u'', u'3']
:param checkCode: whether or not to check the return value of the shell command.
"""
if env is None:
env = dict()
def func(iterator):
pipe = Popen(
shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out):
for obj in iterator:
s = unicode(obj).rstrip('\n') + '\n'
out.write(s.encode('utf-8'))
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
def check_return_code():
pipe.wait()
if checkCode and pipe.returncode:
raise Exception("Pipe function `%s' exited "
"with error code %d" % (command, pipe.returncode))
else:
for i in range(0):
yield i
return (x.rstrip(b'\n').decode('utf-8') for x in
chain(iter(pipe.stdout.readline, b''), check_return_code()))
return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
>>> def f(x): print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
def processPartition(iterator):
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it):
r = f(it)
try:
return iter(r)
except TypeError:
return iter([])
self.mapPartitions(func).count() # Force evaluation
def collect(self):
"""
Return a list that contains all of the elements in this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
"""
with SCCallSiteSync(self.context) as css:
port = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
return list(_load_from_socket(port, self._jrdd_deserializer))
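    # Example (illustrative, in the doctest style used elsewhere in this file):
    #   >>> sorted(sc.parallelize([3, 1, 2]).collect())
    #   [1, 2, 3]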
def reduce(self, f):
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator. Currently reduces partitions locally.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
>>> sc.parallelize([]).reduce(add)
Traceback (most recent call last):
...
ValueError: Can not reduce() empty RDD
"""
def func(iterator):
iterator = iter(iterator)
try:
initial = next(iterator)
except StopIteration:
return
yield reduce(f, iterator, initial)
vals = self.mapPartitions(func).collect()
if vals:
return reduce(f, vals)
raise ValueError("Can not reduce() empty RDD")
def treeReduce(self, f, depth=2):
"""
Reduces the elements of this RDD in a multi-level tree pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeReduce(add)
-5
>>> rdd.treeReduce(add, 1)
-5
>>> rdd.treeReduce(add, 2)
-5
>>> rdd.treeReduce(add, 5)
-5
>>> rdd.treeReduce(add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
zeroValue = None, True # Use the second entry to indicate whether this is a dummy value.
def op(x, y):
if x[1]:
return y
elif y[1]:
return x
else:
return f(x[0], y[0]), False
reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
if reduced[1]:
raise ValueError("Cannot reduce empty RDD.")
return reduced[0]
def fold(self, zeroValue, op):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
This behaves somewhat differently from fold operations implemented
for non-distributed collections in functional languages like Scala.
This fold operation may be applied to partitions individually, and then
fold those results into the final result, rather than apply the fold
to each element sequentially in some defined ordering. For functions
that are not commutative, the result may differ from that of a fold
applied to a non-distributed collection.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = op(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
def aggregate(self, zeroValue, seqOp, combOp):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
The functions C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
        a U and one operation for merging two U's.
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(combOp, vals, zeroValue)
def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
"""
Aggregates the elements of this RDD in a multi-level tree
pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeAggregate(0, add, add)
-5
>>> rdd.treeAggregate(0, add, add, 1)
-5
>>> rdd.treeAggregate(0, add, add, 2)
-5
>>> rdd.treeAggregate(0, add, add, 5)
-5
>>> rdd.treeAggregate(0, add, add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
if self.getNumPartitions() == 0:
return zeroValue
def aggregatePartition(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
partiallyAggregated = self.mapPartitions(aggregatePartition)
numPartitions = partiallyAggregated.getNumPartitions()
scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
# If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
# aggregation.
while numPartitions > scale + numPartitions / scale:
numPartitions /= scale
curNumPartitions = int(numPartitions)
def mapPartition(i, iterator):
for obj in iterator:
yield (i % curNumPartitions, obj)
partiallyAggregated = partiallyAggregated \
.mapPartitionsWithIndex(mapPartition) \
.reduceByKey(combOp, curNumPartitions) \
.values()
return partiallyAggregated.reduce(combOp)
def max(self, key=None):
"""
Find the maximum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max)
return self.reduce(lambda a, b: max(a, b, key=key))
def min(self, key=None):
"""
Find the minimum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
2.0
>>> rdd.min(key=str)
10.0
"""
if key is None:
return self.reduce(min)
return self.reduce(lambda a, b: min(a, b, key=key))
def sum(self):
"""
Add up the elements in this RDD.
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).fold(0, operator.add)
def count(self):
"""
Return the number of elements in this RDD.
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self):
"""
Return a L{StatCounter} object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter, right_counter):
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
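    # Illustrative usage: sc.parallelize([1.0, 2.0, 3.0]).stats().mean() is 2.0
    # and .count() is 3; mean(), variance(), stdev() and their sample variants
    # below simply delegate to this StatCounter.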
def histogram(self, buckets):
"""
Compute a histogram using the provided buckets. The buckets
are all open to the right except for the last which is closed.
e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
which means 1<=x<10, 10<=x<20, 20<=x<=50. And on the input of 1
and 50 we would have a histogram of 1,0,1.
If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
        this can be switched from an O(log n) insertion to O(1) per
element (where n is the number of buckets).
Buckets must be sorted, not contain any duplicates, and have
at least two elements.
If `buckets` is a number, it will generate buckets which are
evenly spaced between the minimum and maximum of the RDD. For
example, if the min value is 0 and the max is 100, given `buckets`
as 2, the resulting buckets will be [0,50) [50,100]. `buckets` must
be at least 1. An exception is raised if the RDD contains infinity.
If the elements in the RDD do not vary (max == min), a single bucket
will be used.
The return value is a tuple of buckets and histogram.
>>> rdd = sc.parallelize(range(51))
>>> rdd.histogram(2)
([0, 25, 50], [25, 26])
>>> rdd.histogram([0, 5, 25, 50])
([0, 5, 25, 50], [5, 20, 26])
>>> rdd.histogram([0, 15, 30, 45, 60]) # evenly spaced buckets
([0, 15, 30, 45, 60], [15, 15, 15, 6])
>>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
>>> rdd.histogram(("a", "b", "c"))
(('a', 'b', 'c'), [2, 2])
"""
if isinstance(buckets, int):
if buckets < 1:
raise ValueError("number of buckets must be >= 1")
# filter out non-comparable elements
def comparable(x):
if x is None:
return False
if type(x) is float and isnan(x):
return False
return True
filtered = self.filter(comparable)
# faster than stats()
def minmax(a, b):
return min(a[0], b[0]), max(a[1], b[1])
try:
minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
except TypeError as e:
if " empty " in str(e):
raise ValueError("can not generate buckets from empty RDD")
raise
if minv == maxv or buckets == 1:
return [minv, maxv], [filtered.count()]
try:
inc = (maxv - minv) / buckets
except TypeError:
raise TypeError("Can not generate buckets with non-number in RDD")
if isinf(inc):
raise ValueError("Can not generate buckets with infinite value")
# keep them as integer if possible
inc = int(inc)
if inc * buckets != maxv - minv:
inc = (maxv - minv) * 1.0 / buckets
buckets = [i * inc + minv for i in range(buckets)]
buckets.append(maxv) # fix accumulated error
even = True
elif isinstance(buckets, (list, tuple)):
if len(buckets) < 2:
raise ValueError("buckets should have more than one value")
if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
raise ValueError("can not have None or NaN in buckets")
if sorted(buckets) != list(buckets):
raise ValueError("buckets should be sorted")
if len(set(buckets)) != len(buckets):
raise ValueError("buckets should not contain duplicated values")
minv = buckets[0]
maxv = buckets[-1]
even = False
inc = None
try:
steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
except TypeError:
pass # objects in buckets do not support '-'
else:
if max(steps) - min(steps) < 1e-10: # handle precision errors
even = True
inc = (maxv - minv) / (len(buckets) - 1)
else:
raise TypeError("buckets should be a list or tuple or number(int or long)")
def histogram(iterator):
counters = [0] * len(buckets)
for i in iterator:
if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
continue
t = (int((i - minv) / inc) if even
else bisect.bisect_right(buckets, i) - 1)
counters[t] += 1
# add last two together
last = counters.pop()
counters[-1] += last
return [counters]
def mergeCounters(a, b):
return [i + j for i, j in zip(a, b)]
return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
def mean(self):
"""
Compute the mean of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean()
def variance(self):
"""
Compute the variance of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance()
def stdev(self):
"""
Compute the standard deviation of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev()
def sampleStdev(self):
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self):
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def countByValue(self):
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator):
counts = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
def top(self, num, key=None):
"""
Get the top N elements from an RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
.. note:: It returns the list sorted in descending order.
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
>>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
[4, 3, 2]
"""
def topIterator(iterator):
yield heapq.nlargest(num, iterator, key=key)
def merge(a, b):
return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
def takeOrdered(self, num, key=None):
"""
Get the N elements from an RDD ordered in ascending order or as
specified by the optional key function.
.. note:: this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def merge(a, b):
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
def take(self, num):
"""
Take the first num elements of the RDD.
        It works by first scanning one partition, and using the results from
that partition to estimate the number of additional partitions needed
to satisfy the limit.
Translated from the Scala implementation in RDD#take().
.. note:: this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
>>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
[91, 92, 93]
"""
items = []
totalParts = self.getNumPartitions()
partsScanned = 0
while len(items) < num and partsScanned < totalParts:
# The number of partitions to try in this iteration.
# It is ok for this number to be greater than totalParts because
# we actually cap it at totalParts in runJob.
numPartsToTry = 1
if partsScanned > 0:
# If we didn't find any rows after the previous iteration,
# quadruple and retry. Otherwise, interpolate the number of
# partitions we need to try, but overestimate it by 50%.
# We also cap the estimation in the end.
if len(items) == 0:
numPartsToTry = partsScanned * 4
else:
                    # the first parameter of max is >=1 whenever partsScanned >= 2
numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
left = num - len(items)
def takeUpToNumLeft(iterator):
iterator = iter(iterator)
taken = 0
while taken < left:
yield next(iterator)
taken += 1
p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
res = self.context.runJob(self, takeUpToNumLeft, p)
items += res
partsScanned += numPartsToTry
return items[:num]
def first(self):
"""
Return the first element in this RDD.
>>> sc.parallelize([2, 3, 4]).first()
2
>>> sc.parallelize([]).first()
Traceback (most recent call last):
...
ValueError: RDD is empty
"""
rs = self.take(1)
if rs:
return rs[0]
raise ValueError("RDD is empty")
def isEmpty(self):
"""
Returns true if and only if the RDD contains no elements at all.
.. note:: an RDD may be empty even when it has at least 1 partition.
>>> sc.parallelize([]).isEmpty()
True
>>> sc.parallelize([1]).isEmpty()
False
"""
return self.getNumPartitions() == 0 or len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, True)
def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop job configuration, passed in as a dict (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter, jconf)
def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, False)
def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None,
compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: (None by default)
:param compressionCodecClass: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter,
jconf, compressionCodecClass)
def saveAsSequenceFile(self, path, compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the L{org.apache.hadoop.io.Writable} types that we convert from the
RDD's key and value types. The mechanism is as follows:
1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
2. Keys and values of this Java RDD are converted to Writables and written out.
:param path: path to sequence file
:param compressionCodecClass: (None by default)
"""
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, True,
path, compressionCodecClass)
def saveAsPickleFile(self, path, batchSize=10):
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is L{pyspark.serializers.PickleSerializer}, default batch size
is 10.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
>>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
['1', '2', 'rdd', 'spark']
"""
if batchSize == 0:
ser = AutoBatchedSerializer(PickleSerializer())
else:
ser = BatchedSerializer(PickleSerializer(), batchSize)
self._reserialize(ser)._jrdd.saveAsObjectFile(path)
@ignore_unicode_prefix
def saveAsTextFile(self, path, compressionCodecClass=None):
"""
Save this RDD as a text file, using string representations of elements.
@param path: path to text file
@param compressionCodecClass: (None by default) string i.e.
"org.apache.hadoop.io.compress.GzipCodec"
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
Empty lines are tolerated when saving to text files.
>>> tempFile2 = NamedTemporaryFile(delete=True)
>>> tempFile2.close()
>>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
>>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
'\\n\\n\\nbar\\nfoo\\n'
Using compressionCodecClass
>>> tempFile3 = NamedTemporaryFile(delete=True)
>>> tempFile3.close()
>>> codec = "org.apache.hadoop.io.compress.GzipCodec"
>>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
>>> from fileinput import input, hook_compressed
>>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
>>> b''.join(result).decode('utf-8')
u'bar\\nfoo\\n'
"""
def func(split, iterator):
for x in iterator:
if not isinstance(x, (unicode, bytes)):
x = unicode(x)
if isinstance(x, unicode):
x = x.encode("utf-8")
yield x
keyed = self.mapPartitionsWithIndex(func)
keyed._bypass_serializer = True
if compressionCodecClass:
compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
else:
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
"""
Return the key-value pairs in this RDD to the master as a dictionary.
.. note:: this method should only be used if the resulting data is expected
to be small, as all the data is loaded into the driver's memory.
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self):
"""
Return an RDD with the keys of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda x: x[0])
def values(self):
"""
Return an RDD with the values of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda x: x[1])
def reduceByKey(self, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative and commutative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be partitioned with C{numPartitions} partitions, or
the default parallelism level if C{numPartitions} is not specified.
Default partitioner is hash-partition.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions, partitionFunc)
def reduceByKeyLocally(self, func):
"""
Merge the values for each key using an associative and commutative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
def reducePartition(iterator):
m = {}
for k, v in iterator:
m[k] = func(m[k], v) if k in m else v
yield m
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] = func(m1[k], v) if k in m1 else v
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(self, other, numPartitions=None):
"""
Return an RDD containing all pairs of elements with matching keys in
C{self} and C{other}.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in C{self} and (k, v2) is in C{other}.
Performs a hash join across the cluster.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
"""
Perform a left outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of C{self} and C{other}.
For each element (k, w) in C{other}, the resulting RDD will either
        contain all pairs (k, (v, w)) for v in C{self}, or the pair (k, (None, w))
if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(self, other, numPartitions=None):
"""
        Perform a full outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Similarly, for each element (k, w) in C{other}, the resulting RDD will
either contain all pairs (k, (v, w)) for v in C{self}, or the pair
(k, (None, w)) if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("c", 8)])
>>> sorted(x.fullOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
"""
return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
    # portable_hash is used as the default because the builtin hash of None differs
    # across machines.
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
"""
Return a copy of the RDD partitioned using the specified partitioner.
>>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
>>> sets = pairs.partitionBy(2).glom().collect()
>>> len(set(sets[0]).intersection(set(sets[1])))
0
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
partitioner = Partitioner(numPartitions, partitionFunc)
if self.partitioner == partitioner:
return self
# Transferring O(n) objects to Java is too expensive.
# Instead, we'll form the hash buckets in Python,
# transferring O(numPartitions) objects to Java.
# Each object is a (splitNumber, [objects]) pair.
        # To avoid excessively large objects, the objects are
# grouped into chunks.
outputSerializer = self.ctx._unbatched_serializer
limit = (_parse_memory(self.ctx._conf.get(
"spark.python.worker.memory", "512m")) / 2)
def add_shuffle_key(split, iterator):
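            # Emits an alternating stream: a packed partition id, then a pickled
            # chunk of the (k, v) pairs routed to that partition. Chunk sizes are
            # adapted below so each serialized chunk stays roughly within the
            # configured worker memory budget.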
buckets = defaultdict(list)
c, batch = 0, min(10 * numPartitions, 1000)
for k, v in iterator:
buckets[partitionFunc(k) % numPartitions].append((k, v))
c += 1
# check used memory and avg size of chunk of objects
if (c % 1000 == 0 and get_used_memory() > limit
or c > batch):
n, size = len(buckets), 0
for split in list(buckets.keys()):
yield pack_long(split)
d = outputSerializer.dumps(buckets[split])
del buckets[split]
yield d
size += len(d)
avg = int(size / n) >> 20
# let 1M < avg < 10M
if avg < 1:
batch *= 1.5
elif avg > 10:
batch = max(int(batch / 1.5), 1)
c = 0
for split, items in buckets.items():
yield pack_long(split)
yield outputSerializer.dumps(items)
keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
keyed._bypass_serializer = True
with SCCallSiteSync(self.context) as css:
pairRDD = self.ctx._jvm.PairwiseRDD(
keyed._jrdd.rdd()).asJavaPairRDD()
jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
id(partitionFunc))
jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
rdd.partitioner = partitioner
return rdd
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
numPartitions=None, partitionFunc=portable_hash):
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C.
Users provide three functions:
- C{createCombiner}, which turns a V into a C (e.g., creates
a one-element list)
- C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
a list)
- C{mergeCombiners}, to combine two C's into a single one (e.g., merges
the lists)
To avoid memory allocation, both mergeValue and mergeCombiners are allowed to
modify and return their first argument instead of creating a new C.
In addition, users can control the partitioning of the output RDD.
.. note:: V and C can be different -- for example, one might group an RDD of type
(Int, Int) into an RDD of type (Int, List[Int]).
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
>>> def to_list(a):
... return [a]
...
>>> def append(a, b):
... a.append(b)
... return a
...
>>> def extend(a, b):
... a.extend(b)
... return a
...
>>> sorted(x.combineByKey(to_list, append, extend).collect())
[('a', [1, 2]), ('b', [1])]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
serializer = self.ctx.serializer
memory = self._memory_limit()
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
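        # Combine map-side first, shuffle the partially combined pairs, then merge
        # the combiners reduce-side; ExternalMerger spills to disk if the merged
        # data outgrows the worker memory limit.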
def combineLocally(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def _mergeCombiners(iterator):
merger = ExternalMerger(agg, memory, serializer)
merger.mergeCombiners(iterator)
return merger.items()
return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None,
partitionFunc=portable_hash):
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
        a U and one operation for merging two U's. The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc)
def foldByKey(self, zeroValue, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
        (e.g., 0 for addition, or 1 for multiplication).
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> sorted(rdd.foldByKey(0, add).collect())
[('a', 2), ('b', 1)]
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(lambda v: func(createZero(), v), func, func, numPartitions,
partitionFunc)
def _memory_limit(self):
return _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None, partitionFunc=portable_hash):
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD with numPartitions partitions.
.. note:: If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey or aggregateByKey will
provide much better performance.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.groupByKey().mapValues(len).collect())
[('a', 2), ('b', 1)]
>>> sorted(rdd.groupByKey().mapValues(list).collect())
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x):
return [x]
def mergeValue(xs, x):
xs.append(x)
return xs
def mergeCombiners(a, b):
a.extend(b)
return a
memory = self._memory_limit()
serializer = self._jrdd_deserializer
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combine(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def groupByKey(it):
merger = ExternalGroupBy(agg, memory, serializer)
merger.mergeCombiners(it)
return merger.items()
return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
map_values_fn = lambda kv: (kv[0], f(kv[1]))
return self.map(map_values_fn, preservesPartitioning=True)
def groupWith(self, other, *others):
"""
Alias for cogroup but with support for multiple RDDs.
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
    # TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
"""
For each key k in C{self} or C{other}, return a resulting RDD that
contains a tuple with the list of values for that key in C{self} as
well as C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def sampleByKey(self, withReplacement, fractions, seed=None):
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in C{self} that has no pair with matching
key in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func(pair):
key, (val1, val2) = pair
return val1 and not val2
return self.cogroup(other, numPartitions).filter(filter_func).flatMapValues(lambda x: x[0])
def subtract(self, other, numPartitions=None):
"""
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying C{f}.
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
[(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
"""
return self.map(lambda x: (f(x), x))
def repartition(self, numPartitions):
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
return self.coalesce(numPartitions, shuffle=True)
def coalesce(self, numPartitions, shuffle=False):
"""
Return a new RDD that is reduced into `numPartitions` partitions.
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
if shuffle:
            # Decrease the batch size in order to distribute the elements evenly across output
# partitions. Otherwise, repartition will possibly produce highly skewed partitions.
batchSize = min(10, self.ctx._batchSize or 1024)
ser = BatchedSerializer(PickleSerializer(), batchSize)
selfCopy = self._reserialize(ser)
jrdd_deserializer = selfCopy._jrdd_deserializer
jrdd = selfCopy._jrdd.coalesce(numPartitions, shuffle)
else:
jrdd_deserializer = self._jrdd_deserializer
jrdd = self._jrdd.coalesce(numPartitions, shuffle)
return RDD(jrdd, self.ctx, jrdd_deserializer)
def zip(self, other):
"""
Zips this RDD with another one, returning key-value pairs with the
        first element in each RDD, second element in each RDD, etc. Assumes
that the two RDDs have the same number of partitions and the same
number of elements in each partition (e.g. one was made through
a map on the other).
>>> x = sc.parallelize(range(0,5))
>>> y = sc.parallelize(range(1000, 1005))
>>> x.zip(y).collect()
[(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
"""
def get_batch_size(ser):
if isinstance(ser, BatchedSerializer):
return ser.batchSize
return 1 # not batched
def batch_as(rdd, batchSize):
return rdd._reserialize(BatchedSerializer(PickleSerializer(), batchSize))
my_batch = get_batch_size(self._jrdd_deserializer)
other_batch = get_batch_size(other._jrdd_deserializer)
if my_batch != other_batch or not my_batch:
# use the smallest batchSize for both of them
batchSize = min(my_batch, other_batch)
if batchSize <= 0:
# auto batched or unlimited
batchSize = 100
other = batch_as(other, batchSize)
self = batch_as(self, batchSize)
if self.getNumPartitions() != other.getNumPartitions():
raise ValueError("Can only zip with RDD which has the same number of partitions")
        # The JVM will raise an exception if corresponding partitions have
        # different numbers of items.
pairRDD = self._jrdd.zip(other._jrdd)
deserializer = PairDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self):
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition receives the largest index.
This method needs to trigger a spark job when this RDD contains
        more than one partition.
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
[('a', 0), ('b', 1), ('c', 2), ('d', 3)]
"""
starts = [0]
if self.getNumPartitions() > 1:
nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
for i in range(len(nums) - 1):
starts.append(starts[-1] + nums[i])
def func(k, it):
for i, v in enumerate(it, starts[k]):
yield v, i
return self.mapPartitionsWithIndex(func)
def zipWithUniqueId(self):
"""
Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions. So there may exist gaps, but this
method won't trigger a spark job, which is different from
        L{zipWithIndex}.
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
"""
n = self.getNumPartitions()
def func(k, it):
for i, v in enumerate(it):
yield v, i * n + k
return self.mapPartitionsWithIndex(func)
def name(self):
"""
Return the name of this RDD.
"""
n = self._jrdd.name()
if n:
return n
@ignore_unicode_prefix
def setName(self, name):
"""
Assign a name to this RDD.
>>> rdd1 = sc.parallelize([1, 2])
>>> rdd1.setName('RDD1').name()
u'RDD1'
"""
self._jrdd.setName(name)
return self
def toDebugString(self):
"""
A description of this RDD and its recursive dependencies for debugging.
"""
debug_string = self._jrdd.toDebugString()
if debug_string:
return debug_string.encode('utf-8')
def getStorageLevel(self):
"""
Get the RDD's current storage level.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.getStorageLevel()
StorageLevel(False, False, False, False, 1)
>>> print(rdd1.getStorageLevel())
Serialized 1x Replicated
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
def _defaultReducePartitions(self):
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
def lookup(self, key):
"""
Return the list of values in the RDD for key `key`. This operation
is done efficiently if the RDD has a known partitioner by only
searching the partition that the key maps to.
>>> l = range(1000)
>>> rdd = sc.parallelize(zip(l, l), 10)
>>> rdd.lookup(42) # slow
[42]
>>> sorted = rdd.sortByKey()
>>> sorted.lookup(42) # fast
[42]
>>> sorted.lookup(1024)
[]
>>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey()
>>> list(rdd2.lookup(('a', 'b'))[0])
['c']
"""
values = self.filter(lambda kv: kv[0] == key).values()
if self.partitioner is not None:
return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)])
return values.collect()
def _to_java_object_rdd(self):
""" Return a JavaRDD of Object by unpickling
        It will convert each Python object into a Java object via Pyrolite, whether the
        RDD is serialized in batch or not.
"""
rdd = self._pickled()
return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1000, 1.0)
1000
"""
drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))])
return int(drdd.sumApprox(timeout, confidence))
def sumApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the sum within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000))
>>> abs(rdd.sumApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the mean within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000)) / 1000.0
>>> abs(rdd.meanApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.map(float)._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self, relativeSD=0.05):
"""
.. note:: Experimental
Return approximate number of distinct elements in the RDD.
The algorithm used is based on streamlib's implementation of
`"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Cardinality Estimation Algorithm", available here
<http://dx.doi.org/10.1145/2452376.2452456>`_.
:param relativeSD: Relative accuracy. Smaller values create
counters that require more space.
It must be greater than 0.000017.
>>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
>>> 900 < n < 1100
True
>>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
>>> 16 < n < 24
True
"""
if relativeSD < 0.000017:
raise ValueError("relativeSD should be greater than 0.000017")
# the hash space in Java is 2^32
hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self):
"""
Return an iterator that contains all of the elements in this RDD.
The iterator will consume as much memory as the largest partition in this RDD.
>>> rdd = sc.parallelize(range(10))
>>> [x for x in rdd.toLocalIterator()]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
with SCCallSiteSync(self.context) as css:
port = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(self._jrdd.rdd())
return _load_from_socket(port, self._jrdd_deserializer)
def _prepare_for_python_RDD(sc, command):
# the serialized command will be compressed by broadcast
ser = CloudPickleSerializer()
pickled_command = ser.dumps(command)
if len(pickled_command) > (1 << 20): # 1M
        # The broadcast will have the same life cycle as the created PythonRDD
broadcast = sc.broadcast(pickled_command)
pickled_command = ser.dumps(broadcast)
broadcast_vars = [x._jbroadcast for x in sc._pickled_broadcast_vars]
sc._pickled_broadcast_vars.clear()
return pickled_command, broadcast_vars, sc.environment, sc._python_includes
def _wrap_function(sc, func, deserializer, serializer, profiler=None):
assert deserializer, "deserializer should not be empty"
assert serializer, "serializer should not be empty"
command = (func, profiler, deserializer, serializer)
pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
return sc._jvm.PythonFunction(bytearray(pickled_command), env, includes, sc.pythonExec,
sc.pythonVer, broadcast_vars, sc._javaAccumulator)
class PipelinedRDD(RDD):
"""
Pipelined maps:
>>> rdd = sc.parallelize([1, 2, 3, 4])
>>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
>>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
Pipelined reduces:
>>> from operator import add
>>> rdd.map(lambda x: 2 * x).reduce(add)
20
>>> rdd.flatMap(lambda x: [x, x]).reduce(add)
20
"""
def __init__(self, prev, func, preservesPartitioning=False):
if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
# This transformation is the first in its stage:
self.func = func
self.preservesPartitioning = preservesPartitioning
self._prev_jrdd = prev._jrdd
self._prev_jrdd_deserializer = prev._jrdd_deserializer
else:
prev_func = prev.func
def pipeline_func(split, iterator):
return func(split, prev_func(split, iterator))
self.func = pipeline_func
self.preservesPartitioning = \
prev.preservesPartitioning and preservesPartitioning
self._prev_jrdd = prev._prev_jrdd # maintain the pipeline
self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
self.is_cached = False
self.is_checkpointed = False
self.ctx = prev.ctx
self.prev = prev
self._jrdd_val = None
self._id = None
self._jrdd_deserializer = self.ctx.serializer
self._bypass_serializer = False
self.partitioner = prev.partitioner if self.preservesPartitioning else None
def getNumPartitions(self):
return self._prev_jrdd.partitions().size()
@property
def _jrdd(self):
if self._jrdd_val:
return self._jrdd_val
if self._bypass_serializer:
self._jrdd_deserializer = NoOpSerializer()
if self.ctx.profiler_collector:
profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
else:
profiler = None
wrapped_func = _wrap_function(self.ctx, self.func, self._prev_jrdd_deserializer,
self._jrdd_deserializer, profiler)
python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(), wrapped_func,
self.preservesPartitioning)
self._jrdd_val = python_rdd.asJavaRDD()
if profiler:
self._id = self._jrdd_val.id()
self.ctx.profiler_collector.add_profiler(self._id, profiler)
return self._jrdd_val
def id(self):
if self._id is None:
self._id = self._jrdd.id()
return self._id
def _is_pipelinable(self):
return not (self.is_cached or self.is_checkpointed)
def _test():
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs['sc'] = SparkContext('local[4]', 'PythonTest')
(failure_count, test_count) = doctest.testmod(
globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 | -443,223,749,307,125,300 | 37.48503 | 100 | 0.576277 | false |
54lihaoxin/leetcode_python | src/WordSearch/test_suite.py | 1 | 4757 |
from CommonClasses import *
from solution import Solution
class TestSuite:
def run(self):
self.test000()
self.test001()
self.test002()
self.test003()
# self.test004()
def test000(self):
print 'test 000\n'
board = ['ABCE',
'SFCS',
'ADEE']
word = 'ABCCED'
startTime = time.clock()
r = Solution().exist(board, word)
timeUsed = time.clock() - startTime
print ' input:\t{0}, {1}'.format(board, word)
# print ' expect:\t', ?
print ' output:\t{0}'.format(r)
print ' time used:\t{0:.6f}'.format(timeUsed)
print
def test001(self):
print 'test 001\n'
board = ['ABCE',
'SFCS',
'ADEE']
word = 'SEE'
startTime = time.clock()
r = Solution().exist(board, word)
timeUsed = time.clock() - startTime
print ' input:\t{0}, {1}'.format(board, word)
# print ' expect:\t', ?
print ' output:\t{0}'.format(r)
print ' time used:\t{0:.6f}'.format(timeUsed)
print
def test002(self):
print 'test 002\n'
board = ['ABCE',
'SFCS',
'ADEE']
word = 'ABCB'
startTime = time.clock()
r = Solution().exist(board, word)
timeUsed = time.clock() - startTime
print ' input:\t{0}, {1}'.format(board, word)
# print ' expect:\t', ?
print ' output:\t{0}'.format(r)
print ' time used:\t{0:.6f}'.format(timeUsed)
print
def test003(self):
print 'test 003\n'
board = ['aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaab']
word = 'baaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
startTime = time.clock()
r = Solution().exist(board, word)
timeUsed = time.clock() - startTime
print ' input:\t{0}, {1}'.format(board, word)
# print ' expect:\t', ?
print ' output:\t{0}'.format(r)
print ' time used:\t{0:.6f}'.format(timeUsed)
print
def main(argv):
TestSuite().run()
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 | -5,119,521,927,238,386,000 | 37.658333 | 917 | 0.59218 | false |
jawaidss/halalar-web | halalar/api/models.py | 1 | 5956 | from datetime import datetime, timedelta
from django_countries.fields import CountryField
import hashlib
import mailchimp
import os
import random
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.humanize.templatetags.humanize import naturaltime
from django.contrib.sites.models import Site
from django.core.mail import EmailMessage
from django.core.urlresolvers import reverse
from django.core.validators import MinValueValidator
from django.db import models
MINIMUM_AGE = 18
def _random_token(username):
salt = hashlib.sha1(str(random.random())).hexdigest()[:5]
return hashlib.sha1(salt + username).hexdigest()
def _profile_photo_upload_to(instance, filename):
return os.path.join('profiles', 'photos', '%s%sjpg' % (instance.user.username, os.extsep))
class Profile(models.Model):
MALE = 'male'
FEMALE = 'female'
GENDER_CHOICES = (
(MALE, 'Male'),
(FEMALE, 'Female'),
)
user = models.OneToOneField(User)
token = models.CharField(max_length=40, unique=True, editable=False)
photo = models.ImageField(upload_to=_profile_photo_upload_to, null=True, blank=True)
age = models.SmallIntegerField(validators=[MinValueValidator(MINIMUM_AGE)])
gender = models.CharField(max_length=10, choices=GENDER_CHOICES)
city = models.CharField(max_length=100)
country = CountryField(default='US')
religion = models.TextField()
family = models.TextField()
selfx = models.TextField('self')
community = models.TextField()
career = models.TextField()
class Meta:
ordering = ['user']
def __unicode__(self):
return self.user.username
def save(self, **kwargs):
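        # Generate the random token once, when the profile is first created and a
        # user is already attached, then defer to the normal model save.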
if self.id is None and not self.token and self.user_id is not None:
self.token = _random_token(self.user.username)
super(Profile, self).save(**kwargs)
def serialize(self, include_email=True):
data = {'username': self.user.username,
'photo': self.photo and self.photo.url or None,
'age': self.age,
'gender': self.gender,
'city': self.city,
'country': self.country.code,
'religion': self.religion,
'family': self.family,
'self': self.selfx,
'community': self.community,
'career': self.career}
if include_email:
data['email'] = self.user.email
return data
def send_delayed_welcome_email(self):
site = Site.objects.get_current()
subject = site.name
message = '''Salaam,
I'm Sikander, the creator of %s. Thanks for signing up! I wanted to reach out to see if you needed any help getting started.
Best,
--
Sikander Chowhan
www.%s''' % (site.name, site.domain)
from_email = 'Sikander Chowhan <sikander@%s>' % site.domain
to = [self.user.email]
email = EmailMessage(subject, message, from_email, to)
email.send_at = datetime.now() + timedelta(days=1)
email.send()
def send_signup_notification_email(self):
site = Site.objects.get_current()
subject = self.user.username
message = '''Username: %(username)s
Email: %(email)s
Age: %(age)s
Gender: %(gender)s
City: %(city)s
Country: %(country)s
Religion: %(religion)s
Family: %(family)s
Self: %(self)s
Community: %(community)s
Career: %(career)s
https://%(domain)s%(user_url)s
https://%(domain)s%(profile_url)s''' % {'username': self.user.username,
'email': self.user.email,
'age': self.age,
'gender': self.get_gender_display(),
'city': self.city,
'country': self.country.name,
'religion': self.religion,
'family': self.family,
'self': self.selfx,
'community': self.community,
'career': self.career,
'domain': site.domain,
'user_url': reverse('admin:auth_user_change', args=[self.user.pk]),
'profile_url': reverse('admin:api_profile_change', args=[self.pk])}
from_email = 'sikander@%s' % site.domain
to = [settings.ASANA_EMAIL]
email = EmailMessage(subject, message, from_email, to)
if self.photo:
self.photo.open()
email.attach(os.path.basename(self.photo.url), self.photo.read())
self.photo.close()
email.send()
def subscribe_to_mailchimp_list(self):
m = mailchimp.Mailchimp()
m.lists.subscribe(settings.MAILCHIMP_LIST_ID,
{'email': self.user.email},
double_optin=False,
update_existing=True)
class Message(models.Model):
sender = models.ForeignKey(Profile, related_name='sent')
recipient = models.ForeignKey(Profile, related_name='received')
timestamp = models.DateTimeField(auto_now_add=True)
body = models.TextField()
class Meta:
ordering = ['timestamp']
get_latest_by = 'timestamp'
def __unicode__(self):
return self.body
def serialize(self):
return {'sender': self.sender.user.username,
'recipient': self.recipient.user.username,
'timestamp': naturaltime(self.timestamp),
'body': self.body}
def send_push_notification(self):
message = 'New message from %s' % self.sender.user.username
self.recipient.user.apnsdevice_set.all().send_message(message, badge=1)
self.recipient.user.gcmdevice_set.all().send_message(message) | mit | -1,104,692,743,606,615,800 | 32.846591 | 124 | 0.582942 | false |
misko/neon | neon/data/image.py | 2 | 13051 | # ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import ctypes as ct
from multiprocessing import Process, Semaphore
from multiprocessing.sharedctypes import Array
import logging
import numpy as np
import os
from PIL import Image as PILImage
from neon import NervanaObject
from neon.util.persist import load_obj
logger = logging.getLogger(__name__)
class Image(object):
def __init__(self):
raise NotImplementedError()
def save_pbuf(pbuf, imshape, jpgname):
"""
Takes a row-wise pixel buffer, reshapes it into the correct image size,
    re-orders the RGB channels, and saves it out as a JPEG.
    This is purely for debugging.
"""
im = PILImage.fromarray(pbuf.reshape(imshape).transpose(1, 2, 0)[:, :, [2, 1, 0]])
im.save(jpgname)
class Msg(object):
"""
Data structure encapsulating a message.
"""
def __init__(self, size):
self.s_e = Semaphore(1)
self.s_f = Semaphore(0)
self.s_buf = Array(ct.c_ubyte, size)
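    # send/recv form a single-slot producer/consumer handoff: s_e signals the
    # slot is empty (writer may proceed), s_f signals it is full (reader may
    # proceed), and s_buf is the shared byte buffer passed to the callback.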
def send(self, func):
self.s_e.acquire()
self.s_buf.acquire()
send_result = func(self.s_buf._obj)
self.s_buf.release()
self.s_f.release()
return send_result
def recv(self, func):
self.s_f.acquire()
self.s_buf.acquire()
recv_result = func(self.s_buf._obj)
self.s_buf.release()
self.s_e.release()
return recv_result
class ImgEndpoint(NervanaObject):
"""
Parent class that sets up all common dataset config options that the client
and server will share
"""
SERVER_KILL = 255
SERVER_RESET = 254
def __init__(self, repo_dir, inner_size,
do_transforms=True, rgb=True, multiview=False,
set_name='train', subset_pct=100):
assert(subset_pct > 0 and subset_pct <= 100), "subset_pct must be between 0 and 100"
assert(set_name in ['train', 'validation'])
self.set_name = set_name if set_name == 'train' else 'val'
self.repo_dir = repo_dir
self.inner_size = inner_size
self.minibatch_size = self.be.bsz
# Load from repo dataset_cache:
try:
cache_filepath = os.path.join(repo_dir, 'dataset_cache.pkl')
dataset_cache = load_obj(cache_filepath)
except IOError:
raise IOError("Cannot find '%s/dataset_cache.pkl'. Run batch_writer to "
"preprocess the data and create batch files for imageset"
% (repo_dir))
        # Should have the following defined:
req_attributes = ['global_mean', 'nclass', 'val_start', 'ntrain', 'label_names',
'train_nrec', 'img_size', 'nval', 'train_start', 'val_nrec',
'label_dict', 'batch_prefix']
for r in req_attributes:
if r not in dataset_cache:
raise ValueError("Dataset cache missing required attribute %s" % (r))
self.__dict__.update(dataset_cache)
self.filename = os.path.join(repo_dir, self.batch_prefix)
self.center = False if do_transforms else True
self.flip = True if do_transforms else False
self.rgb = rgb
self.multiview = multiview
self.label = 'l_id'
if isinstance(self.nclass, dict):
self.nclass = self.nclass[self.label]
# Rough percentage
self.recs_available = getattr(self, self.set_name + '_nrec')
self.macro_start = getattr(self, self.set_name + '_start')
self.macros_available = getattr(self, 'n' + self.set_name)
self.ndata = int(self.recs_available * subset_pct / 100.)
self.start = 0
@property
def nbatches(self):
return -((self.start - self.ndata) // self.be.bsz) # ceildiv
def reset(self):
pass
class ImgMaster(ImgEndpoint):
"""
This is just a client that starts its own server process
"""
def __init__(self, repo_dir, inner_size, do_transforms=True, rgb=True,
multiview=False, set_name='train', subset_pct=100, dtype=np.float32):
super(ImgMaster, self).__init__(repo_dir, inner_size, do_transforms,
rgb, multiview, set_name, subset_pct)
# Create the communication buffers
# We have two response buffers b/c we are double buffering
npix = self.inner_size * self.inner_size * 3
ishape = (3, self.inner_size, self.inner_size)
origshape = (3, self.img_size, self.img_size)
mbsz = self.be.bsz
self.response = [Msg(npix * mbsz + 4*mbsz) for i in range(2)]
self.request = Msg(1)
self.active_idx = 0
self.jpg_idx = 0
self.server_args = [repo_dir, inner_size, do_transforms, rgb,
multiview, set_name, subset_pct]
self.server_args.append((self.request, self.response))
# For debugging, we can just make a local copy
self.local_img = np.empty((mbsz, npix), dtype=np.uint8)
self.local_lbl = np.empty((mbsz,), dtype=np.int32)
self.dev_X = self.be.iobuf(npix, dtype=dtype)
self.dev_X.lshape = ishape
self.dev_XT = self.be.empty(self.dev_X.shape[::-1], dtype=np.uint8)
self.dev_lbls = self.be.iobuf(1, dtype=np.int32)
self.dev_Y = self.be.iobuf(self.nclass, dtype=dtype)
# Crop the mean according to the inner_size
crop_start = (self.img_size - self.inner_size) / 2
crop_range = slice(crop_start, crop_start + self.inner_size)
if self.global_mean is not None:
self.mean_crop = self.global_mean.reshape(origshape)[:, crop_range, crop_range]
self.dev_mean = self.be.array(self.mean_crop.reshape(npix, 1), dtype=dtype)
else:
self.dev_mean = 127. # Just center uint8 values if missing global mean
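        # The closures below capture mbsz/npix: each response buffer holds
        # npix*mbsz uint8 pixels followed by mbsz int32 labels; local_copy and
        # device_copy unpack it into host or device arrays, and jpgview dumps the
        # first image of the batch for visual inspection.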
def local_copy(bufobj):
self.local_img[:] = np.frombuffer(bufobj, dtype=np.uint8,
count=npix*mbsz).reshape(mbsz, npix)
self.local_lbl[:] = np.frombuffer(bufobj, dtype=np.int32, count=mbsz,
offset=npix*mbsz)
def device_copy(bufobj):
self.dev_XT.set(np.frombuffer(bufobj, dtype=np.uint8,
count=npix*mbsz).reshape(mbsz, npix))
self.dev_lbls.set(np.frombuffer(bufobj, dtype=np.int32, count=mbsz,
offset=npix*mbsz).reshape(1, mbsz))
def jpgview():
outname = 'tmpdir/outv2_' + str(self.jpg_idx) + '_' + str(self.local_lbl[0]) + '.jpg'
save_pbuf(self.local_img[0], ishape, outname)
self.local_copy = local_copy
self.device_copy = device_copy
self.dump_jpg = jpgview
def send_request(self, code):
def set_code(bufobj):
np.frombuffer(bufobj, dtype=np.uint8, count=1)[:] = code
self.request.send(set_code)
def recv_response(self, callback):
"""
callback is a function that will be executed while we have access
        to the shared block of memory.
        We alternate between the two response buffers according to self.active_idx.
"""
self.response[self.active_idx].recv(callback)
def init_batch_provider(self):
"""
Launches the server as a separate process and sends an initial request
"""
def server_start_cmd():
d = ImgServer(*self.server_args)
d.run_server()
p = Process(target=server_start_cmd)
p.start()
self.active_idx = 0
self.send_request(self.active_idx)
def exit_batch_provider(self):
"""
Sends kill signal to server
"""
self.send_request(self.SERVER_KILL)
def reset(self):
"""
sends request to restart data from index 0
"""
if self.start == 0:
return
# clear the old request
self.recv_response(self.device_copy)
# Reset server state
self.send_request(self.SERVER_RESET)
# Reset local state
self.start = 0
self.active_idx = 0
self.send_request(self.active_idx)
def next(self):
self.recv_response(self.local_copy)
self.active_idx = 1 if self.active_idx == 0 else 0
self.send_request(self.active_idx)
self.dump_jpg()
def __iter__(self):
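        # Double-buffered iteration: consume the response for the active buffer,
        # immediately request the other one, then subtract the mean and one-hot
        # encode the labels on the device.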
for start in range(self.start, self.ndata, self.be.bsz):
end = min(start + self.be.bsz, self.ndata)
if end == self.ndata:
self.start = self.be.bsz - (self.ndata - start)
self.idx = start
self.recv_response(self.device_copy)
self.active_idx = 1 if self.active_idx == 0 else 0
self.send_request(self.active_idx)
# Separating these steps to avoid possible casting error
self.dev_X[:] = self.dev_XT.transpose()
self.dev_X[:] = self.dev_X - self.dev_mean
# Expanding out the labels on device
self.dev_Y[:] = self.be.onehot(self.dev_lbls, axis=0)
yield self.dev_X, self.dev_Y
class ImgServer(ImgEndpoint):
"""
    This class interfaces with the C library that does the actual decoding.
"""
def __init__(self, repo_dir, inner_size, do_transforms=True, rgb=True,
multiview=False, set_name='train', subset_pct=100, shared_objs=None):
super(ImgServer, self).__init__(repo_dir, inner_size, do_transforms,
rgb, multiview, set_name, subset_pct)
assert(shared_objs is not None)
libpath = os.path.dirname(os.path.realpath(__file__))
try:
self._i1klib = ct.cdll.LoadLibrary(os.path.join(libpath,
'imageset_decoder.so'))
except:
logger.error("Unable to load imageset_decoder.so. Ensure that "
"this file has been compiled")
(self.request, self.response) = shared_objs
self.worker = self._i1klib.create_data_worker(ct.c_int(self.img_size),
ct.c_int(self.inner_size),
ct.c_bool(self.center),
ct.c_bool(self.flip),
ct.c_bool(self.rgb),
ct.c_bool(self.multiview),
ct.c_int(self.minibatch_size),
ct.c_char_p(self.filename),
ct.c_int(self.macro_start),
ct.c_uint(self.ndata))
def decode_minibatch(bufobj):
self._i1klib.process_next_minibatch(self.worker, ct.POINTER(ct.c_ubyte)(bufobj))
self.decode_minibatch = decode_minibatch
def recv_request(self):
def read_code(bufobj):
return np.frombuffer(bufobj, dtype=np.uint8, count=1)[0]
return self.request.recv(read_code)
def send_response(self, active_idx):
self.response[active_idx].send(self.decode_minibatch)
def run_server(self):
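        # Serve requests until told to stop: codes 0/1 select which response
        # buffer to fill with the next minibatch, SERVER_RESET rewinds the C
        # worker, and any other code shuts the server down.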
while(True):
active_idx = self.recv_request()
if active_idx in (0, 1):
self.send_response(active_idx)
elif active_idx == self.SERVER_RESET:
self._i1klib.reset(self.worker)
else:
print("Server Exiting")
break
if __name__ == "__main__":
from timeit import default_timer
from neon.backends import gen_backend
from neon.util.argparser import NeonArgparser
parser = NeonArgparser(__doc__)
args = parser.parse_args()
be = gen_backend(backend='gpu', rng_seed=100)
NervanaObject.be.bsz = 128
master = ImgMaster(repo_dir=args.data_dir, set_name='train', inner_size=224, subset_pct=10)
master.init_batch_provider()
t0 = default_timer()
total_time = 0
for epoch in range(3):
for x, t in master:
print "****", epoch, master.start, master.idx, master.ndata
print t.get().argmax(axis=0)[:17]
master.send_request(master.SERVER_KILL)
| apache-2.0 | -4,210,209,415,006,743,600 | 36.610951 | 97 | 0.564401 | false |
Kungbib/CIPAC | webapp/kortkatalogen/liljeson/migrations/0001_initial.py | 1 | 3484 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-06-29 12:44
from __future__ import unicode_literals
import django.contrib.postgres.indexes
import django.contrib.postgres.search
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Box',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('folder_name', models.CharField(help_text='Filkatalog på disk där denna lådas filer ligger', max_length=255, unique=True, verbose_name='Katalognamn')),
('sequence_number', models.IntegerField(db_index=True)),
('label', models.CharField(db_index=True, max_length=255, verbose_name='Etikett')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'Låda',
'verbose_name_plural': 'Lådor',
'ordering': ['sequence_number'],
'abstract': False,
},
),
migrations.CreateModel(
name='Card',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, help_text='Rubriken som visas överst på en kortsida', max_length=255, verbose_name='Kortnamn')),
('filename', models.CharField(db_index=True, help_text='Filnamnet för bildfilen', max_length=255, verbose_name='Filnamn')),
('filename_back', models.CharField(db_index=True, help_text='Filnamnet för bildfilen av baksidan', max_length=255, verbose_name='Filnamn baksida')),
('ocr_text', models.TextField(blank=True, help_text='Automatiskt OCR-tolkad text från kortet.')),
('ocr_text_back', models.TextField(blank=True, help_text='Automatiskt OCR-tolkad text från kortets baksida.')),
('letter', models.CharField(blank=True, db_index=True, help_text='Anges för första kortet för att dela upp katalogen alfabetiskt.', max_length=1, null=True, verbose_name='Indexbokstav')),
('sequence_number', models.IntegerField(db_index=True, verbose_name='Sekvensnummer i låda')),
('catalog_sequence_number', models.IntegerField(blank=True, help_text='Globalt katalognummer som anger kortets plats i katalogen. Används även som identifierare.', null=True, verbose_name='Kortnummer')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('comment', models.TextField(blank=True, help_text='Visas ej för besökare.', null=True, verbose_name='Intern kommentar')),
('search_index', django.contrib.postgres.search.SearchVectorField(null=True)),
('box', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cards', to='liljeson.Box', verbose_name='kort')),
],
),
migrations.AddIndex(
model_name='card',
index=django.contrib.postgres.indexes.GinIndex(fields=['search_index'], name='liljeson_ca_search__9b97bf_gin'),
),
]
| apache-2.0 | -1,620,285,583,742,835,200 | 57.728814 | 219 | 0.624531 | false |
ibm-cds-labs/simple-data-pipe-connector-flightstats | pixiedust_flightpredict/pixiedust_flightpredict/vizFeatures.py | 1 | 2619 | # -------------------------------------------------------------------------------
# Copyright IBM Corp. 2016
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------
import pixiedust_flightpredict.training as training
from pixiedust.display.chart.renderers.baseChartDisplay import BaseChartDisplay
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from pyspark.sql import Row
from functools import reduce
import pixiedust
myLogger = pixiedust.getLogger(__name__)
def makeList(l):
return l if isinstance(l, list) else [l]
class VizualizeFeatures(BaseChartDisplay):
def doRender(self, handlerId):
f1="departureWeather.temp"
f2="arrivalWeather.temp"
f1=f1.split(".")
f2=f2.split(".")
handler=training.getTrainingHandler()
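        # Build (class, [(f1, f2), ...]) pairs: each record is labelled by the
        # training handler, and the two feature values are pulled out of the Row
        # by walking the dotted attribute paths in f1/f2.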
darr=self.entity.rdd.map(lambda s: ( handler.computeClassification(s),(\
reduce(lambda x,y: getattr(x,y) if isinstance(x, Row) else getattr(getattr(s,x),y), f1) if len(f1)>1 else getattr(s,f1[0]),\
reduce(lambda x,y: getattr(x,y) if isinstance(x, Row) else getattr(getattr(s,x),y), f2) if len(f2)>1 else getattr(s,f2[0])\
)))\
.reduceByKey(lambda x,y: makeList(x) + makeList(y))\
.collect()
numClasses=handler.numClasses()
citer=iter(cm.rainbow(np.linspace(0, 1, numClasses)))
colors = [next(citer) for i in range(0, numClasses)]
legends= [handler.getClassLabel(i) for i in range(0,numClasses)]
sets=[]
fig, ax = plt.subplots(figsize=(12,8))
for t in darr:
sets.append((ax.scatter([x[0] for x in t[1]],[x[1] for x in t[1]],color=colors[t[0]],alpha=0.5),legends[t[0]]))
ax.set_ylabel("Departure Airport Temp")
ax.set_xlabel("Arrival Airport Temp")
ax.legend([x[0] for x in sets],
[x[1] for x in sets],
scatterpoints=1,
loc='lower left',
ncol=numClasses,
fontsize=12)
def doRenderChart(self):
pass | apache-2.0 | 2,142,105,694,167,402,800 | 39.9375 | 136 | 0.612829 | false |
Axios-Engineering/audio-components | AudioSource/tests/test_AudioSource.py | 1 | 3739 | #!/usr/bin/env python
import unittest
import ossie.utils.testing
import os
from omniORB import any
class ComponentTests(ossie.utils.testing.ScaComponentTestCase):
"""Test for all component implementations in AudioSource"""
def testScaBasicBehavior(self):
#######################################################################
# Launch the component with the default execparams
execparams = self.getPropertySet(kinds=("execparam",), modes=("readwrite", "writeonly"), includeNil=False)
execparams = dict([(x.id, any.from_any(x.value)) for x in execparams])
self.launch(execparams)
#######################################################################
# Verify the basic state of the component
self.assertNotEqual(self.comp, None)
self.assertEqual(self.comp.ref._non_existent(), False)
self.assertEqual(self.comp.ref._is_a("IDL:CF/Resource:1.0"), True)
self.assertEqual(self.spd.get_id(), self.comp.ref._get_identifier())
#######################################################################
# Simulate regular component startup
# Verify that initialize nor configure throw errors
self.comp.initialize()
configureProps = self.getPropertySet(kinds=("configure",), modes=("readwrite", "writeonly"), includeNil=False)
self.comp.configure(configureProps)
#######################################################################
# Validate that query returns all expected parameters
# Query of '[]' should return the following set of properties
expectedProps = []
expectedProps.extend(self.getPropertySet(kinds=("configure", "execparam"), modes=("readwrite", "readonly"), includeNil=True))
expectedProps.extend(self.getPropertySet(kinds=("allocate",), action="external", includeNil=True))
props = self.comp.query([])
props = dict((x.id, any.from_any(x.value)) for x in props)
# Query may return more than expected, but not less
for expectedProp in expectedProps:
self.assertEquals(props.has_key(expectedProp.id), True)
#######################################################################
# Verify that all expected ports are available
for port in self.scd.get_componentfeatures().get_ports().get_uses():
port_obj = self.comp.getPort(str(port.get_usesname()))
self.assertNotEqual(port_obj, None)
self.assertEqual(port_obj._non_existent(), False)
self.assertEqual(port_obj._is_a("IDL:CF/Port:1.0"), True)
for port in self.scd.get_componentfeatures().get_ports().get_provides():
port_obj = self.comp.getPort(str(port.get_providesname()))
self.assertNotEqual(port_obj, None)
self.assertEqual(port_obj._non_existent(), False)
self.assertEqual(port_obj._is_a(port.get_repid()), True)
#######################################################################
# Make sure start and stop can be called without throwing exceptions
self.comp.start()
self.comp.stop()
#######################################################################
# Simulate regular component shutdown
self.comp.releaseObject()
# TODO Add additional tests here
#
# See:
# ossie.utils.bulkio.bulkio_helpers,
# ossie.utils.bluefile.bluefile_helpers
# for modules that will assist with testing components with BULKIO ports
if __name__ == "__main__":
ossie.utils.testing.main("../AudioSource.spd.xml") # By default tests all implementations
| lgpl-3.0 | -6,723,459,644,412,660,000 | 49.527027 | 133 | 0.553624 | false |
Ultimaker/Uranium | tests/TestRenderBatch.py | 1 | 3618 | from unittest.mock import MagicMock, patch
import pytest
from UM.Math.Color import Color
from UM.Math.Matrix import Matrix
from UM.Mesh.MeshBuilder import MeshBuilder
from UM.Mesh.MeshData import MeshData
from UM.View.RenderBatch import RenderBatch
test_addItem_data = [
{"item": {"transformation": Matrix(), "mesh": MeshData()}, "should_add": True},
{"item": {"transformation": None, "mesh": MeshData()}, "should_add": False},
{"item": {"transformation": None, "mesh": None}, "should_add": False},
{"item": {"transformation": Matrix(), "mesh": None}, "should_add": False},
{"item": {"transformation": Matrix(), "mesh": MeshData(), "uniforms": {}}, "should_add": True},
]
test_compare_data = [
{"item1": {}, "item2": {"sort": 1}},
{"item1": {}, "item2": {"sort": 1}},
{"item1": {"type": RenderBatch.RenderType.Solid, "sort": 0}, "item2": {"sort": 20, "type":RenderBatch.RenderType.NoType}}, # Solid trumps notype, even if sort is higher
{"item1": {"type": RenderBatch.RenderType.Transparent, "sort": 0}, "item2": {"sort": 20, "type":RenderBatch.RenderType.NoType}}
]
def test_createRenderBatch():
mocked_shader = MagicMock()
with patch("UM.View.GL.OpenGL.OpenGL.getInstance"):
render_batch = RenderBatch(mocked_shader)
# Ensure that the proper defaults are set.
assert render_batch.renderType == RenderBatch.RenderType.Solid
assert render_batch.renderMode == RenderBatch.RenderMode.Triangles
assert render_batch.shader == mocked_shader
assert not render_batch.backfaceCull
assert render_batch.renderRange is None
assert render_batch.items == []
@pytest.mark.parametrize("data", test_addItem_data)
def test_addItem(data):
mocked_shader = MagicMock()
with patch("UM.View.GL.OpenGL.OpenGL.getInstance"):
render_batch = RenderBatch(mocked_shader)
render_batch.addItem(**data["item"])
if data["should_add"]:
assert len(render_batch.items) != 0
@pytest.mark.parametrize("data", test_compare_data)
def test_compare(data):
mocked_shader = MagicMock()
with patch("UM.View.GL.OpenGL.OpenGL.getInstance"):
render_batch_1 = RenderBatch(mocked_shader, **data["item1"])
render_batch_2 = RenderBatch(mocked_shader, **data["item2"])
assert render_batch_1 < render_batch_2
def test_render():
mocked_shader = MagicMock()
with patch("UM.View.GL.OpenGL.OpenGL.getInstance"):
render_batch = RenderBatch(mocked_shader)
    # Rendering without a camera shouldn't have any effect.
render_batch.render(None)
assert mocked_shader.bind.call_count == 0
# Rendering with a camera should cause the shader to be bound and released (even if the batch is empty)
mocked_camera = MagicMock()
mocked_camera.getWorldTransformation = MagicMock(return_value = Matrix())
mocked_camera.getViewProjectionMatrix = MagicMock(return_value=Matrix())
with patch("UM.View.GL.OpenGLContext.OpenGLContext.properties"):
render_batch.render(mocked_camera)
assert mocked_shader.bind.call_count == 1
assert mocked_shader.release.call_count == 1
    # Actually render with an item in the batch
mb = MeshBuilder()
mb.addPyramid(10, 10, 10, color=Color(0.0, 1.0, 0.0, 1.0))
mb.calculateNormals()
mesh_data = mb.build()
render_batch.addItem(Matrix(), mesh_data, {})
with patch("UM.View.GL.OpenGL.OpenGL.getInstance"):
with patch("UM.View.GL.OpenGLContext.OpenGLContext.properties"):
render_batch.render(mocked_camera)
assert mocked_shader.bind.call_count == 2
assert mocked_shader.release.call_count == 2 | lgpl-3.0 | -4,333,633,561,524,502,000 | 37.913978 | 173 | 0.68325 | false |
JSBCCA/pythoncode | exercises/exercise_10_18_16.py | 1 | 1586 | import sys
# open cust_info_login.txt
with open('cust_info_login.txt', 'r') as file:
customer_login = file.read().strip().split('\n')
cust_login = list(map(lambda c: c.split(' _ '), customer_login))
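# Assumed layout of cust_info_login.txt (the sample record below is made up):
# one customer per line, four fields separated by ' _ ', with the username in
# the third field and the password in the fourth, e.g.
#     Jane _ Doe _ jdoe42 _ hunter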
# save usernames and passwords to a list
users_and_passwords = []
for customer in cust_login:
unpw = [customer[2], customer[3]]
users_and_passwords.append(unpw)
# check for username and password
lock = True
while lock is True:
username = input("Please enter your username. Type 'q' to quit. ").strip()
if username.lower() == 'q':
sys.exit()
password = input("Please enter your password. ").strip()
if password.lower() == 'q':
sys.exit()
for user in users_and_passwords:
if username == user[0] and password == user[1]:
lock = False
# ask for new password
lock = True
while lock is True:
new_pass = input(
"What'd you like your password to be? Must be 6 characters. ").strip()
if len(new_pass) == 6:
# get user position in order to change password
for item in cust_login:
if (username in item) and (password in item):
item.remove(password)
item.append(new_pass)
# change password
with open('cust_info_login.txt', 'w') as file:
for i in range(len(cust_login)):
                file.write(' _ '.join(cust_login[i]) + '\n')
print("Password has been changed.")
lock = False
| mit | 1,050,089,105,675,520,000 | 37.682927 | 79 | 0.571248 | false |
jonge-democraten/jdleden | jdleden/afdelingrondschuif.py | 1 | 4580 | import logging
from hemres.management.commands.janeus_unsubscribe import Command as CommandUnsub
from hemres.management.commands.janeus_subscribe import Command as CommandSub
from jdleden import ledenlijst
from jdleden import afdelingen
from jdleden import afdelingenoud
logger = logging.getLogger(__name__)
def move_members(members_file, dryrun):
logger.info('BEGIN')
logger.info('file: ' + members_file)
logger.info('dryrun: ' + str(dryrun))
afdelingen_new = afdelingen.AFDELINGEN
afdelingen_oud = afdelingenoud.AFDELINGEN
logger.info("Checking consistency new and old postcode ranges...")
if not check_postcode_indeling(afdelingen_new):
logger.error('postcode check for new departments failed')
raise RuntimeError
if not check_postcode_indeling(afdelingen_oud):
logger.error('postcode check for old departments failed')
raise RuntimeError
logger.info("Reading %s ..." % members_file)
members = ledenlijst.read_xls(members_file)
logger.info("Reading complete")
logger.info("Calculating reallocated members")
reallocated = get_reallocated_members(members)
logger.info("Doing mass (un)subscribes")
for member in reallocated:
lidnummer = member[ledenlijst.LIDNUMMER]
town = member[ledenlijst.WOONPLAATS]
postcode = member[ledenlijst.POSTCODE]
digits = ledenlijst.parse_postcode(postcode)
afdeling_from = find_afdeling(afdelingen_oud, digits)
afdeling_to = find_afdeling(afdelingen_new, digits)
nieuwsbrief_from = "nieuwsbrief-" + afdeling_from.lower()
nieuwsbrief_to = "nieuwsbrief-" + afdeling_to.lower()
logger.info('Move a member living in ' + town + ' from ' + afdeling_from + ' to ' + afdeling_to)
if not dryrun:
CommandUnsub.unsubscribe(lidnummer, nieuwsbrief_from)
CommandSub.subscribe(lidnummer, nieuwsbrief_to)
if dryrun:
logger.warning("Dry-run. No actual database changes!")
logger.info('END')
return reallocated
def get_reallocated_members(members):
reallocated_members = []
for member in members.values():
postcode_string = member[ledenlijst.POSTCODE]
postcode = ledenlijst.parse_postcode(postcode_string)
if not postcode:
continue
if postcode >= 1000 and postcode < 10000:
afdeling_old = find_afdeling(afdelingenoud.AFDELINGEN, postcode)
afdeling_new = find_afdeling(afdelingen.AFDELINGEN, postcode)
if afdeling_new != afdeling_old:
reallocated_members.append(member)
else:
ledenlijst.logger.warning('invalid postcode: ' + str(postcode) + ' for member living in ' + member[ledenlijst.WOONPLAATS])
return reallocated_members
def find_afdeling(afdelingsgrenzen, postcode):
for afdeling, postcodes in afdelingsgrenzen.items():
for postcoderange in postcodes:
if postcode >= postcoderange[0] and postcode <= postcoderange[1]:
return afdeling
return 'Afdeling unknown'
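# Shape assumed for the AFDELINGEN mappings consumed by find_afdeling and the
# checks below: department name -> list of inclusive (low, high) postcode
# ranges. The names and ranges in this sketch are made up for illustration.
#
#     AFDELINGEN = {
#         'Amsterdam': [(1000, 1299), (1380, 1384)],
#         'Utrecht': [(3400, 3599)],
#     }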
def check_postcode_indeling(afdelingen):
no_overlap = check_overlap_afdelingen(afdelingen)
correct_ranges = check_postcode_ranges(afdelingen)
return no_overlap and correct_ranges
def check_postcode_ranges(afdelingsgrenzen):
correct_ranges = True
for _afdeling, postcodes in afdelingsgrenzen.items():
for postcoderange in postcodes:
if postcoderange[0] > postcoderange[1]:
ledenlijst.logger.error('wrong range, lower bound is higher than upper bound: ' + str(postcoderange))
correct_ranges = False
return correct_ranges
def check_overlap_afdelingen(afdelingsgrenzen):
overlapping_postcodes = []
for i in range(1000, 10000):
counter = 0
afdelingen = []
for afdeling, postcodes in afdelingsgrenzen.items():
for postcoderange in postcodes:
if i >= postcoderange[0] and i <= postcoderange[1]:
counter += 1
afdelingen.append(afdeling)
if counter > 1:
overlapping_postcodes.append(i)
ledenlijst.logger.warning('postcode: ' + str(i) + ' in afdelingen: ' + str(afdelingen))
if counter == 0:
ledenlijst.logger.warning('postcode: ' + str(i) + ' heeft geen afdeling')
if len(overlapping_postcodes) > 0:
ledenlijst.logger.error('overlapping postcodes: ' + str(len(overlapping_postcodes)))
return False
return True
| mit | 244,635,679,448,980,670 | 39.530973 | 134 | 0.666376 | false |
hip-odoo/odoo | addons/base_geolocalize/models/res_partner.py | 5 | 2828 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import json
import urllib2
from odoo import api, fields, models, tools, _
from odoo.exceptions import UserError
def geo_find(addr):
if not addr:
return None
url = 'https://maps.googleapis.com/maps/api/geocode/json?sensor=false&address='
url += urllib2.quote(addr.encode('utf8'))
try:
result = json.load(urllib2.urlopen(url))
except Exception as e:
raise UserError(_('Cannot contact geolocation servers. Please make sure that your Internet connection is up and running (%s).') % e)
if result['status'] != 'OK':
return None
try:
geo = result['results'][0]['geometry']['location']
return float(geo['lat']), float(geo['lng'])
except (KeyError, ValueError):
return None
def geo_query_address(street=None, zip=None, city=None, state=None, country=None):
if country and ',' in country and (country.endswith(' of') or country.endswith(' of the')):
# put country qualifier in front, otherwise GMap gives wrong results,
# e.g. 'Congo, Democratic Republic of the' => 'Democratic Republic of the Congo'
country = '{1} {0}'.format(*country.split(',', 1))
return tools.ustr(', '.join(filter(None, [street,
("%s %s" % (zip or '', city or '')).strip(),
state,
country])))
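# Illustration only (made-up address, not part of the original module):
#     geo_query_address(street='1 Main St', zip='94000', city='Creteil',
#                       country='France')
# returns u'1 Main St, 94000 Creteil, France' -- empty parts are dropped from
# the comma-separated query string before it is passed to geo_find().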
class ResPartner(models.Model):
_inherit = "res.partner"
partner_latitude = fields.Float(string='Geo Latitude', digits=(16, 5))
partner_longitude = fields.Float(string='Geo Longitude', digits=(16, 5))
date_localization = fields.Date(string='Geolocation Date')
@api.multi
def geo_localize(self):
# We need country names in English below
for partner in self.with_context(lang='en_US'):
result = geo_find(geo_query_address(street=partner.street,
zip=partner.zip,
city=partner.city,
state=partner.state_id.name,
country=partner.country_id.name))
if result is None:
result = geo_find(geo_query_address(
city=partner.city,
state=partner.state_id.name,
country=partner.country_id.name
))
if result:
partner.write({
'partner_latitude': result[0],
'partner_longitude': result[1],
'date_localization': fields.Date.context_today(partner)
})
return True
| agpl-3.0 | 6,065,413,060,464,888,000 | 38.830986 | 140 | 0.542079 | false |
lisaglendenning/pypetri | source/pypetri/graph/graph.py | 1 | 5541 | # @copyright
# @license
import collections
import networkx as nx
import pypetri.trellis as trellis
#############################################################################
#############################################################################
class Graph(collections.Mapping, trellis.Component):
CHANGE_ACTIONS = range(3)
ADD_ACTION, REMOVE_ACTION, CLEAR_ACTION = CHANGE_ACTIONS
CHANGE_TYPES = range(2)
NODE_TYPE, EDGE_TYPE = CHANGE_TYPES
Graph = nx.Graph
graph = trellis.attr(None)
changes = trellis.todo(list)
to_change = changes.future
def __init__(self, graph=None, *args, **kwargs):
if graph is None:
graph = self.Graph(*args, **kwargs)
super(Graph, self).__init__(graph=graph)
for k in dir(graph):
if not hasattr(self, k):
setattr(self, k, getattr(graph, k))
def __getitem__(self, key):
return self.graph[key]
def __iter__(self):
return iter(self.graph)
def __len__(self):
return len(self.graph)
@trellis.modifier
def add_node(self, *args, **kwargs):
change = (self.ADD_ACTION, self.NODE_TYPE, args, kwargs,)
self.to_change.append(change)
@trellis.modifier
def add_nodes_from(self, nbunch):
for n in nbunch:
self.add_node(n)
@trellis.modifier
def remove_node(self, *args, **kwargs):
change = (self.REMOVE_ACTION, self.NODE_TYPE, args, kwargs,)
self.to_change.append(change)
@trellis.modifier
def remove_nodes_from(self, nbunch):
for n in nbunch:
self.remove_node(n)
@trellis.modifier
def add_edge(self, *args, **kwargs):
change = (self.ADD_ACTION, self.EDGE_TYPE, args, kwargs,)
self.to_change.append(change)
@trellis.modifier
def add_edges_from(self, ebunch):
for e in ebunch:
self.add_edge(*e)
@trellis.modifier
def remove_edge(self, *args, **kwargs):
change = (self.REMOVE_ACTION, self.EDGE_TYPE, args, kwargs,)
self.to_change.append(change)
@trellis.modifier
def remove_edges_from(self, ebunch):
for e in ebunch:
self.remove_edge(*e)
@trellis.modifier
def add_star(self, nbunch):
self.add_nodes_from(nbunch)
hub = nbunch[0]
for i in xrange(1, len(nbunch)):
self.add_edge(hub, nbunch[i])
@trellis.modifier
def add_path(self, nbunch):
self.add_nodes_from(nbunch)
for i in xrange(len(nbunch)-1):
self.add_edge(nbunch[i],nbunch[i+1])
@trellis.modifier
def add_cycle(self, nbunch):
self.add_path(nbunch)
self.add_edge(nbunch[-1], nbunch[0])
@trellis.modifier
def clear(self):
change = (self.CLEAR_ACTION,)
self.to_change.append(change)
@trellis.maintain
def regraph(self):
graph = self.graph
for change in self.changes:
self.apply(graph, change)
if self.changes:
trellis.mark_dirty()
def apply(self, graph, change, log=True):
undos = []
action = change[0]
if action == self.ADD_ACTION:
type, args, kwargs = change[1:]
if type == self.NODE_TYPE:
if not graph.has_node(args[0]):
undo = (self.REMOVE_ACTION, type, args,)
undos.append(undo)
graph.add_node(*args, **kwargs)
elif type == self.EDGE_TYPE:
if not graph.has_edge(*args[0:2]):
undo = (self.REMOVE_ACTION, type, args,)
undos.append(undo)
graph.add_edge(*args, **kwargs)
elif action == self.REMOVE_ACTION:
type, args, kwargs = change[1:]
if type == self.NODE_TYPE:
u = args[0]
if graph.has_node(u):
edges = graph.edges(u, data=True)
for edge in edges:
undo = (self.ADD_ACTION, self.EDGE_TYPE, edge[:2], edge[2],)
undos.append(undo)
undo = (self.ADD_ACTION, type, (u,), dict(graph.node[u]),)
undos.append(undo)
graph.remove_node(*args, **kwargs)
elif type == self.EDGE_TYPE:
u,v = args[0:2]
if graph.has_edge(u,v):
undo = (self.ADD_ACTION, type, args, dict(graph.edge[u][v]),)
undos.append(undo)
graph.remove_edge(*args, **kwargs)
elif action == self.CLEAR_ACTION:
for n in graph.nodes_iter(data=True):
undo = (self.ADD_ACTION, self.NODE_TYPE, n[:1], n[-1],)
undos.append(undo)
for e in graph.edges_iter(data=True):
undo = (self.ADD_ACTION, self.EDGE_TYPE, e[:2], e[-1],)
undos.append(undo)
graph.clear()
else:
assert False
if log:
trellis.on_undo(self.undo, graph, undos)
def undo(self, graph, changes):
for change in changes:
self.apply(graph, change, False)
def snapshot(self):
return self.graph.copy()
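# Change records queued by the modifiers above take these shapes (sketch only;
# the weight kwarg is just an example):
#     (ADD_ACTION, NODE_TYPE, (node,), {})
#     (ADD_ACTION, EDGE_TYPE, (u, v), {'weight': 1})
#     (REMOVE_ACTION, EDGE_TYPE, (u, v), {})
#     (CLEAR_ACTION,)
# The 'regraph' maintenance rule replays them against the wrapped networkx
# graph and registers the inverse operations with trellis.on_undo.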
#############################################################################
#############################################################################
| mit | 4,138,626,352,126,483,000 | 31.982143 | 84 | 0.495398 | false |
mpi-sws-rse/datablox | blox/categorize_shard__1_0/b_categorize_shard.py | 1 | 1171 | from block import *
from shard import *
from logging import ERROR, WARN, INFO, DEBUG
import time
class categorize_shard(Shard):
@classmethod
def initial_configs(cls, config):
return [config for i in range(config["nodes"])]
@classmethod
def node_type(self):
return {"name": "Categorize", "input_port": "input", "output_port": "output", "port_type": "PUSH"}
def on_load(self, config):
self.config = config
self.nodes = config["nodes"]
self.max_nodes = 20
self.current_node = 0
self.add_port("input", Port.PUSH, Port.UNNAMED, [])
self.log(INFO, "Categorize shard loaded")
def config_for_new_node(self):
return self.config
def recv_push(self, port, log):
self.log(INFO, "%s sending to port %d" % (self.id, self.current_node))
self.push_node(self.current_node, log)
self.current_node = (self.current_node + 1) % self.nodes
def can_add_node(self):
return (self.nodes < self.max_nodes)
def should_add_node(self, node_num):
self.log(INFO, self.id + " should_add_node got a new node")
self.nodes += 1
# start distribution from the new node
self.current_node = node_num | apache-2.0 | 438,109,497,504,606,850 | 29.051282 | 102 | 0.652434 | false |
stphivos/django-mock-queries | setup.py | 1 | 1603 | from setuptools import setup
def read_md(filename):
return open(filename).read()
def parse_requirements(filename):
reqs = []
with open(filename, 'r') as f:
reqs = f.read().splitlines()
if not reqs:
raise RuntimeError("Unable to read requirements from '%s'" % filename)
return reqs
setup(
name='django_mock_queries',
version='2.1.6',
description='A django library for mocking queryset functions in memory for testing',
long_description=read_md('README.md'),
long_description_content_type='text/markdown',
url='https://github.com/stphivos/django-mock-queries',
author='Phivos Stylianides',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Testing :: Mocking',
'Topic :: Software Development :: Testing :: Unit',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='django orm mocking unit-testing tdd',
packages=['django_mock_queries'],
install_requires=parse_requirements('requirements/core.txt'),
)
| mit | 7,877,405,215,237,546,000 | 33.847826 | 88 | 0.631316 | false |
jantman/awslimitchecker | awslimitchecker/alerts/base.py | 1 | 5566 | """
awslimitchecker/alerts/base.py
The latest version of this package is available at:
<https://github.com/jantman/awslimitchecker>
################################################################################
Copyright 2015-2019 Jason Antman <[email protected]>
This file is part of awslimitchecker, also known as awslimitchecker.
awslimitchecker is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
awslimitchecker is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with awslimitchecker. If not, see <http://www.gnu.org/licenses/>.
The Copyright and Authors attributions contained herein may not be removed or
otherwise altered, except to add the Author attribution of a contributor to
this work. (Additional Terms pursuant to Section 7b of the AGPL v3)
################################################################################
While not legally required, I sincerely request that anyone who finds
bugs please submit them at <https://github.com/jantman/awslimitchecker> or
to me via email, and that you send any contributions or improvements
either as a pull request on GitHub, or to me via email.
################################################################################
AUTHORS:
Jason Antman <[email protected]> <http://www.jasonantman.com>
################################################################################
"""
import logging
from abc import ABCMeta, abstractmethod
logger = logging.getLogger(__name__)
class AlertProvider(object):
__metaclass__ = ABCMeta
def __init__(self, region_name):
"""
Initialize an AlertProvider class. This MUST be overridden by
subclasses. All configuration must be passed as keyword arguments
to the class constructor (these come from ``--alert-config`` CLI
arguments). Any dependency imports must be made in the constructor.
The constructor should do as much as possible to validate configuration.
:param region_name: the name of the region we're connected to
:type region_name: str
"""
self._region_name = region_name
@abstractmethod
def on_success(self, duration=None):
"""
Method called when no thresholds were breached, and run completed
successfully. Should resolve any open incidents (if the service supports
that functionality) or else simply return.
:param duration: duration of the usage/threshold checking run
:type duration: float
"""
raise NotImplementedError()
@abstractmethod
def on_critical(self, problems, problem_str, exc=None, duration=None):
"""
Method called when the run encountered errors, or at least one critical
threshold was met or crossed.
:param problems: dict of service name to nested dict of limit name to
limit, same format as the return value of
:py:meth:`~.AwsLimitChecker.check_thresholds`. ``None`` if ``exc`` is
specified.
:type problems: dict or None
:param problem_str: String representation of ``problems``, as displayed
in ``awslimitchecker`` command line output. ``None`` if ``exc`` is
specified.
:type problem_str: str or None
:param exc: Exception object that was raised during the run (optional)
:type exc: Exception
:param duration: duration of the run
:type duration: float
"""
raise NotImplementedError()
@abstractmethod
def on_warning(self, problems, problem_str, duration=None):
"""
Method called when one or more warning thresholds were crossed, but no
criticals and the run did not encounter any errors.
:param problems: dict of service name to nested dict of limit name to
limit, same format as the return value of
:py:meth:`~.AwsLimitChecker.check_thresholds`.
:type problems: dict or None
:param problem_str: String representation of ``problems``, as displayed
in ``awslimitchecker`` command line output.
:type problem_str: str or None
:param duration: duration of the run
:type duration: float
"""
raise NotImplementedError()
@staticmethod
def providers_by_name():
"""
Return a dict of available AlertProvider subclass names to the class
objects.
:return: AlertProvider class names to classes
:rtype: dict
"""
return {x.__name__: x for x in AlertProvider.__subclasses__()}
@staticmethod
def get_provider_by_name(name):
"""
Get a reference to the provider class with the specified name.
:param name: name of the AlertProvider subclass
:type name: str
:return: AlertProvider subclass
:rtype: ``class``
:raises: RuntimeError
"""
try:
return AlertProvider.providers_by_name()[name]
except KeyError:
raise RuntimeError(
'ERROR: "%s" is not a valid AlertProvider class name' % name
)
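# Hypothetical sketch, not part of awslimitchecker itself: a minimal provider
# that only logs. A real provider would notify an alerting service instead.
class LogOnlyAlertProvider(AlertProvider):

    def on_success(self, duration=None):
        logger.info('run completed successfully in %s seconds', duration)

    def on_critical(self, problems, problem_str, exc=None, duration=None):
        logger.critical(
            'errors or critical thresholds: %s', exc if exc else problem_str
        )

    def on_warning(self, problems, problem_str, duration=None):
        logger.warning('warning thresholds crossed: %s', problem_str)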
| agpl-3.0 | -9,175,748,815,709,389,000 | 38.197183 | 80 | 0.634567 | false |
pkonink/complete-python-bootcamp | capstone.py | 1 | 7045 | # Capstone project for Jose Portilla's Complete Python Bootcamp course at udemy.com
# Project Idea: Inverted index - An Inverted Index is a data structure used to create full text search.
# Given a set of text files, implement a program to create an inverted index. Also create a
# user interface to do a search using that inverted index which returns a list of files that
# contain the query term / terms. The search index can be in memory.
# Word-level inverted index - Features:
# * loads text file from web into memory, scans it and builds index
# + index stores as {'word':[(str('path/to/file'),int(pos_of__occurrence)),(...)]}
# * combines the dictionary with main database of all scanned text files
# + main dictionary stored locally as a sqlite file
# * UI that allows entry of multiple words (phrases) and return of snippets from relevant text files
# + returns results for both single words and complete phrase (ie, "love", "you", and "love you")
# + UI in cli only, no web or widget
# * Two tables for normalized storage
# + table: CREATE TABLE words(id INTEGER PRIMARY KEY AUTOINCREMENT, word TEXT);
# + table: CREATE TABLE words_loc(id INTEGER PRIMARY KEY AUTOINCREMENT, words_id INTEGER, url TEXT, loc INTEGER);
import urllib2
import sqlite3
import re
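# Illustrative helper (an assumption, not called by the classes below): creates
# the two normalized tables described in the header comments inside the local
# SQLite file used by FileLoad.
def create_index_tables(db_path='capstone.db'):
    conn = sqlite3.connect(db_path)
    cursor = conn.cursor()
    cursor.execute("CREATE TABLE IF NOT EXISTS words("
                   "id INTEGER PRIMARY KEY AUTOINCREMENT, word TEXT)")
    cursor.execute("CREATE TABLE IF NOT EXISTS words_loc("
                   "id INTEGER PRIMARY KEY AUTOINCREMENT, "
                   "words_id INTEGER, url TEXT, loc INTEGER)")
    conn.commit()
    conn.close()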
class FileLoad(object):
def __init__(self,file_loc):
'''loads file, builds index, adds to main index'''
self.return_list = {}
try:
response = urllib2.urlopen(file_loc)
html = response.read()
except:
html = False
print "%s is not a valid URL."%(file_loc)
if html != False:
# progressively remove script, style, then all HTML tags
clean_html = re.sub(r'<script[\s\S]+?>[\s\S]+?<\/script>','',html)
clean_html = re.sub(r'<style[\s\S]+?>[\s\S]+?<\/style>','',clean_html)
clean_html = re.sub(r'<[^<]+?>', '', clean_html)
            # remove all special characters except single - and single ' to help build a cleaner word list
real_clean_html = re.sub(r'^[\'-]|[\'-]$|[-]{2,}|[\']{2,}|([\'-])\W|\W([\'-])|[^a-z\'\s-]+', ' ', clean_html.lower())
# created ordered list of unique words from file
word_list = sorted(set(real_clean_html.split()))
# now add to sqlite database
try:
conn = sqlite3.connect('capstone.db')
self.cursor = conn.cursor()
# find locations for each word and update database where necessary
for w in word_list:
# We're only interested in words with more than one letter
if len(w) > 1:
# Check if word is already in database; if not, add it
w_id = self.check_for_word(w)
if w_id == False:
self.cursor.execute("insert into words(word) values(?)",(w,))
conn.commit()
w_id = self.cursor.lastrowid
# Get word location in document
for word_loc in [p.start() for p in re.finditer(r'\s%s[\s|-|\.|,]'%(w),clean_html.lower())]:
# First, check if this word instance is already in database
self.cursor.execute("select url,loc from words_loc where words_id = ?",(w_id,))
r = self.cursor.fetchone()
# If that instance of word isn't recorded already, add to the database
                            if r is None or r[1] != word_loc or r[0] != file_loc:
self.cursor.execute("insert into words_loc(words_id,url,loc) values(?,?,?)",(w_id,file_loc,word_loc))
conn.commit()
# Close connection and print affirmative message.
conn.close()
print "Index successfully updated for: %s"%(file_loc)
# Print an error if there's a problem with adding to database
except sqlite3.Error, e:
print "Error %s:"%(e.args[0])
def check_for_word(self,word):
'''Checks if a word is already recorded in database'''
self.cursor.execute("select id from words where word = ?",(word,))
result = self.cursor.fetchone()
if result:
return result[0]
else:
return False
class FileSnip(object):
def __init__(self,result):
'''loads file, converts to string, and returns text within n spaces before and
after word_position for display
result = (file,word_position)'''
#for word_loc in [p.start() for p in re.finditer(r'\s%s[\s|-|\.|,]'%(w),clean_html.lower())]:
# print loc,"Excerpt: ...",clean_html[loc-40:loc+40],"...\n"
print result
class SearchScan(object):
def __init__(self,word_list):
'''scans index for occurrences of words in word_list
scans index for phrases; phrase = words in word_list within n pos of each other
results = [(word,file,loc),(...)]'''
print word_list
class SearchOutput(object):
def __init__(self,result_list):
''' combines and displays results to screen word, URL, and file snippet for each result'''
print result_list
class UserInput(object):
def __init__(self):
pass
def user_activity(self):
''' asks user to load file or search for terms and calls pertinent method'''
while True:
task = raw_input('Type "search" or "load" for activity: ').upper()
if task == 'SEARCH':
self.search_query()
break
elif task == 'LOAD':
self.load_file()
break
def load_file(self):
''' takes file location from user and calls FileLoad'''
file = raw_input("Enter full URL including http:// of page to load): ")
# do validation here
FileLoad(file)
def search_query(self):
''' asks for search terms, calls SearchScan, and returns results as SearchOutput'''
search = raw_input("Enter search term: ")
        word_list = search.split()
        results = []
        for item in SearchScan(word_list):
            results.append([item[0], item[1], FileSnip([item[1], item[2]])])
SearchOutput(results)
def again_or_die(self):
''' asks for another search query or end program'''
while True:
cont = raw_input("Press y to continue or any other key to quit. ").upper()
if cont == "Y":
return True
break
else:
return False
break
class main(object):
def __init__(self):
ui = UserInput()
while True:
#ask for input
ui.user_activity()
#show output
if ui.again_or_die() == False:
print "Goodbye!"
break
main()
| cc0-1.0 | 1,865,882,465,895,336,000 | 40.686391 | 133 | 0.555713 | false |
bouthors/ZenPacks.MatthieuBouthors.pfSense | setup.py | 1 | 2450 | ################################
# These variables are overwritten by Zenoss when the ZenPack is exported
# or saved. Do not modify them directly here.
# NB: PACKAGES is deprecated
NAME = "ZenPacks.MatthieuBouthors.pfSense"
VERSION = "0.7.0"
AUTHOR = "Matthieu Bouthors"
LICENSE = "GPL v2"
NAMESPACE_PACKAGES = ['ZenPacks', 'ZenPacks.MatthieuBouthors']
PACKAGES = ['ZenPacks', 'ZenPacks.MatthieuBouthors', 'ZenPacks.MatthieuBouthors.pfSense']
INSTALL_REQUIRES = []
COMPAT_ZENOSS_VERS = ""
PREV_ZENPACK_NAME = ""
# STOP_REPLACEMENTS
################################
# Zenoss will not overwrite any changes you make below here.
from setuptools import setup, find_packages
setup(
# This ZenPack metadata should usually be edited with the Zenoss
# ZenPack edit page. Whenever the edit page is submitted it will
# overwrite the values below (the ones it knows about) with new values.
name=NAME,
version=VERSION,
author=AUTHOR,
license=LICENSE,
# This is the version spec which indicates what versions of Zenoss
# this ZenPack is compatible with
compatZenossVers=COMPAT_ZENOSS_VERS,
# previousZenPackName is a facility for telling Zenoss that the name
# of this ZenPack has changed. If no ZenPack with the current name is
# installed then a zenpack of this name if installed will be upgraded.
prevZenPackName=PREV_ZENPACK_NAME,
# Indicate to setuptools which namespace packages the zenpack
# participates in
namespace_packages=NAMESPACE_PACKAGES,
# Tell setuptools what packages this zenpack provides.
packages=find_packages(),
# Tell setuptools to figure out for itself which files to include
# in the binary egg when it is built.
include_package_data=True,
# Indicate dependencies on other python modules or ZenPacks. This line
# is modified by zenoss when the ZenPack edit page is submitted. Zenoss
# tries to put add/delete the names it manages at the beginning of this
# list, so any manual additions should be added to the end. Things will
# go poorly if this line is broken into multiple lines or modified to
# dramatically.
install_requires=INSTALL_REQUIRES,
# Every ZenPack egg must define exactly one zenoss.zenpacks entry point
# of this form.
entry_points={
'zenoss.zenpacks': '%s = %s' % (NAME, NAME),
},
# All ZenPack eggs must be installed in unzipped form.
zip_safe=False,
)
| gpl-2.0 | 913,829,869,681,725,400 | 36.121212 | 89 | 0.707347 | false |
ZeromusSoftware/RPi3500 | big_data/adamant_algorithm/square_meter_price.py | 1 | 2747 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 26 11:25:06 2016
@author: william
"""
import urllib
import pygeoj
import unicodedata
import pandas as pd
sectors = {"Bouches-du-Rhône":[]}
file13 = pygeoj.load("Data/france-geojson/departements/13/communes.geojson")
for feature in file13:
s = feature.properties['nom']
sectors["Bouches-du-Rhône"].append(s)
communes = sectors["Bouches-du-Rhône"]
def refresh_sqm_price():
prix,evolution = [],[]
for s in communes:
normalized_str = ''.join((c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn'))
commune = "v_"+normalized_str.lower().replace("'","-").replace(" ","-")+"_13"
if "marseille" in commune:
try :
arrondissement = str(int(commune[12:14]))
except :
arrondissement = "0"+commune[12]
commune = "v_marseille_130"+arrondissement
page=urllib.urlopen('http://www.efficity.com/prix-immobilier-m2/'+commune)
strpage=page.read()
print(commune)
try:
stringevolutiontoseek = '<p class="evol-values">'
indexevol = strpage.index(stringevolutiontoseek)
strevolution = strpage[indexevol+len(stringevolutiontoseek):indexevol+len(stringevolutiontoseek)+4]
floatevolution = float(strevolution.replace(" ",""))
print(floatevolution)
evolution.append(floatevolution)
except :
print("evolution raté..")
evolution.append(0.0)
try:
stringpricetoseek = '<div class="price-per-sqm-width price-per-sqm-values">'
indexprice = strpage.index(stringpricetoseek)
firstcut = strpage[indexprice+len(stringpricetoseek):indexprice+len(stringpricetoseek)+50]
index1 = firstcut.index('<strong>')+len('<strong>')
index2 = firstcut.index('</strong>')+1
strprix = firstcut[index1:index2]
intprix = 0
n = len(strprix)
k = 1
for i in range(n):
try:
if type (int(strprix[n-i-1]))==int:
intprix+=k*int(strprix[n-i-1])
k=k*10
except:
pass
print(intprix)
prix.append(intprix)
except:
return ("prix raté..")
rows = []
for i in range(len(communes)):
rows.append((communes[i],prix[i],evolution[i]))
df = pd.DataFrame(rows,columns = ["Commune","Prix du m2","Evolution sur 3 mois"])
df.to_csv('Data/square_meters_price.csv')
return True | gpl-2.0 | -3,089,712,412,697,723,000 | 32.45122 | 113 | 0.549234 | false |
Kediel/Violent-Python | Chapter 2/bruteKey.py | 1 | 2401 | import pexpect
import optparse
import os
from threading import *
maxConnections = 5
connection_lock = BoundedSemaphore(value = maxConnections)
Stop = False
Fails = 0
def connect(user, host, keyfile, release):
global Stop, Fails
try:
perm_denied = 'Permission denied'
ssh_newkey = 'Are you sure you want to continue'
conn_closed = 'Connection closed by remote host'
        opt = ' -o PasswordAuthentication=no'
        connStr = 'ssh ' + user +\
            '@' + host + ' -i ' + keyfile + opt
child = pexpect.spawn(connStr)
ret = child.expect([pexpect.TIMEOUT, perm_denied, \
ssh_newkey, conn_closed, '$', '#', ])
if ret == 2:
print '[-] Adding Host to `/.ssh/known_hosts'
child.sendline('yes')
connect(user, host, keyfile, False)
elif ret == 3:
print '[-] Connection Closed By Remote Host'
Fails += 1
elif ret > 3:
print '[+] Success. ' + str(keyfile)
Stop = True
finally:
if release:
connection_lock.release()
def main():
    parser = optparse.OptionParser('usage: %prog -H ' +\
        '<target host> -u <user> -d <directory>')
parser.add_option('-H', dest = 'tgtHost', type = 'string', \
help = 'specify target host')
parser.add_option('-u', dest = 'user', type = 'string', \
help = 'specify the user')
parser.add_option('-d', dest = 'passDir', type = 'string', \
help = 'specify directory with keys')
(options, args) = parser.parse_args()
host = options.tgtHost
user = options.user
passDir = options.passDir
if host == None or user == None or passDir == None:
print parser.usage
exit(0)
for filename in os.listdir(passDir):
if Stop:
print '[*] Exiting: Key Found.'
exit(0)
if Fails > 5:
print '[!] Exiting: '+\
'Too Many Connections Closed by Remote Host.'
print '[!] Adjust number of simultaneous threads.'
exit(0)
connection_lock.acquire()
fullpath = os.path.join(passDir, filename)
print '[-] Testing keyfile ' + str(fullpath)
t = Thread(target = connect, args = (user, host, fullpath, True))
child = t.start()
if __name__ == '__main__':
main()
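# Example invocation (host, user and key directory below are hypothetical):
#     python bruteKey.py -H 10.10.10.5 -u root -d /tmp/keys/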
| mit | -5,777,584,655,809,432,000 | 25.097826 | 69 | 0.546022 | false |
sfcta/TAutils | wrangler/TransitParser.py | 1 | 21183 | from simpleparse.common import numbers, strings, comments
from simpleparse import generator
from simpleparse.parser import Parser
from simpleparse.dispatchprocessor import *
import re
from .Linki import Linki
from .Logger import WranglerLogger
from .Node import Node
from .PNRLink import PNRLink
from .Supplink import Supplink
from .TransitLine import TransitLine
from .TransitLink import TransitLink
from .ZACLink import ZACLink
__all__ = [ 'TransitParser' ]
WRANGLER_FILE_SUFFICES = [ "lin", "link", "pnr", "zac", "access", "xfer" ]
# PARSER DEFINITION ------------------------------------------------------------------------------
# NOTE: even though XYSPEED and TIMEFAC are node attributes here, I'm not sure that's really ok --
# Cube documentation implies TF and XYSPD are node attributes...
transit_file_def=r'''
transit_file := ( accessli / line / link / pnr / zac / supplink )+, smcw*, whitespace*
line := whitespace?, smcw?, c"LINE", whitespace, lin_attr*, lin_node*, whitespace?
lin_attr := ( lin_attr_name, whitespace?, "=", whitespace?, attr_value, whitespace?,
comma, whitespace?, semicolon_comment* )
lin_nodeattr := ( lin_nodeattr_name, whitespace?, "=", whitespace?, attr_value, whitespace?, comma?, whitespace?, semicolon_comment* )
lin_attr_name := c"allstops" / c"color" / (c"freq",'[',[1-5],']') / c"mode" / c"name" / c"oneway" / c"owner" / c"runtime" / c"timefac" / c"xyspeed" / c"longname"
lin_nodeattr_name := c"access_c" / c"access" / c"delay" / c"xyspeed" / c"timefac"
lin_node := lin_nodestart?, whitespace?, nodenum, spaces*, comma?, spaces*, semicolon_comment?, whitespace?, lin_nodeattr*
lin_nodestart := (whitespace?, "N", whitespace?, "=")
link := whitespace?, smcw?, c"LINK", whitespace, link_attr*, whitespace?, semicolon_comment*
link_attr := (( (link_attr_name, whitespace?, "=", whitespace?, attr_value) /
(word_nodes, whitespace?, "=", whitespace?, nodepair) /
(word_modes, whitespace?, "=", whitespace?, numseq) ),
whitespace?, comma?, whitespace?)
link_attr_name := c"dist" / c"speed" / c"time" / c"oneway"
pnr := whitespace?, smcw?, c"PNR", whitespace, pnr_attr*, whitespace?
pnr_attr := (( (pnr_attr_name, whitespace?, "=", whitespace?, attr_value) /
(word_node, whitespace?, "=", whitespace?, ( nodepair / nodenum )) /
(word_zones, whitespace?, "=", whitespace?, numseq )),
whitespace?, comma?, whitespace?, semicolon_comment*)
pnr_attr_name := c"time" / c"maxtime" / c"distfac" / c"cost"
zac := whitespace?, smcw?, c"ZONEACCESS", whitespace, zac_attr*, whitespace?, semicolon_comment*
zac_attr := (( (c"link", whitespace?, "=", whitespace?, nodepair) /
(zac_attr_name, whitespace?, "=", whitespace?, attr_value) ),
whitespace?, comma?, whitespace?)
zac_attr_name := c"mode"
supplink := whitespace?, smcw?, c"SUPPLINK", whitespace, supplink_attr*, whitespace?, semicolon_comment*
supplink_attr := (( (supplink_attr_name, whitespace?, "=", whitespace?, attr_value) /
(c"n", whitespace?, "=", whitespace?, nodepair )),
whitespace?, comma?, whitespace?)
supplink_attr_name:= c"mode" / c"dist" / c"speed" / c"oneway" / c"time"
accessli := whitespace?, smcw?, nodenumA, spaces?, nodenumB, spaces?, accesstag?, spaces?, (float/int)?, spaces?, semicolon_comment?
accesstag := c"wnr" / c"pnr"
word_nodes := c"nodes"
word_node := c"node"
word_modes := c"modes"
word_zones := c"zones"
numseq := int, (spaces?, ("-" / ","), spaces?, int)*
nodepair := nodenum, spaces?, ("-" / ","), spaces?, nodenum
nodenumA := nodenum
nodenumB := nodenum
nodenum := int
attr_value := alphanums / string_single_quote / string_double_quote
alphanums := [a-zA-Z0-9\.]+
<comma> := [,]
<whitespace> := [ \t\r\n]+
<spaces> := [ \t]+
smcw := whitespace?, (semicolon_comment / c_comment, whitespace?)+
'''
class TransitFileProcessor(DispatchProcessor):
""" Class to process transit files
"""
def __init__(self, verbosity=1):
self.verbosity=verbosity
self.lines = []
self.links = []
self.pnrs = []
self.zacs = []
self.accesslis = []
self.xferlis = []
self.liType = ''
self.supplinks = []
self.endcomments = []
def crackTags(self, leaf, buffer):
tag = leaf[0]
text = buffer[leaf[1]:leaf[2]]
subtags = leaf[3]
b = []
if subtags:
for leaf in subtags:
b.append(self.crackTags(leaf, buffer))
return (tag,text,b)
def line(self, (tag,start,stop,subtags), buffer):
# this is the whole line
if self.verbosity>=1:
print tag, start, stop
# Append list items for this line
for leaf in subtags:
xxx = self.crackTags(leaf,buffer)
self.lines.append(xxx)
if self.verbosity==2:
# lines are composed of smcw (semicolon-comment / whitespace), line_attr and lin_node
for linepart in subtags:
print " ",linepart[0], " -> [ ",
for partpart in linepart[3]:
print partpart[0], "(", buffer[partpart[1]:partpart[2]],")",
print " ]"
def link(self, (tag,start,stop,subtags), buffer):
# this is the whole link
if self.verbosity>=1:
print tag, start, stop
# Append list items for this link
for leaf in subtags:
xxx = self.crackTags(leaf,buffer)
self.links.append(xxx)
if self.verbosity==2:
# links are composed of smcw and link_attr
for linkpart in subtags:
print " ",linkpart[0], " -> [ ",
for partpart in linkpart[3]:
print partpart[0], "(", buffer[partpart[1]:partpart[2]], ")",
print " ]"
def pnr(self, (tag,start,stop,subtags), buffer):
if self.verbosity>=1:
print tag, start, stop
# Append list items for this link
for leaf in subtags:
xxx = self.crackTags(leaf,buffer)
self.pnrs.append(xxx)
if self.verbosity==2:
# pnrs are composed of smcw and pnr_attr
for pnrpart in subtags:
print " ",pnrpart[0], " -> [ ",
for partpart in pnrpart[3]:
print partpart[0], "(", buffer[partpart[1]:partpart[2]], ")",
print " ]"
def zac(self, (tag,start,stop,subtags), buffer):
if self.verbosity>=1:
print tag, start, stop
if self.verbosity==2:
# zacs are composed of smcw and zac_attr
for zacpart in subtags:
print " ",zacpart[0], " -> [ ",
for partpart in zacpart[3]:
print partpart[0], "(", buffer[partpart[1]:partpart[2]], ")",
print " ]"
# Append list items for this link
for leaf in subtags:
xxx = self.crackTags(leaf,buffer)
self.zacs.append(xxx)
def supplink(self, (tag,start,stop,subtags), buffer):
if self.verbosity>=1:
print tag, start, stop
if self.verbosity==2:
# supplinks are composed of smcw and zac_attr
for supplinkpart in subtags:
print " ",supplinkpart[0], " -> [ ",
for partpart in supplinkpart[3]:
print partpart[0], "(", buffer[partpart[1]:partpart[2]], ")",
print " ]"
# Append list items for this link
# TODO: make the others more like this -- let the list separate the parse structures!
supplink = []
for leaf in subtags:
xxx = self.crackTags(leaf,buffer)
supplink.append(xxx)
self.supplinks.append(supplink)
def smcw(self, (tag,start,stop,subtags), buffer):
""" Semicolon comment whitespace
"""
if self.verbosity>=1:
print tag, start, stop
for leaf in subtags:
xxx = self.crackTags(leaf,buffer)
self.endcomments.append(xxx)
def accessli(self, (tag,start,stop,subtags), buffer):
if self.verbosity>=1:
print tag, start, stop
for leaf in subtags:
xxx = self.crackTags(leaf,buffer)
if self.liType=="access":
self.accesslis.append(xxx)
elif self.liType=="xfer":
self.xferlis.append(xxx)
else:
raise NetworkException("Found access or xfer link without classification")
class TransitParser(Parser):
def __init__(self, filedef=transit_file_def, verbosity=1):
Parser.__init__(self, filedef)
self.verbosity=verbosity
self.tfp = TransitFileProcessor(self.verbosity)
def buildProcessor(self):
return self.tfp
def convertLineData(self):
""" Convert the parsed tree of data into a usable python list of transit lines
returns list of comments and transit line objects
"""
rows = []
currentRoute = None
for line in self.tfp.lines:
# Each line is a 3-tuple: key, value, list-of-children.
# Add comments as simple strings
if line[0] == 'smcw':
cmt = line[1].strip()
if not cmt==';;<<Trnbuild>>;;':
rows.append(cmt)
continue
# Handle Line attributes
if line[0] == 'lin_attr':
key = None
value = None
comment = None
# Pay attention only to the children of lin_attr elements
kids = line[2]
for child in kids:
if child[0]=='lin_attr_name': key=child[1]
if child[0]=='attr_value': value=child[1]
if child[0]=='semicolon_comment': comment=child[1].strip()
# If this is a NAME attribute, we need to start a new TransitLine!
if key=='NAME':
if currentRoute:
rows.append(currentRoute)
currentRoute = TransitLine(name=value)
else:
currentRoute[key] = value # Just store all other attributes
# And save line comment if there is one
if comment: currentRoute.comment = comment
continue
# Handle Node list
if line[0] == "lin_node":
# Pay attention only to the children of lin_attr elements
kids = line[2]
node = None
for child in kids:
if child[0]=='nodenum':
node = Node(child[1])
if child[0]=='lin_nodeattr':
key = None
value = None
for nodechild in child[2]:
if nodechild[0]=='lin_nodeattr_name': key = nodechild[1]
if nodechild[0]=='attr_value': value = nodechild[1]
if nodechild[0]=='semicolon_comment': comment=nodechild[1].strip()
node[key] = value
if comment: node.comment = comment
currentRoute.n.append(node)
continue
# Got something other than lin_node, lin_attr, or smcw:
WranglerLogger.critical("** SHOULD NOT BE HERE: %s (%s)" % (line[0], line[1]))
# End of tree; store final route and return
if currentRoute: rows.append(currentRoute)
return rows
def convertLinkData(self):
""" Convert the parsed tree of data into a usable python list of transit lines
returns list of comments and transit line objects
"""
rows = []
currentLink = None
key = None
value = None
comment = None
for link in self.tfp.links:
# Each link is a 3-tuple: key, value, list-of-children.
# Add comments as simple strings:
if link[0] in ('smcw','semicolon_comment'):
if currentLink:
currentLink.comment = " "+link[1].strip() # Link comment
rows.append(currentLink)
currentLink = None
else:
rows.append(link[1].strip()) # Line comment
continue
# Link records
if link[0] == 'link_attr':
# Pay attention only to the children of lin_attr elements
kids = link[2]
for child in kids:
if child[0] in ('link_attr_name','word_nodes','word_modes'):
key = child[1]
# If this is a NAME attribute, we need to start a new TransitLink.
if key in ('nodes','NODES'):
if currentLink: rows.append(currentLink)
currentLink = TransitLink() # Create new dictionary for this transit support link
if child[0]=='nodepair':
currentLink.setId(child[1])
if child[0] in ('attr_value','numseq'):
currentLink[key] = child[1]
continue
# Got something unexpected:
WranglerLogger.critical("** SHOULD NOT BE HERE: %s (%s)" % (link[0], link[1]))
# Save last link too
if currentLink: rows.append(currentLink)
return rows
def convertPNRData(self):
""" Convert the parsed tree of data into a usable python list of PNR objects
returns list of strings and PNR objects
"""
rows = []
currentPNR = None
key = None
value = None
for pnr in self.tfp.pnrs:
# Each pnr is a 3-tuple: key, value, list-of-children.
# Add comments as simple strings
# Textline Comments
if pnr[0] =='smcw':
# Line comment; thus existing PNR must be finished.
if currentPNR:
rows.append(currentPNR)
currentPNR = None
rows.append(pnr[1].strip()) # Append line-comment
continue
# PNR records
if pnr[0] == 'pnr_attr':
# Pay attention only to the children of attr elements
kids = pnr[2]
for child in kids:
if child[0] in ('pnr_attr_name','word_node','word_zones'):
key = child[1]
# If this is a NAME attribute, we need to start a new PNR.
if key in ('node','NODE'):
if currentPNR:
rows.append(currentPNR)
currentPNR = PNRLink() # Create new dictionary for this PNR
if child[0]=='nodepair' or child[0]=='nodenum':
#print "child[0]/[1]",child[0],child[1]
currentPNR.id = child[1]
currentPNR.parseID()
if child[0] in ('attr_value','numseq'):
currentPNR[key.upper()] = child[1]
if child[0]=='semicolon_comment':
currentPNR.comment = ' '+child[1].strip()
continue
# Got something unexpected:
WranglerLogger.critical("** SHOULD NOT BE HERE: %s (%s)" % (pnr[0], pnr[1]))
# Save last link too
if currentPNR: rows.append(currentPNR)
return rows
def convertZACData(self):
""" Convert the parsed tree of data into a usable python list of ZAC objects
returns list of strings and ZAC objects
"""
rows = []
currentZAC = None
key = None
value = None
for zac in self.tfp.zacs:
# Each zac is a 3-tuple: key, value, list-of-children.
# Add comments as simple strings
# Textline Comments
if zac[0] in ('smcw','semicolon_comment'):
if currentZAC:
currentZAC.comment = ' '+zac[1].strip()
rows.append(currentZAC)
currentZAC = None
else:
rows.append(zac[1].strip()) # Append value
continue
# Link records
if zac[0] == 'zac_attr':
# Pay attention only to the children of lin_attr elements
kids = zac[2]
for child in kids:
if child[0]=='nodepair':
# Save old ZAC
if currentZAC: rows.append(currentZAC)
# Start new ZAC
currentZAC = ZACLink() # Create new dictionary for this ZAC.
currentZAC.id=child[1]
if child[0] =='zac_attr_name':
key = child[1]
if child[0]=='attr_value':
currentZAC[key] = child[1]
continue
# Got something unexpected:
WranglerLogger.critical("** SHOULD NOT BE HERE: %s (%s)" % (zac[0], zac[1]))
# Save last link too
if currentZAC: rows.append(currentZAC)
return rows
def convertLinkiData(self, linktype):
""" Convert the parsed tree of data into a usable python list of ZAC objects
returns list of strings and ZAC objects
"""
rows = []
currentLinki = None
key = None
value = None
linkis = []
if linktype=="access":
linkis=self.tfp.accesslis
elif linktype=="xfer":
linkis=self.tfp.xferlis
else:
raise NetworkException("ConvertLinkiData with invalid linktype")
for accessli in linkis:
# whitespace?, smcw?, nodenumA, spaces?, nodenumB, spaces?, (float/int)?, spaces?, semicolon_comment?
if accessli[0]=='smcw':
rows.append(accessli[1].strip())
elif accessli[0]=='nodenumA':
currentLinki = Linki()
rows.append(currentLinki)
currentLinki.A = accessli[1].strip()
elif accessli[0]=='nodenumB':
currentLinki.B = accessli[1].strip()
elif accessli[0]=='float':
currentLinki.distance = accessli[1].strip()
elif accessli[0]=='int':
currentLinki.xferTime = accessli[1].strip()
elif accessli[0]=='semicolon_comment':
currentLinki.comment = accessli[1].strip()
elif accessli[0]=='accesstag':
currentLinki.accessType = accessli[1].strip()
else:
# Got something unexpected:
WranglerLogger.critical("** SHOULD NOT BE HERE: %s (%s)" % (accessli[0], accessli[1]))
return rows
def convertSupplinksData(self):
""" Convert the parsed tree of data into a usable python list of Supplink objects
returns list of strings and Supplink objects
"""
rows = []
currentSupplink = None
key = None
value = None
for supplink in self.tfp.supplinks:
# Supplink records are lists
if currentSupplink: rows.append(currentSupplink)
currentSupplink = Supplink() # Create new dictionary for this PNR
for supplink_attr in supplink:
if supplink_attr[0] == 'supplink_attr':
if supplink_attr[2][0][0]=='supplink_attr_name':
currentSupplink[supplink_attr[2][0][1]] = supplink_attr[2][1][1]
elif supplink_attr[2][0][0]=='nodepair':
currentSupplink.setId(supplink_attr[2][0][1])
else:
WranglerLogger.critical("** SHOULD NOT BE HERE: %s (%s)" % (supplink[0], supplink[1]))
raise
elif supplink_attr[0] == "semicolon_comment":
currentSupplink.comment = supplink_attr[1].strip()
elif supplink_attr[0] == 'smcw':
currentSupplink.comment = supplink_attr[1].strip()
else:
WranglerLogger.critical("** SHOULD NOT BE HERE: %s (%s)" % (supplink[0], supplink[1]))
raise
# Save last link too
if currentSupplink: rows.append(currentSupplink)
return rows
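# Minimal usage sketch (the line data below is made up for illustration): parse
# the raw text first, then call the convert* helpers to get python objects.
if __name__ == '__main__':
    sample = 'LINE NAME="MUN_1", MODE=11, FREQ[1]=10, N=1001, 1002, 1003\n'
    parser = TransitParser(transit_file_def, verbosity=0)
    (success, children, nextchar) = parser.parse(sample, production="transit_file")
    for obj in parser.convertLineData():
        print obj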
| gpl-3.0 | -808,136,675,958,228,500 | 38.817669 | 165 | 0.514186 | false |
Cerdic/myCDN | lib/http.py | 1 | 2270 | # CirruxCache provides dynamic HTTP caching on AppEngine (CDN like)
# Copyright (C) 2009 Samuel Alba <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
userAgent = 'CirruxCache 0.3.1 / shad (http://code.google.com/p/cirruxcache/) ;'
# Copy from httplib
httpResponses = {
100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
306: '(Unused)',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
}
# An empty base class to tell webpy that we implement all HTTP methods
class Base(object):
def GET(self, *args):
pass
def HEAD(self, *args):
pass
def POST(self, *args):
pass
def PUT(self, *args):
pass
def DELETE(self, *args):
pass
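# A web.py handler built on Base only overrides the verbs it needs, e.g.
# (hypothetical, not part of CirruxCache):
#     class NotFound(Base):
#         def GET(self, *args):
#             return '%d %s' % (404, httpResponses[404])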
| gpl-2.0 | 7,745,352,291,298,996,000 | 25.395349 | 81 | 0.69163 | false |
summerzhangft/summer | article/models.py | 1 | 1104 | from django.db import models
from tag.models import Tag
from mistune import markdown
from django.utils import timezone
from django.contrib.auth.models import User
class Article(models.Model):
title = models.CharField(max_length=100)
raw_content = models.TextField(blank=True)
tags = models.ManyToManyField(Tag)
author = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
background = models.CharField(null=True,max_length=500)
description = models.CharField(max_length=200,null=True)
vote = models.IntegerField(default=0)
pub_date=models.DateTimeField(editable=False)
@property
def render_content(self):
return markdown(self.raw_content)
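    # Quick illustration (field values made up): raw Markdown is converted on
    # access, e.g. Article(title='Hello', raw_content='**hi**').render_content
    # -> '<p><strong>hi</strong></p>\n' via mistune.markdown.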
@property
def pub_time_format(self):
return self.pub_date.strftime('%B %d, %Y')
def save(self,*args,**kwargs):
if not self.pub_date:
self.pub_date=timezone.now()
super(Article,self).save(*args,**kwargs)
def __str__(self):
return self.title
class Meta:
ordering = ('-pub_date',)
# Create your models here.
| gpl-3.0 | -3,490,386,367,264,015,000 | 29.666667 | 73 | 0.669384 | false |
etoki/0726_biginning | landingPage/bootstrap/settings.py | 1 | 3048 | import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8zadu3+@^3*glz12%eyx1v4rbe0f)^0%2l-x923jg!p&7*40%('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bootstrap.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates"),],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.static',
],
},
},
]
WSGI_APPLICATION = 'bootstrap.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = ''
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
) | mit | -7,993,397,726,805,836,000 | 25.982301 | 91 | 0.678806 | false |
liavkoren/djangoDev | tests/generic_views/urls.py | 1 | 12651 | from django.conf.urls import url
from django.views.decorators.cache import cache_page
from django.views.generic import TemplateView
from . import models
from . import views
urlpatterns = [
# TemplateView
url(r'^template/no_template/$',
TemplateView.as_view()),
url(r'^template/simple/(?P<foo>\w+)/$',
TemplateView.as_view(template_name='generic_views/about.html')),
url(r'^template/custom/(?P<foo>\w+)/$',
views.CustomTemplateView.as_view(template_name='generic_views/about.html')),
url(r'^template/content_type/$',
TemplateView.as_view(template_name='generic_views/robots.txt', content_type='text/plain')),
url(r'^template/cached/(?P<foo>\w+)/$',
cache_page(2.0)(TemplateView.as_view(template_name='generic_views/about.html'))),
# DetailView
url(r'^detail/obj/$',
views.ObjectDetail.as_view()),
url(r'^detail/artist/(?P<pk>\d+)/$',
views.ArtistDetail.as_view(),
name="artist_detail"),
url(r'^detail/author/(?P<pk>\d+)/$',
views.AuthorDetail.as_view(),
name="author_detail"),
url(r'^detail/author/bycustompk/(?P<foo>\d+)/$',
views.AuthorDetail.as_view(pk_url_kwarg='foo')),
url(r'^detail/author/byslug/(?P<slug>[\w-]+)/$',
views.AuthorDetail.as_view()),
url(r'^detail/author/bycustomslug/(?P<foo>[\w-]+)/$',
views.AuthorDetail.as_view(slug_url_kwarg='foo')),
url(r'^detail/author/(?P<pk>\d+)/template_name_suffix/$',
views.AuthorDetail.as_view(template_name_suffix='_view')),
url(r'^detail/author/(?P<pk>\d+)/template_name/$',
views.AuthorDetail.as_view(template_name='generic_views/about.html')),
url(r'^detail/author/(?P<pk>\d+)/context_object_name/$',
views.AuthorDetail.as_view(context_object_name='thingy')),
url(r'^detail/author/(?P<pk>\d+)/dupe_context_object_name/$',
views.AuthorDetail.as_view(context_object_name='object')),
url(r'^detail/page/(?P<pk>\d+)/field/$',
views.PageDetail.as_view()),
url(r'^detail/author/invalid/url/$',
views.AuthorDetail.as_view()),
url(r'^detail/author/invalid/qs/$',
views.AuthorDetail.as_view(queryset=None)),
url(r'^detail/nonmodel/1/$',
views.NonModelDetail.as_view()),
url(r'^detail/doesnotexist/(?P<pk>\d+)/$',
views.ObjectDoesNotExistDetail.as_view()),
# FormView
url(r'^contact/$',
views.ContactView.as_view()),
# Create/UpdateView
url(r'^edit/artists/create/$',
views.ArtistCreate.as_view()),
url(r'^edit/artists/(?P<pk>\d+)/update/$',
views.ArtistUpdate.as_view()),
url(r'^edit/authors/create/naive/$',
views.NaiveAuthorCreate.as_view()),
url(r'^edit/authors/create/redirect/$',
views.NaiveAuthorCreate.as_view(success_url='/edit/authors/create/')),
url(r'^edit/authors/create/interpolate_redirect/$',
views.NaiveAuthorCreate.as_view(success_url='/edit/author/%(id)d/update/')),
url(r'^edit/authors/create/restricted/$',
views.AuthorCreateRestricted.as_view()),
url(r'^edit/authors/create/$',
views.AuthorCreate.as_view()),
url(r'^edit/authors/create/special/$',
views.SpecializedAuthorCreate.as_view()),
url(r'^edit/author/(?P<pk>\d+)/update/naive/$',
views.NaiveAuthorUpdate.as_view()),
url(r'^edit/author/(?P<pk>\d+)/update/redirect/$',
views.NaiveAuthorUpdate.as_view(success_url='/edit/authors/create/')),
url(r'^edit/author/(?P<pk>\d+)/update/interpolate_redirect/$',
views.NaiveAuthorUpdate.as_view(success_url='/edit/author/%(id)d/update/')),
url(r'^edit/author/(?P<pk>\d+)/update/$',
views.AuthorUpdate.as_view()),
url(r'^edit/author/update/$',
views.OneAuthorUpdate.as_view()),
url(r'^edit/author/(?P<pk>\d+)/update/special/$',
views.SpecializedAuthorUpdate.as_view()),
url(r'^edit/author/(?P<pk>\d+)/delete/naive/$',
views.NaiveAuthorDelete.as_view()),
url(r'^edit/author/(?P<pk>\d+)/delete/redirect/$',
views.NaiveAuthorDelete.as_view(success_url='/edit/authors/create/')),
url(r'^edit/author/(?P<pk>\d+)/delete/interpolate_redirect/$',
views.NaiveAuthorDelete.as_view(success_url='/edit/authors/create/?deleted=%(id)s')),
url(r'^edit/author/(?P<pk>\d+)/delete/$',
views.AuthorDelete.as_view()),
url(r'^edit/author/(?P<pk>\d+)/delete/special/$',
views.SpecializedAuthorDelete.as_view()),
# ArchiveIndexView
url(r'^dates/books/$',
views.BookArchive.as_view()),
url(r'^dates/books/context_object_name/$',
views.BookArchive.as_view(context_object_name='thingies')),
url(r'^dates/books/allow_empty/$',
views.BookArchive.as_view(allow_empty=True)),
url(r'^dates/books/template_name/$',
views.BookArchive.as_view(template_name='generic_views/list.html')),
url(r'^dates/books/template_name_suffix/$',
views.BookArchive.as_view(template_name_suffix='_detail')),
url(r'^dates/books/invalid/$',
views.BookArchive.as_view(queryset=None)),
url(r'^dates/books/paginated/$',
views.BookArchive.as_view(paginate_by=10)),
url(r'^dates/books/reverse/$',
views.BookArchive.as_view(queryset=models.Book.objects.order_by('pubdate'))),
url(r'^dates/books/by_month/$',
views.BookArchive.as_view(date_list_period='month')),
url(r'^dates/booksignings/$',
views.BookSigningArchive.as_view()),
# ListView
url(r'^list/dict/$',
views.DictList.as_view()),
url(r'^list/dict/paginated/$',
views.DictList.as_view(paginate_by=1)),
url(r'^list/artists/$',
views.ArtistList.as_view(),
name="artists_list"),
url(r'^list/authors/$',
views.AuthorList.as_view(),
name="authors_list"),
url(r'^list/authors/paginated/$',
views.AuthorList.as_view(paginate_by=30)),
url(r'^list/authors/paginated/(?P<page>\d+)/$',
views.AuthorList.as_view(paginate_by=30)),
url(r'^list/authors/paginated-orphaned/$',
views.AuthorList.as_view(paginate_by=30, paginate_orphans=2)),
url(r'^list/authors/notempty/$',
views.AuthorList.as_view(allow_empty=False)),
url(r'^list/authors/notempty/paginated/$',
views.AuthorList.as_view(allow_empty=False, paginate_by=2)),
url(r'^list/authors/template_name/$',
views.AuthorList.as_view(template_name='generic_views/list.html')),
url(r'^list/authors/template_name_suffix/$',
views.AuthorList.as_view(template_name_suffix='_objects')),
url(r'^list/authors/context_object_name/$',
views.AuthorList.as_view(context_object_name='author_list')),
url(r'^list/authors/dupe_context_object_name/$',
views.AuthorList.as_view(context_object_name='object_list')),
url(r'^list/authors/invalid/$',
views.AuthorList.as_view(queryset=None)),
url(r'^list/authors/paginated/custom_class/$',
views.AuthorList.as_view(paginate_by=5, paginator_class=views.CustomPaginator)),
url(r'^list/authors/paginated/custom_page_kwarg/$',
views.AuthorList.as_view(paginate_by=30, page_kwarg='pagina')),
url(r'^list/authors/paginated/custom_constructor/$',
views.AuthorListCustomPaginator.as_view()),
# YearArchiveView
# Mixing keyword and positional captures below is intentional; the views
# ought to be able to accept either.
url(r'^dates/books/(?P<year>\d{4})/$',
views.BookYearArchive.as_view()),
url(r'^dates/books/(?P<year>\d{4})/make_object_list/$',
views.BookYearArchive.as_view(make_object_list=True)),
url(r'^dates/books/(?P<year>\d{4})/allow_empty/$',
views.BookYearArchive.as_view(allow_empty=True)),
url(r'^dates/books/(?P<year>\d{4})/allow_future/$',
views.BookYearArchive.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>\d{4})/paginated/$',
views.BookYearArchive.as_view(make_object_list=True, paginate_by=30)),
url(r'^dates/books/no_year/$',
views.BookYearArchive.as_view()),
url(r'^dates/books/(?P<year>\d{4})/reverse/$',
views.BookYearArchive.as_view(queryset=models.Book.objects.order_by('pubdate'))),
url(r'^dates/booksignings/(?P<year>\d{4})/$',
views.BookSigningYearArchive.as_view()),
# MonthArchiveView
url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/$',
views.BookMonthArchive.as_view()),
url(r'^dates/books/(?P<year>\d{4})/(?P<month>\d{1,2})/$',
views.BookMonthArchive.as_view(month_format='%m')),
url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/allow_empty/$',
views.BookMonthArchive.as_view(allow_empty=True)),
url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/allow_future/$',
views.BookMonthArchive.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/paginated/$',
views.BookMonthArchive.as_view(paginate_by=30)),
url(r'^dates/books/(?P<year>\d{4})/no_month/$',
views.BookMonthArchive.as_view()),
url(r'^dates/booksignings/(?P<year>\d{4})/(?P<month>[a-z]{3})/$',
views.BookSigningMonthArchive.as_view()),
# WeekArchiveView
url(r'^dates/books/(?P<year>\d{4})/week/(?P<week>\d{1,2})/$',
views.BookWeekArchive.as_view()),
url(r'^dates/books/(?P<year>\d{4})/week/(?P<week>\d{1,2})/allow_empty/$',
views.BookWeekArchive.as_view(allow_empty=True)),
url(r'^dates/books/(?P<year>\d{4})/week/(?P<week>\d{1,2})/allow_future/$',
views.BookWeekArchive.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>\d{4})/week/(?P<week>\d{1,2})/paginated/$',
views.BookWeekArchive.as_view(paginate_by=30)),
url(r'^dates/books/(?P<year>\d{4})/week/no_week/$',
views.BookWeekArchive.as_view()),
url(r'^dates/books/(?P<year>\d{4})/week/(?P<week>\d{1,2})/monday/$',
views.BookWeekArchive.as_view(week_format='%W')),
url(r'^dates/booksignings/(?P<year>\d{4})/week/(?P<week>\d{1,2})/$',
views.BookSigningWeekArchive.as_view()),
# DayArchiveView
url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/$',
views.BookDayArchive.as_view()),
url(r'^dates/books/(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})/$',
views.BookDayArchive.as_view(month_format='%m')),
url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/allow_empty/$',
views.BookDayArchive.as_view(allow_empty=True)),
url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/allow_future/$',
views.BookDayArchive.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/allow_empty_and_future/$',
views.BookDayArchive.as_view(allow_empty=True, allow_future=True)),
url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/paginated/$',
views.BookDayArchive.as_view(paginate_by=True)),
url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/no_day/$',
views.BookDayArchive.as_view()),
url(r'^dates/booksignings/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/$',
views.BookSigningDayArchive.as_view()),
# TodayArchiveView
url(r'^dates/books/today/$',
views.BookTodayArchive.as_view()),
url(r'^dates/books/today/allow_empty/$',
views.BookTodayArchive.as_view(allow_empty=True)),
url(r'^dates/booksignings/today/$',
views.BookSigningTodayArchive.as_view()),
# DateDetailView
url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/(?P<pk>\d+)/$',
views.BookDetail.as_view()),
url(r'^dates/books/(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})/(?P<pk>\d+)/$',
views.BookDetail.as_view(month_format='%m')),
url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/(?P<pk>\d+)/allow_future/$',
views.BookDetail.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/nopk/$',
views.BookDetail.as_view()),
url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/byslug/(?P<slug>[\w-]+)/$',
views.BookDetail.as_view()),
url(r'^dates/books/get_object_custom_queryset/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/(?P<pk>\d+)/$',
views.BookDetailGetObjectCustomQueryset.as_view()),
url(r'^dates/booksignings/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/(?P<pk>\d+)/$',
views.BookSigningDetail.as_view()),
# Useful for testing redirects
url(r'^accounts/login/$', 'django.contrib.auth.views.login')
]
| bsd-3-clause | 3,350,932,227,050,310,700 | 47.471264 | 118 | 0.617343 | false |
sbergot/python | efront/repo.py | 1 | 2666 | import logging
import re
import os
from efront import iohelpers as io
DEV_DIR = r"c:\dev4.1"
ROOTS = [
r"c:\svn",
r"c:\git",
]
def get_current_target():
regex = re.compile("<JUNCTION> +dev4\.1 \[(.*)\]")
matches = []
def get_match(line):
m = regex.search(str(line))
if m is None:
return
matches.append(m.group(1))
io.cmd("dir", cwd="c:\\", logger=get_match)
assert len(matches) == 1, "multiple junctions found: {}".format(matches)
return matches[0]
def remove_junction(junction_path):
io.cmd("rmdir {}".format(junction_path), logger=logging.debug)
def create_junction(dev_dir, srcdir):
logging.info("creating a junction to the repository between {} and {}".format(dev_dir, srcdir))
io.cmd("mklink /J {} {}".format(dev_dir, os.path.abspath(srcdir)), logger=logging.debug)
def switch(srcdir):
if os.path.exists(DEV_DIR):
remove_junction(DEV_DIR)
create_junction(DEV_DIR, srcdir)
if os.path.exists(os.path.join(DEV_DIR, "Switch.cmd")):
logging.info("Running Switch.cmd")
io.cmd("Switch.cmd", cwd=DEV_DIR, logger=logging.getLogger("Switch.cmd").debug)
def find_src_dir(path):
true_dirs = filter(os.path.exists, [os.path.join(root, path) for root in ROOTS] + [os.path.abspath(path)])
true_dirs = list(set(true_dirs))
if len(true_dirs) == 0:
raise Exception("{} not found".format(path))
if len(true_dirs) > 1:
print("\n".join("{} - {}".format(i, p) for i, p in enumerate(true_dirs)))
selection = int(raw_input("please select source: "))
else:
selection = 0
return true_dirs[selection]
class Target:
root_names = list(map(os.path.basename, ROOTS))
root_names.sort()
def __init__(self, name):
self.name = name
self.srcs = set()
def add(self, root):
self.srcs.add(os.path.basename(root))
def _get_src(self, root):
return root if root in self.srcs else " " * len(root)
def __str__(self):
return " ".join([self._get_src(root) for root in self.root_names] + [self.name])
def list_dirs(log):
log("available dirs:")
dirs = {}
for root in ROOTS:
for dirname in os.listdir(root):
if not os.path.exists(os.path.join(root, dirname, "msbuild_RSK.bat")):
continue
if not dirname in dirs:
dirs[dirname] = Target(dirname)
dirs[dirname].add(root)
dirs_list = list(set(dirs))
dirs_list.sort()
for dirname in dirs_list:
log(str(dirs[dirname]))
| bsd-3-clause | 7,354,850,230,155,652,000 | 29.738095 | 110 | 0.582146 | false |
hydrogo/hydropy | hydropy/baseflow.py | 1 | 2682 | # -*- coding: utf-8 -*-
"""
Hydropy package
@author: Stijn Van Hoey
"""
def get_baseflow_chapman(flowserie, recession_time):
"""
Parameters
----------
flowserie : pd.TimeSeries
River discharge flowserie
recession_time : float [0-1]
recession constant
Notes
------
$$Q_b(i) = \frac{k}{2-k}Q_b(i-1) + \frac{1-k}{2-k}Q(i)$$
"""
if not isinstance(flowserie, pd.TimeSeries):
raise Exception("Not a pd.TimeSerie as input")
secterm = (1.-recession_time)*flowserie/(2.-recession_time)
baseflow = np.empty(flowserie.shape[0])
for i, timestep in enumerate(baseflow):
if i == 0:
baseflow[i] = 0.0
else:
baseflow[i] = recession_time*baseflow[i-1]/(2.-recession_time) + \
secterm.values[i]
return pd.TimeSeries(baseflow, index = flowserie.index)
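# Illustrative usage sketch for the filters in this module (hypothetical data,
# assuming the old pandas API where pd.TimeSeries is available; on recent
# pandas versions pd.Series would be needed instead):
#
#   flow = pd.TimeSeries([5.0, 4.2, 3.9, 6.1, 5.5],
#                        index=pd.date_range('2000-01-01', periods=5))
#   qb = get_baseflow_chapman(flow, recession_time=0.95)
#   # qb holds the filtered baseflow component, indexed like `flow`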
def get_baseflow_boughton(flowserie, recession_time, baseflow_index):
"""
Parameters
----------
flowserie : pd.TimeSeries
River discharge flowserie
recession_time : float [0-1]
recession constant
baseflow_index : float
Notes
------
$$Q_b(i) = \frac{k}{1+C}Q_b(i-1) + \frac{C}{1+C}Q(i)$$
"""
if not isinstance(flowserie, pd.TimeSeries):
raise Exception("Not a pd.TimeSerie as input")
parC = baseflow_index
secterm = parC*flowserie/(1 + parC)
baseflow = np.empty(flowserie.shape[0])
for i, timestep in enumerate(baseflow):
if i == 0:
baseflow[i] = 0.0
else:
baseflow[i] = recession_time*baseflow[i-1]/(1 + parC) + \
secterm.values[i]
return pd.TimeSeries(baseflow, index = flowserie.index)
def get_baseflow_ihacres(flowserie, recession_time, baseflow_index, alfa):
"""
Parameters
----------
flowserie : pd.TimeSeries
River discharge flowserie
recession_time : float [0-1]
recession constant
Notes
------
$$Q_b(i) = \frac{k}{1+C}Q_b(i-1) + \frac{C}{1+C}[Q(i)+\alpha Q(i-1)]$$
$\alpha$ < 0.
"""
if not isinstance(flowserie, pd.TimeSeries):
raise Exception("Not a pd.TimeSerie as input")
parC = baseflow_index
secterm = parC/(1 + parC)
baseflow = np.empty(flowserie.shape[0])
for i, timestep in enumerate(baseflow):
if i == 0:
baseflow[i] = 0.0
else:
baseflow[i] = recession_time*baseflow[i-1]/(1 + parC) + \
secterm*(flowserie.values[i] + \
alfa*flowserie.values[i-1])
return pd.TimeSeries(baseflow, index = flowserie.index) | bsd-2-clause | -665,022,005,729,298,800 | 25.048544 | 78 | 0.557047 | false |
qqzwc/XX-Net | code/default/x_tunnel/local/heroku_front/cert_util.py | 1 | 22349 | #!/usr/bin/env python
# coding:utf-8
import os
import sys
import glob
import binascii
import time
import random
import base64
import hashlib
import threading
import subprocess
current_path = os.path.dirname(os.path.abspath(__file__))
python_path = os.path.abspath( os.path.join(current_path, os.pardir, os.pardir, 'python27', '1.0'))
root_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir))
data_path = os.path.abspath(os.path.join(root_path, os.pardir, os.pardir, 'data', "gae_proxy"))
if not os.path.isdir(data_path):
data_path = current_path
if __name__ == "__main__":
noarch_lib = os.path.abspath( os.path.join(python_path, 'lib', 'noarch'))
sys.path.append(noarch_lib)
if sys.platform == "win32":
win32_lib = os.path.abspath( os.path.join(python_path, 'lib', 'win32'))
sys.path.append(win32_lib)
elif sys.platform == "linux" or sys.platform == "linux2":
linux_lib = os.path.abspath( os.path.join(python_path, 'lib', 'linux'))
sys.path.append(linux_lib)
elif sys.platform == "darwin":
darwin_lib = os.path.abspath( os.path.join(python_path, 'lib', 'darwin'))
sys.path.append(darwin_lib)
from xlog import getLogger
xlog = getLogger("heroku_front")
import OpenSSL
import ssl, datetime
from pyasn1.type import univ, constraint, char, namedtype, tag
from pyasn1.codec.der.decoder import decode
from pyasn1.error import PyAsn1Error
from config import config
def get_cmd_out(cmd):
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out = proc.stdout
lines = out.readlines()
return lines
# ASN.1 structures used by SSLCert.altnames to parse the subjectAltName
# extension (the same definitions as in mitmproxy's certutils, on which this
# SSLCert class is based).
class _GeneralName(univ.Choice):
    # We are only interested in dNSName entries; other name forms are ignored.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('dNSName', char.IA5String().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)
        )),
    )
class _GeneralNames(univ.SequenceOf):
    componentType = _GeneralName()
    sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, 1024)
class SSLCert:
def __init__(self, cert):
"""
Returns a (common name, [subject alternative names]) tuple.
"""
self.x509 = cert
@classmethod
def from_pem(klass, txt):
x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, txt)
return klass(x509)
@classmethod
def from_der(klass, der):
pem = ssl.DER_cert_to_PEM_cert(der)
return klass.from_pem(pem)
def to_pem(self):
return OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, self.x509)
def digest(self, name):
return self.x509.digest(name)
@property
def issuer(self):
return self.x509.get_issuer().get_components()
@property
def notbefore(self):
t = self.x509.get_notBefore()
return datetime.datetime.strptime(t, "%Y%m%d%H%M%SZ")
@property
def notafter(self):
t = self.x509.get_notAfter()
return datetime.datetime.strptime(t, "%Y%m%d%H%M%SZ")
@property
def has_expired(self):
return self.x509.has_expired()
@property
def subject(self):
return self.x509.get_subject().get_components()
@property
def serial(self):
return self.x509.get_serial_number()
@property
def keyinfo(self):
pk = self.x509.get_pubkey()
types = {
OpenSSL.crypto.TYPE_RSA: "RSA",
OpenSSL.crypto.TYPE_DSA: "DSA",
}
return (
types.get(pk.type(), "UNKNOWN"),
pk.bits()
)
@property
def cn(self):
c = None
for i in self.subject:
if i[0] == "CN":
c = i[1]
return c
@property
def altnames(self):
altnames = []
for i in range(self.x509.get_extension_count()):
ext = self.x509.get_extension(i)
if ext.get_short_name() == "subjectAltName":
try:
dec = decode(ext.get_data(), asn1Spec=_GeneralNames())
except PyAsn1Error:
continue
for i in dec[0]:
altnames.append(i[0].asOctets())
return altnames
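# Usage sketch (illustrative): SSLCert.from_pem(pem_text).cn returns the
# certificate's common name, and .altnames lists any DNS subjectAltName
# entries found in the certificate.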
class CertUtil(object):
"""CertUtil module, based on mitmproxy"""
ca_vendor = 'GoAgent' #TODO: here should be XX-Net
ca_keyfile = os.path.join(data_path, 'CA.crt')
ca_thumbprint = ''
ca_certdir = os.path.join(data_path, 'certs')
ca_digest = 'sha256'
ca_lock = threading.Lock()
ca_validity_years = 10
ca_validity = 24 * 60 * 60 * 365 * ca_validity_years
cert_validity_years = 2
cert_validity = 24 * 60 * 60 * 365 * cert_validity_years
@staticmethod
def create_ca():
key = OpenSSL.crypto.PKey()
key.generate_key(OpenSSL.crypto.TYPE_RSA, 2048)
req = OpenSSL.crypto.X509Req()
subj = req.get_subject()
subj.countryName = 'CN'
subj.stateOrProvinceName = 'Internet'
subj.localityName = 'Cernet'
subj.organizationName = CertUtil.ca_vendor
subj.organizationalUnitName = '%s Root' % CertUtil.ca_vendor
subj.commonName = '%s XX-Net' % CertUtil.ca_vendor #TODO: here should be GoAgent
req.set_pubkey(key)
req.sign(key, CertUtil.ca_digest)
ca = OpenSSL.crypto.X509()
ca.set_version(2)
ca.set_serial_number(0)
ca.gmtime_adj_notBefore(0)
ca.gmtime_adj_notAfter(CertUtil.ca_validity)
ca.set_issuer(req.get_subject())
ca.set_subject(req.get_subject())
ca.set_pubkey(req.get_pubkey())
ca.add_extensions([
OpenSSL.crypto.X509Extension(
'basicConstraints', False, 'CA:TRUE', subject=ca, issuer=ca)
])
ca.sign(key, CertUtil.ca_digest)
#xlog.debug("CA key:%s", key)
xlog.info("create CA")
return key, ca
@staticmethod
def generate_ca_file():
xlog.info("generate CA file:%s", CertUtil.ca_keyfile)
key, ca = CertUtil.create_ca()
with open(CertUtil.ca_keyfile, 'wb') as fp:
fp.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, ca))
fp.write(OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key))
@staticmethod
def get_cert_serial_number(commonname):
assert CertUtil.ca_thumbprint
saltname = '%s|%s' % (CertUtil.ca_thumbprint, commonname)
return int(hashlib.md5(saltname.encode('utf-8')).hexdigest(), 16)
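    # The serial is deterministic: for a commonname such as 'example.com' it
    # is int(md5('<ca_thumbprint>|example.com').hexdigest(), 16), so
    # regenerating a host cert under the same CA reuses the same serial.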
@staticmethod
def _get_cert(commonname, sans=()):
with open(CertUtil.ca_keyfile, 'rb') as fp:
content = fp.read()
key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, content)
ca = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, content)
pkey = OpenSSL.crypto.PKey()
pkey.generate_key(OpenSSL.crypto.TYPE_RSA, 2048)
req = OpenSSL.crypto.X509Req()
subj = req.get_subject()
subj.countryName = 'CN'
subj.stateOrProvinceName = 'Internet'
subj.localityName = 'Cernet'
subj.organizationalUnitName = '%s Branch' % CertUtil.ca_vendor
if commonname[0] == '.':
subj.commonName = '*' + commonname
subj.organizationName = '*' + commonname
sans = ['*'+commonname] + [x for x in sans if x != '*'+commonname]
else:
subj.commonName = commonname
subj.organizationName = commonname
sans = [commonname] + [x for x in sans if x != commonname]
#req.add_extensions([OpenSSL.crypto.X509Extension(b'subjectAltName', True, ', '.join('DNS: %s' % x for x in sans)).encode()])
req.set_pubkey(pkey)
req.sign(pkey, CertUtil.ca_digest)
cert = OpenSSL.crypto.X509()
cert.set_version(2)
try:
cert.set_serial_number(CertUtil.get_cert_serial_number(commonname))
except OpenSSL.SSL.Error:
cert.set_serial_number(int(time.time()*1000))
        cert.gmtime_adj_notBefore(-600)  # backdate slightly to avoid cert "not yet valid" errors from clock skew
cert.gmtime_adj_notAfter(CertUtil.cert_validity)
cert.set_issuer(ca.get_subject())
cert.set_subject(req.get_subject())
cert.set_pubkey(req.get_pubkey())
if commonname[0] == '.':
sans = ['*'+commonname] + [s for s in sans if s != '*'+commonname]
else:
sans = [commonname] + [s for s in sans if s != commonname]
cert.add_extensions([OpenSSL.crypto.X509Extension(b'subjectAltName', True, ', '.join('DNS: %s' % x for x in sans))])
cert.sign(key, CertUtil.ca_digest)
certfile = os.path.join(CertUtil.ca_certdir, commonname + '.crt')
with open(certfile, 'wb') as fp:
fp.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert))
fp.write(OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, pkey))
return certfile
@staticmethod
def _get_cert_cn(commonname, full_name=False):
yield commonname
        # some sites need a full-name cert,
        # e.g. https://about.twitter.com in Google Chrome
if commonname.count('.') >= 2 and [len(x) for x in reversed(commonname.split('.'))] > [2, 4] and not full_name:
yield '.' + commonname.partition('.')[-1]
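    # For example, for commonname 'www.example.com' this yields
    # 'www.example.com' and then '.example.com', so a wildcard certificate
    # for the parent domain can be reused unless full_name is requested.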
@staticmethod
def _get_old_cert(commonname, full_name=False):
for CN in CertUtil._get_cert_cn(commonname, full_name):
certfile = os.path.join(CertUtil.ca_certdir, CN + '.crt')
if os.path.exists(certfile):
if OpenSSL:
with open(certfile, 'rb') as fp:
cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, fp.read())
if datetime.datetime.strptime(cert.get_notAfter(), '%Y%m%d%H%M%SZ') <= datetime.datetime.utcnow():
try:
os.remove(certfile)
except OSError as e:
xlog.warning('CertUtil._get_old_cert failed: unable to remove outdated cert, %r', e)
else:
continue
# well, have to use the old one
return certfile
@staticmethod
def get_cert(commonname, sans=(), full_name=False):
certfile = CertUtil._get_old_cert(commonname, full_name)
if certfile:
return certfile
if OpenSSL is None:
return CertUtil.ca_keyfile
else:
with CertUtil.ca_lock:
certfile = CertUtil._get_old_cert(commonname, full_name)
if certfile:
return certfile
return CertUtil._get_cert(commonname, sans)
@staticmethod
def win32_notify( msg="msg", title="Title"):
import ctypes
res = ctypes.windll.user32.MessageBoxW(None, msg, title, 1)
# Yes:1 No:2
return res
@staticmethod
def import_windows_ca(common_name, certfile):
import ctypes
with open(certfile, 'rb') as fp:
certdata = fp.read()
if certdata.startswith(b'-----'):
begin = b'-----BEGIN CERTIFICATE-----'
end = b'-----END CERTIFICATE-----'
certdata = base64.b64decode(b''.join(certdata[certdata.find(begin)+len(begin):certdata.find(end)].strip().splitlines()))
crypt32 = ctypes.WinDLL(b'crypt32.dll'.decode())
store_handle = crypt32.CertOpenStore(10, 0, 0, 0x4000 | 0x20000, b'ROOT'.decode())
if not store_handle:
return False
CERT_FIND_SUBJECT_STR = 0x00080007
CERT_FIND_HASH = 0x10000
X509_ASN_ENCODING = 0x00000001
class CRYPT_HASH_BLOB(ctypes.Structure):
_fields_ = [('cbData', ctypes.c_ulong), ('pbData', ctypes.c_char_p)]
assert CertUtil.ca_thumbprint
crypt_hash = CRYPT_HASH_BLOB(20, binascii.a2b_hex(CertUtil.ca_thumbprint.replace(':', '')))
crypt_handle = crypt32.CertFindCertificateInStore(store_handle, X509_ASN_ENCODING, 0, CERT_FIND_HASH, ctypes.byref(crypt_hash), None)
if crypt_handle:
crypt32.CertFreeCertificateContext(crypt_handle)
return True
ret = crypt32.CertAddEncodedCertificateToStore(store_handle, 0x1, certdata, len(certdata), 4, None)
crypt32.CertCloseStore(store_handle, 0)
del crypt32
if not ret and __name__ != "__main__":
#res = CertUtil.win32_notify(msg=u'Import GoAgent Ca?', title=u'Authority need')
#if res == 2:
# return -1
import win32elevate
try:
win32elevate.elevateAdminRun(os.path.abspath(__file__))
except Exception as e:
xlog.warning('CertUtil.import_windows_ca failed: %r', e)
return True
else:
            CertUtil.win32_notify(msg=u'The GoAgent certificate has been imported, please restart your browser.', title=u'Restart browser needed.')
return True if ret else False
@staticmethod
def remove_windows_ca(name):
import ctypes
import ctypes.wintypes
class CERT_CONTEXT(ctypes.Structure):
_fields_ = [
('dwCertEncodingType', ctypes.wintypes.DWORD),
('pbCertEncoded', ctypes.POINTER(ctypes.wintypes.BYTE)),
('cbCertEncoded', ctypes.wintypes.DWORD),
('pCertInfo', ctypes.c_void_p),
('hCertStore', ctypes.c_void_p),]
try:
crypt32 = ctypes.WinDLL(b'crypt32.dll'.decode())
store_handle = crypt32.CertOpenStore(10, 0, 0, 0x4000 | 0x20000, b'ROOT'.decode())
pCertCtx = crypt32.CertEnumCertificatesInStore(store_handle, None)
while pCertCtx:
certCtx = CERT_CONTEXT.from_address(pCertCtx)
certdata = ctypes.string_at(certCtx.pbCertEncoded, certCtx.cbCertEncoded)
cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_ASN1, certdata)
if hasattr(cert, 'get_subject'):
cert = cert.get_subject()
cert_name = next((v for k, v in cert.get_components() if k == 'CN'), '')
if cert_name and name == cert_name:
crypt32.CertDeleteCertificateFromStore(crypt32.CertDuplicateCertificateContext(pCertCtx))
pCertCtx = crypt32.CertEnumCertificatesInStore(store_handle, pCertCtx)
except Exception as e:
xlog.warning('CertUtil.remove_windows_ca failed: %r', e)
@staticmethod
def get_linux_firefox_path():
home_path = os.path.expanduser("~")
firefox_path = os.path.join(home_path, ".mozilla/firefox")
if not os.path.isdir(firefox_path):
return
for filename in os.listdir(firefox_path):
if filename.endswith(".default") and os.path.isdir(os.path.join(firefox_path, filename)):
config_path = os.path.join(firefox_path, filename)
return config_path
@staticmethod
def import_linux_firefox_ca(common_name, ca_file):
firefox_config_path = CertUtil.get_linux_firefox_path()
if not firefox_config_path:
return False
if not any(os.path.isfile('%s/certutil' % x) for x in os.environ['PATH'].split(os.pathsep)):
xlog.warning('please install *libnss3-tools* package to import GoAgent root ca')
return False
cmd_line = 'certutil -L -d %s |grep "GoAgent" &&certutil -d %s -D -n "%s" ' % (firefox_config_path, firefox_config_path, common_name)
os.system(cmd_line) # remove old cert first
cmd_line = 'certutil -d %s -A -t "C,," -n "%s" -i "%s"' % (firefox_config_path, common_name, ca_file)
os.system(cmd_line) # install new cert
return True
@staticmethod
def import_debian_ca(common_name, ca_file):
def get_debian_ca_sha1(nss_path):
commonname = "GoAgent XX-Net - GoAgent" #TODO: here should be GoAgent - XX-Net
cmd = ['certutil', '-L','-d', 'sql:%s' % nss_path, '-n', commonname]
lines = get_cmd_out(cmd)
get_sha1_title = False
sha1 = ""
for line in lines:
if line.endswith("Fingerprint (SHA1):\n"):
get_sha1_title = True
continue
if get_sha1_title:
sha1 = line
break
sha1 = sha1.replace(' ', '').replace(':', '').replace('\n', '')
if len(sha1) != 40:
return False
else:
return sha1
home_path = os.path.expanduser("~")
nss_path = os.path.join(home_path, ".pki/nssdb")
if not os.path.isdir(nss_path):
return False
if not any(os.path.isfile('%s/certutil' % x) for x in os.environ['PATH'].split(os.pathsep)):
xlog.warning('please install *libnss3-tools* package to import GoAgent root ca')
return False
sha1 = get_debian_ca_sha1(nss_path)
ca_hash = CertUtil.ca_thumbprint.replace(':', '')
if sha1 == ca_hash:
xlog.info("system cert exist")
return
# shell command to list all cert
# certutil -L -d sql:$HOME/.pki/nssdb
# remove old cert first
cmd_line = 'certutil -L -d sql:$HOME/.pki/nssdb |grep "GoAgent" && certutil -d sql:$HOME/.pki/nssdb -D -n "%s" ' % ( common_name)
os.system(cmd_line)
# install new cert
cmd_line = 'certutil -d sql:$HOME/.pki/nssdb -A -t "C,," -n "%s" -i "%s"' % (common_name, ca_file)
os.system(cmd_line)
return True
@staticmethod
def import_ubuntu_system_ca(common_name, certfile):
import platform
platform_distname = platform.dist()[0]
if platform_distname != 'Ubuntu':
return
pemfile = "/etc/ssl/certs/CA.pem"
new_certfile = "/usr/local/share/ca-certificates/CA.crt"
if not os.path.exists(pemfile) or not CertUtil.file_is_same(certfile, new_certfile):
if os.system('cp "%s" "%s" && update-ca-certificates' % (certfile, new_certfile)) != 0:
xlog.warning('install root certificate failed, Please run as administrator/root/sudo')
@staticmethod
def file_is_same(file1, file2):
BLOCKSIZE = 65536
try:
with open(file1, 'rb') as f1:
buf1 = f1.read(BLOCKSIZE)
except:
return False
try:
with open(file2, 'rb') as f2:
buf2 = f2.read(BLOCKSIZE)
except:
return False
if buf1 != buf2:
return False
else:
return True
@staticmethod
def import_mac_ca(common_name, certfile):
commonname = "GoAgent XX-Net" #TODO: need check again
ca_hash = CertUtil.ca_thumbprint.replace(':', '')
def get_exist_ca_sha1():
args = ['security', 'find-certificate', '-Z', '-a', '-c', commonname]
output = subprocess.check_output(args)
for line in output.splitlines(True):
if len(line) == 53 and line.startswith("SHA-1 hash:"):
sha1_hash = line[12:52]
return sha1_hash
exist_ca_sha1 = get_exist_ca_sha1()
if exist_ca_sha1 == ca_hash:
xlog.info("GoAgent CA exist")
return
import_command = 'security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain ../../../../data/gae_proxy/CA.crt'# % certfile.decode('utf-8')
if exist_ca_sha1:
delete_ca_command = 'security delete-certificate -Z %s' % exist_ca_sha1
exec_command = "%s;%s" % (delete_ca_command, import_command)
else:
exec_command = import_command
admin_command = """osascript -e 'do shell script "%s" with administrator privileges' """ % exec_command
cmd = admin_command.encode('utf-8')
xlog.info("try auto import CA command:%s", cmd)
os.system(cmd)
@staticmethod
def import_ca(certfile):
commonname = "GoAgent XX-Net - GoAgent" #TODO: here should be GoAgent - XX-Net
if sys.platform.startswith('win'):
CertUtil.import_windows_ca(commonname, certfile)
elif sys.platform == 'darwin':
CertUtil.import_mac_ca(commonname, certfile)
elif sys.platform.startswith('linux'):
CertUtil.import_debian_ca(commonname, certfile)
CertUtil.import_linux_firefox_ca(commonname, certfile)
#CertUtil.import_ubuntu_system_ca(commonname, certfile) # we don't need install CA to system root, special user is enough
@staticmethod
def init_ca():
#Check Certs Dir
if not os.path.exists(CertUtil.ca_certdir):
os.makedirs(CertUtil.ca_certdir)
        # Confirm the GoAgent CA exists
if not os.path.exists(CertUtil.ca_keyfile):
xlog.info("no CA file exist")
xlog.info("clean old site certs")
any(os.remove(x) for x in glob.glob(CertUtil.ca_certdir+'/*.crt')+glob.glob(CertUtil.ca_certdir+'/.*.crt'))
if os.name == 'nt':
CertUtil.remove_windows_ca('%s CA' % CertUtil.ca_vendor)
CertUtil.generate_ca_file()
# Load GoAgent CA
with open(CertUtil.ca_keyfile, 'rb') as fp:
CertUtil.ca_thumbprint = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, fp.read()).digest('sha1')
        # Check that existing cached site certs were issued by this CA
certfiles = glob.glob(CertUtil.ca_certdir+'/*.crt')+glob.glob(CertUtil.ca_certdir+'/.*.crt')
if certfiles:
filename = random.choice(certfiles)
commonname = os.path.splitext(os.path.basename(filename))[0]
with open(filename, 'rb') as fp:
serial_number = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, fp.read()).get_serial_number()
if serial_number != CertUtil.get_cert_serial_number(commonname):
any(os.remove(x) for x in certfiles)
CertUtil.import_ca(CertUtil.ca_keyfile)
# change the status,
# web_control /cert_import_status will return True, else return False
# launcher will wait ready to open browser and check update
config.cert_import_ready = True
if __name__ == '__main__':
CertUtil.init_ca()
#TODO:
    # CA common name should be GoAgent, vendor should be XX-Net
    # needs changes and testing on all supported platforms: Windows/Mac/Ubuntu/Debian
| bsd-2-clause | 4,132,843,732,148,384,000 | 37.28988 | 168 | 0.584285 | false |
mvaled/sentry | src/sentry/south_migrations/0291_merge_legacy_releases.py | 1 | 100956 | # -*- coding: utf-8 -*-
import re
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import IntegrityError, models, transaction
def is_full_sha(version):
# sha1 or md5
return bool(re.match(r'[a-f0-9]{40}$', version) or re.match(r'[a-f0-9]{32}$', version))
def is_short_sha(version):
# short sha
return bool(re.match(r'[a-f0-9]{7,40}$', version))
def is_semver_like(version):
return bool(re.match(r'([a-z]*)(\-)?v?(?:\d+\.)*\d+', version))
def is_travis_build(version):
# TRAVIS_12345
return bool(re.match(r'(travis)(\_|\-)([a-f0-9]{1,40}$)', version, re.IGNORECASE))
def is_jenkins_build(version):
# jenkins-123-abcdeff
return bool(
re.match(r'(jenkins)(\_|\-)([0-9]{1,40})(\_|\-)([a-f0-9]{5,40}$)', version, re.IGNORECASE)
)
def is_head_tag(version):
# HEAD-abcdefg, master@abcdeff, master(abcdeff)
return bool(
re.match(r'(head|master|qa)(\_|\-|\@|\()([a-f0-9]{6,40})(\)?)$', version, re.IGNORECASE)
)
def is_short_sha_and_date(version):
# abcdefg-2016-03-16
return bool(re.match(r'([a-f0-9]{7,40})-(\d{4})-(\d{2})-(\d{2})', version))
def is_word_and_date(version):
# release-2016-01-01
return bool(re.match(r'([a-z]*)-(\d{4})-(\d{2})-(\d{2})', version))
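# Quick reference with made-up sample versions (not exhaustive): a 40-char hex
# digest matches is_full_sha, 'abc1234' matches is_short_sha, 'v2.0' and
# 'myapp-1.2.3' match is_semver_like, 'TRAVIS_1234' matches is_travis_build,
# 'jenkins-42-abcdef0' matches is_jenkins_build, 'master@abcdef0' matches
# is_head_tag, 'abcdef0-2016-03-16' matches is_short_sha_and_date and
# 'release-2016-01-01' matches is_word_and_date.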
def merge(to_release, from_releases, sentry_models):
# The following models reference release:
# ReleaseCommit.release
# ReleaseEnvironment.release_id
# ReleaseProject.release
# GroupRelease.release_id
# GroupResolution.release
# Group.first_release
# ReleaseFile.release
model_list = (
sentry_models.ReleaseCommit, sentry_models.ReleaseEnvironment, sentry_models.ReleaseFile,
sentry_models.ReleaseProject, sentry_models.GroupRelease, sentry_models.GroupResolution
)
for release in from_releases:
for model in model_list:
if hasattr(model, 'release'):
update_kwargs = {'release': to_release}
else:
update_kwargs = {'release_id': to_release.id}
try:
with transaction.atomic():
model.objects.filter(release_id=release.id).update(**update_kwargs)
except IntegrityError:
for item in model.objects.filter(release_id=release.id):
try:
with transaction.atomic():
model.objects.filter(id=item.id).update(**update_kwargs)
except IntegrityError:
item.delete()
sentry_models.Group.objects.filter(first_release=release).update(first_release=to_release)
release.delete()
def update_version(release, sentry_models):
old_version = release.version
try:
project_slug = release.projects.values_list('slug', flat=True)[0]
except IndexError:
# delete releases if they have no projects
release.delete()
return
new_version = ('%s-%s' % (project_slug, old_version))[:64]
sentry_models.Release.objects.filter(id=release.id).update(version=new_version)
sentry_models.TagValue.objects.filter(
project__in=release.projects.all(), key='sentry:release', value=old_version
).update(value=new_version)
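# For example, a release with version 'abc123' whose first project has slug
# 'backend' is renamed to 'backend-abc123' (truncated to 64 characters), and
# the matching 'sentry:release' tag values are updated to the new version.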
class Migration(DataMigration):
def forwards(self, orm):
db.commit_transaction()
dupe_releases = orm.Release.objects.values_list('version', 'organization_id')\
.annotate(vcount=models.Count('id'))\
.filter(vcount__gt=1)
for version, org_id in dupe_releases:
releases = list(
orm.Release.objects.filter(organization_id=org_id, version=version)
.order_by('date_added')
)
releases_with_files = list(
orm.ReleaseFile.objects.filter(
release__in=releases).values_list(
'release_id', flat=True).distinct()
)
# if multiple releases have files, just rename them
# instead of trying to merge
if len(releases_with_files) > 1:
for release in releases:
update_version(release, orm)
continue
if len(releases_with_files) == 1:
from_releases = []
for release in releases:
if release.id == releases_with_files[0]:
to_release = release
else:
from_releases.append(release)
else:
to_release = releases[0]
from_releases = releases[1:]
if is_full_sha(version):
merge(to_release=to_release, from_releases=from_releases, sentry_models=orm)
continue
affected_projects = set()
for release in releases:
affected_projects.update(
[p for p in release.projects.values_list('slug', flat=True)]
)
has_prod = False
has_staging = False
has_dev = False
for p in affected_projects:
if 'prod' in p:
has_prod = True
elif 'stag' in p or 'stg' in p:
has_staging = True
elif 'dev' in p:
has_dev = True
# assume projects are split by environment if there
# are at least prod/staging or prod/dev, etc
projects_split_by_env = len([x for x in [has_prod, has_dev, has_staging] if x]) >= 2
# compare date_added
date_diff = None
dates = [release.date_added for release in releases]
if dates:
diff = (max(dates) - min(dates)).total_seconds()
if date_diff is None or diff > date_diff:
date_diff = diff
if is_short_sha(version) or \
is_head_tag(version) or \
is_short_sha_and_date(version):
# if projects are across multiple environments, allow 1 week difference
if projects_split_by_env and date_diff and date_diff < 604800:
merge(to_release=to_release, from_releases=from_releases, sentry_models=orm)
continue
# +/- 8 hours
if date_diff and date_diff > 28800:
for release in releases:
update_version(release, orm)
else:
merge(to_release=to_release, from_releases=from_releases, sentry_models=orm)
continue
if is_semver_like(version):
# check ref string and urls
refs = {release.ref for release in releases}
urls = {release.url for release in releases}
if (len(refs) == 1 and None not in refs) or (len(urls) == 1 and None not in urls):
merge(to_release=to_release, from_releases=from_releases, sentry_models=orm)
continue
# if projects are across multiple environments, allow 1 week difference
if projects_split_by_env and date_diff and date_diff < 604800:
merge(to_release=to_release, from_releases=from_releases, sentry_models=orm)
continue
# +/- 30 mins
if date_diff and date_diff > 1800:
for release in releases:
update_version(release, orm)
else:
merge(to_release=to_release, from_releases=from_releases, sentry_models=orm)
continue
if len(version) >= 20 or is_travis_build(version) or \
is_jenkins_build(version) or \
is_word_and_date(version):
# if projects are across multiple environments, allow 1 week difference
if projects_split_by_env and date_diff and date_diff < 604800:
merge(to_release=to_release, from_releases=from_releases, sentry_models=orm)
continue
# +/- 4 hours
if date_diff and date_diff > 14400:
for release in releases:
update_version(release, orm)
else:
merge(to_release=to_release, from_releases=from_releases, sentry_models=orm)
continue
# if we made it this far, assume we should just rename
for release in releases:
update_version(release, orm)
db.start_transaction()
def backwards(self, orm):
"Write your backwards methods here."
pass
models = {
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.apikey': {
'Meta': {
'object_name': 'ApiKey'
},
'allowed_origins':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32'
}),
'label': (
'django.db.models.fields.CharField', [], {
'default': "'Default'",
'max_length': '64',
'blank': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Organization']"
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.apitoken': {
'Meta': {
'object_name': 'ApiToken'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True'
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'token':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.auditlogentry': {
'Meta': {
'object_name': 'AuditLogEntry'
},
'actor': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_actors'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'actor_key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True',
'blank': 'True'
}
),
'actor_label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'target_object':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'target_user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_targets'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.authenticator': {
'Meta': {
'unique_together': "(('user', 'type'),)",
'object_name': 'Authenticator',
'db_table': "'auth_authenticator'"
},
'config': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}),
'created_at':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authidentity': {
'Meta': {
'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))",
'object_name': 'AuthIdentity'
},
'auth_provider': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.AuthProvider']"
}
),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'last_synced':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_verified':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authprovider': {
'Meta': {
'object_name': 'AuthProvider'
},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_global_access':
('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'default_role':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'default_teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'unique': 'True'
}
),
'provider': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'sync_time':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
})
},
'sentry.broadcast': {
'Meta': {
'object_name': 'Broadcast'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_expires': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime(2017, 2, 2, 0, 0)',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active':
('django.db.models.fields.BooleanField', [], {
'default': 'True',
'db_index': 'True'
}),
'link': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.CharField', [], {
'max_length': '256'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'upstream_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.broadcastseen': {
'Meta': {
'unique_together': "(('broadcast', 'user'),)",
'object_name': 'BroadcastSeen'
},
'broadcast': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Broadcast']"
}
),
'date_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.commit': {
'Meta': {
'unique_together': "(('repository_id', 'key'),)",
'object_name': 'Commit',
'index_together': "(('repository_id', 'date_added'),)"
},
'author': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.CommitAuthor']",
'null': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'message': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'repository_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {
'unique_together': "(('organization_id', 'email'),)",
'object_name': 'CommitAuthor'
},
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.commitfilechange': {
'Meta': {
'unique_together': "(('commit', 'filename'),)",
'object_name': 'CommitFileChange'
},
'commit': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Commit']"
}
),
'filename': ('django.db.models.fields.CharField', [], {
'max_length': '255'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '1'
})
},
'sentry.counter': {
'Meta': {
'object_name': 'Counter',
'db_table': "'sentry_projectcounter'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'unique': 'True'
}
),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.dsymbundle': {
'Meta': {
'object_name': 'DSymBundle'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymObject']"
}
),
'sdk': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymSDK']"
}
)
},
'sentry.dsymobject': {
'Meta': {
'object_name': 'DSymObject'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_path': ('django.db.models.fields.TextField', [], {
'db_index': 'True'
}),
'uuid':
('django.db.models.fields.CharField', [], {
'max_length': '36',
'db_index': 'True'
}),
'vmaddr':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'vmsize':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
})
},
'sentry.dsymsdk': {
'Meta': {
'object_name':
'DSymSDK',
'index_together':
"[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]"
},
'dsym_type':
('django.db.models.fields.CharField', [], {
'max_length': '20',
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'sdk_name': ('django.db.models.fields.CharField', [], {
'max_length': '20'
}),
'version_build': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'version_major': ('django.db.models.fields.IntegerField', [], {}),
'version_minor': ('django.db.models.fields.IntegerField', [], {}),
'version_patchlevel': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.dsymsymbol': {
'Meta': {
'unique_together': "[('object', 'address')]",
'object_name': 'DSymSymbol'
},
'address':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymObject']"
}
),
'symbol': ('django.db.models.fields.TextField', [], {})
},
'sentry.environment': {
'Meta': {
'unique_together': "(('project_id', 'name'),)",
'object_name': 'Environment'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.event': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'",
'index_together': "(('group_id', 'datetime'),)"
},
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'time_spent':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventtag': {
'Meta': {
'unique_together':
"(('event_id', 'key_id', 'value_id'),)",
'object_name':
'EventTag',
'index_together':
"(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {
'unique_together':
"(('project', 'ident'), ('project', 'hash'))",
'object_name':
'EventUser',
'index_together':
"(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'username':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
})
},
'sentry.file': {
'Meta': {
'object_name': 'File'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'legacy_blob'",
'null': 'True',
'to': "orm['sentry.FileBlob']"
}
),
'blobs': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.FileBlob']",
'through': "orm['sentry.FileBlobIndex']",
'symmetrical': 'False'
}
),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '40',
'null': 'True'
}),
'headers': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.fileblob': {
'Meta': {
'object_name': 'FileBlob'
},
'checksum':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
)
},
'sentry.fileblobindex': {
'Meta': {
'unique_together': "(('file', 'blob', 'offset'),)",
'object_name': 'FileBlobIndex'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.FileBlob']"
}
),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.globaldsymfile': {
'Meta': {
'object_name': 'GlobalDSymFile'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'uuid':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '36'
})
},
'sentry.group': {
'Meta': {
'unique_together': "(('project', 'short_id'),)",
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'",
'index_together': "(('project', 'first_release'),)"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']",
'null': 'True',
'on_delete': 'models.PROTECT'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'short_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'time_spent_total':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'times_seen': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupassignee': {
'Meta': {
'object_name': 'GroupAssignee',
'db_table': "'sentry_groupasignee'"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'unique': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_assignee_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupemailthread': {
'Meta': {
'unique_together': "(('email', 'group'), ('email', 'msgid'))",
'object_name': 'GroupEmailThread'
},
'date': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'msgid': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Project']"
}
)
},
'sentry.grouphash': {
'Meta': {
'unique_together': "(('project', 'hash'),)",
'object_name': 'GroupHash'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {
'object_name': 'GroupRedirect'
},
'group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'previous_group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'unique': 'True'
})
},
'sentry.grouprelease': {
'Meta': {
'unique_together': "(('group_id', 'release_id', 'environment'),)",
'object_name': 'GroupRelease'
},
'environment':
('django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64'
}),
'first_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.groupresolution': {
'Meta': {
'object_name': 'GroupResolution'
},
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouprulestatus': {
'Meta': {
'unique_together': "(('rule', 'group'),)",
'object_name': 'GroupRuleStatus'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_active': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'rule': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Rule']"
}
),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.groupsnooze': {
'Meta': {
'object_name': 'GroupSnooze'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.groupsubscription': {
'Meta': {
'unique_together': "(('group', 'user'),)",
'object_name': 'GroupSubscription'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'subscription_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'subscription_set'",
'to': "orm['sentry.Project']"
}
),
'reason':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('group', 'key', 'value'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'",
'index_together': "(('project', 'key', 'value', 'last_seen'),)"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'null': 'True',
'to': "orm['sentry.Project']"
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'last_updated':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {
'object_name': 'Organization'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'org_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.organizationaccessrequest': {
'Meta': {
'unique_together': "(('team', 'member'),)",
'object_name': 'OrganizationAccessRequest'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'member': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationavatar': {
'Meta': {
'object_name': 'OrganizationAvatar'
},
'avatar_type':
('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']",
'unique': 'True',
'null': 'True',
'on_delete': 'models.SET_NULL'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': (
'django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32',
'db_index': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'avatar'",
'unique': 'True',
'to': "orm['sentry.Organization']"
}
)
},
'sentry.organizationmember': {
'Meta': {
'unique_together': "(('organization', 'user'), ('organization', 'email'))",
'object_name': 'OrganizationMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': (
'django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Organization']"
}
),
'role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMemberTeam']",
'blank': 'True'
}
),
'token': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'unique': 'True',
'null': 'True',
'blank': 'True'
}
),
'type': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'sentry_orgmember_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.organizationmemberteam': {
'Meta': {
'unique_together': "(('team', 'organizationmember'),)",
'object_name': 'OrganizationMemberTeam',
'db_table': "'sentry_organizationmember_teams'"
},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'organizationmember': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationonboardingtask': {
'Meta': {
'unique_together': "(('organization', 'task'),)",
'object_name': 'OrganizationOnboardingTask'
},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_completed':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.organizationoption': {
'Meta': {
'unique_together': "(('organization', 'key'),)",
'object_name': 'OrganizationOption',
'db_table': "'sentry_organizationoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'), ('organization', 'slug'))",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'first_event': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'forced_color': (
'django.db.models.fields.CharField', [], {
'max_length': '6',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.projectbookmark': {
'Meta': {
'unique_together': "(('project_id', 'user'),)",
'object_name': 'ProjectBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.projectdsymfile': {
'Meta': {
'unique_together': "(('project', 'uuid'),)",
'object_name': 'ProjectDSymFile'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'uuid': ('django.db.models.fields.CharField', [], {
'max_length': '36'
})
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {
'unique_together': "(('project_id', 'platform'),)",
'object_name': 'ProjectPlatform'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'platform': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.release': {
'Meta': {
'unique_together': "(('project_id', 'version'),)",
'object_name': 'Release'
},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_released':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'date_started':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True',
'blank': 'True'
}
),
'project_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'projects': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'releases'",
'symmetrical': 'False',
'through': "orm['sentry.ReleaseProject']",
'to': "orm['sentry.Project']"
}
),
'ref': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.releasecommit': {
'Meta': {
'unique_together': "(('release', 'commit'), ('release', 'order'))",
'object_name': 'ReleaseCommit'
},
'commit': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Commit']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True',
'db_index': 'True'
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.releaseenvironment': {
'Meta': {
'unique_together': "(('project_id', 'release_id', 'environment_id'),)",
'object_name': 'ReleaseEnvironment',
'db_table': "'sentry_environmentrelease'"
},
'environment_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'first_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.releasefile': {
'Meta': {
'unique_together': "(('release', 'ident'),)",
'object_name': 'ReleaseFile'
},
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'name': ('django.db.models.fields.TextField', [], {}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'project_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.releaseproject': {
'Meta': {
'unique_together': "(('project', 'release'),)",
'object_name': 'ReleaseProject',
'db_table': "'sentry_release_project'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.repository': {
'Meta': {
'unique_together':
"(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))",
'object_name':
'Repository'
},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'external_id':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'provider':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'url': ('django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True'
})
},
'sentry.rule': {
'Meta': {
'object_name': 'Rule'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.savedsearch': {
'Meta': {
'unique_together': "(('project', 'name'),)",
'object_name': 'SavedSearch'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_default': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {
'unique_together': "(('project', 'user'),)",
'object_name': 'SavedSearchUserDefault',
'db_table': "'sentry_savedsearch_userdefault'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'savedsearch': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.SavedSearch']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'unique_together': "(('organization', 'slug'),)",
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_password_expired':
('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'name': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'db_column': "'first_name'",
'blank': 'True'
}
),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'session_nonce':
('django.db.models.fields.CharField', [], {
'max_length': '12',
'null': 'True'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useravatar': {
'Meta': {
'object_name': 'UserAvatar'
},
'avatar_type':
('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']",
'unique': 'True',
'null': 'True',
'on_delete': 'models.SET_NULL'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': (
'django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32',
'db_index': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'avatar'",
'unique': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.useremail': {
'Meta': {
'unique_together': "(('user', 'email'),)",
'object_name': 'UserEmail'
},
'date_hash_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_verified': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'emails'",
'to': "orm['sentry.User']"
}
),
'validation_hash': (
'django.db.models.fields.CharField', [], {
'default': "u'eoUxyDO82qJrLEXmZNPgefpGSvdT4CsY'",
'max_length': '32'
}
)
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'UserReport',
'index_together': "(('project', 'event_id'), ('project', 'date_added'))"
},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
}
}
complete_apps = ['sentry']
symmetrical = True
| bsd-3-clause | 510,566,170,111,118,700 | 35.953148 | 98 | 0.40688 | false |
hacchy/MetaVelvet | scripts/scriptEstimatedCovMulti.py | 1 | 7954 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Evaluate peaks of coverage from a kmer coverage file (e.g.
Graph2-stats.txt)'''
import sys
import math
# Define functions
def num(s):
    '''Take a string representation of a number and convert it to a number.
    Return an int or a float as appropriate. A ValueError is raised if the
    string does not represent a number.'''
try:
return int(s)
except ValueError:
return float(s)
def importStats(fin_stats):
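    '''Parse a tab-separated stats file (e.g. Graph2-stats.txt) whose first
    non-empty line is the column header. Return a dict mapping each column
    name to the list of its numeric values, one value per data row.'''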
dicStats = {}
listHeader = []
while True:
line = fin_stats.readline()
# Exit after last line
if not line:
break
# Skip empty line
line = line.rstrip()
if not line:
continue
lineFields = line.split("\t")
if len(dicStats) == 0:
# Process header line
listHeader = lineFields
for header in listHeader:
dicStats[header] = []
else:
# Process line containing coverage values
listStats = lineFields
for i in range(len(lineFields)):
stats = num(listStats[i])
dicStats[listHeader[i]].append(stats)
return dicStats
def weightedHisto(dicStats, xMin, xMax, binWidth):
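    '''Build a histogram of "short1_cov" (k-mer coverage) binned by binWidth
    over [xMin, xMax), where each contig contributes its length ("lgth")
    rather than a simple count, i.e. a length-weighted coverage histogram.'''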
dicHisto = {}
listShort1Cov = dicStats["short1_cov"]
listLgth = dicStats["lgth"]
for x in range(xMin, xMax, binWidth):
dicHisto[x] = 0
for i in range(len(listShort1Cov)):
cov = listShort1Cov[i]
if cov < xMin or cov >= xMax:
continue
for x in range(xMin, xMax+binWidth, binWidth):
if (cov >= x and cov < x + binWidth):
dicHisto[x] += listLgth[i]
return dicHisto
def smoothingHisto(dicHisto, xMin, xMax, binWidth, widthMovAve):
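    '''Smooth the histogram with a simple moving average over widthMovAve
    consecutive bins; each smoothed value is assigned to the central bin.'''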
dicSmoothHisto = {}
listMovAve = []
for x in range(xMin, xMax, binWidth):
listMovAve.append(dicHisto[x])
if len(listMovAve) < widthMovAve:
continue
dicSmoothHisto[x - binWidth * ((widthMovAve - 1) / 2)] \
= sum(listMovAve) / float(widthMovAve)
listMovAve.pop(0)
return dicSmoothHisto
def printHisto(dicHisto, xMin, xMax, binWidth):
print "Histogram :"
for x in range(xMin, xMax, binWidth):
#print str(x) + " : " + str(int(round(dicHisto[x], 0)))
lenBar = int(round((dicHisto[x] / 20000), 0)) - 1
print str(x) + "\t",
for i in range(lenBar):
print "=",
print "\n",
print "\n",
def setXMax(xMax, binWidth):
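    '''Round xMax down to an integer multiple of binWidth.'''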
return int((math.floor(xMax / binWidth)) * binWidth)
def getFirstXMax(dicStats, binWidth, thresConLen):
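    '''Pick the initial x-axis upper bound for the histogram: among contigs
    at least thresConLen long, track the running maximum coverage and the
    value it displaced (roughly the second-highest coverage), then pad that
    value by five bins.'''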
listLgth = dicStats["lgth"]
listShort1Cov = dicStats["short1_cov"]
maxCov = 0
subMaxCov = 0
for i in range(len(listLgth)):
if listLgth[i] >= thresConLen:
if listShort1Cov[i] > maxCov:
subMaxCov = maxCov
maxCov = listShort1Cov[i]
xMax = setXMax(subMaxCov, binWidth) + binWidth * 5
return xMax
def getN50(tupleConLen):
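    '''Return the N50 of the given contig lengths: the length L such that
    contigs of length >= L cover at least half of the total assembly length.
    Worked example: lengths (10, 8, 5, 3) give a total of 26, half is 13;
    descending cumulative sums are 10, then 18 >= 13, so N50 = 8.'''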
listSortedConLen = list(tupleConLen)
listSortedConLen.sort()
listSortedConLen.reverse()
totalLen = sum(listSortedConLen)
sumLen = 0
for i in range(len(listSortedConLen)):
sumLen += listSortedConLen[i]
if sumLen >= totalLen / 2:
return listSortedConLen[i]
return -1
def setWidthByXMax(xMax):
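    '''Choose [binWidth, widthMovAve] from the histogram range: wider bins
    and stronger smoothing for a larger xMax, unit bins once xMax <= 100.'''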
listWidth = [0, 0] # [binWidth, widthMovAve]
if xMax > 300:
listWidth = [6, 5]
if xMax <= 300:
listWidth = [4, 3]
if xMax <= 120:
listWidth = [2, 3]
if xMax <= 100:
listWidth = [1, 1]
return listWidth
def detectPeakPandS(dicHisto, xMin, xMax, binWidth, thresHeight,
listPeakPandS):
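    '''Scan the smoothed histogram from high to low coverage. A peak starts
    after thresIncrease consecutive non-decreasing bins above thresHeight;
    once thresDecrease bins fall below the running maximum, the peak position
    (the centre of the maximal bin) is stored in the first free slot of
    listPeakPandS (primary, then secondary). Returns as soon as the secondary
    slot is filled, otherwise after the whole range has been scanned.'''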
countIncrease = 0; thresIncrease = 3
countDecrease = 0; thresDecrease = 3
beforeHeight = -1
flagPeakStart = False
peakHeight = 0; peakCov = 0
for x in range(xMax - binWidth, xMin - binWidth, -1 * binWidth):
if beforeHeight == -1:
beforeHeight = dicHisto[x]
continue
if not flagPeakStart:
if dicHisto[x] >= thresHeight:
if dicHisto[x] >= beforeHeight:
countIncrease += 1
if countIncrease >= thresIncrease:
countIncrease = 0
flagPeakStart = True
beforeHeight = dicHisto[x]
if flagPeakStart:
if dicHisto[x] >= peakHeight:
peakHeight = dicHisto[x]
peakCov = x
else:
countDecrease += 1
if countDecrease >= thresDecrease:
for i in range(2):
if listPeakPandS[i] == -1:
tmpBias = float(binWidth) / 2
listPeakPandS[i] = peakCov + tmpBias
peakHeight = 0; peakCov = 0
break
if listPeakPandS[1] != -1:
return listPeakPandS
countDecrease = 0
flagPeakStart = False
return listPeakPandS
def printPeaks(listPeak):
print "Peaks :"
print listPeak
strList = []
for value in listPeak:
strList.append(str(value))
print '_'.join(strList)
def check_args():
'''Check that an argument was provided or complain and exit.
Return the name of the file to use'''
if len(sys.argv) != 2:
script_name = sys.argv[0]
        print 'Usage: %s <Graph2_stats_file>' % script_name
sys.exit(1)
return sys.argv[1]
def main(stats_file):
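    '''Estimate expected coverage peaks: compute the N50, derive an initial
    histogram range, then repeatedly build a length-weighted, smoothed
    coverage histogram and detect peaks from the highest coverage downwards,
    shrinking the range to the last detected peak, until no further
    (secondary) peak is found. Detected peaks are printed joined by "_".'''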
# Import stats file
fin_stats = open(stats_file, "r")
dicStats = importStats(fin_stats)
# Make weighted histogram
listPeak = []
xMin = 0
xMax = 1000
binWidth = 4
widthMovAve = 5
listPeakPandS = [-1, -1]
N50 = 0
thresHeight = 0
thresConLen = 0
while True:
# Get N50
if len(listPeak) == 0:
N50 = getN50(tuple(dicStats["lgth"]))
print "N50 : " + str(N50)
thresConLen = N50 * 5
# Get first xMax
if len(listPeak) == 0:
xMax = getFirstXMax(dicStats, binWidth, thresConLen)
print "First xMax : " + str(xMax)
# Set width and xMax
listWidth = setWidthByXMax(xMax)
binWidth = listWidth[0]; widthMovAve = listWidth[1]
xMax = setXMax(xMax, binWidth)
# Make weighted and smoothed histogram
xMin = 0
dicHisto = weightedHisto(dicStats, xMin, xMax, binWidth)
dicSmoothHisto = smoothingHisto(dicHisto, xMin, xMax,
binWidth, widthMovAve)
xMin += binWidth * ((widthMovAve - 1) / 2)
xMax -= binWidth * ((widthMovAve - 1) / 2)
# Get thresHeight
if len(listPeak) == 0:
thresHeight = dicSmoothHisto[xMax - binWidth]
print "Thres Height : " + str(thresHeight)
# Print histogram
if len(listPeak) == 0:
printHisto(dicSmoothHisto, xMin, xMax, binWidth)
# Detect (primary and) secondary peak
listPeakPandS = detectPeakPandS(dicSmoothHisto, xMin, xMax, binWidth,
thresHeight, listPeakPandS)
# Record peak
if len(listPeak) == 0:
listPeak.append(listPeakPandS[0])
listPeak.append(listPeakPandS[1])
# When couldn't detect secondary peak, break
if listPeakPandS[1] == -1:
listPeak.pop(-1)
printPeaks(listPeak)
break
# Prepare for next peak
listPeakPandS[0] = listPeakPandS[1]
listPeakPandS[1] = -1
xMax = listPeakPandS[0]
if __name__ == "__main__":
stats_file = check_args()
main(stats_file)
| gpl-2.0 | 4,155,654,913,366,671,000 | 26.714286 | 78 | 0.544129 | false |
balta2ar/coursera-dl | coursera/api.py | 1 | 18829 | # vim: set fileencoding=utf8 :
"""
This module contains implementations of different APIs that are used by the
downloader.
"""
import os
import json
import logging
from six import iterkeys, iteritems
from six.moves.urllib_parse import quote_plus
from .utils import (BeautifulSoup, make_coursera_absolute_url,
extend_supplement_links)
from .network import get_page
from .define import (OPENCOURSE_SUPPLEMENT_URL,
OPENCOURSE_PROGRAMMING_ASSIGNMENTS_URL,
OPENCOURSE_ASSET_URL,
OPENCOURSE_ASSETS_URL,
OPENCOURSE_API_ASSETS_V1_URL,
OPENCOURSE_VIDEO_URL)
class CourseraOnDemand(object):
"""
    This class provides a friendly interface for extracting certain parts of
    on-demand courses. On-demand classes are the newer format that Coursera
    uses; their URLs contain '/learn/'. This class does not support old-style
    Coursera classes. This API is by no means complete.
"""
def __init__(self, session, course_id):
"""
Initialize Coursera OnDemand API.
@param session: Current session that holds cookies and so on.
@type session: requests.Session
@param course_id: Course ID from course json.
@type course_id: str
"""
self._session = session
self._course_id = course_id
def extract_links_from_lecture(self,
video_id, subtitle_language='en',
resolution='540p', assets=None):
"""
Return the download URLs of on-demand course video.
@param video_id: Video ID.
@type video_id: str
@param subtitle_language: Subtitle language.
@type subtitle_language: str
@param resolution: Preferred video resolution.
@type resolution: str
@param assets: List of assets that may present in the video.
@type assets: [str]
@return: @see CourseraOnDemand._extract_links_from_text
"""
if assets is None:
assets = []
links = self._extract_videos_and_subtitles_from_lecture(
video_id, subtitle_language, resolution)
assets = self._normalize_assets(assets)
extend_supplement_links(
links, self._extract_links_from_lecture_assets(assets))
return links
def _normalize_assets(self, assets):
"""
        Perform asset normalization. For some reason, assets that are sometimes
        present in lectures have "@1" at the end of their id. Such an "uncut"
        asset id, when fed to OPENCOURSE_ASSETS_URL, results in an error that
        says: "Routing error: 'get-all' not implemented". To avoid that, the
        last two characters of the asset id are cut off, after which the
        method works fine. It looks like the Web UI does the same.
@param assets: List of asset ids.
@type assets: [str]
@return: Normalized list of asset ids (without trailing "@1")
@rtype: [str]
"""
new_assets = []
for asset in assets:
# For example: giAxucdaEeWJTQ5WTi8YJQ@1
if len(asset) == 24:
# Turn it into: giAxucdaEeWJTQ5WTi8YJQ
asset = asset[:-2]
new_assets.append(asset)
return new_assets
def _extract_links_from_lecture_assets(self, asset_ids):
"""
Extract links to files of the asset ids.
@param asset_ids: List of asset ids.
@type asset_ids: [str]
@return: @see CourseraOnDemand._extract_links_from_text
"""
links = {}
def _add_asset(name, url, destination):
filename, extension = os.path.splitext(name)
            if extension == '':
return
extension = extension.lower().strip('.')
basename = os.path.basename(filename)
if extension not in destination:
destination[extension] = []
destination[extension].append((url, basename))
for asset_id in asset_ids:
for asset in self._get_asset_urls(asset_id):
_add_asset(asset['name'], asset['url'], links)
return links
def _get_asset_urls(self, asset_id):
"""
Get list of asset urls and file names. This method may internally
use _get_open_course_asset_urls to extract `asset` element types.
@param asset_id: Asset ID.
@type asset_id: str
@return List of dictionaries with asset file names and urls.
@rtype [{
'name': '<filename.ext>'
'url': '<url>'
}]
"""
url = OPENCOURSE_ASSETS_URL.format(id=asset_id)
page = get_page(self._session, url)
logging.debug('Parsing JSON for asset_id <%s>.', asset_id)
dom = json.loads(page)
urls = []
for element in dom['elements']:
typeName = element['typeName']
definition = element['definition']
# Elements of `asset` types look as follows:
#
# {'elements': [{'definition': {'assetId': 'gtSfvscoEeW7RxKvROGwrw',
# 'name': 'Презентация к лекции'},
# 'id': 'phxNlMcoEeWXCQ4nGuQJXw',
# 'typeName': 'asset'}],
# 'linked': None,
# 'paging': None}
#
if typeName == 'asset':
open_course_asset_id = definition['assetId']
for asset in self._get_open_course_asset_urls(open_course_asset_id):
urls.append({'name': asset['name'],
'url': asset['url']})
# Elements of `url` types look as follows:
#
# {'elements': [{'definition': {'name': 'What motivates you.pptx',
# 'url': 'https://d396qusza40orc.cloudfront.net/learning/Powerpoints/2-4A_What_motivates_you.pptx'},
# 'id': '0hixqpWJEeWQkg5xdHApow',
# 'typeName': 'url'}],
# 'linked': None,
# 'paging': None}
#
elif typeName == 'url':
urls.append({'name': definition['name'],
'url': definition['url']})
else:
logging.warning(
'Unknown asset typeName: %s\ndom: %s\n'
'If you think the downloader missed some '
'files, please report the issue here:\n'
'https://github.com/coursera-dl/coursera-dl/issues/new',
typeName, json.dumps(dom, indent=4))
return urls
def _get_open_course_asset_urls(self, asset_id):
"""
Get list of asset urls and file names. This method only works
with asset_ids extracted internally by _get_asset_urls method.
@param asset_id: Asset ID.
@type asset_id: str
@return List of dictionaries with asset file names and urls.
@rtype [{
'name': '<filename.ext>'
'url': '<url>'
}]
"""
url = OPENCOURSE_API_ASSETS_V1_URL.format(id=asset_id)
page = get_page(self._session, url)
dom = json.loads(page)
# Structure is as follows:
# elements [ {
# name
# url {
# url
return [{'name': element['name'],
'url': element['url']['url']}
for element in dom['elements']]
def _extract_videos_and_subtitles_from_lecture(self,
video_id,
subtitle_language='en',
resolution='540p'):
url = OPENCOURSE_VIDEO_URL.format(video_id=video_id)
page = get_page(self._session, url)
logging.debug('Parsing JSON for video_id <%s>.', video_id)
video_content = {}
dom = json.loads(page)
# videos
logging.info('Gathering video URLs for video_id <%s>.', video_id)
sources = dom['sources']
sources.sort(key=lambda src: src['resolution'])
sources.reverse()
# Try to select resolution requested by the user.
filtered_sources = [source
for source in sources
if source['resolution'] == resolution]
if len(filtered_sources) == 0:
# We will just use the 'vanilla' version of sources here, instead of
# filtered_sources.
logging.warn('Requested resolution %s not available for <%s>. '
'Downloading highest resolution available instead.',
resolution, video_id)
else:
logging.info('Proceeding with download of resolution %s of <%s>.',
resolution, video_id)
sources = filtered_sources
video_url = sources[0]['formatSources']['video/mp4']
video_content['mp4'] = video_url
# subtitles and transcripts
subtitle_nodes = [
('subtitles', 'srt', 'subtitle'),
('subtitlesTxt', 'txt', 'transcript'),
]
for (subtitle_node, subtitle_extension, subtitle_description) in subtitle_nodes:
logging.info('Gathering %s URLs for video_id <%s>.', subtitle_description, video_id)
subtitles = dom.get(subtitle_node)
if subtitles is not None:
if subtitle_language == 'all':
for current_subtitle_language in subtitles:
video_content[current_subtitle_language + '.' + subtitle_extension] = make_coursera_absolute_url(subtitles.get(current_subtitle_language))
else:
if subtitle_language != 'en' and subtitle_language not in subtitles:
logging.warning("%s unavailable in '%s' language for video "
"with video id: [%s], falling back to 'en' "
"%s", subtitle_description.capitalize(), subtitle_language, video_id, subtitle_description)
subtitle_language = 'en'
subtitle_url = subtitles.get(subtitle_language)
if subtitle_url is not None:
# some subtitle urls are relative!
video_content[subtitle_language + '.' + subtitle_extension] = make_coursera_absolute_url(subtitle_url)
lecture_video_content = {}
for key, value in iteritems(video_content):
lecture_video_content[key] = [(value, '')]
return lecture_video_content
def extract_links_from_programming(self, element_id):
"""
Return a dictionary with links to supplement files (pdf, csv, zip,
ipynb, html and so on) extracted from graded programming assignment.
@param element_id: Element ID to extract files from.
@type element_id: str
@return: @see CourseraOnDemand._extract_links_from_text
"""
logging.info('Gathering supplement URLs for element_id <%s>.', element_id)
# Assignment text (instructions) contains asset tags which describe
# supplementary files.
text = ''.join(self._extract_assignment_text(element_id))
if not text:
return {}
supplement_links = self._extract_links_from_text(text)
return supplement_links
def extract_links_from_supplement(self, element_id):
"""
Return a dictionary with supplement files (pdf, csv, zip, ipynb, html
and so on) extracted from supplement page.
@return: @see CourseraOnDemand._extract_links_from_text
"""
logging.info('Gathering supplement URLs for element_id <%s>.', element_id)
url = OPENCOURSE_SUPPLEMENT_URL.format(
course_id=self._course_id, element_id=element_id)
page = get_page(self._session, url)
dom = json.loads(page)
supplement_content = {}
# Supplement content has structure as follows:
# 'linked' {
# 'openCourseAssets.v1' [ {
# 'definition' {
# 'value'
for asset in dom['linked']['openCourseAssets.v1']:
value = asset['definition']['value']
# Supplement lecture types are known to contain both <asset> tags
# and <a href> tags (depending on the course), so we extract
# both of them.
extend_supplement_links(
supplement_content, self._extract_links_from_text(value))
return supplement_content
def _extract_asset_tags(self, text):
"""
Extract asset tags from text into a convenient form.
@param text: Text to extract asset tags from. This text contains HTML
code that is parsed by BeautifulSoup.
@type text: str
@return: Asset map.
@rtype: {
'<id>': {
'name': '<name>',
'extension': '<extension>'
},
...
}
"""
soup = BeautifulSoup(text)
asset_tags_map = {}
for asset in soup.find_all('asset'):
asset_tags_map[asset['id']] = {'name': asset['name'],
'extension': asset['extension']}
return asset_tags_map
def _extract_asset_urls(self, asset_ids):
"""
Extract asset URLs along with asset ids.
@param asset_ids: List of ids to get URLs for.
        @type asset_ids: [str]
@return: List of dictionaries with asset URLs and ids.
@rtype: [{
'id': '<id>',
'url': '<url>'
}]
"""
ids = quote_plus(','.join(asset_ids))
url = OPENCOURSE_ASSET_URL.format(ids=ids)
page = get_page(self._session, url)
dom = json.loads(page)
return [{'id': element['id'],
'url': element['url']}
for element in dom['elements']]
def _extract_assignment_text(self, element_id):
"""
Extract assignment text (instructions).
@param element_id: Element id to extract assignment instructions from.
@type element_id: str
@return: List of assignment text (instructions).
@rtype: [str]
"""
url = OPENCOURSE_PROGRAMMING_ASSIGNMENTS_URL.format(
course_id=self._course_id, element_id=element_id)
page = get_page(self._session, url)
dom = json.loads(page)
return [element['submissionLearnerSchema']['definition']
['assignmentInstructions']['definition']['value']
for element in dom['elements']]
def _extract_links_from_text(self, text):
"""
Extract supplement links from the html text. Links may be provided
in two ways:
1. <a> tags with href attribute
2. <asset> tags with id attribute (requires additional request
to get the direct URL to the asset file)
@param text: HTML text.
@type text: str
@return: Dictionary with supplement links grouped by extension.
@rtype: {
'<extension1>': [
('<link1>', '<title1>'),
('<link2>', '<title2')
],
'extension2': [
('<link3>', '<title3>'),
('<link4>', '<title4>')
],
...
}
"""
supplement_links = self._extract_links_from_a_tags_in_text(text)
extend_supplement_links(
supplement_links,
self._extract_links_from_asset_tags_in_text(text))
return supplement_links
def _extract_links_from_asset_tags_in_text(self, text):
"""
Scan the text and extract asset tags and links to corresponding
files.
@param text: Page text.
@type text: str
@return: @see CourseraOnDemand._extract_links_from_text
"""
# Extract asset tags from instructions text
asset_tags_map = self._extract_asset_tags(text)
ids = list(iterkeys(asset_tags_map))
if not ids:
return {}
# asset tags contain asset names and ids. We need to make another
# HTTP request to get asset URL.
asset_urls = self._extract_asset_urls(ids)
supplement_links = {}
# Build supplement links, providing nice titles along the way
for asset in asset_urls:
title = asset_tags_map[asset['id']]['name']
extension = asset_tags_map[asset['id']]['extension']
if extension not in supplement_links:
supplement_links[extension] = []
supplement_links[extension].append((asset['url'], title))
return supplement_links
def _extract_links_from_a_tags_in_text(self, text):
"""
Extract supplement links from the html text that contains <a> tags
with href attribute.
@param text: HTML text.
@type text: str
@return: Dictionary with supplement links grouped by extension.
@rtype: {
'<extension1>': [
('<link1>', '<title1>'),
                ('<link2>', '<title2>')
],
            '<extension2>': [
('<link3>', '<title3>'),
('<link4>', '<title4>')
]
}
"""
soup = BeautifulSoup(text)
links = [item['href']
for item in soup.find_all('a') if 'href' in item.attrs]
links = sorted(list(set(links)))
supplement_links = {}
for link in links:
filename, extension = os.path.splitext(link)
# Some courses put links to sites in supplement section, e.g.:
# http://pandas.pydata.org/
if extension is '':
continue
# Make lowercase and cut the leading/trailing dot
extension = extension.lower().strip('.')
basename = os.path.basename(filename)
if extension not in supplement_links:
supplement_links[extension] = []
# Putting basename into the second slot of the tuple is important
            # because that will allow downloading many supplements within a
# single lecture, e.g.:
# 01_slides-presented-in-this-module.pdf
# 01_slides-presented-in-this-module_Dalal-cvpr05.pdf
# 01_slides-presented-in-this-module_LM-3dtexton.pdf
supplement_links[extension].append((link, basename))
return supplement_links
| lgpl-3.0 | -3,989,550,405,040,123,400 | 35.244701 | 162 | 0.545691 | false |
TylerTemp/amazedown | amazedown/quote_by.py | 1 | 2321 | import re
from markdown.blockprocessors import BlockQuoteProcessor
from markdown import Extension
import logging
logger = logging.getLogger('MARKDOWN.quote_by')
class QuoteByProcessor(BlockQuoteProcessor):
BY_RE = re.compile(r'(?P<pre>[ ]{0,3}>[ ]*)'
r'(-- ?|—— ?)'
r'(?P<name>.*)'
)
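    # Illustrative sketch (not part of the original module) of what BY_RE is
    # meant to match: a blockquote line whose content is an attribution
    # introduced by "--" or the full-width dashes "——", e.g.
    #
    #     >>> m = QuoteByProcessor.BY_RE.match('> -- Mark Twain')
    #     >>> m.group('name')
    #     'Mark Twain'
    #
    # run() below rewrites such trailing lines into '> <small>Mark Twain</small>'.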
def run(self, parent, blocks):
block = blocks[0]
reversed_line_result = []
BY_RE = self.BY_RE
no_more = False
line_count = 0
for each_line in block.splitlines()[::-1]:
if no_more:
reversed_line_result.append(each_line)
continue
logger.debug(each_line)
match = BY_RE.match(each_line)
if match:
each_line = match.expand('\g<pre><small>\g<name></small>')
line_count += 1
else:
no_more = True
reversed_line_result.append(each_line)
line_result = reversed_line_result[::-1]
sep_at = len(line_result) - line_count
raw_result = line_result[:sep_at]
by_result = line_result[sep_at:]
raw = '\n'.join(raw_result)
by = '<br/>\n'.join(by_result)
logger.debug(raw)
logger.debug(by)
blocks[0] = '%s\n%s' % (raw, by)
return super(QuoteByProcessor, self).run(parent, blocks)
class QuoteByExtension(Extension):
""" Add definition lists to Markdown. """
def extendMarkdown(self, md, md_globals):
""" Add an instance of DefListProcessor to BlockParser. """
md.parser.blockprocessors.add('quote_by',
QuoteByProcessor(md.parser),
'<quote')
def makeExtension(configs=None):
if configs is None:
configs = {}
return QuoteByExtension(configs=configs)
if __name__ == '__main__':
import markdown
logging.basicConfig(
level=logging.DEBUG,
format='\033[32m%(levelname)1.1s\033[0m[%(lineno)3s]%(message)s')
md = """
> sth
> goes -goes
> --here
> sth
> goes -goes
> --here
>
> -- here
>
> - here
>
> —— here
>
> ——here
>
> — here
>
> —here
"""
result = markdown.markdown(md, extensions=[makeExtension()])
print(result) | gpl-3.0 | 4,252,384,952,064,257,500 | 21.831683 | 74 | 0.53449 | false |
William-Hai/SimpleDemo-python | file/csv/demo_csv.py | 1 | 1083 | # encoding=utf-8
__author__ = 'Q-Whai'
'''
DESC: Test demo for CSV file operations
Blog: http://blog.csdn.net/lemon_tree12138
Create Date: 2016/2/25
Last Modify: 2016/3/9
version: 0.0.1
'''
import csv
# ----------------------------------------- #
# Read the contents of a CSV file          #
# ----------------------------------------- #
def read_csv(file_path):
reader = csv.reader(file(file_path, 'rb'))
for line in reader:
print(line)
# ----------------------------------------- #
# Write the given data to a CSV file       #
# ----------------------------------------- #
def write_csv(file_path, data):
writer = csv.writer(file(file_path, 'wb'))
for data_raw in data:
writer.writerow(data_raw)
# ----------------------------------------- #
# Program entry point                      #
# ----------------------------------------- #
if __name__ == '__main__':
data = [['0', '1', '2'], ['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']]
write_csv('F:/Temp/a.csv', data)
read_csv('F:/Temp/a.csv')
| gpl-3.0 | 102,617,643,984,817,540 | 25.179487 | 79 | 0.371205 | false |
Fluent-networks/floranet | floranet/test/unit/floranet/test_netserver.py | 1 | 11517 | import os
import base64
import time
from random import randrange
from mock import patch, MagicMock
from twisted.trial import unittest
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet import reactor, protocol
from twisted.internet.udp import Port
from twistar.registry import Registry
from floranet.lora.wan import LoraWAN, Rxpk
from floranet.netserver import NetServer
import floranet.lora.mac as lora_mac
from floranet.models.model import Model
from floranet.models.config import Config
from floranet.models.gateway import Gateway
from floranet.models.device import Device
from floranet.models.application import Application
import floranet.test.unit.mock_dbobject as mockDBObject
import floranet.test.unit.mock_model as mockModel
from floranet.test.unit.mock_reactor import reactorCall
class NetServerTest(unittest.TestCase):
@inlineCallbacks
def setUp(self):
"""Test setup. Creates a new NetServer
Use factory default configuration.
"""
Registry.getConfig = MagicMock(return_value=None)
# Get factory default configuration
with patch.object(Model, 'save', MagicMock()):
config = yield Config.loadFactoryDefaults()
self.server = NetServer(config)
def _test_device(self):
"""Create a test device object """
return Device(
deveui=int('0x0F0E0E0D00010209', 16),
devaddr=int('0x06000001', 16),
appeui=int('0x0A0B0C0D0A0B0C0D', 16),
nwkskey=int('0xAEB48D4C6E9EA5C48C37E4F132AA8516', 16),
appskey=int('0x7987A96F267F0A86B739EED480FC2B3C', 16),
adr= True,
tx_chan=3,
tx_datr='SF7BW125',
gw_addr='192.168.1.125',
enabled = True)
def test_checkDevaddr(self):
"""Test checkDevaddr method"""
# Check valid address
device = self._test_device()
result = self.server.checkDevaddr(device.devaddr)
self.assertTrue(result)
# Check invalid address
devaddr = int('0x11223344', 16)
result = self.server.checkDevaddr(devaddr)
self.assertFalse(result)
@inlineCallbacks
def test_getOTAADevAddrs(self):
"""Test getOTAADevAddrs method"""
device = self._test_device()
mockDBObject.return_value = device
expected = [[], [device.devaddr]]
results = []
# Test when no devices are found
with patch.object(Device, 'find', classmethod(mockDBObject.findFail)):
result = yield self.server._getOTAADevAddrs()
results.append(result)
# Test when one device is found
with patch.object(Device, 'find', classmethod(mockDBObject.findOne)):
result = yield self.server._getOTAADevAddrs()
results.append(result)
self.assertEqual(expected, results)
@inlineCallbacks
def test_getFreeOTAAddress(self):
expected = [self.server.config.otaastart,
self.server.config.otaastart+1,
self.server.config.otaaend, None]
results = []
# Test with empty OTA device list
# Mock the server method to return the devaddr list
with patch.object(self.server, '_getOTAADevAddrs',
MagicMock(return_value=[])):
result = yield self.server._getFreeOTAAddress()
results.append(result)
# Test with one OTA device
with patch.object(self.server, '_getOTAADevAddrs', MagicMock(
return_value=[self.server.config.otaastart])):
result = yield self.server._getFreeOTAAddress()
results.append(result)
# Test with last address only available
with patch.object(self.server, '_getOTAADevAddrs',MagicMock(
return_value=xrange(self.server.config.otaastart,
self.server.config.otaaend))):
result = yield self.server._getFreeOTAAddress()
results.append(result)
# Test with no address available
with patch.object(self.server, '_getOTAADevAddrs',MagicMock(
return_value=xrange(self.server.config.otaastart,
self.server.config.otaaend + 1))):
result = yield self.server._getFreeOTAAddress()
results.append(result)
self.assertEqual(expected, results)
@inlineCallbacks
def test_getActiveDevice(self):
# Include for coverage. We are essentially testing a returnValue() call.
device = self._test_device()
mockDBObject.return_value = device
expected = device.deveui
with patch.object(Device, 'find', classmethod(mockDBObject.findSuccess)):
result = yield self.server._getActiveDevice(device.devaddr)
self.assertEqual(expected, result.deveui)
def test_checkDuplicateMessage(self):
m = lora_mac.MACDataMessage()
m.mic = 1111
self.server.config.duplicateperiod = 10
expected = [True, False]
result = []
now = time.time()
# Test a successful find of the duplicate
for i in (1,10):
self.server.message_cache.append((randrange(1,1000), now - i))
self.server.message_cache.append(
(m.mic, now - self.server.config.duplicateperiod + 1))
result.append(self.server._checkDuplicateMessage(m))
# Test an unsuccessful find of the duplicate - the message's
# cache period has expired.
self.server.message_cache.remove(
(m.mic, now - self.server.config.duplicateperiod + 1))
self.server.message_cache.append(
(m.mic, now - self.server.config.duplicateperiod - 1))
result.append(self.server._checkDuplicateMessage(m))
self.assertEqual(expected, result)
def test_cleanMessageCache(self):
self.server.config.duplicateperiod = 10
# Create 10 cache entries, remove 5
now = time.time()
for i in range(1,21,2):
self.server.message_cache.append((i, now - i))
expected = 5
self.server._cleanMessageCache()
result = len(self.server.message_cache)
self.assertEqual(expected, result)
def test_manageMACCommandQueue(self):
self.server.config.macqueuelimit = 10
# Create 10 cache entries, remove 5
now = time.time()
for i in range(1,21,2):
self.server.commands.append((int(now - i), i, lora_mac.LinkCheckAns()))
expected = 5
self.server._manageMACCommandQueue()
result = len(self.server.commands)
self.assertEqual(expected, result)
@inlineCallbacks
def test_processADRRequests(self):
device = self._test_device()
device.snr_average = 3.5
device.adr_datr = None
# Test we set adr_datr device attribute properly
expected = ['SF9BW125', False]
results = []
mockDBObject.return_value = [device]
mockModel.mock_object = device
with patch.object(Device, 'all', classmethod(mockDBObject.all)), \
patch.object(device, 'update', mockModel.update), \
patch.object(self.server, '_sendLinkADRRequest', MagicMock()):
# Remove any delays
self.server.config.adrmessagetime = 0
yield self.server._processADRRequests()
results.append(device.adr_datr)
results.append(self.server.adrprocessing)
self.assertEqual(expected, results)
def _createCommands(self):
datarate = 'SF7BW125'
chmask = int('FF', 16)
return [lora_mac.LinkCheckAns(), lora_mac.LinkADRReq(datarate, 0, chmask, 6, 0)]
def test_queueMACCommand(self):
device = self._test_device()
commands = self._createCommands()
expected = [2, lora_mac.LINKCHECKANS, lora_mac.LINKADRREQ]
for c in commands:
self.server._queueMACCommand(device.deveui, c)
result = [len(self.server.commands), self.server.commands[0][2].cid,
self.server.commands[1][2].cid]
self.assertEqual(expected, result)
def test_dequeueMACCommand(self):
device = self._test_device()
commands = self._createCommands()
for c in commands:
self.server._queueMACCommand(device.deveui, c)
self.server._dequeueMACCommand(device.deveui, commands[1])
expected = [1, lora_mac.LINKCHECKANS]
result = [len(self.server.commands), self.server.commands[0][2].cid]
self.assertEqual(expected, result)
def test_scheduleDownlinkTime(self):
offset = 10
tmst = randrange(0, 4294967295 - 10000000)
expected = [tmst + 10000000, 5000000]
result = []
result.append(self.server._scheduleDownlinkTime(tmst, offset))
tmst = 4294967295 - 5000000
result.append(self.server._scheduleDownlinkTime(tmst, offset))
self.assertEqual(expected, result)
def test_txpkResponse(self):
self.server.lora = LoraWAN(self)
self.server.lora.addGateway(Gateway(host='192.168.1.125', name='Test',
enabled=True, power=26))
tmst = randrange(0, 4294967295)
rxpk = Rxpk(tmst=tmst, chan=3, freq=915.8, datr='SF7BW125',
data="n/uSwM0LIED8X6QV0mJMjC6oc2HOWFpCfmTry", size=54)
device = self._test_device()
device.rx = self.server.band.rxparams((rxpk.chan, rxpk.datr), join=False)
gateway = self.server.lora.gateway(device.gw_addr)
expected = [(True, device.rx[1]['freq'], device.rx[1]['datr']),
(True, device.rx[2]['freq'], device.rx[2]['datr']),
(tmst + 1000000, device.rx[1]['freq'], device.rx[1]['datr']),
(tmst + 2000000, device.rx[2]['freq'], device.rx[2]['datr'])]
result = []
txpk = self.server._txpkResponse(device, rxpk.data, gateway, tmst, immediate=True)
for i in range(1,3):
result.append((txpk[i].imme, txpk[i].freq, txpk[i].datr))
txpk = self.server._txpkResponse(device, rxpk.data, gateway, tmst, immediate=False)
for i in range(1,3):
result.append((txpk[i].tmst, txpk[i].freq, txpk[i].datr))
self.assertEqual(expected, result)
def _processJoinRequest(self, request):
"""Called by test_processJoinRequest_pass and
test_processJoinRequest_fail"""
device = self._test_device()
app = self.server.config.apps[0]
# Passing join request
request = base64.b64decode("AA0MCwoNDAsKAwIBAA0ODg9IklIgzCM=")
msg = lora_mac.MACMessage.decode(request)
result = yield self.server._processJoinRequest(msg, app, device)
self.assertTrue(result)
# Failing join request
request = base64.b64decode("AA0MCwoNDAsKAwIBAA0ODg9IklIgzCX=")
msg = lora_mac.MACMessage.decode(request)
result = yield self.server._processJoinRequest(msg, app, device)
self.assertFalse(result)
| mit | 8,204,927,493,061,594,000 | 35.916667 | 91 | 0.60554 | false |
npyoung/python-neo | neo/io/axonio.py | 1 | 32534 | # -*- coding: utf-8 -*-
"""
Class for reading data from pCLAMP and AxoScope
files (.abf version 1 and 2), developed by Molecular device/Axon technologies.
- abf = Axon binary file
- atf is a text-based format from Axon that can be
read by AsciiIO (but this format is less efficient).
This code is a port of abfload and abf2load
written in Matlab (BSD-2-Clause licence) by :
- Copyright (c) 2009, Forrest Collman, [email protected]
- Copyright (c) 2004, Harald Hentschke
and available here :
http://www.mathworks.com/matlabcentral/fileexchange/22114-abf2load
Information on abf 1 and 2 formats is available here :
http://www.moleculardevices.com/pages/software/developer_info.html
This file supports the old (ABF1) and new (ABF2) format.
ABF1 (clampfit <=9) and ABF2 (clampfit >10)
All possible modes are supported:
- event-driven variable-length mode 1 -> return several Segments per Block
- event-driven fixed-length mode 2 or 5 -> return several Segments
- gap free mode -> return one (or several) Segment in the Block
Supported : Read
Author: sgarcia, jnowacki
Note: [email protected] has a C++ library with SWIG bindings which also
reads abf files - would be good to cross-check
"""
import struct
import datetime
import os
from io import open, BufferedReader
import numpy as np
import quantities as pq
from neo.io.baseio import BaseIO
from neo.core import *
from neo.io.tools import iteritems
class struct_file(BufferedReader):
def read_f(self, fmt, offset=None):
if offset is not None:
self.seek(offset)
return struct.unpack(fmt, self.read(struct.calcsize(fmt)))
def write_f(self, fmt, offset=None, *args):
if offset is not None:
self.seek(offset)
self.write(struct.pack(fmt, *args))
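# Hedged usage sketch (not in the original source): read_f unpacks a struct
# format at an optional byte offset and always returns a tuple, e.g.
#
#     >>> fid = struct_file(open('some_file.abf', 'rb'))  # placeholder path
#     >>> fid.read_f('4s', offset=0)    # file signature, e.g. (b'ABF2',)
#     >>> fid.read_f('h', offset=8)     # a single 'h' still yields a 1-tuple
#
# which is why the header readers below index val[0] for single-value formats.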
def reformat_integer_V1(data, nbchannel, header):
"""
reformat when dtype is int16 for ABF version 1
"""
chans = [chan_num for chan_num in
header['nADCSamplingSeq'] if chan_num >= 0]
for n, i in enumerate(chans[:nbchannel]): # respect SamplingSeq
data[:, n] /= header['fInstrumentScaleFactor'][i]
data[:, n] /= header['fSignalGain'][i]
data[:, n] /= header['fADCProgrammableGain'][i]
if header['nTelegraphEnable'][i]:
data[:, n] /= header['fTelegraphAdditGain'][i]
data[:, n] *= header['fADCRange']
data[:, n] /= header['lADCResolution']
data[:, n] += header['fInstrumentOffset'][i]
data[:, n] -= header['fSignalOffset'][i]
def reformat_integer_V2(data, nbchannel, header):
"""
reformat when dtype is int16 for ABF version 2
"""
for i in range(nbchannel):
data[:, i] /= header['listADCInfo'][i]['fInstrumentScaleFactor']
data[:, i] /= header['listADCInfo'][i]['fSignalGain']
data[:, i] /= header['listADCInfo'][i]['fADCProgrammableGain']
if header['listADCInfo'][i]['nTelegraphEnable']:
data[:, i] /= header['listADCInfo'][i]['fTelegraphAdditGain']
data[:, i] *= header['protocol']['fADCRange']
data[:, i] /= header['protocol']['lADCResolution']
data[:, i] += header['listADCInfo'][i]['fInstrumentOffset']
data[:, i] -= header['listADCInfo'][i]['fSignalOffset']
def clean_string(s):
s = s.rstrip(b'\x00')
s = s.rstrip(b' ')
return s
class AxonIO(BaseIO):
"""
Class for reading abf (axon binary file) file.
Usage:
>>> from neo import io
>>> r = io.AxonIO(filename='File_axon_1.abf')
>>> bl = r.read_block(lazy=False, cascade=True)
>>> print bl.segments
[<neo.core.segment.Segment object at 0x105516fd0>]
>>> print bl.segments[0].analogsignals
[<AnalogSignal(array([2.18811035, 2.19726562, 2.21252441, ...,
1.33056641, 1.3458252, 1.3671875], dtype=float32) * pA,
[0.0 s, 191.2832 s], sampling rate: 10000.0 Hz)>]
>>> print bl.segments[0].eventarrays
[]
"""
is_readable = True
is_writable = False
supported_objects = [Block, Segment, AnalogSignal, EventArray]
readable_objects = [Block]
writeable_objects = []
has_header = False
is_streameable = False
read_params = {Block: []}
write_params = None
name = 'Axon'
extensions = ['abf']
mode = 'file'
def __init__(self, filename=None):
"""
This class read a abf file.
Arguments:
filename : the filename to read
"""
BaseIO.__init__(self)
self.filename = filename
def read_block(self, lazy=False, cascade=True):
header = self.read_header()
version = header['fFileVersionNumber']
bl = Block()
bl.file_origin = os.path.basename(self.filename)
bl.annotate(abf_version=version)
# date and time
if version < 2.:
YY = 1900
MM = 1
DD = 1
hh = int(header['lFileStartTime'] / 3600.)
mm = int((header['lFileStartTime'] - hh * 3600) / 60)
ss = header['lFileStartTime'] - hh * 3600 - mm * 60
ms = int(np.mod(ss, 1) * 1e6)
ss = int(ss)
elif version >= 2.:
YY = int(header['uFileStartDate'] / 10000)
MM = int((header['uFileStartDate'] - YY * 10000) / 100)
DD = int(header['uFileStartDate'] - YY * 10000 - MM * 100)
hh = int(header['uFileStartTimeMS'] / 1000. / 3600.)
mm = int((header['uFileStartTimeMS'] / 1000. - hh * 3600) / 60)
ss = header['uFileStartTimeMS'] / 1000. - hh * 3600 - mm * 60
ms = int(np.mod(ss, 1) * 1e6)
ss = int(ss)
bl.rec_datetime = datetime.datetime(YY, MM, DD, hh, mm, ss, ms)
if not cascade:
return bl
# file format
if header['nDataFormat'] == 0:
dt = np.dtype('i2')
elif header['nDataFormat'] == 1:
dt = np.dtype('f4')
if version < 2.:
nbchannel = header['nADCNumChannels']
headOffset = header['lDataSectionPtr'] * BLOCKSIZE +\
header['nNumPointsIgnored'] * dt.itemsize
totalsize = header['lActualAcqLength']
elif version >= 2.:
nbchannel = header['sections']['ADCSection']['llNumEntries']
headOffset = header['sections']['DataSection']['uBlockIndex'] *\
BLOCKSIZE
totalsize = header['sections']['DataSection']['llNumEntries']
data = np.memmap(self.filename, dt, 'r',
shape=(totalsize,), offset=headOffset)
# 3 possible modes
if version < 2.:
mode = header['nOperationMode']
elif version >= 2.:
mode = header['protocol']['nOperationMode']
#~ print 'mode', mode
if (mode == 1) or (mode == 2) or (mode == 5) or (mode == 3):
# event-driven variable-length mode (mode 1)
# event-driven fixed-length mode (mode 2 or 5)
            # gap free mode 3 can be in several episodes (strange but possible)
# read sweep pos
if version < 2.:
nbepisod = header['lSynchArraySize']
offsetEpisod = header['lSynchArrayPtr'] * BLOCKSIZE
elif version >= 2.:
SAS = header['sections']['SynchArraySection']
nbepisod = SAS['llNumEntries']
offsetEpisod = SAS['uBlockIndex'] * BLOCKSIZE
if nbepisod > 0:
episodArray = np.memmap(self.filename, [('offset', 'i4'),
('len', 'i4')], 'r', shape=(nbepisod),
offset=offsetEpisod)
else:
episodArray = np.empty((1), [('offset', 'i4'), ('len', 'i4')],)
episodArray[0]['len'] = data.size
episodArray[0]['offset'] = 0
# sampling_rate
if version < 2.:
sampling_rate = 1. / (header['fADCSampleInterval'] *
nbchannel * 1.e-6) * pq.Hz
elif version >= 2.:
sampling_rate = 1.e6 / \
header['protocol']['fADCSequenceInterval'] * pq.Hz
# construct block
# one sweep = one segment in a block
pos = 0
for j in range(episodArray.size):
seg = Segment(index=j)
length = episodArray[j]['len']
if version < 2.:
fSynchTimeUnit = header['fSynchTimeUnit']
elif version >= 2.:
fSynchTimeUnit = header['protocol']['fSynchTimeUnit']
if (fSynchTimeUnit != 0) and (mode == 1):
length /= fSynchTimeUnit
if not lazy:
subdata = data[pos:pos+length]
subdata = subdata.reshape((subdata.size/nbchannel,
nbchannel)).astype('f')
if dt == np.dtype('i2'):
if version < 2.:
reformat_integer_V1(subdata, nbchannel, header)
elif version >= 2.:
reformat_integer_V2(subdata, nbchannel, header)
pos += length
if version < 2.:
chans = [chan_num for chan_num in
header['nADCSamplingSeq'] if chan_num >= 0]
else:
chans = range(nbchannel)
for n, i in enumerate(chans[:nbchannel]): # fix SamplingSeq
if version < 2.:
name = header['sADCChannelName'][i].replace(b' ', b'')
unit = header['sADCUnits'][i].replace(b'\xb5', b'u').\
replace(b' ', b'').decode('utf-8') # \xb5 is µ
num = header['nADCPtoLChannelMap'][i]
elif version >= 2.:
lADCIi = header['listADCInfo'][i]
name = lADCIi['ADCChNames'].replace(b' ', b'')
unit = lADCIi['ADCChUnits'].replace(b'\xb5', b'u').\
replace(b' ', b'').decode('utf-8')
num = header['listADCInfo'][i]['nADCNum']
t_start = float(episodArray[j]['offset']) / sampling_rate
t_start = t_start.rescale('s')
try:
pq.Quantity(1, unit)
except:
unit = ''
if lazy:
signal = [] * pq.Quantity(1, unit)
else:
signal = pq.Quantity(subdata[:, n], unit)
anaSig = AnalogSignal(signal, sampling_rate=sampling_rate,
t_start=t_start,
name=str(name),
channel_index=int(num))
if lazy:
anaSig.lazy_shape = length / nbchannel
seg.analogsignals.append(anaSig)
bl.segments.append(seg)
        if mode in [3, 5]:  # TODO check if tags exist in other modes
# tag is EventArray that should be attached to Block
            # It is attached to the first Segment
times = []
labels = []
comments = []
for i, tag in enumerate(header['listTag']):
times.append(tag['lTagTime']/sampling_rate)
labels.append(str(tag['nTagType']))
comments.append(clean_string(tag['sComment']))
times = np.array(times)
labels = np.array(labels, dtype='S')
comments = np.array(comments, dtype='S')
# attach all tags to the first segment.
seg = bl.segments[0]
if lazy:
ea = EventArray(times=[] * pq.s,
labels=np.array([], dtype='S'))
ea.lazy_shape = len(times)
else:
ea = EventArray(times=times*pq.s,
labels=labels, comments=comments)
seg.eventarrays.append(ea)
bl.create_many_to_one_relationship()
return bl
def read_header(self,):
"""
read the header of the file
        The strategy here differs from the original Matlab script.
        In the original script for ABF2, it completes the header with
        information that is located in other structures.
In ABF2 this function return header with sub dict :
sections (ABF2)
protocol (ABF2)
listTags (ABF1&2)
listADCInfo (ABF2)
listDACInfo (ABF2)
dictEpochInfoPerDAC (ABF2)
that contain more information.
"""
fid = struct_file(open(self.filename, 'rb')) # fix for py3
# version
fFileSignature = fid.read(4)
if fFileSignature == b'ABF ': # fix for p3 where read returns bytes
headerDescription = headerDescriptionV1
elif fFileSignature == b'ABF2':
headerDescription = headerDescriptionV2
else:
return None
# construct dict
header = {}
for key, offset, fmt in headerDescription:
val = fid.read_f(fmt, offset=offset)
if len(val) == 1:
header[key] = val[0]
else:
header[key] = np.array(val)
# correction of version number and starttime
if fFileSignature == b'ABF ':
header['lFileStartTime'] = header['lFileStartTime'] +\
header['nFileStartMillisecs'] * .001
elif fFileSignature == b'ABF2':
n = header['fFileVersionNumber']
header['fFileVersionNumber'] = n[3] + 0.1 * n[2] +\
0.01 * n[1] + 0.001 * n[0]
header['lFileStartTime'] = header['uFileStartTimeMS'] * .001
if header['fFileVersionNumber'] < 2.:
# tags
listTag = []
for i in range(header['lNumTagEntries']):
fid.seek(header['lTagSectionPtr'] + i * 64)
tag = {}
for key, fmt in TagInfoDescription:
val = fid.read_f(fmt)
if len(val) == 1:
tag[key] = val[0]
else:
tag[key] = np.array(val)
listTag.append(tag)
header['listTag'] = listTag
#protocol name formatting #TODO move to read_protocol?
header['sProtocolPath'] = clean_string(header['sProtocolPath'])
header['sProtocolPath'] = header['sProtocolPath'].\
replace(b'\\', b'/')
elif header['fFileVersionNumber'] >= 2.:
# in abf2 some info are in other place
# sections
sections = {}
for s, sectionName in enumerate(sectionNames):
uBlockIndex, uBytes, llNumEntries =\
fid.read_f('IIl', offset=76 + s * 16)
sections[sectionName] = {}
sections[sectionName]['uBlockIndex'] = uBlockIndex
sections[sectionName]['uBytes'] = uBytes
sections[sectionName]['llNumEntries'] = llNumEntries
header['sections'] = sections
# strings sections
# hack for reading channels names and units
fid.seek(sections['StringsSection']['uBlockIndex'] * BLOCKSIZE)
bigString = fid.read(sections['StringsSection']['uBytes'])
goodstart = bigString.lower().find(b'clampex')
if goodstart == -1:
goodstart = bigString.lower().find(b'axoscope')
bigString = bigString[goodstart:]
strings = bigString.split(b'\x00')
# ADC sections
header['listADCInfo'] = []
for i in range(sections['ADCSection']['llNumEntries']):
# read ADCInfo
fid.seek(sections['ADCSection']['uBlockIndex'] *
BLOCKSIZE + sections['ADCSection']['uBytes'] * i)
ADCInfo = {}
for key, fmt in ADCInfoDescription:
val = fid.read_f(fmt)
if len(val) == 1:
ADCInfo[key] = val[0]
else:
ADCInfo[key] = np.array(val)
ADCInfo['ADCChNames'] = strings[ADCInfo['lADCChannelNameIndex']
- 1]
ADCInfo['ADCChUnits'] = strings[ADCInfo['lADCUnitsIndex'] - 1]
header['listADCInfo'].append(ADCInfo)
# protocol sections
protocol = {}
fid.seek(sections['ProtocolSection']['uBlockIndex'] * BLOCKSIZE)
for key, fmt in protocolInfoDescription:
val = fid.read_f(fmt)
if len(val) == 1:
protocol[key] = val[0]
else:
protocol[key] = np.array(val)
header['protocol'] = protocol
# tags
listTag = []
for i in range(sections['TagSection']['llNumEntries']):
fid.seek(sections['TagSection']['uBlockIndex'] *
BLOCKSIZE + sections['TagSection']['uBytes'] * i)
tag = {}
for key, fmt in TagInfoDescription:
val = fid.read_f(fmt)
if len(val) == 1:
tag[key] = val[0]
else:
tag[key] = np.array(val)
listTag.append(tag)
header['listTag'] = listTag
# DAC sections
header['listDACInfo'] = []
for i in range(sections['DACSection']['llNumEntries']):
# read DACInfo
fid.seek(sections['DACSection']['uBlockIndex'] *
BLOCKSIZE + sections['DACSection']['uBytes'] * i)
DACInfo = {}
for key, fmt in DACInfoDescription:
val = fid.read_f(fmt)
if len(val) == 1:
DACInfo[key] = val[0]
else:
DACInfo[key] = np.array(val)
DACInfo['DACChNames'] = strings[DACInfo['lDACChannelNameIndex']
- 1]
DACInfo['DACChUnits'] = strings[
DACInfo['lDACChannelUnitsIndex'] - 1]
header['listDACInfo'].append(DACInfo)
# EpochPerDAC sections
# header['dictEpochInfoPerDAC'] is dict of dicts:
# - the first index is the DAC number
# - the second index is the epoch number
# It has to be done like that because data may not exist
# and may not be in sorted order
header['dictEpochInfoPerDAC'] = {}
for i in range(sections['EpochPerDACSection']['llNumEntries']):
# read DACInfo
fid.seek(sections['EpochPerDACSection']['uBlockIndex'] *
BLOCKSIZE +
sections['EpochPerDACSection']['uBytes'] * i)
EpochInfoPerDAC = {}
for key, fmt in EpochInfoPerDACDescription:
val = fid.read_f(fmt)
if len(val) == 1:
EpochInfoPerDAC[key] = val[0]
else:
EpochInfoPerDAC[key] = np.array(val)
DACNum = EpochInfoPerDAC['nDACNum']
EpochNum = EpochInfoPerDAC['nEpochNum']
# Checking if the key exists, if not, the value is empty
# so we have to create empty dict to populate
if DACNum not in header['dictEpochInfoPerDAC']:
header['dictEpochInfoPerDAC'][DACNum] = {}
header['dictEpochInfoPerDAC'][DACNum][EpochNum] =\
EpochInfoPerDAC
fid.close()
return header
def read_protocol(self):
"""
Read the protocol waveform of the file, if present;
function works with ABF2 only. Protocols can be reconstructed
from the ABF1 header.
Returns: list of segments (one for every episode)
with list of analog signls (one for every DAC).
"""
header = self.read_header()
if header['fFileVersionNumber'] < 2.:
raise IOError("Protocol section is only present in ABF2 files.")
nADC = header['sections']['ADCSection']['llNumEntries'] # n ADC chans
nDAC = header['sections']['DACSection']['llNumEntries'] # n DAC chans
nSam = header['protocol']['lNumSamplesPerEpisode']/nADC # samples/ep
nEpi = header['lActualEpisodes']
sampling_rate = 1.e6/header['protocol']['fADCSequenceInterval'] * pq.Hz
# Make a list of segments with analog signals with just holding levels
# List of segments relates to number of episodes, as for recorded data
segments = []
for epiNum in range(nEpi):
seg = Segment(index=epiNum)
# One analog signal for each DAC in segment (episode)
for DACNum in range(nDAC):
t_start = 0 * pq.s # TODO: Possibly check with episode array
name = header['listDACInfo'][DACNum]['DACChNames']
unit = header['listDACInfo'][DACNum]['DACChUnits'].\
replace(b'\xb5', b'u') # \xb5 is µ
signal = np.ones(nSam) *\
header['listDACInfo'][DACNum]['fDACHoldingLevel'] *\
pq.Quantity(1, unit)
anaSig = AnalogSignal(signal, sampling_rate=sampling_rate,
t_start=t_start, name=str(name),
channel_index=DACNum)
# If there are epoch infos for this DAC
if DACNum in header['dictEpochInfoPerDAC']:
# Save last sample index
i_last = int(nSam * 15625 / 10**6)
# TODO guess for first holding
# Go over EpochInfoPerDAC and change the analog signal
# according to the epochs
epochInfo = header['dictEpochInfoPerDAC'][DACNum]
for epochNum, epoch in iteritems(epochInfo):
i_begin = i_last
i_end = i_last + epoch['lEpochInitDuration'] +\
epoch['lEpochDurationInc'] * epiNum
dif = i_end-i_begin
anaSig[i_begin:i_end] = np.ones(len(range(dif))) *\
pq.Quantity(1, unit) * (epoch['fEpochInitLevel'] +
epoch['fEpochLevelInc'] *
epiNum)
i_last += epoch['lEpochInitDuration']
seg.analogsignals.append(anaSig)
segments.append(seg)
return segments
BLOCKSIZE = 512
headerDescriptionV1 = [
('fFileSignature', 0, '4s'),
('fFileVersionNumber', 4, 'f'),
('nOperationMode', 8, 'h'),
('lActualAcqLength', 10, 'i'),
('nNumPointsIgnored', 14, 'h'),
('lActualEpisodes', 16, 'i'),
('lFileStartTime', 24, 'i'),
('lDataSectionPtr', 40, 'i'),
('lTagSectionPtr', 44, 'i'),
('lNumTagEntries', 48, 'i'),
('lSynchArrayPtr', 92, 'i'),
('lSynchArraySize', 96, 'i'),
('nDataFormat', 100, 'h'),
('nADCNumChannels', 120, 'h'),
('fADCSampleInterval', 122, 'f'),
('fSynchTimeUnit', 130, 'f'),
('lNumSamplesPerEpisode', 138, 'i'),
('lPreTriggerSamples', 142, 'i'),
('lEpisodesPerRun', 146, 'i'),
('fADCRange', 244, 'f'),
('lADCResolution', 252, 'i'),
('nFileStartMillisecs', 366, 'h'),
('nADCPtoLChannelMap', 378, '16h'),
('nADCSamplingSeq', 410, '16h'),
('sADCChannelName', 442, '10s'*16),
('sADCUnits', 602, '8s'*16),
('fADCProgrammableGain', 730, '16f'),
('fInstrumentScaleFactor', 922, '16f'),
('fInstrumentOffset', 986, '16f'),
('fSignalGain', 1050, '16f'),
('fSignalOffset', 1114, '16f'),
('nDigitalEnable', 1436, 'h'),
('nActiveDACChannel', 1440, 'h'),
('nDigitalHolding', 1584, 'h'),
('nDigitalInterEpisode', 1586, 'h'),
('nDigitalValue', 2588, '10h'),
('lDACFilePtr', 2048, '2i'),
('lDACFileNumEpisodes', 2056, '2i'),
('fDACCalibrationFactor', 2074, '4f'),
('fDACCalibrationOffset', 2090, '4f'),
('nWaveformEnable', 2296, '2h'),
('nWaveformSource', 2300, '2h'),
('nInterEpisodeLevel', 2304, '2h'),
('nEpochType', 2308, '20h'),
('fEpochInitLevel', 2348, '20f'),
('fEpochLevelInc', 2428, '20f'),
('lEpochInitDuration', 2508, '20i'),
('lEpochDurationInc', 2588, '20i'),
('nTelegraphEnable', 4512, '16h'),
('fTelegraphAdditGain', 4576, '16f'),
('sProtocolPath', 4898, '384s'),
]
headerDescriptionV2 = [
('fFileSignature', 0, '4s'),
('fFileVersionNumber', 4, '4b'),
('uFileInfoSize', 8, 'I'),
('lActualEpisodes', 12, 'I'),
('uFileStartDate', 16, 'I'),
('uFileStartTimeMS', 20, 'I'),
('uStopwatchTime', 24, 'I'),
('nFileType', 28, 'H'),
('nDataFormat', 30, 'H'),
('nSimultaneousScan', 32, 'H'),
('nCRCEnable', 34, 'H'),
('uFileCRC', 36, 'I'),
('FileGUID', 40, 'I'),
('uCreatorVersion', 56, 'I'),
('uCreatorNameIndex', 60, 'I'),
('uModifierVersion', 64, 'I'),
('uModifierNameIndex', 68, 'I'),
('uProtocolPathIndex', 72, 'I'),
]
sectionNames = [
'ProtocolSection',
'ADCSection',
'DACSection',
'EpochSection',
'ADCPerDACSection',
'EpochPerDACSection',
'UserListSection',
'StatsRegionSection',
'MathSection',
'StringsSection',
'DataSection',
'TagSection',
'ScopeSection',
'DeltaSection',
'VoiceTagSection',
'SynchArraySection',
'AnnotationSection',
'StatsSection',
]
protocolInfoDescription = [
('nOperationMode', 'h'),
('fADCSequenceInterval', 'f'),
('bEnableFileCompression', 'b'),
('sUnused1', '3s'),
('uFileCompressionRatio', 'I'),
('fSynchTimeUnit', 'f'),
('fSecondsPerRun', 'f'),
('lNumSamplesPerEpisode', 'i'),
('lPreTriggerSamples', 'i'),
('lEpisodesPerRun', 'i'),
('lRunsPerTrial', 'i'),
('lNumberOfTrials', 'i'),
('nAveragingMode', 'h'),
('nUndoRunCount', 'h'),
('nFirstEpisodeInRun', 'h'),
('fTriggerThreshold', 'f'),
('nTriggerSource', 'h'),
('nTriggerAction', 'h'),
('nTriggerPolarity', 'h'),
('fScopeOutputInterval', 'f'),
('fEpisodeStartToStart', 'f'),
('fRunStartToStart', 'f'),
('lAverageCount', 'i'),
('fTrialStartToStart', 'f'),
('nAutoTriggerStrategy', 'h'),
('fFirstRunDelayS', 'f'),
('nChannelStatsStrategy', 'h'),
('lSamplesPerTrace', 'i'),
('lStartDisplayNum', 'i'),
('lFinishDisplayNum', 'i'),
('nShowPNRawData', 'h'),
('fStatisticsPeriod', 'f'),
('lStatisticsMeasurements', 'i'),
('nStatisticsSaveStrategy', 'h'),
('fADCRange', 'f'),
('fDACRange', 'f'),
('lADCResolution', 'i'),
('lDACResolution', 'i'),
('nExperimentType', 'h'),
('nManualInfoStrategy', 'h'),
('nCommentsEnable', 'h'),
('lFileCommentIndex', 'i'),
('nAutoAnalyseEnable', 'h'),
('nSignalType', 'h'),
('nDigitalEnable', 'h'),
('nActiveDACChannel', 'h'),
('nDigitalHolding', 'h'),
('nDigitalInterEpisode', 'h'),
('nDigitalDACChannel', 'h'),
('nDigitalTrainActiveLogic', 'h'),
('nStatsEnable', 'h'),
('nStatisticsClearStrategy', 'h'),
('nLevelHysteresis', 'h'),
('lTimeHysteresis', 'i'),
('nAllowExternalTags', 'h'),
('nAverageAlgorithm', 'h'),
('fAverageWeighting', 'f'),
('nUndoPromptStrategy', 'h'),
('nTrialTriggerSource', 'h'),
('nStatisticsDisplayStrategy', 'h'),
('nExternalTagType', 'h'),
('nScopeTriggerOut', 'h'),
('nLTPType', 'h'),
('nAlternateDACOutputState', 'h'),
('nAlternateDigitalOutputState', 'h'),
('fCellID', '3f'),
('nDigitizerADCs', 'h'),
('nDigitizerDACs', 'h'),
('nDigitizerTotalDigitalOuts', 'h'),
('nDigitizerSynchDigitalOuts', 'h'),
('nDigitizerType', 'h'),
]
ADCInfoDescription = [
('nADCNum', 'h'),
('nTelegraphEnable', 'h'),
('nTelegraphInstrument', 'h'),
('fTelegraphAdditGain', 'f'),
('fTelegraphFilter', 'f'),
('fTelegraphMembraneCap', 'f'),
('nTelegraphMode', 'h'),
('fTelegraphAccessResistance', 'f'),
('nADCPtoLChannelMap', 'h'),
('nADCSamplingSeq', 'h'),
('fADCProgrammableGain', 'f'),
('fADCDisplayAmplification', 'f'),
('fADCDisplayOffset', 'f'),
('fInstrumentScaleFactor', 'f'),
('fInstrumentOffset', 'f'),
('fSignalGain', 'f'),
('fSignalOffset', 'f'),
('fSignalLowpassFilter', 'f'),
('fSignalHighpassFilter', 'f'),
('nLowpassFilterType', 'b'),
('nHighpassFilterType', 'b'),
('fPostProcessLowpassFilter', 'f'),
('nPostProcessLowpassFilterType', 'c'),
('bEnabledDuringPN', 'b'),
('nStatsChannelPolarity', 'h'),
('lADCChannelNameIndex', 'i'),
('lADCUnitsIndex', 'i'),
]
TagInfoDescription = [
('lTagTime', 'i'),
('sComment', '56s'),
('nTagType', 'h'),
('nVoiceTagNumber_or_AnnotationIndex', 'h'),
]
DACInfoDescription = [
('nDACNum', 'h'),
('nTelegraphDACScaleFactorEnable', 'h'),
('fInstrumentHoldingLevel', 'f'),
('fDACScaleFactor', 'f'),
('fDACHoldingLevel', 'f'),
('fDACCalibrationFactor', 'f'),
('fDACCalibrationOffset', 'f'),
('lDACChannelNameIndex', 'i'),
('lDACChannelUnitsIndex', 'i'),
('lDACFilePtr', 'i'),
('lDACFileNumEpisodes', 'i'),
('nWaveformEnable', 'h'),
('nWaveformSource', 'h'),
('nInterEpisodeLevel', 'h'),
('fDACFileScale', 'f'),
('fDACFileOffset', 'f'),
('lDACFileEpisodeNum', 'i'),
('nDACFileADCNum', 'h'),
('nConditEnable', 'h'),
('lConditNumPulses', 'i'),
('fBaselineDuration', 'f'),
('fBaselineLevel', 'f'),
('fStepDuration', 'f'),
('fStepLevel', 'f'),
('fPostTrainPeriod', 'f'),
('fPostTrainLevel', 'f'),
('nMembTestEnable', 'h'),
('nLeakSubtractType', 'h'),
('nPNPolarity', 'h'),
('fPNHoldingLevel', 'f'),
('nPNNumADCChannels', 'h'),
('nPNPosition', 'h'),
('nPNNumPulses', 'h'),
('fPNSettlingTime', 'f'),
('fPNInterpulse', 'f'),
('nLTPUsageOfDAC', 'h'),
('nLTPPresynapticPulses', 'h'),
('lDACFilePathIndex', 'i'),
('fMembTestPreSettlingTimeMS', 'f'),
('fMembTestPostSettlingTimeMS', 'f'),
('nLeakSubtractADCIndex', 'h'),
('sUnused', '124s'),
]
EpochInfoPerDACDescription = [
('nEpochNum', 'h'),
('nDACNum', 'h'),
('nEpochType', 'h'),
('fEpochInitLevel', 'f'),
('fEpochLevelInc', 'f'),
('lEpochInitDuration', 'i'),
('lEpochDurationInc', 'i'),
('lEpochPulsePeriod', 'i'),
('lEpochPulseWidth', 'i'),
('sUnused', '18s'),
]
EpochInfoDescription = [
('nEpochNum', 'h'),
('nDigitalValue', 'h'),
('nDigitalTrainValue', 'h'),
('nAlternateDigitalValue', 'h'),
('nAlternateDigitalTrainValue', 'h'),
('bEpochCompression', 'b'),
('sUnused', '21s'),
]
| bsd-3-clause | -1,758,531,858,273,080,600 | 35.436133 | 79 | 0.507408 | false |
savex/spectra | tools/salt_networks.py | 1 | 2298 | import re
import sys
import subprocess
import json
def shell(command):
_ps = subprocess.Popen(
command.split(),
stdout=subprocess.PIPE
).communicate()[0].decode()
return _ps
def cut_option(_param, _options_list):
_option = "n/a"
_result_list = []
if _param in _options_list:
_index = _options_list.index(_param)
_option = _options_list[_index+1]
_l1 = _options_list[:_index]
_l2 = _options_list[_index+2:]
_result_list = _l1 + _l2
else:
_result_list = _options_list
return _option, _result_list
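# Hedged usage sketch (illustrative, not part of the original script):
# cut_option pops "<param> <value>" out of a tokenized `ip` output line and
# returns the value together with the remaining tokens, e.g.
#
#     >>> cut_option("mtu", ["mtu", "1500", "qdisc", "noqueue", "state", "UP"])
#     ('1500', ['qdisc', 'noqueue', 'state', 'UP'])
#
# and returns ("n/a", unchanged_list) when the parameter is absent.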
def get_ifs_data():
_ifs_raw = shell('ip a')
if_start = re.compile("^[0-9]+: .*: \<.*\> .*$")
if_ipv4 = re.compile("^\s{4}inet\ .*$")
_ifs = {}
_if_name = None
for line in _ifs_raw.splitlines():
_if_data = {}
if if_start.match(line):
_tmp = line.split(':')
_if_name = _tmp[1].strip()
_if_options = _tmp[2].strip().split(' ')
_if_data['order'] = _tmp[0]
_if_data['mtu'], _if_options = cut_option("mtu", _if_options)
_if_data['qlen'], _if_options = cut_option("qlen", _if_options)
_if_data['state'], _if_options = cut_option("state", _if_options)
_if_data['other'] = _if_options
_if_data['ipv4'] = {}
_ifs[_if_name] = _if_data
elif if_ipv4.match(line):
if _if_name is None:
continue
else:
_tmp = line.strip().split(' ', 2)
_ip = _tmp[1]
_options = _tmp[2].split(' ')
_brd, _options = cut_option("brd", _options)
# TODO: Parse other options, mask, brd, etc...
_ifs[_if_name]['ipv4'][_ip] = {}
_ifs[_if_name]['ipv4'][_ip]['brd'] = _brd
_ifs[_if_name]['ipv4'][_ip]['other'] = _options
return _ifs
ifs_data = get_ifs_data()
# _ifs = sorted(ifs_data.keys())
# _ifs.remove("lo")
# for _idx in range(len(_ifs)):
# print("{:25}: {:20} {:10} {:5}".format(
# _ifs[_idx],
# " ".join(ifs_data[_ifs[_idx]]['ipv4'].keys()),
# ifs_data[_ifs[_idx]]['mtu'],
# ifs_data[_ifs[_idx]]['state']
# ))
buff = json.dumps(ifs_data)
sys.stdout.write(buff) | apache-2.0 | -6,106,116,033,646,082,000 | 33.313433 | 77 | 0.483899 | false |
nschloe/quadpy | src/quadpy/c1/_fejer.py | 1 | 1547 | import numpy as np
from ..helpers import article
from ._helpers import C1Scheme
source = article(
authors=["J. Waldvogel"],
title="Fast Construction of the Fejér and Clenshaw–Curtis Quadrature Rules",
journal="BIT Numerical Mathematics",
month="mar",
year="2006",
volume="46",
number="1",
pages="195–202",
url="https://doi.org/10.1007/s10543-006-0045-4",
)
def fejer_1(n):
degree = n
points = -np.cos(np.pi * (np.arange(n) + 0.5) / n)
# n -= 1
N = np.arange(1, n, 2)
length = len(N)
m = n - length
K = np.arange(m)
v0 = np.concatenate(
[
2 * np.exp(1j * np.pi * K / n) / (1 - 4 * K ** 2),
np.zeros(length + 1),
]
)
v1 = v0[:-1] + np.conjugate(v0[:0:-1])
w = np.fft.ifft(v1)
assert max(w.imag) < 1.0e-15
weights = w.real
return C1Scheme("Fejér 1", degree, weights, points, source)
def fejer_2(n):
degree = n
points = -np.cos((np.pi * np.arange(1, n + 1)) / (n + 1))
n += 1
N = np.arange(1, n, 2)
length = len(N)
m = n - length
v0 = np.concatenate([2.0 / N / (N - 2), np.array([1.0 / N[-1]]), np.zeros(m)])
v2 = -v0[:-1] - v0[:0:-1]
w = np.fft.ihfft(v2)
assert max(w.imag) < 1.0e-15
w = w.real
if n % 2 == 1:
weights = np.concatenate([w, w[::-1]])
else:
weights = np.concatenate([w, w[len(w) - 2 :: -1]])
# cut off first and last
weights = weights[1:-1]
return C1Scheme("Fejér 2", degree, weights, points, source)
| mit | -6,587,747,661,665,124,000 | 21.647059 | 82 | 0.523377 | false |
felixonmars/suds-ng | suds/bindings/document.py | 1 | 5736 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Provides classes for the (WS) SOAP I{document/literal}.
"""
from logging import getLogger
from suds.bindings.binding import Binding
from suds.sax.element import Element
log = getLogger(__name__)
class Document(Binding):
"""
The document/literal style. Literal is the only (@use) supported
since document/encoded is pretty much dead.
Although the soap specification supports multiple documents within the soap
<body/>, it is very uncommon. As such, suds presents an I{RPC} view of
service methods defined with a single document parameter. This is done so
that the user can pass individual parameters instead of one, single document.
To support the complete specification, service methods defined with multiple documents
(multiple message parts), must present a I{document} view for that method.
"""
def bodycontent(self, method, args, kwargs):
#
# The I{wrapped} vs I{bare} style is detected in 2 ways.
        # If there are 2+ parts in the message then it is I{bare}.
# If there is only (1) part and that part resolves to a builtin then
# it is I{bare}. Otherwise, it is I{wrapped}.
#
if not len(method.soap.input.body.parts):
return ()
wrapped = method.soap.input.body.wrapped
if wrapped:
pts = self.bodypart_types(method)
root = self.document(pts[0])
else:
root = []
n = 0
for pd in self.param_defs(method):
if n < len(args):
value = args[n]
else:
value = kwargs.get(pd[0])
n += 1
p = self.mkparam(method, pd, value)
if p is None:
continue
if not wrapped:
ns = pd[1].namespace('ns0')
p.setPrefix(ns[0], ns[1])
root.append(p)
return root
def replycontent(self, method, body):
wrapped = method.soap.output.body.wrapped
if wrapped:
return body[0].children
else:
return body.children
def document(self, wrapper):
"""
Get the document root. For I{document/literal}, this is the
        name of the wrapper element qualified by the schema tns.
@param wrapper: The method name.
@type wrapper: L{xsd.sxbase.SchemaObject}
@return: A root element.
@rtype: L{Element}
"""
tag = wrapper[1].name
ns = wrapper[1].namespace('ns0')
d = Element(tag, ns=ns)
return d
def mkparam(self, method, pdef, object):
#
# Expand list parameters into individual parameters
# each with the type information. This is because in document
# arrays are simply unbounded elements.
#
if isinstance(object, (list, tuple)):
tags = []
for item in object:
tags.append(self.mkparam(method, pdef, item))
return tags
else:
return Binding.mkparam(self, method, pdef, object)
def param_defs(self, method):
#
# Get parameter definitions for document literal.
# The I{wrapped} vs I{bare} style is detected in 2 ways.
        # If there are 2+ parts in the message then it is I{bare}.
# If there is only (1) part and that part resolves to a builtin then
# it is I{bare}. Otherwise, it is I{wrapped}.
#
pts = self.bodypart_types(method)
wrapped = method.soap.input.body.wrapped
if not wrapped:
return pts
result = []
# wrapped
for p in pts:
resolved = p[1].resolve()
for child, ancestry in resolved:
if child.isattr():
continue
if self.bychoice(ancestry):
log.debug(
'%s\ncontained by <choice/>, excluded as param for %s()',
child,
method.name)
continue
result.append((child.name, child))
return result
def returned_types(self, method):
result = []
wrapped = method.soap.output.body.wrapped
rts = self.bodypart_types(method, input=False)
if wrapped:
for pt in rts:
resolved = pt.resolve(nobuiltin=True)
for child, ancestry in resolved:
result.append(child)
break
else:
result += rts
return result
def bychoice(self, ancestry):
"""
The ancestry contains a <choice/>
@param ancestry: A list of ancestors.
@type ancestry: list
@return: True if contains <choice/>
@rtype: boolean
"""
for x in ancestry:
if x.choice():
return True
return False
| lgpl-3.0 | 1,296,709,443,985,954,300 | 35.075472 | 90 | 0.584902 | false |
n-kats/tf-gogh | models.py | 1 | 5142 | import tensorflow as tf
import numpy as np
from PIL import Image
from caffe_to_tf import load_caffemodel
from data import transform_from_train
def pool(x, ksize, stride, padding="SAME"):
return tf.nn.max_pool(x, ksize=[1, ksize, ksize, 1],
strides=[1, stride, stride, 1],
padding=padding)
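# Hedged note (not in the original source): with padding="SAME" the output
# spatial size is ceil(input_size / stride), e.g.
#
#     >>> x = tf.placeholder(tf.float32, [1, 224, 224, 3])
#     >>> pool(x, ksize=3, stride=2).get_shape().as_list()
#     [1, 112, 112, 3]
#
# (assuming the TF1-style graph API already used throughout this file).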
class BaseModel:
"""
    Abstract base class for models used to extract feature maps
"""
default_caffemodel = None
default_alpha = None
default_beta = None
def __init__(self, caffemodel=None, alpha=None, beta=None):
self.conv = load_caffemodel(caffemodel or self.default_caffemodel)
self.alpha = alpha or self.default_alpha
self.beta = beta or self.default_beta
class NIN(BaseModel):
"""
    Feature extraction using NIN
"""
default_caffemodel = "nin_imagenet.caffemodel"
default_alpha = [0., 0., 1., 1.]
default_beta = [1., 1., 1., 1.]
def __call__(self, x):
"""NINの特徴量"""
x0 = self.conv("conv1")(x, stride=4)
y1 = self.conv("cccp2")(self.conv("cccp1")(x0), activation_fn=None)
pool1 = pool(tf.nn.relu(y1), ksize=3, stride=2)
x1 = self.conv("conv2")(pool1, stride=1)
y2 = self.conv("cccp4")(self.conv("cccp3")(x1), activation_fn=None)
pool2 = pool(tf.nn.relu(y2), ksize=3, stride=2)
x2 = self.conv("conv3")(pool2, stride=1)
y3 = self.conv("cccp6")(self.conv("cccp5")(x2), activation_fn=None)
pool3 = pool(tf.nn.relu(y3), ksize=3, stride=2)
drop = tf.nn.dropout(pool3, 0.5)
x3 = self.conv("conv4-1024")(drop)
return [x0, x1, x2, x3]
class VGG(BaseModel):
"""
    Feature extraction using VGG
"""
default_caffemodel = "VGG_ILSVRC_16_layers.caffemodel"
default_alpha = [0., 0., 1., 1.]
default_beta = [1., 1., 1., 1.]
def __call__(self, x):
"""VGGの特徴量"""
y1 = self.conv("conv1_2")(self.conv("conv1_1")(x), activation_fn=None)
x1 = pool(tf.nn.relu(y1), ksize=2, stride=2)
y2 = self.conv("conv2_2")(self.conv("conv2_1")(x1), activation_fn=None)
x2 = pool(tf.nn.relu(y2), ksize=2, stride=2)
y3 = self.conv("conv3_3")(self.conv("conv3_2")(self.conv("conv3_1")(x2)), activation_fn=None)
x3 = pool(tf.nn.relu(y3), ksize=2, stride=2)
y4 = self.conv("conv4_3")(self.conv("conv4_2")(self.conv("conv4_1")(x3)), activation_fn=None)
return [y1, y2, y3, y4]
def generate_model(model_name, **args):
if model_name == 'nin':
return NIN(**args)
if model_name == 'vgg':
return VGG(**args)
def style_matrix(y):
"""画風を表現する行列"""
_, height, width, ch_num = y.get_shape().as_list()
y_reshaped = tf.reshape(y, [-1, height * width, ch_num])
if tf.__version__[0] == '1':
return tf.matmul(y_reshaped, y_reshaped, adjoint_a=True) / (height * width * ch_num)
elif tf.__version__[0] == '0':
return tf.batch_matmul(y_reshaped, y_reshaped, adj_x=True) / (height * width * ch_num)
else:
raise
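# Hedged shape sketch (illustrative only): for a feature map y of shape
# [batch, H, W, C], style_matrix(y) is a normalised Gram matrix of shape
# [batch, C, C], e.g.
#
#     >>> y = tf.zeros([1, 32, 32, 64])
#     >>> style_matrix(y).get_shape().as_list()
#     [1, 64, 64]
#
# Matching these Gram matrices between the style image and the generated
# image is what the loss_style terms in Generator below measure.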
class Generator:
def __init__(self, base_model, img_orig, img_style, config):
        # Extract feature maps
mids_orig = base_model(img_orig)
mids_style = base_model(img_style)
        # Precompute the terms used in the loss function
prods_style = [style_matrix(y) for y in mids_style]
        # Initialize img_gen (the generated image)
img_gen = tf.Variable(tf.random_uniform(config.output_shape, -20, 20))
self.img_gen = img_gen
mids = base_model(img_gen)
self.loss_orig = []
self.loss_style = []
for mid, mid_orig in zip(mids, mids_orig):
shape = mid.get_shape().as_list()
self.loss_orig.append(tf.nn.l2_loss(mid - mid_orig) / np.prod(shape))
for mid, prod_style in zip(mids, prods_style):
shape = prod_style.get_shape().as_list()
self.loss_style.append(tf.nn.l2_loss(style_matrix(mid) - prod_style) / np.prod(shape))
total_loss = 0
for l, a in zip(self.loss_orig, base_model.alpha):
if a != 0:
total_loss += l * (a * config.lam)
for l, b in zip(self.loss_style, base_model.beta):
if b != 0:
total_loss += l * b
self.total_loss = total_loss
self.total_train = config.optimizer.minimize(total_loss)
clipped = tf.clip_by_value(self.img_gen, -120., 135.)
self.clip = tf.assign(self.img_gen, clipped)
def generate(self, config):
with tf.Session() as sess:
if hasattr(tf, "global_variables_initializer"):
sess.run(tf.global_variables_initializer())
else:
sess.run(tf.initialize_all_variables())
print("start")
            # Start optimization
for i in range(config.iteration):
sess.run([self.total_train, self.clip])
if (i + 1) % 50 == 0:
l, l1, l2 = sess.run([self.total_loss, self.loss_orig, self.loss_style])
print("%d| loss: %f, loss_orig: %f, loss_style: %f" % (i + 1, l, sum(l1), sum(l2)))
for l1_, l2_ in zip(l1, l2):
print("loss_orig: %f, loss_style: %f" % (l1_, l2_))
self.save_image(sess, config.save_path % (i + 1))
def save_image(self, sess, path):
data = sess.run(self.img_gen)[0]
data = transform_from_train(data)
img = Image.fromarray(data.astype(np.uint8))
print("save %s" % path)
img.save(path)
| mit | 4,915,162,749,251,367,000 | 29.463415 | 97 | 0.604283 | false |
kbaseapps/GenomeFileUtil | test/supplemental_gff_tests/fasta_gff_upload_test.py | 1 | 30327 | # -*- coding: utf-8 -*-
import os # noqa: F401
import re
import shutil
import time
import unittest
from os import environ
from configparser import ConfigParser # py3
from installed_clients.WorkspaceClient import Workspace as workspaceService
from GenomeFileUtil.GenomeFileUtilImpl import GenomeFileUtil
from GenomeFileUtil.GenomeFileUtilImpl import SDKConfig
from GenomeFileUtil.GenomeFileUtilServer import MethodContext
from GenomeFileUtil.authclient import KBaseAuth as _KBaseAuth
from GenomeFileUtil.core.FastaGFFToGenome import FastaGFFToGenome
from installed_clients.DataFileUtilClient import DataFileUtil
class FastaGFFToGenomeUploadTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.token = environ.get('KB_AUTH_TOKEN', None)
config_file = environ.get('KB_DEPLOYMENT_CONFIG', None)
cls.cfg = {}
config = ConfigParser()
config.read(config_file) # type: ignore
for nameval in config.items('GenomeFileUtil'):
cls.cfg[nameval[0]] = nameval[1]
authServiceUrl = cls.cfg.get('auth-service-url',
"https://kbase.us/services/authorization/Sessions/Login")
auth_client = _KBaseAuth(authServiceUrl)
cls.user_id = auth_client.get_user(cls.token)
cls.ctx = MethodContext(None)
cls.ctx.update({'token': cls.token,
'user_id': cls.user_id,
'provenance': [
{'service': 'GenomeFileUtil',
'method': 'please_never_use_it_in_production',
'method_params': []
}],
'authenticated': 1})
cls.wsURL = cls.cfg['workspace-url']
cls.wsClient = workspaceService(cls.wsURL, token=cls.token)
cls.serviceImpl = GenomeFileUtil(cls.cfg)
cls.dfu = DataFileUtil(os.environ['SDK_CALLBACK_URL'], token=cls.token)
cls.scratch = cls.cfg['scratch']
cls.shockURL = cls.cfg['shock-url']
cls.gfu_cfg = SDKConfig(cls.cfg)
cls.prepare_data()
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'wsName'):
cls.wsClient.delete_workspace({'workspace': cls.wsName})
print('Test workspace was deleted')
def getWsClient(self):
return self.__class__.wsClient
def getWsName(self):
if hasattr(self.__class__, 'wsName'):
return self.__class__.wsName
suffix = int(time.time() * 1000)
wsName = "test_GenomeFileUtil_" + str(suffix)
ret = self.getWsClient().create_workspace({'workspace': wsName}) # noqa
self.__class__.wsName = wsName
return wsName
def getImpl(self):
return self.__class__.serviceImpl
def getContext(self):
return self.__class__.ctx
@classmethod
def prepare_data(cls):
cls.importer = FastaGFFToGenome(cls.gfu_cfg)
cls.gff_filename = 'Test_v1.0.gene.gff3.gz'
cls.gff_path = os.path.join(cls.scratch, cls.gff_filename)
shutil.copy(os.path.join("data", "fasta_gff", "JGI", "Plant_Data", cls.gff_filename), cls.gff_path)
cls.fa_filename = 'Test_v1.0.fa.gz'
cls.fa_path = os.path.join(cls.scratch, cls.fa_filename)
shutil.copy(os.path.join("data", "fasta_gff", "JGI", "Plant_Data", cls.fa_filename), cls.fa_path)
cls.fungal_gff_filename = 'Neucr2.filtered_proteins.BroadModels.gff3.gz'
cls.fungal_gff_path = os.path.join(cls.scratch, cls.fungal_gff_filename)
shutil.copy(os.path.join("data", "fasta_gff", "JGI", "Fungal_Data", cls.fungal_gff_filename),
cls.fungal_gff_path)
cls.fungal_fa_filename = 'Neucr2_AssemblyScaffolds.fasta.gz'
cls.fungal_fa_path = os.path.join(cls.scratch, cls.fungal_fa_filename)
shutil.copy(os.path.join("data", "fasta_gff", "JGI", "Fungal_Data", cls.fungal_fa_filename),
cls.fungal_fa_path)
cls.jgi_bacterial_gff_filename = '2547132501.gff.gz'
cls.jgi_bacterial_gff_path = os.path.join(cls.scratch, cls.jgi_bacterial_gff_filename)
shutil.copy(os.path.join("data", "fasta_gff", "JGI", "Bacterial_Data", cls.jgi_bacterial_gff_filename),
cls.jgi_bacterial_gff_path)
cls.jgi_bacterial_fa_filename = '2547132501.fna.gz'
cls.jgi_bacterial_fa_path = os.path.join(cls.scratch, cls.jgi_bacterial_fa_filename)
shutil.copy(os.path.join("data", "fasta_gff", "JGI", "Bacterial_Data", cls.jgi_bacterial_fa_filename),
cls.jgi_bacterial_fa_path)
cls.jgi_bacterial_gff2_filename = '91705.assembled.gff'
cls.jgi_bacterial_gff2_path = os.path.join(cls.scratch, cls.jgi_bacterial_gff2_filename)
shutil.copy(os.path.join("data", "fasta_gff", "JGI", "Bacterial_Data", cls.jgi_bacterial_gff2_filename),
cls.jgi_bacterial_gff2_path)
cls.jgi_bacterial_fa2_filename = '91705.assembled.fna'
cls.jgi_bacterial_fa2_path = os.path.join(cls.scratch, cls.jgi_bacterial_fa2_filename)
shutil.copy(os.path.join("data", "fasta_gff", "JGI", "Bacterial_Data", cls.jgi_bacterial_fa2_filename),
cls.jgi_bacterial_fa2_path)
cls.patric_bacterial_gff_filename = '1240778.3.PATRIC.gff.gz'
cls.patric_bacterial_gff_path = os.path.join(cls.scratch, cls.patric_bacterial_gff_filename)
shutil.copy(os.path.join("data", "fasta_gff", "PATRIC", "Ecoli_O104", cls.patric_bacterial_gff_filename),
cls.patric_bacterial_gff_path)
cls.patric_bacterial_fa_filename = '1240778.3.fna.gz'
cls.patric_bacterial_fa_path = os.path.join(cls.scratch, cls.patric_bacterial_fa_filename)
shutil.copy(os.path.join("data", "fasta_gff", "PATRIC", "Ecoli_O104", cls.patric_bacterial_fa_filename),
cls.patric_bacterial_fa_path)
cls.refseq_bacterial_gff_filename = 'NC_021490.gff.gz'
cls.refseq_bacterial_gff_path = os.path.join(cls.scratch, cls.refseq_bacterial_gff_filename)
shutil.copy(os.path.join("data", "fasta_gff", "RefSeq", "Bacterial_Data", cls.refseq_bacterial_gff_filename),
cls.refseq_bacterial_gff_path)
cls.refseq_bacterial_fa_filename = 'NC_021490.fasta.gz'
cls.refseq_bacterial_fa_path = os.path.join(cls.scratch, cls.refseq_bacterial_fa_filename)
shutil.copy(os.path.join("data", "fasta_gff", "RefSeq", "Bacterial_Data", cls.refseq_bacterial_fa_filename),
cls.refseq_bacterial_fa_path)
def check_minimal_items_exist(self, result):
self.assertTrue('genome_info' in result)
self.assertTrue('genome_ref' in result)
genome_info = result['genome_info']
self.assertEqual(genome_info[10]['Domain'], 'Unknown')
self.assertEqual(genome_info[10]['Genetic code'], '11')
self.assertEqual(genome_info[10]['Name'], 'unknown_taxon')
self.assertEqual(genome_info[10]['Source'], 'Genbank')
self.assertTrue('GC content' in genome_info[10])
self.assertTrue(re.match("^\d+?\.\d+?$", genome_info[10]['GC content']) is not None)
self.assertTrue('Number of Protein Encoding Genes' in genome_info[10])
self.assertTrue(genome_info[10]['Number of Protein Encoding Genes'].isdigit())
self.assertTrue('Size' in genome_info[10])
self.assertTrue(genome_info[10]['Size'].isdigit())
self.assertEqual(genome_info[10]['Taxonomy'], 'Unconfirmed Organism: unknown_taxon')
def print_genome_warnings(self, result):
data_file_cli = DataFileUtil(os.environ['SDK_CALLBACK_URL'])
genome = data_file_cli.get_objects(
{'object_refs': [result['genome_ref']]}
)['data'][0]['data']
if 'warnings' in genome:
print("Genome warnings:" + str(genome['warnings']))
def check_CDS_warnings(self, result, test_name):
data_file_cli = DataFileUtil(os.environ['SDK_CALLBACK_URL'])
genome = data_file_cli.get_objects(
{'object_refs': [result['genome_ref']]}
)['data'][0]['data']
print("IN TEST NAME : " + str(test_name))
cds_warning_count = 0
cds_with_warning_count = 0
if 'cdss' in genome:
total_cds_count = len(genome['cdss'])
for feature in genome['cdss']:
if 'warnings' in feature:
cds_with_warning_count = cds_with_warning_count + 1
cds_warning_count = cds_warning_count + len(feature['warnings'])
print("Total CDS: " + str(total_cds_count))
print("CDS Warning Count: " + str(cds_warning_count))
print("CDSs with a warning Count: " + str(cds_with_warning_count))
print("Percent CDS with warning: " + str((cds_with_warning_count/float(total_cds_count)) * 100))
"""
def test_simple_fasta_gff_to_genome_w_null_params(self):
input_params = {
"fasta_file": {'path': self.fa_path},
"gff_file": {'path': self.gff_path},
"workspace_name": self.getWsName(),
"genome_name": 'MyGenome',
"scientific_name": None,
"taxon_reference": None,
"genetic_code": None,
"source": None,
"taxon_wsname": None,
"release": None,
"type": None
}
result = self.getImpl().fasta_gff_to_genome(self.getContext(), input_params)[0]
self.assertTrue('genome_info' in result)
self.assertTrue('genome_ref' in result)
genome_info = result['genome_info']
self.assertEqual(genome_info[10]['Domain'], 'Unknown')
self.assertEqual(genome_info[10]['Genetic code'], '11')
self.assertEqual(genome_info[10]['Name'], 'unknown_taxon')
self.assertEqual(genome_info[10]['Source'], 'User')
self.assertTrue('GC content' in genome_info[10])
self.assertTrue(re.match("^\d+?\.\d+?$", genome_info[10]['GC content']) is not None)
self.assertTrue('Number of Protein Encoding Genes' in genome_info[10])
self.assertTrue(genome_info[10]['Number of Protein Encoding Genes'].isdigit())
self.assertTrue('Size' in genome_info[10])
self.assertTrue(genome_info[10]['Size'].isdigit())
self.assertEqual(genome_info[10]['Taxonomy'], 'Unconfirmed Organism: unknown_taxon')
def test_simple_fasta_gff_to_genome(self):
input_params = {
'fasta_file': {'path': self.fa_path},
'gff_file': {'path': self.gff_path},
'genome_name': 'Plant',
'workspace_name': self.getWsName(),
'source': 'Genbank',
'type': 'Reference',
'scientific_name': 'Populus trichocarpa'
}
result = self.getImpl().fasta_gff_to_genome(self.getContext(), input_params)[0]
self.assertTrue('genome_info' in result)
self.assertTrue('genome_ref' in result)
genome_info = result['genome_info']
self.assertEqual(genome_info[10]['Number of Protein Encoding Genes'], '1028')
self.assertEqual(genome_info[10]['Domain'], 'Eukaryota')
self.assertEqual(genome_info[10]['Genetic code'], '11')
self.assertEqual(genome_info[10]['Name'], 'Populus trichocarpa')
self.assertEqual(genome_info[10]['Source'], 'Genbank')
self.assertTrue('GC content' in genome_info[10])
self.assertTrue(re.match("^\d+?\.\d+?$", genome_info[10]['GC content']) is not None)
self.assertTrue('Number of Protein Encoding Genes' in genome_info[10])
self.assertTrue(genome_info[10]['Number of Protein Encoding Genes'].isdigit())
self.assertTrue('Size' in genome_info[10])
self.assertTrue(genome_info[10]['Size'].isdigit())
self.assertEqual(genome_info[10]['Taxonomy'],
'cellular organisms; Eukaryota; Viridiplantae; Streptophyta; ' +
'Streptophytina; Embryophyta; Tracheophyta; Euphyllophyta; ' +
'Spermatophyta; Magnoliophyta; Mesangiospermae; eudicotyledons; ' +
'Gunneridae; Pentapetalae; rosids; fabids; Malpighiales; Salicaceae; ' +
'Saliceae; Populus')
def test_taxon_reference_fasta_gff_to_genome(self):
taxon_wsname = 'ReferenceTaxons'
taxon_object_name = "unknown_taxon"
taxon_info = self.dfu.get_objects({'object_refs': [taxon_wsname+"/"+taxon_object_name],
'ignore_errors': 0})['data'][0]
taxon_reference = "{}/{}/{}".format(taxon_info['info'][6],
taxon_info['info'][0],
taxon_info['info'][4])
input_params = {
'fasta_file': {'path': self.fa_path},
'gff_file': {'path': self.gff_path},
'genome_name': 'MyGenome',
'workspace_name': self.getWsName(),
'source': 'Genbank',
'taxon_reference': taxon_reference,
'type': 'Reference',
'genetic_code': 1
}
result = self.getImpl().fasta_gff_to_genome(self.getContext(), input_params)[0]
self.assertEqual(result['genome_info'][10]['Genetic code'], '1')
self.check_CDS_warnings(result,"test_taxon_reference_fasta_gff_to_genome")
def test_shock_fasta_gff_to_genome(self):
gff_shock_id = self.dfu.file_to_shock({'file_path': self.gff_path})['shock_id']
fa_shock_id = self.dfu.file_to_shock({'file_path': self.fa_path})['shock_id']
input_params = {
'workspace_name': self.getWsName(),
'genome_name': 'MyGenome',
'fasta_file': {'shock_id': fa_shock_id},
'gff_file': {'shock_id': gff_shock_id},
'source': 'Genbank',
'type': 'Reference'
}
result = self.getImpl().fasta_gff_to_genome(self.getContext(), input_params)[0]
self.check_minimal_items_exist(result)
self.check_CDS_warnings(result,"test_shock_fasta_gff_to_genome")
def test_fungal_fasta_gff_to_genome(self):
input_params = {
'workspace_name': self.getWsName(),
'genome_name': 'jgi_fungal',
'fasta_file': {'path': self.fungal_fa_path},
'gff_file': {'path': self.fungal_gff_path},
'source': 'Genbank',
'type': 'Reference'
}
result = self.getImpl().fasta_gff_to_genome(self.getContext(), input_params)[0]
self.check_minimal_items_exist(result)
self.check_CDS_warnings(result,"test_fungal_fasta_gff_to_genome")
def test_jgi_bacterial_fasta_gff_to_genome(self):
input_params = {
'workspace_name': self.getWsName(),
'genome_name': 'jgi_bacterial',
'fasta_file': {'path': self.jgi_bacterial_fa_path},
'gff_file': {'path': self.jgi_bacterial_gff_path},
'source': 'Genbank',
'type': 'Reference',
'generate_missing_genes': 1
}
result = self.getImpl().fasta_gff_to_genome(self.getContext(), input_params)[0]
self.check_minimal_items_exist(result)
self.check_CDS_warnings(result,"test_jgi_bacterial_fasta_gff_to_genome")
def test_jgi_bacterial_fasta_gff2_to_genome(self):
input_params = {
'workspace_name': self.getWsName(),
'genome_name': 'jgi_bacterial2',
'fasta_file': {'path': self.jgi_bacterial_fa2_path},
'gff_file': {'path': self.jgi_bacterial_gff2_path},
'source': 'Genbank',
'type': 'Reference',
'generate_missing_genes': 1
}
result = self.getImpl().fasta_gff_to_genome(self.getContext(), input_params)[0]
self.check_minimal_items_exist(result)
self.check_CDS_warnings(result,"test_jgi_bacterial_fasta_gff2_to_genome")
def test_refseq_bacterial_fasta_gff_to_genome(self):
input_params = {
'workspace_name': self.getWsName(),
'genome_name': 'refseq',
'fasta_file': {'path': self.refseq_bacterial_fa_path},
'gff_file': {'path': self.refseq_bacterial_gff_path},
'source': 'Genbank',
'type': 'Reference'
}
result = self.getImpl().fasta_gff_to_genome(self.getContext(), input_params)[0]
self.check_minimal_items_exist(result)
self.check_CDS_warnings(result,"test_refseq_bacterial_fasta_gff_to_genome")
def test_patric_bacterial_fasta_gff_to_genome(self):
input_params = {
'workspace_name': self.getWsName(),
'genome_name': 'patric_bacterial',
'fasta_file': {'path': self.patric_bacterial_fa_path},
'gff_file': {'path': self.patric_bacterial_gff_path},
'source': 'Genbank',
'type': 'Reference',
'generate_missing_genes': 1
}
result = self.getImpl().fasta_gff_to_genome(self.getContext(), input_params)[0]
self.check_minimal_items_exist(result)
self.check_CDS_warnings(result,"test_patric_bacterial_fasta_gff_to_genome")"""
def test_bad_fasta_gff_to_genome_params(self):
invalidate_input_params = {
'missing_workspace_name': 'workspace_name',
'genome_name': 'genome_name',
'fasta_file': {'path': 'fasta_file'},
'gff_file': {'path': 'gff_file'}
}
with self.assertRaisesRegex(
ValueError,
'"workspace_name" parameter is required, but missing'):
self.getImpl().fasta_gff_to_genome(self.getContext(), invalidate_input_params)
invalidate_input_params = {
'workspace_name': 'workspace_name',
'missing_genome_name': 'genome_name',
'fasta_file': {'path': 'fasta_file'},
'gff_file': {'path': 'gff_file'}
}
with self.assertRaisesRegex(
ValueError,
'"genome_name" parameter is required, but missing'):
self.getImpl().fasta_gff_to_genome(self.getContext(), invalidate_input_params)
invalidate_input_params = {
'workspace_name': 'workspace_name',
'genome_name': 'genome_name',
'missing_fasta_file': {'path': 'fasta_file'},
'gff_file': {'path': 'gff_file'}
}
with self.assertRaisesRegex(
ValueError,
'"fasta_file" parameter is required, but missing'):
self.getImpl().fasta_gff_to_genome(self.getContext(), invalidate_input_params)
invalidate_input_params = {
'workspace_name': 'workspace_name',
'genome_name': 'genome_name',
'fasta_file': {'path': 'fasta_file'},
'missing_gff_file': {'path': 'gff_file'}
}
with self.assertRaisesRegex(
ValueError,
'"gff_file" parameter is required, but missing'):
self.getImpl().fasta_gff_to_genome(self.getContext(), invalidate_input_params)
invalidate_input_params = {
'workspace_name': 'workspace_name',
'genome_name': 'genome_name',
'fasta_file': 'not_a_dict',
'gff_file': {'path': 'gff_file'}
}
with self.assertRaisesRegex(
ValueError,
'Required "fasta_file" field must be a map/dict'):
self.getImpl().fasta_gff_to_genome(self.getContext(), invalidate_input_params)
invalidate_input_params = {
'workspace_name': 'workspace_name',
'genome_name': 'genome_name',
'fasta_file': {'path': 'fasta_file'},
'gff_file': 'not_a_dict'
}
with self.assertRaisesRegex(
ValueError,
'Required "gff_file" field must be a map/dict'):
self.getImpl().fasta_gff_to_genome(self.getContext(), invalidate_input_params)
invalidate_input_params = {
'workspace_name': 'workspace_name',
'genome_name': 'genome_name',
'fasta_file': {'path': 'fasta_file'},
'gff_file': {'ftp_url': 'gff_file'}
}
with self.assertRaisesRegex(
ValueError,
'must include one source'):
self.getImpl().fasta_gff_to_genome(self.getContext(), invalidate_input_params)
invalidate_input_params = {
'workspace_name': 'workspace_name',
'genome_name': 'genome_name',
'fasta_file': {'missing_path': 'fasta_file'},
'gff_file': {'path': 'gff_file'}
}
with self.assertRaisesRegex(
ValueError,
'Required "fasta_file" field must include one source: path | shock_id'):
self.getImpl().fasta_gff_to_genome(self.getContext(), invalidate_input_params)
invalidate_input_params = {
'workspace_name': 'workspace_name',
'genome_name': 'genome_name',
'fasta_file': {'path': 'fasta_file', 'shock_id': 'shock_id'},
'gff_file': {'path': 'gff_file'}
}
with self.assertRaisesRegex(
ValueError,
'Required "fasta_file" field has too many sources specified: '):
self.getImpl().fasta_gff_to_genome(self.getContext(), invalidate_input_params)
invalidate_input_params = {
'workspace_name': 'workspace_name',
'genome_name': 'genome_name',
'fasta_file': {'path': 'fasta_file'},
'gff_file': {'path': 'gff_file'},
'genetic_code': 'meh'
}
with self.assertRaisesRegex(
ValueError,
'Invalid genetic code specified'):
self.getImpl().fasta_gff_to_genome(self.getContext(), invalidate_input_params)
invalidate_input_params = {
'workspace_name': self.getWsName(),
'genome_name': 'MyGenome',
'fasta_file': {'path': self.patric_bacterial_fa_path},
'gff_file': {'path': self.patric_bacterial_gff_path},
'source': 'Genbank',
'type': 'Reference',
}
with self.assertRaisesRegex(
ValueError, "generate_missing_genes"):
self.getImpl().fasta_gff_to_genome(self.getContext(),
invalidate_input_params)
"""
def test_FastaGFFToGenome_stage_input(self):
# test absolute file path
input_params = {
'workspace_name': self.getWsName(),
'genome_name': 'MyGenome',
'fasta_file': {'path': self.fa_path},
'gff_file': {'path': self.gff_path},
}
input_directory = os.path.join(self.scratch, 'test_FastaGFFToGenome_stage_input')
os.makedirs(input_directory)
file_paths = self.importer._stage_input(input_params, input_directory)
self.assertTrue(self.gff_filename.rpartition('.')[0] in os.listdir(input_directory))
self.assertTrue(self.fa_filename.rpartition('.')[0] in os.listdir(input_directory))
self.assertTrue('gff_file' in file_paths)
self.assertTrue('fasta_file' in file_paths)
self.assertEqual(file_paths.get('gff_file'),
os.path.join(input_directory, self.gff_filename).rpartition('.')[0])
self.assertEqual(file_paths.get('fasta_file'),
os.path.join(input_directory, self.fa_filename).rpartition('.')[0])
# test shock id
gff_shock_id = self.dfu.file_to_shock({'file_path': self.gff_path})['shock_id']
fa_shock_id = self.dfu.file_to_shock({'file_path': self.fa_path})['shock_id']
input_params = {
'workspace_name': self.getWsName(),
'genome_name': 'MyGenome',
'fasta_file': {'shock_id': fa_shock_id},
'gff_file': {'shock_id': gff_shock_id},
}
shutil.rmtree(input_directory)
os.makedirs(input_directory)
file_paths = self.importer._stage_input(input_params, input_directory)
self.assertTrue(self.gff_filename.rpartition('.')[0] in os.listdir(input_directory))
self.assertTrue(self.fa_filename.rpartition('.')[0] in os.listdir(input_directory))
self.assertTrue('gff_file' in file_paths)
self.assertTrue('fasta_file' in file_paths)
self.assertEqual(file_paths.get('gff_file'),
os.path.join(input_directory, self.gff_filename).rpartition('.')[0])
self.assertEqual(file_paths.get('fasta_file'),
os.path.join(input_directory, self.fa_filename).rpartition('.')[0])
def test_FastaGFFToGenome_set_parsed_params(self):
input_params = {
'workspace_name': self.getWsName(),
'genome_name': 'MyGenome',
'fasta_file': {'path': self.fa_path},
'gff_file': {'path': self.gff_path},
'source': 'Genbank',
'type': 'Reference'
}
parsed_params = self.importer._set_parsed_params(input_params)
expect_param_keys = {'source', 'taxon_wsname', 'taxon_reference', 'release',
'type', 'metadata', 'workspace_name', 'source_id',
'genome_name', 'scientific_name', 'gff_file', 'fasta_file'}
self.assertEqual(set(parsed_params.keys()), expect_param_keys)
self.assertEqual(parsed_params['genome_name'], 'MyGenome')
self.assertEqual(parsed_params['source'], 'Genbank')
self.assertEqual(parsed_params['type'], 'Reference')
def test_FastaGFFToGenome_retrieve_gff_file(self):
input_gff_file = self.dfu.unpack_file({'file_path': self.gff_path})['file_path']
feature_list = self.importer._retrieve_gff_file(input_gff_file)
self.assertIsInstance(feature_list, dict)
def test_update_phytozome(self):
features_identifiers_list = {'Chr01': [{'end': 8201443,
'Name': 'Potri.001G102800',
'start': 8200895,
'score': '.',
'phase': '.',
'contig': 'Chr01',
'type': 'gene',
'ID': 'Potri.001G102800.v3.0',
'strand': '-'},
{'end': 8201443,
'Name': 'Potri.001G102800.1',
'Parent': 'Potri.001G102800.v3.0',
'pacid': '27047128',
'start': 8200895,
'score': '.',
'longest': '1',
'phase': '.',
'contig': 'Chr01',
'type': 'mRNA',
'ID': 'Potri.001G102800.1.v3.0',
'strand': '-'},
{'end': 8201443,
'Parent': 'Potri.001G102800.1.v3.0',
'pacid': '27047128',
'start': 8200895,
'score': '.',
'phase': '0',
'contig': 'Chr01',
'type': 'CDS',
'ID': 'Potri.001G102800.1.v3.0.CDS.1',
'strand': '-'}]}
updated_features_list = self.importer._update_identifiers(
features_identifiers_list)
self.assertEqual(updated_features_list['Chr01'][-1]['ID'],
'Potri.001G102800.1.v3.0.CDS')
def test_FastaGFFToGenome_update_feature_identifiers(self):
features_identifiers_list = {'Chr01':[{'end': 8201443,
'Name': 'Potri.001G102800',
'start': 8200895,
'score': '.',
'phase': '.',
'contig': 'Chr01',
'type': 'gene',
'ID': 'Potri.001G102800.v3.0',
'strand': '-'},
{'end': 8201443,
'Name': 'Potri.001G102800.1',
'Parent': 'Potri.001G102800.v3.0',
'pacid': '27047128',
'start': 8200895,
'score': '.',
'longest': '1',
'phase': '.',
'contig': 'Chr01',
'type': 'mRNA',
'ID': 'Potri.001G102800.1.v3.0',
'strand': '-'},
{'end': 8201443,
'Parent': 'Potri.001G102800.1.v3.0',
'pacid': '27047128',
'start': 8200895,
'score': '.',
'phase': '0',
'contig': 'Chr01',
'type': 'CDS',
'ID': 'Potri.001G102800.1.v3.0.CDS',
'strand': '-'}]}
updated_features_list = self.importer._update_identifiers(features_identifiers_list)
self.assertEqual(updated_features_list, features_identifiers_list)"""
| mit | 3,324,510,919,945,438,000 | 45.800926 | 117 | 0.536618 | false |
chienlieu2017/it_management | odoo/addons/mail/tests/test_mail_message.py | 1 | 15183 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import itertools
from odoo.addons.mail.tests.common import TestMail
from odoo.exceptions import AccessError, except_orm
from odoo.tools import mute_logger
class TestMailMessage(TestMail):
def setUp(self):
super(TestMailMessage, self).setUp()
self.group_private = self.env['mail.channel'].with_context({
'mail_create_nolog': True,
'mail_create_nosubscribe': True
}).create({
'name': 'Private',
'public': 'private'}
).with_context({'mail_create_nosubscribe': False})
self.message = self.env['mail.message'].create({
'body': 'My Body',
'model': 'mail.channel',
'res_id': self.group_private.id,
})
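        # The fixture above is a plain message without a subtype, attached to
        # a private channel; the tests below read and create messages against
        # it as admin, employee, portal and public users.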
def test_mail_message_values_basic(self):
self.env['ir.config_parameter'].search([('key', '=', 'mail.catchall.domain')]).unlink()
msg = self.env['mail.message'].sudo(self.user_employee).create({
'reply_to': '[email protected]',
'email_from': '[email protected]',
})
self.assertIn('-private', msg.message_id, 'mail_message: message_id for a void message should be a "private" one')
self.assertEqual(msg.reply_to, '[email protected]')
self.assertEqual(msg.email_from, '[email protected]')
def test_mail_message_values_default(self):
self.env['ir.config_parameter'].search([('key', '=', 'mail.catchall.domain')]).unlink()
msg = self.env['mail.message'].sudo(self.user_employee).create({})
self.assertIn('-private', msg.message_id, 'mail_message: message_id for a void message should be a "private" one')
self.assertEqual(msg.reply_to, '%s <%s>' % (self.user_employee.name, self.user_employee.email))
self.assertEqual(msg.email_from, '%s <%s>' % (self.user_employee.name, self.user_employee.email))
def test_mail_message_values_alias(self):
alias_domain = 'example.com'
self.env['ir.config_parameter'].set_param('mail.catchall.domain', alias_domain)
self.env['ir.config_parameter'].search([('key', '=', 'mail.catchall.alias')]).unlink()
msg = self.env['mail.message'].sudo(self.user_employee).create({})
self.assertIn('-private', msg.message_id, 'mail_message: message_id for a void message should be a "private" one')
self.assertEqual(msg.reply_to, '%s <%s>' % (self.user_employee.name, self.user_employee.email))
self.assertEqual(msg.email_from, '%s <%s>' % (self.user_employee.name, self.user_employee.email))
def test_mail_message_values_alias_catchall(self):
alias_domain = 'example.com'
alias_catchall = 'pokemon'
self.env['ir.config_parameter'].set_param('mail.catchall.domain', alias_domain)
self.env['ir.config_parameter'].set_param('mail.catchall.alias', alias_catchall)
msg = self.env['mail.message'].sudo(self.user_employee).create({})
self.assertIn('-private', msg.message_id, 'mail_message: message_id for a void message should be a "private" one')
self.assertEqual(msg.reply_to, '%s <%s@%s>' % (self.env.user.company_id.name, alias_catchall, alias_domain))
self.assertEqual(msg.email_from, '%s <%s>' % (self.user_employee.name, self.user_employee.email))
def test_mail_message_values_document_no_alias(self):
self.env['ir.config_parameter'].search([('key', '=', 'mail.catchall.domain')]).unlink()
msg = self.env['mail.message'].sudo(self.user_employee).create({
'model': 'mail.channel',
'res_id': self.group_pigs.id
})
self.assertIn('-openerp-%d-mail.channel' % self.group_pigs.id, msg.message_id, 'mail_message: message_id for a void message should be a "private" one')
self.assertEqual(msg.reply_to, '%s <%s>' % (self.user_employee.name, self.user_employee.email))
self.assertEqual(msg.email_from, '%s <%s>' % (self.user_employee.name, self.user_employee.email))
def test_mail_message_values_document_alias(self):
alias_domain = 'example.com'
self.env['ir.config_parameter'].set_param('mail.catchall.domain', alias_domain)
self.env['ir.config_parameter'].search([('key', '=', 'mail.catchall.alias')]).unlink()
msg = self.env['mail.message'].sudo(self.user_employee).create({
'model': 'mail.channel',
'res_id': self.group_pigs.id
})
self.assertIn('-openerp-%d-mail.channel' % self.group_pigs.id, msg.message_id, 'mail_message: message_id for a void message should be a "private" one')
self.assertEqual(msg.reply_to, '%s %s <%s@%s>' % (self.env.user.company_id.name, self.group_pigs.name, self.group_pigs.alias_name, alias_domain))
self.assertEqual(msg.email_from, '%s <%s>' % (self.user_employee.name, self.user_employee.email))
def test_mail_message_values_document_alias_catchall(self):
alias_domain = 'example.com'
alias_catchall = 'pokemon'
self.env['ir.config_parameter'].set_param('mail.catchall.domain', alias_domain)
self.env['ir.config_parameter'].set_param('mail.catchall.alias', alias_catchall)
msg = self.env['mail.message'].sudo(self.user_employee).create({
'model': 'mail.channel',
'res_id': self.group_pigs.id
})
self.assertIn('-openerp-%d-mail.channel' % self.group_pigs.id, msg.message_id, 'mail_message: message_id for a void message should be a "private" one')
self.assertEqual(msg.reply_to, '%s %s <%s@%s>' % (self.env.user.company_id.name, self.group_pigs.name, self.group_pigs.alias_name, alias_domain))
self.assertEqual(msg.email_from, '%s <%s>' % (self.user_employee.name, self.user_employee.email))
def test_mail_message_values_no_auto_thread(self):
msg = self.env['mail.message'].sudo(self.user_employee).create({
'model': 'mail.channel',
'res_id': self.group_pigs.id,
'no_auto_thread': True,
})
self.assertIn('reply_to', msg.message_id)
self.assertNotIn('mail.channel', msg.message_id)
self.assertNotIn('-%d-' % self.group_pigs.id, msg.message_id)
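        # Taken together, the cases above check that reply_to/email_from fall
        # back to the author's "Name <email>" when no catchall is configured,
        # become "<company> <catchall@domain>" once a catchall alias and
        # domain are set, become "<company> <record name> <alias@domain>" for
        # documents with an alias, and that no_auto_thread drops the document
        # reference from message_id in favour of a reply_to-based one.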
def test_mail_message_notify_from_mail_mail(self):
        # Due to post-commit hooks, store sent emails at every step
self.email_to_list = []
mail = self.env['mail.mail'].create({
'body_html': '<p>Test</p>',
'email_to': '[email protected]',
'partner_ids': [(4, self.user_employee.partner_id.id)]
})
self.email_to_list.extend(itertools.chain.from_iterable(sent_email['email_to'] for sent_email in self._mails if sent_email.get('email_to')))
self.assertNotIn(u'Ernest Employee <[email protected]>', self.email_to_list)
mail.send()
self.email_to_list.extend(itertools.chain.from_iterable(sent_email['email_to'] for sent_email in self._mails if sent_email.get('email_to')))
self.assertNotIn(u'Ernest Employee <[email protected]>', self.email_to_list)
self.assertIn(u'[email protected]', self.email_to_list)
@mute_logger('odoo.addons.mail.models.mail_mail')
def test_mail_message_access_search(self):
# Data: various author_ids, partner_ids, documents
msg1 = self.env['mail.message'].create({
'subject': '_Test', 'body': 'A', 'subtype_id': self.ref('mail.mt_comment')})
msg2 = self.env['mail.message'].create({
'subject': '_Test', 'body': 'A+B', 'subtype_id': self.ref('mail.mt_comment'),
'partner_ids': [(6, 0, [self.user_public.partner_id.id])]})
msg3 = self.env['mail.message'].create({
'subject': '_Test', 'body': 'A Pigs', 'subtype_id': False,
'model': 'mail.channel', 'res_id': self.group_pigs.id})
msg4 = self.env['mail.message'].create({
'subject': '_Test', 'body': 'A+P Pigs', 'subtype_id': self.ref('mail.mt_comment'),
'model': 'mail.channel', 'res_id': self.group_pigs.id,
'partner_ids': [(6, 0, [self.user_public.partner_id.id])]})
msg5 = self.env['mail.message'].create({
'subject': '_Test', 'body': 'A+E Pigs', 'subtype_id': self.ref('mail.mt_comment'),
'model': 'mail.channel', 'res_id': self.group_pigs.id,
'partner_ids': [(6, 0, [self.user_employee.partner_id.id])]})
msg6 = self.env['mail.message'].create({
'subject': '_Test', 'body': 'A Birds', 'subtype_id': self.ref('mail.mt_comment'),
'model': 'mail.channel', 'res_id': self.group_private.id})
msg7 = self.env['mail.message'].sudo(self.user_employee).create({
'subject': '_Test', 'body': 'B', 'subtype_id': self.ref('mail.mt_comment')})
msg8 = self.env['mail.message'].sudo(self.user_employee).create({
'subject': '_Test', 'body': 'B+E', 'subtype_id': self.ref('mail.mt_comment'),
'partner_ids': [(6, 0, [self.user_employee.partner_id.id])]})
# Test: Public: 2 messages (recipient)
messages = self.env['mail.message'].sudo(self.user_public).search([('subject', 'like', '_Test')])
self.assertEqual(messages, msg2 | msg4)
# Test: Employee: 3 messages on Pigs Raoul can read (employee can read group with default values)
messages = self.env['mail.message'].sudo(self.user_employee).search([('subject', 'like', '_Test'), ('body', 'ilike', 'A')])
self.assertEqual(messages, msg3 | msg4 | msg5)
# Test: Raoul: 3 messages on Pigs Raoul can read (employee can read group with default values), 0 on Birds (private group) + 2 messages as author
messages = self.env['mail.message'].sudo(self.user_employee).search([('subject', 'like', '_Test')])
self.assertEqual(messages, msg3 | msg4 | msg5 | msg7 | msg8)
# Test: Admin: all messages
messages = self.env['mail.message'].search([('subject', 'like', '_Test')])
self.assertEqual(messages, msg1 | msg2 | msg3 | msg4 | msg5 | msg6 | msg7 | msg8)
# Test: Portal: 0 (no access to groups, not recipient)
messages = self.env['mail.message'].sudo(self.user_portal).search([('subject', 'like', '_Test')])
self.assertFalse(messages)
# Test: Portal: 2 messages (public group with a subtype)
self.group_pigs.write({'public': 'public'})
messages = self.env['mail.message'].sudo(self.user_portal).search([('subject', 'like', '_Test')])
self.assertEqual(messages, msg4 | msg5)
@mute_logger('odoo.addons.base.ir.ir_model', 'odoo.models')
def test_mail_message_access_read_crash(self):
        # TODO: Change except_orm to Warning (check_access_rule is called here
        # and still raises except_orm, so all except_orm occurrences in the
        # mail module need to be changed to Warning.)
with self.assertRaises(except_orm):
self.message.sudo(self.user_employee).read()
@mute_logger('odoo.models')
def test_mail_message_access_read_crash_portal(self):
with self.assertRaises(except_orm):
self.message.sudo(self.user_portal).read(['body', 'message_type', 'subtype_id'])
def test_mail_message_access_read_ok_portal(self):
self.message.write({'subtype_id': self.ref('mail.mt_comment'), 'res_id': self.group_public.id})
self.message.sudo(self.user_portal).read(['body', 'message_type', 'subtype_id'])
def test_mail_message_access_read_notification(self):
attachment = self.env['ir.attachment'].create({
'datas': 'My attachment'.encode('base64'),
'name': 'doc.txt',
'datas_fname': 'doc.txt'})
# attach the attachment to the message
self.message.write({'attachment_ids': [(4, attachment.id)]})
self.message.write({'partner_ids': [(4, self.user_employee.partner_id.id)]})
self.message.sudo(self.user_employee).read()
# Test: Bert has access to attachment, ok because he can read message
attachment.sudo(self.user_employee).read(['name', 'datas'])
def test_mail_message_access_read_author(self):
self.message.write({'author_id': self.user_employee.partner_id.id})
self.message.sudo(self.user_employee).read()
def test_mail_message_access_read_doc(self):
self.message.write({'model': 'mail.channel', 'res_id': self.group_public.id})
# Test: Bert reads the message, ok because linked to a doc he is allowed to read
self.message.sudo(self.user_employee).read()
@mute_logger('odoo.addons.base.ir.ir_model')
def test_mail_message_access_create_crash_public(self):
# Do: Bert creates a message on Pigs -> ko, no creation rights
with self.assertRaises(AccessError):
self.env['mail.message'].sudo(self.user_public).create({'model': 'mail.channel', 'res_id': self.group_pigs.id, 'body': 'Test'})
# Do: Bert create a message on Jobs -> ko, no creation rights
with self.assertRaises(AccessError):
self.env['mail.message'].sudo(self.user_public).create({'model': 'mail.channel', 'res_id': self.group_public.id, 'body': 'Test'})
@mute_logger('odoo.models')
def test_mail_message_access_create_crash(self):
# Do: Bert create a private message -> ko, no creation rights
with self.assertRaises(except_orm):
self.env['mail.message'].sudo(self.user_employee).create({'model': 'mail.channel', 'res_id': self.group_private.id, 'body': 'Test'})
@mute_logger('odoo.models')
def test_mail_message_access_create_doc(self):
# TODO Change the except_orm to Warning
Message = self.env['mail.message'].sudo(self.user_employee)
# Do: Raoul creates a message on Jobs -> ok, write access to the related document
Message.create({'model': 'mail.channel', 'res_id': self.group_public.id, 'body': 'Test'})
# Do: Raoul creates a message on Priv -> ko, no write access to the related document
with self.assertRaises(except_orm):
Message.create({'model': 'mail.channel', 'res_id': self.group_private.id, 'body': 'Test'})
def test_mail_message_access_create_private(self):
self.env['mail.message'].sudo(self.user_employee).create({'body': 'Test'})
def test_mail_message_access_create_reply(self):
self.message.write({'partner_ids': [(4, self.user_employee.partner_id.id)]})
self.env['mail.message'].sudo(self.user_employee).create({'model': 'mail.channel', 'res_id': self.group_private.id, 'body': 'Test', 'parent_id': self.message.id})
def test_message_set_star(self):
msg = self.group_pigs.message_post(body='My Body', subject='1')
msg_emp = self.env['mail.message'].sudo(self.user_employee).browse(msg.id)
# Admin set as starred
msg.toggle_message_starred()
self.assertTrue(msg.starred)
# Employee set as starred
msg_emp.toggle_message_starred()
self.assertTrue(msg_emp.starred)
# Do: Admin unstars msg
msg.toggle_message_starred()
self.assertFalse(msg.starred)
self.assertTrue(msg_emp.starred)
| gpl-3.0 | 4,789,235,050,100,425,000 | 55.233333 | 170 | 0.627742 | false |
davisd50/sparc.i18n | setup.py | 1 | 1392 | from setuptools import setup, find_packages
import os
version = '0.0.1'
setup(name='sparc.i18n',
version=version,
description="i18n components for the SPARC platform",
long_description=open("README.md").read() + "\n" +
open("HISTORY.txt").read(),
# Get more strings from
# http://pypi.python.org/pypi?:action=list_classifiers
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
],
keywords=['zca'],
author='David Davis',
author_email='[email protected]',
url='https://github.com/davisd50/sparc.i18n',
download_url = '',
license='MIT',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['sparc'],
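      # 'sparc' is a namespace package: this distribution ships only the
      # sparc.i18n sub-package and relies on other sparc.* distributions for
      # the rest of the namespace.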
include_package_data=True,
package_data = {
'': ['*.zcml']
},
zip_safe=False,
install_requires=[
'setuptools',
'zope.interface',
'zope.component',
'zope.i18nmessageid'
# -*- Extra requirements: -*-
],
tests_require=[
'sparc.testing'
],
entry_points="""
# -*- Entry points: -*-
""",
)
| mit | 7,376,492,356,747,145,000 | 28.617021 | 60 | 0.54023 | false |
kalkin/qubes-core-admin | qubes/vm/qubesvm.py | 1 | 65272 | #
# The Qubes OS Project, https://www.qubes-os.org/
#
# Copyright (C) 2010-2015 Joanna Rutkowska <[email protected]>
# Copyright (C) 2013-2015 Marek Marczykowski-Górecki
# <[email protected]>
# Copyright (C) 2014-2015 Wojtek Porczyk <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from __future__ import absolute_import
import asyncio
import base64
import datetime
import errno
import grp
import os
import os.path
import shutil
import string
import subprocess
import uuid
import warnings
import libvirt # pylint: disable=import-error
import lxml
import qubes
import qubes.config
import qubes.exc
import qubes.storage
import qubes.storage.file
import qubes.utils
import qubes.vm
import qubes.vm.mix.net
qmemman_present = False
try:
import qubes.qmemman.client # pylint: disable=wrong-import-position
qmemman_present = True
except ImportError:
pass
MEM_OVERHEAD_BASE = (3 + 1) * 1024 * 1024
MEM_OVERHEAD_PER_VCPU = 3 * 1024 * 1024 / 2
def _setter_qid(self, prop, value):
''' Helper for setting the domain qid '''
# pylint: disable=unused-argument
value = int(value)
if not 0 <= value <= qubes.config.max_qid:
raise ValueError(
'{} value must be between 0 and qubes.config.max_qid'.format(
prop.__name__))
return value
def _setter_kernel(self, prop, value):
''' Helper for setting the domain kernel and running sanity checks on it.
''' # pylint: disable=unused-argument
if not value:
return ''
value = str(value)
if '/' in value:
raise qubes.exc.QubesPropertyValueError(self, prop, value,
'Kernel name cannot contain \'/\'')
return value
def _setter_positive_int(self, prop, value):
''' Helper for setting a positive int. Checks that the int is >= 0 '''
# pylint: disable=unused-argument
value = int(value)
if value <= 0:
raise ValueError('Value must be positive')
return value
def _setter_default_user(self, prop, value):
''' Helper for setting default user '''
value = str(value)
# specifically forbid: ':', ' ', ''', '"'
allowed_chars = string.ascii_letters + string.digits + '_-+,.'
if not all(c in allowed_chars for c in value):
raise qubes.exc.QubesPropertyValueError(self, prop, value,
'Username can contain only those characters: ' + allowed_chars)
return value
def _setter_virt_mode(self, prop, value):
value = str(value)
value = value.lower()
if value not in ('hvm', 'pv'):
raise qubes.exc.QubesPropertyValueError(self, prop, value,
'Invalid virtualization mode, supported values: hvm, pv')
return value
class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
'''Base functionality of Qubes VM shared between all VMs.
The following events are raised on this class or its subclasses:
.. event:: domain-init (subject, event)
Fired at the end of class' constructor.
:param subject: Event emitter (the qube object)
:param event: Event name (``'domain-init'``)
.. event:: domain-load (subject, event)
Fired after the qube was loaded from :file:`qubes.xml`
:param subject: Event emitter (the qube object)
:param event: Event name (``'domain-loaded'``)
.. event:: domain-pre-start \
(subject, event, start_guid, mem_required)
Fired at the beginning of :py:meth:`start` method.
Handler for this event can be asynchronous (a coroutine).
:param subject: Event emitter (the qube object)
:param event: Event name (``'domain-pre-start'``)
*other arguments are as in :py:meth:`start`*
.. event:: domain-spawn (subject, event, start_guid)
Fired after creating libvirt domain.
:param subject: Event emitter (the qube object)
:param event: Event name (``'domain-spawn'``)
Handler for this event can be asynchronous (a coroutine).
*other arguments are as in :py:meth:`start`*
.. event:: domain-start (subject, event, start_guid)
Fired at the end of :py:meth:`start` method.
Handler for this event can be asynchronous (a coroutine).
:param subject: Event emitter (the qube object)
:param event: Event name (``'domain-start'``)
*other arguments are as in :py:meth:`start`*
.. event:: domain-shutdown (subject, event)
Fired when domain has been shut down.
:param subject: Event emitter (the qube object)
:param event: Event name (``'domain-shutdown'``)
.. event:: domain-pre-shutdown (subject, event, force)
Fired at the beginning of :py:meth:`shutdown` method.
Handler for this event can be asynchronous (a coroutine).
:param subject: Event emitter (the qube object)
:param event: Event name (``'domain-pre-shutdown'``)
:param force: If the shutdown is to be forceful
.. event:: domain-cmd-pre-run (subject, event, start_guid)
Fired at the beginning of :py:meth:`run_service` method.
Handler for this event can be asynchronous (a coroutine).
:param subject: Event emitter (the qube object)
:param event: Event name (``'domain-cmd-pre-run'``)
:param start_guid: If the gui daemon can be started
.. event:: domain-create-on-disk (subject, event)
Fired at the end of :py:meth:`create_on_disk` method.
Handler for this event can be asynchronous (a coroutine).
:param subject: Event emitter (the qube object)
:param event: Event name (``'domain-create-on-disk'``)
.. event:: domain-remove-from-disk (subject, event)
Fired at the beginning of :py:meth:`remove_from_disk` method, before
the qube directory is removed.
Handler for this event can be asynchronous (a coroutine).
:param subject: Event emitter (the qube object)
:param event: Event name (``'domain-remove-from-disk'``)
.. event:: domain-clone-files (subject, event, src)
Fired at the end of :py:meth:`clone_disk_files` method.
Handler for this event can be asynchronous (a coroutine).
:param subject: Event emitter (the qube object)
:param event: Event name (``'domain-clone-files'``)
:param src: source qube
.. event:: domain-verify-files (subject, event)
Fired at the end of :py:meth:`clone_disk_files` method.
:param subject: Event emitter (the qube object)
:param event: Event name (``'domain-verify-files'``)
If you think some files are missing or damaged, raise an exception.
.. event:: domain-is-fully-usable (subject, event)
Fired at the end of :py:meth:`clone_disk_files` method.
:param subject: Event emitter (the qube object)
:param event: Event name (``'domain-is-fully-usable'``)
You may ``yield False`` from the handler if you think the qube is
not fully usable. This will cause the domain to be in "transient"
state in the domain lifecycle.
.. event:: domain-qdb-create (subject, event)
Fired at the end of :py:meth:`create_qdb_entries` method.
:param subject: Event emitter (the qube object)
:param event: Event name (``'domain-qdb-create'``)
This event is a good place to add your custom entries to the qdb.
.. event:: domain-qdb-change:watched-path (subject, event, path)
Fired when watched QubesDB entry is changed. See
:py:meth:`watch_qdb_path`. *watched-path* part of event name is
what path was registered for watching, *path* in event argument
is what actually have changed (which may be different if watching a
directory, i.e. a path with `/` at the end).
:param subject: Event emitter (the qube object)
:param event: Event name (``'domain-qdb-change'``)
:param path: changed QubesDB path
.. event:: backup-get-files (subject, event)
            Collects additional files to be included in a backup.
:param subject: Event emitter (the qube object)
:param event: Event name (``'backup-get-files'``)
Handlers should yield paths of the files.
.. event:: domain-restore (subject, event)
Domain was just restored from backup, although the storage was not
yet verified and the app object was not yet saved.
:param subject: Event emitter (the qube object)
:param event: Event name (``'domain-restore'``)
.. event:: domain-feature-set (subject, event, feature, value
[, oldvalue])
A feature was changed.
*oldvalue* is present only when there was any.
:param subject: Event emitter (the qube object)
:param event: Event name (``'domain-feature-set'``)
:param feature: feature name
:param value: new value
:param oldvalue: old value, if any
.. event:: domain-feature-delete (subject, event, feature)
A feature was removed.
:param subject: Event emitter (the qube object)
:param event: Event name (``'domain-feature-delete'``)
:param feature: feature name
.. event:: domain-tag-add (subject, event, tag)
A tag was added.
:param subject: Event emitter (the qube object)
:param event: Event name (``'domain-tag-add'``)
:param tag: tag name
.. event:: domain-tag-delete (subject, event, tag)
A feature was removed.
:param subject: Event emitter (the qube object)
:param event: Event name (``'domain-tag-delete'``)
:param tag: tag name
.. event:: feature-request (subject, event, *, untrusted_features)
The domain is performing a feature request.
:param subject: Event emitter (the qube object)
:param event: Event name (``'feature-request'``)
:param untrusted_features: :py:class:`dict` containing the feature \
request
The content of the `untrusted_features` variable is, as the name
            implies, **UNTRUSTED**. To remind the programmer of this, the
            variable name has to be kept exactly as provided.
            It is up to the extensions to decide what to do with the request,
            ranging from plainly ignoring it to copying it verbatim into
            :py:attr:`features` with only minimal sanitisation.
.. event:: monitor-layout-change (subject, event, monitor_layout)
Desktop layout was changed, probably because a display was plugged
in or out.
:param subject: Event emitter (the qube object)
:param event: Event name (``'monitor-layout-change'``)
:param monitor_layout: The new layout
.. event:: firewall-changed (subject, event)
Firewall was changed.
:param subject: Event emitter (the qube object)
:param event: Event name (``'firewall-changed'``)
.. event:: net-domain-connect (subject, event, vm)
            Fired after connecting a domain to this vm.
:param subject: Event emitter (the qube object)
:param event: Event name (``'net-domain-connect'``)
:param vm: The domain that was just connected.
On the `vm` object there was probably ``property-set:netvm`` fired
earlier.
'''
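    # Illustrative sketch only (the handler name and log message are made up):
    # code can subscribe to the events documented above with the same
    # decorator this class itself uses further down, e.g.
    #
    #   @qubes.events.handler('domain-start')
    #   def on_domain_start(self, event, **kwargs):
    #       self.log.info('qube has started')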
#
# per-class properties
#
#: directory in which domains of this class will reside
dir_path_prefix = qubes.config.system_path['qubes_appvms_dir']
#
# properties loaded from XML
#
label = qubes.property('label',
setter=qubes.vm.setter_label,
saver=(lambda self, prop, value: 'label-{}'.format(value.index)),
doc='''Colourful label assigned to VM. This is where the colour of the
padlock is set.''')
# provides_network = qubes.property('provides_network',
# type=bool, setter=qubes.property.bool,
# doc='`True` if it is NetVM or ProxyVM, false otherwise.')
qid = qubes.property('qid', type=int, write_once=True,
setter=_setter_qid,
clone=False,
        doc='''Internal, persistent identifier of a particular domain. Note
            this is different from the Xen domid.''')
name = qubes.property('name', type=str, write_once=True,
clone=False,
doc='User-specified name of the domain.')
uuid = qubes.property('uuid', type=uuid.UUID, write_once=True,
clone=False,
doc='UUID from libvirt.')
virt_mode = qubes.property('virt_mode',
type=str, setter=_setter_virt_mode,
default='hvm',
doc='''Virtualisation mode: full virtualisation ("hvm"),
or paravirtualisation ("pv")''')
installed_by_rpm = qubes.property('installed_by_rpm',
type=bool, setter=qubes.property.bool,
default=False,
doc='''If this domain's image was installed from package tracked by
package manager.''')
memory = qubes.property('memory', type=int,
setter=_setter_positive_int,
default=(lambda self:
qubes.config.defaults[
'hvm_memory' if self.virt_mode == 'hvm' else 'memory']),
doc='Memory currently available for this VM.')
maxmem = qubes.property('maxmem', type=int,
setter=_setter_positive_int,
default=(lambda self:
int(min(self.app.host.memory_total / 1024 / 2, 4000))),
doc='''Maximum amount of memory available for this VM (for the purpose
of the memory balancer).''')
stubdom_mem = qubes.property('stubdom_mem', type=int,
setter=_setter_positive_int,
default=None,
        doc='Memory amount allocated for the stubdom')
vcpus = qubes.property('vcpus',
type=int,
setter=_setter_positive_int,
default=2,
doc='Number of virtual CPUs for a qube')
# CORE2: swallowed uses_default_kernel
kernel = qubes.property('kernel', type=str,
setter=_setter_kernel,
default=(lambda self: self.app.default_kernel),
doc='Kernel used by this domain.')
# CORE2: swallowed uses_default_kernelopts
# pylint: disable=no-member
kernelopts = qubes.property('kernelopts', type=str, load_stage=4,
default=(lambda self: qubes.config.defaults['kernelopts_pcidevs']
# pylint: disable=no-member
if list(self.devices['pci'].persistent())
else self.template.kernelopts if hasattr(self, 'template')
else qubes.config.defaults['kernelopts']),
doc='Kernel command line passed to domain.')
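    # Default resolution for kernelopts (see the lambda above): use the
    # PCI-passthrough defaults when any persistent PCI device is attached,
    # otherwise inherit the template's kernelopts if there is a template,
    # otherwise fall back to the global default.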
debug = qubes.property('debug', type=bool, default=False,
setter=qubes.property.bool,
doc='Turns on debugging features.')
# XXX what this exactly does?
# XXX shouldn't this go to standalone VM and TemplateVM, and leave here
# only plain property?
default_user = qubes.property('default_user', type=str,
# pylint: disable=no-member
default=(lambda self: self.template.default_user
if hasattr(self, 'template') else 'user'),
setter=_setter_default_user,
doc='FIXME')
# pylint: enable=no-member
# @property
# def default_user(self):
# if self.template is not None:
# return self.template.default_user
# else:
# return self._default_user
qrexec_timeout = qubes.property('qrexec_timeout', type=int, default=60,
setter=_setter_positive_int,
doc='''Time in seconds after which qrexec connection attempt is deemed
failed. Operating system inside VM should be able to boot in this
time.''')
autostart = qubes.property('autostart', default=False,
type=bool, setter=qubes.property.bool,
doc='''Setting this to `True` means that VM should be autostarted on
dom0 boot.''')
include_in_backups = qubes.property('include_in_backups',
default=True,
type=bool, setter=qubes.property.bool,
doc='If this domain is to be included in default backup.')
# format got changed from %s to str(datetime.datetime)
backup_timestamp = qubes.property('backup_timestamp', default=None,
setter=(lambda self, prop, value:
value if isinstance(value, datetime.datetime) else
datetime.datetime.fromtimestamp(int(value))),
saver=(lambda self, prop, value: value.strftime('%s')),
doc='FIXME')
default_dispvm = qubes.VMProperty('default_dispvm',
load_stage=4,
allow_none=True,
default=(lambda self: self.app.default_dispvm),
doc='Default VM to be used as Disposable VM for service calls.')
updateable = qubes.property('updateable',
default=(lambda self: not hasattr(self, 'template')),
type=bool,
setter=qubes.property.forbidden,
doc='True if this machine may be updated on its own.')
#
# static, class-wide properties
#
#
# properties not loaded from XML, calculated at run-time
#
def __str__(self):
return self.name
# VMM-related
@qubes.stateless_property
def xid(self):
'''Xen ID.
Or not Xen, but ID.
'''
if self.libvirt_domain is None:
return -1
try:
return self.libvirt_domain.ID()
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
return -1
else:
self.log.exception('libvirt error code: {!r}'.format(
e.get_error_code()))
raise
@qubes.stateless_property
def stubdom_xid(self):
if not self.is_running():
return -1
if self.app.vmm.xs is None:
return -1
stubdom_xid_str = self.app.vmm.xs.read('',
'/local/domain/{}/image/device-model-domid'.format(self.xid))
if stubdom_xid_str is None or not stubdom_xid_str.isdigit():
return -1
return int(stubdom_xid_str)
@property
def attached_volumes(self):
result = []
xml_desc = self.libvirt_domain.XMLDesc()
xml = lxml.etree.fromstring(xml_desc)
for disk in xml.xpath("//domain/devices/disk"):
if disk.find('backenddomain') is not None:
pool_name = 'p_%s' % disk.find('backenddomain').get('name')
pool = self.app.pools[pool_name]
vid = disk.find('source').get('dev').split('/dev/')[1]
for volume in pool.volumes:
if volume.vid == vid:
result += [volume]
break
return result + list(self.volumes.values())
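    # Note: attached_volumes parses the live libvirt XML, so besides this
    # qube's own volumes it also reports volumes attached at runtime from
    # other pools (disks carrying a <backenddomain> element).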
@property
def libvirt_domain(self):
'''Libvirt domain object from libvirt.
May be :py:obj:`None`, if libvirt knows nothing about this domain.
'''
if self._libvirt_domain is not None:
return self._libvirt_domain
# XXX _update_libvirt_domain?
try:
self._libvirt_domain = self.app.vmm.libvirt_conn.lookupByUUID(
self.uuid.bytes)
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
self._update_libvirt_domain()
else:
raise
return self._libvirt_domain
@property
def block_devices(self):
''' Return all :py:class:`qubes.storage.BlockDevice` for current domain
for serialization in the libvirt XML template as <disk>.
'''
for v in self.volumes.values():
block_dev = v.block_device()
if block_dev is not None:
yield block_dev
@property
def untrusted_qdb(self):
'''QubesDB handle for this domain.'''
if self._qdb_connection is None:
if self.is_running():
import qubesdb # pylint: disable=import-error
self._qdb_connection = qubesdb.QubesDB(self.name)
return self._qdb_connection
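    # The untrusted_ prefix follows the same naming convention as
    # untrusted_features in the class docstring: data read through this
    # QubesDB handle may be VM-controlled and should be sanitised by callers.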
@property
def dir_path(self):
'''Root directory for files related to this domain'''
return os.path.join(
qubes.config.qubes_base_dir,
self.dir_path_prefix,
self.name)
@property
def icon_path(self):
return os.path.join(self.dir_path, 'icon.png')
@property
def conf_file(self):
return os.path.join(self.dir_path, 'libvirt.xml')
# network-related
#
# constructor
#
def __init__(self, app, xml, volume_config=None, **kwargs):
# migrate renamed properties
if xml is not None:
node_hvm = xml.find('./properties/property[@name=\'hvm\']')
if node_hvm is not None:
if qubes.property.bool(None, None, node_hvm.text):
kwargs['virt_mode'] = 'hvm'
else:
kwargs['virt_mode'] = 'pv'
node_hvm.getparent().remove(node_hvm)
super(QubesVM, self).__init__(app, xml, **kwargs)
if volume_config is None:
volume_config = {}
if hasattr(self, 'volume_config'):
if xml is not None:
for node in xml.xpath('volume-config/volume'):
name = node.get('name')
assert name
for key, value in node.items():
# pylint: disable=no-member
if value == 'True':
value = True
try:
self.volume_config[name][key] = value
except KeyError:
self.volume_config[name] = {key: value}
for name, conf in volume_config.items():
for key, value in conf.items():
# pylint: disable=no-member
try:
self.volume_config[name][key] = value
except KeyError:
self.volume_config[name] = {key: value}
elif volume_config:
raise TypeError(
'volume_config specified, but {} did not expect that.'.format(
self.__class__.__name__))
# Init private attrs
self._libvirt_domain = None
self._qdb_connection = None
if xml is None:
# we are creating new VM and attributes came through kwargs
assert hasattr(self, 'qid')
assert hasattr(self, 'name')
# Linux specific cap: max memory can't scale beyond 10.79*init_mem
# see https://groups.google.com/forum/#!topic/qubes-devel/VRqkFj1IOtA
if self.maxmem > self.memory * 10:
self.maxmem = self.memory * 10
if xml is None:
# new qube, disable updates check if requested for new qubes
# SEE: 1637 when features are done, migrate to plugin
if not self.app.check_updates_vm:
self.features['check-updates'] = False
# will be initialized after loading all the properties
#: operations which shouldn't happen simultaneously with qube startup
# (including another startup of the same qube)
self.startup_lock = asyncio.Lock()
# fire hooks
if xml is None:
self.events_enabled = True
self.fire_event('domain-init')
def close(self):
if self._qdb_connection is not None:
self._qdb_connection.close()
self._qdb_connection = None
super().close()
def __hash__(self):
return self.qid
def __lt__(self, other):
return self.name < other.name
def __xml__(self):
element = super(QubesVM, self).__xml__()
if hasattr(self, 'volumes'):
volume_config_node = lxml.etree.Element('volume-config')
for volume in self.volumes.values():
volume_config_node.append(volume.__xml__())
element.append(volume_config_node)
return element
#
# event handlers
#
@qubes.events.handler('domain-init', 'domain-load')
def on_domain_init_loaded(self, event):
# pylint: disable=unused-argument
if not hasattr(self, 'uuid'):
self.uuid = uuid.uuid4()
# Initialize VM image storage class;
# it might be already initialized by a recursive call from a child VM
if self.storage is None:
self.storage = qubes.storage.Storage(self)
if not self.app.vmm.offline_mode and self.is_running():
self.start_qdb_watch(self.name)
@qubes.events.handler('property-set:label')
def on_property_set_label(self, event, name, newvalue, oldvalue=None):
# pylint: disable=unused-argument
if self.icon_path:
try:
os.remove(self.icon_path)
except OSError:
pass
if hasattr(os, "symlink"):
os.symlink(newvalue.icon_path, self.icon_path)
subprocess.call(['sudo', 'xdg-icon-resource', 'forceupdate'])
else:
shutil.copy(newvalue.icon_path, self.icon_path)
@qubes.events.handler('property-pre-set:kernel')
def on_property_pre_set_kernel(self, event, name, newvalue, oldvalue=None):
# pylint: disable=unused-argument
if not newvalue:
return
dirname = os.path.join(
qubes.config.qubes_base_dir,
qubes.config.system_path['qubes_kernels_base_dir'],
newvalue)
if not os.path.exists(dirname):
raise qubes.exc.QubesPropertyValueError(self,
self.property_get_def(name), newvalue,
'Kernel {!r} not installed'.format(newvalue))
for filename in ('vmlinuz', 'initramfs'):
if not os.path.exists(os.path.join(dirname, filename)):
raise qubes.exc.QubesPropertyValueError(self,
self.property_get_def(name), newvalue,
'Kernel {!r} not properly installed: '
'missing {!r} file'.format(newvalue, filename))
@qubes.events.handler('property-pre-set:autostart')
def on_property_pre_set_autostart(self, event, name, newvalue,
oldvalue=None):
# pylint: disable=unused-argument
# workaround https://bugzilla.redhat.com/show_bug.cgi?id=1181922
if newvalue:
retcode = subprocess.call(
["sudo", "ln", "-sf",
"/usr/lib/systemd/system/[email protected]",
"/etc/systemd/system/multi-user.target.wants/qubes-vm@"
"{}.service".format(self.name)])
else:
retcode = subprocess.call(
['sudo', 'systemctl', 'disable',
'qubes-vm@{}.service'.format(self.name)])
if retcode:
raise qubes.exc.QubesException(
'Failed to set autostart for VM in systemd')
@qubes.events.handler('property-pre-del:autostart')
def on_property_pre_del_autostart(self, event, name, oldvalue=None):
# pylint: disable=unused-argument
if oldvalue:
retcode = subprocess.call(
['sudo', 'systemctl', 'disable',
'qubes-vm@{}.service'.format(self.name)])
if retcode:
raise qubes.exc.QubesException(
'Failed to reset autostart for VM in systemd')
#
# methods for changing domain state
#
@asyncio.coroutine
def start(self, start_guid=True, notify_function=None,
mem_required=None):
'''Start domain
:param bool start_guid: FIXME
:param collections.Callable notify_function: FIXME
:param int mem_required: FIXME
'''
with (yield from self.startup_lock):
# Intentionally not used is_running(): eliminate also "Paused",
# "Crashed", "Halting"
if self.get_power_state() != 'Halted':
return
self.log.info('Starting {}'.format(self.name))
yield from self.fire_event_async('domain-pre-start',
pre_event=True,
start_guid=start_guid, mem_required=mem_required)
yield from self.storage.verify()
if self.netvm is not None:
# pylint: disable = no-member
if self.netvm.qid != 0:
if not self.netvm.is_running():
yield from self.netvm.start(start_guid=start_guid,
notify_function=notify_function)
qmemman_client = yield from asyncio.get_event_loop().\
run_in_executor(None, self.request_memory, mem_required)
try:
yield from self.storage.start()
self._update_libvirt_domain()
self.libvirt_domain.createWithFlags(
libvirt.VIR_DOMAIN_START_PAUSED)
finally:
if qmemman_client:
qmemman_client.close()
try:
yield from self.fire_event_async('domain-spawn',
start_guid=start_guid)
self.log.info('Setting Qubes DB info for the VM')
yield from self.start_qubesdb()
self.create_qdb_entries()
self.log.warning('Activating the {} VM'.format(self.name))
self.libvirt_domain.resume()
                # close() is not really needed, because the descriptor is
                # close-on-exec anyway. The reason to postpone close() is that
                # xl may not be done constructing the domain after its main
                # process exits, so we close() only once we know the domain is
                # up - a successful unpause is some indicator of that.
if qmemman_client:
qmemman_client.close()
qmemman_client = None
yield from self.start_qrexec_daemon()
yield from self.fire_event_async('domain-start',
start_guid=start_guid)
except: # pylint: disable=bare-except
if self.is_running() or self.is_paused():
# This avoids losing the exception if an exception is
# raised in self.force_shutdown(), because the vm is not
# running or paused
yield from self.kill() # pylint: disable=not-an-iterable
raise
finally:
if qmemman_client:
qmemman_client.close()
return self
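    # Minimal usage sketch (the app object and qube name are illustrative):
    # start() is a coroutine, so it has to be driven by an event loop, e.g.
    #
    #   loop = asyncio.get_event_loop()
    #   loop.run_until_complete(app.domains['work'].start())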
@asyncio.coroutine
def on_domain_shutdown_coro(self):
'''Coroutine for executing cleanup after domain shutdown.
Do not allow domain to be started again until this finishes.
'''
with (yield from self.startup_lock):
yield from self.storage.stop()
@qubes.events.handler('domain-shutdown')
def on_domain_shutdown(self, _event, **_kwargs):
'''Cleanup after domain shutdown'''
# TODO: ensure that domain haven't been started _before_ this
# coroutine got a chance to acquire a lock
asyncio.ensure_future(self.on_domain_shutdown_coro())
@asyncio.coroutine
def shutdown(self, force=False, wait=False):
'''Shutdown domain.
:raises qubes.exc.QubesVMNotStartedError: \
when domain is already shut down.
'''
if self.is_halted():
raise qubes.exc.QubesVMNotStartedError(self)
yield from self.fire_event_async('domain-pre-shutdown', pre_event=True,
force=force)
self.libvirt_domain.shutdown()
while wait and not self.is_halted():
yield from asyncio.sleep(0.25)
return self
@asyncio.coroutine
def kill(self):
        '''Forcefully shutdown (destroy) domain.
:raises qubes.exc.QubesVMNotStartedError: \
when domain is already shut down.
'''
if not self.is_running() and not self.is_paused():
raise qubes.exc.QubesVMNotStartedError(self)
self.libvirt_domain.destroy()
return self
def force_shutdown(self, *args, **kwargs):
'''Deprecated alias for :py:meth:`kill`'''
warnings.warn(
'Call to deprecated function force_shutdown(), use kill() instead',
DeprecationWarning, stacklevel=2)
return self.kill(*args, **kwargs)
@asyncio.coroutine
def suspend(self):
'''Suspend (pause) domain.
        :raises qubes.exc.QubesVMNotRunningError: \
when domain is already shut down.
'''
if not self.is_running() and not self.is_paused():
raise qubes.exc.QubesVMNotRunningError(self)
if list(self.devices['pci'].attached()):
yield from self.run_service_for_stdio('qubes.SuspendPre')
self.libvirt_domain.pMSuspendForDuration(
libvirt.VIR_NODE_SUSPEND_TARGET_MEM, 0, 0)
else:
self.libvirt_domain.suspend()
return self
@asyncio.coroutine
def pause(self):
'''Pause (suspend) domain.'''
if not self.is_running():
raise qubes.exc.QubesVMNotRunningError(self)
self.libvirt_domain.suspend()
return self
@asyncio.coroutine
def resume(self):
'''Resume suspended domain.
:raises qubes.exc.QubesVMNotSuspendedError: when machine is not paused
:raises qubes.exc.QubesVMError: when machine is suspended
'''
# pylint: disable=not-an-iterable
if self.get_power_state() == "Suspended":
self.libvirt_domain.pMWakeup()
yield from self.run_service_for_stdio('qubes.SuspendPost')
else:
yield from self.unpause()
return self
@asyncio.coroutine
def unpause(self):
'''Resume (unpause) a domain'''
if not self.is_paused():
raise qubes.exc.QubesVMNotPausedError(self)
self.libvirt_domain.resume()
return self
@asyncio.coroutine
def run_service(self, service, source=None, user=None,
filter_esc=False, autostart=False, gui=False, **kwargs):
'''Run service on this VM
:param str service: service name
:param qubes.vm.qubesvm.QubesVM source: source domain as presented to
this VM
:param str user: username to run service as
:param bool filter_esc: filter escape sequences to protect terminal \
emulator
:param bool autostart: if :py:obj:`True`, machine will be started if \
it is not running
:param bool gui: when autostarting, also start gui daemon
:rtype: asyncio.subprocess.Process
.. note::
User ``root`` is redefined to ``SYSTEM`` in the Windows agent code
'''
# UNSUPPORTED from previous incarnation:
# localcmd, wait, passio*, notify_function, `-e` switch
#
# - passio* and friends depend on params to command (like in stdlib)
# - the filter_esc is orthogonal to passio*
# - input: see run_service_for_stdio
# - wait has no purpose since this is asynchronous
# - notify_function is gone
source = 'dom0' if source is None else self.app.domains[source].name
if user is None:
user = self.default_user
if self.is_paused():
# XXX what about autostart?
raise qubes.exc.QubesVMNotRunningError(
self, 'Domain {!r} is paused'.format(self.name))
elif not self.is_running():
if not autostart:
raise qubes.exc.QubesVMNotRunningError(self)
yield from self.start(start_guid=gui)
if not self.is_qrexec_running():
raise qubes.exc.QubesVMError(
self, 'Domain {!r}: qrexec not connected'.format(self.name))
yield from self.fire_event_async('domain-cmd-pre-run', pre_event=True,
start_guid=gui)
return (yield from asyncio.create_subprocess_exec(
qubes.config.system_path['qrexec_client_path'],
'-d', str(self.name),
*(('-t', '-T') if filter_esc else ()),
'{}:QUBESRPC {} {}'.format(user, service, source),
**kwargs))
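    # Illustrative sketch (not part of the original code): calling run_service()
    # from another coroutine and collecting the service output. The qube object,
    # service name and use of autostart below are hypothetical placeholders.
    #
    #     @asyncio.coroutine
    #     def query_date(vm):
    #         proc = yield from vm.run_service('qubes.GetDate', autostart=True,
    #                                          stdout=subprocess.PIPE)
    #         untrusted_stdout, _ = yield from proc.communicate()
    #         return untrusted_stdout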
@asyncio.coroutine
def run_service_for_stdio(self, *args, input=None, **kwargs):
'''Run a service, pass an optional input and return (stdout, stderr).
Raises an exception if return code != 0.
*args* and *kwargs* are passed verbatim to :py:meth:`run_service`.
.. warning::
            There are some combinations of stdio-related *kwargs*, which are
not filtered for problems originating between the keyboard and the
chair.
''' # pylint: disable=redefined-builtin
kwargs.setdefault('stdin', subprocess.PIPE)
kwargs.setdefault('stdout', subprocess.PIPE)
kwargs.setdefault('stderr', subprocess.PIPE)
p = yield from self.run_service(*args, **kwargs)
# this one is actually a tuple, but there is no need to unpack it
stdouterr = yield from p.communicate(input=input)
if p.returncode:
raise subprocess.CalledProcessError(p.returncode,
args[0], *stdouterr)
return stdouterr
@staticmethod
def _prepare_input_for_vmshell(command, input):
'''Prepare shell input for the given command and optional (real) input
''' # pylint: disable=redefined-builtin
if input is None:
input = b''
return b''.join((command.rstrip('\n').encode('utf-8'), b'\n', input))
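    # For example (illustrative, not part of the original code):
    #     QubesVM._prepare_input_for_vmshell('ls -l\n', b'spurious stdin')
    # returns b'ls -l\nspurious stdin' -- the command becomes the first line fed
    # to qubes.VMShell and the caller-supplied input follows it.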
def run(self, command, user=None, **kwargs):
'''Run a shell command inside the domain using qrexec.
This method is a coroutine.
''' # pylint: disable=redefined-builtin
if user is None:
user = self.default_user
return asyncio.create_subprocess_exec(
qubes.config.system_path['qrexec_client_path'],
'-d', str(self.name),
'{}:{}'.format(user, command),
**kwargs)
@asyncio.coroutine
def run_for_stdio(self, *args, input=None, **kwargs):
'''Run a shell command inside the domain using qubes.VMShell qrexec.
This method is a coroutine.
*kwargs* are passed verbatim to :py:meth:`run_service_for_stdio`.
See disclaimer there.
''' # pylint: disable=redefined-builtin
kwargs.setdefault('stdin', subprocess.PIPE)
kwargs.setdefault('stdout', subprocess.PIPE)
kwargs.setdefault('stderr', subprocess.PIPE)
p = yield from self.run(*args, **kwargs)
stdouterr = yield from p.communicate(input=input)
if p.returncode:
raise subprocess.CalledProcessError(p.returncode,
args[0], *stdouterr)
return stdouterr
def request_memory(self, mem_required=None):
# overhead of per-qube/per-vcpu Xen structures,
# taken from OpenStack nova/virt/xenapi/driver.py
# see https://wiki.openstack.org/wiki/XenServer/Overhead
# add an extra MB because Nova rounds up to MBs
if not qmemman_present:
return
if mem_required is None:
if self.virt_mode == 'hvm':
if self.stubdom_mem:
stubdom_mem = self.stubdom_mem
else:
if self.features.check_with_template('linux-stubdom', True):
stubdom_mem = 128 # from libxl_create.c
else:
stubdom_mem = 28 # from libxl_create.c
stubdom_mem += 16 # video ram
else:
stubdom_mem = 0
mem_required = int(self.memory + stubdom_mem) * 1024 * 1024
qmemman_client = qubes.qmemman.client.QMemmanClient()
try:
mem_required_with_overhead = mem_required + MEM_OVERHEAD_BASE \
+ self.vcpus * MEM_OVERHEAD_PER_VCPU
got_memory = qmemman_client.request_memory(
mem_required_with_overhead)
except IOError as e:
raise IOError('Failed to connect to qmemman: {!s}'.format(e))
if not got_memory:
qmemman_client.close()
raise qubes.exc.QubesMemoryError(self)
return qmemman_client
@staticmethod
@asyncio.coroutine
def start_daemon(*command, input=None, **kwargs):
'''Start a daemon for the VM
        This function takes care to run it as the appropriate user.
:param command: command to run (array for
:py:meth:`subprocess.check_call`)
:param kwargs: args for :py:meth:`subprocess.check_call`
:return: None
''' # pylint: disable=redefined-builtin
if os.getuid() == 0:
# try to always have VM daemons running as normal user, otherwise
# some files (like clipboard) may be created as root and cause
# permission problems
qubes_group = grp.getgrnam('qubes')
command = ['runuser', '-u', qubes_group.gr_mem[0], '--'] + \
list(command)
p = yield from asyncio.create_subprocess_exec(*command, **kwargs)
stdout, stderr = yield from p.communicate(input=input)
if p.returncode:
raise subprocess.CalledProcessError(p.returncode, command,
output=stdout, stderr=stderr)
@asyncio.coroutine
def start_qrexec_daemon(self):
'''Start qrexec daemon.
:raises OSError: when starting fails.
'''
self.log.debug('Starting the qrexec daemon')
qrexec_args = [str(self.xid), self.name, self.default_user]
if not self.debug:
qrexec_args.insert(0, "-q")
qrexec_env = os.environ.copy()
if not self.features.check_with_template('qrexec', False):
self.log.debug(
'Starting the qrexec daemon in background, because of features')
qrexec_env['QREXEC_STARTUP_NOWAIT'] = '1'
else:
qrexec_env['QREXEC_STARTUP_TIMEOUT'] = str(self.qrexec_timeout)
try:
yield from self.start_daemon(
qubes.config.system_path['qrexec_daemon_path'], *qrexec_args,
env=qrexec_env)
except subprocess.CalledProcessError:
raise qubes.exc.QubesVMError(self, 'Cannot execute qrexec-daemon!')
@asyncio.coroutine
def start_qubesdb(self):
'''Start QubesDB daemon.
:raises OSError: when starting fails.
'''
# drop old connection to QubesDB, if any
self._qdb_connection = None
self.log.info('Starting Qubes DB')
try:
yield from self.start_daemon(
qubes.config.system_path['qubesdb_daemon_path'],
str(self.xid),
self.name)
except subprocess.CalledProcessError:
raise qubes.exc.QubesException('Cannot execute qubesdb-daemon')
@asyncio.coroutine
def create_on_disk(self, pool=None, pools=None):
'''Create files needed for VM.
'''
self.log.info('Creating directory: {0}'.format(self.dir_path))
os.makedirs(self.dir_path, mode=0o775)
if pool or pools:
# pylint: disable=attribute-defined-outside-init
self.volume_config = _patch_volume_config(self.volume_config, pool,
pools)
self.storage = qubes.storage.Storage(self)
try:
yield from self.storage.create()
except:
try:
yield from self.storage.remove()
os.rmdir(self.dir_path)
except: # pylint: disable=bare-except
self.log.exception('failed to cleanup {} after failed VM '
'creation'.format(self.dir_path))
raise
self.log.info('Creating icon symlink: {} -> {}'.format(
self.icon_path, self.label.icon_path))
if hasattr(os, "symlink"):
os.symlink(self.label.icon_path, self.icon_path)
else:
shutil.copy(self.label.icon_path, self.icon_path)
# fire hooks
yield from self.fire_event_async('domain-create-on-disk')
@asyncio.coroutine
def remove_from_disk(self):
'''Remove domain remnants from disk.'''
if not self.is_halted():
raise qubes.exc.QubesVMNotHaltedError(
"Can't remove VM {!s}, beacuse it's in state {!r}.".format(
self, self.get_power_state()))
yield from self.fire_event_async('domain-remove-from-disk')
try:
# TODO: make it async?
shutil.rmtree(self.dir_path)
except OSError as e:
if e.errno == errno.ENOENT:
pass
else:
raise
yield from self.storage.remove()
@asyncio.coroutine
def clone_disk_files(self, src, pool=None, pools=None, ):
'''Clone files from other vm.
:param qubes.vm.qubesvm.QubesVM src: source VM
'''
        # If the current vm name is not part of `self.app.domains.keys()`,
        # then the current vm is still being created. Calling
        # `self.is_halted()` at this point would instantiate libvirt, which we
        # want to avoid.
if self.name in self.app.domains.keys() and not self.is_halted():
raise qubes.exc.QubesVMNotHaltedError(
self, 'Cannot clone a running domain {!r}'.format(self.name))
msg = "Destination {!s} already exists".format(self.dir_path)
assert not os.path.exists(self.dir_path), msg
self.log.info('Creating directory: {0}'.format(self.dir_path))
os.makedirs(self.dir_path, mode=0o775)
if pool or pools:
# pylint: disable=attribute-defined-outside-init
self.volume_config = _patch_volume_config(self.volume_config, pool,
pools)
self.storage = qubes.storage.Storage(self)
yield from self.storage.clone(src)
self.storage.verify()
assert self.volumes != {}
if src.icon_path is not None \
and os.path.exists(src.dir_path) \
and self.icon_path is not None:
if os.path.islink(src.icon_path):
icon_path = os.readlink(src.icon_path)
self.log.info(
'Creating icon symlink {} -> {}'.format(
self.icon_path, icon_path))
os.symlink(icon_path, self.icon_path)
else:
self.log.info(
'Copying icon {} -> {}'.format(
src.icon_path, self.icon_path))
shutil.copy(src.icon_path, self.icon_path)
# fire hooks
yield from self.fire_event_async('domain-clone-files', src=src)
#
# methods for querying domain state
#
# state of the machine
def get_power_state(self):
'''Return power state description string.
Return value may be one of those:
=============== ========================================================
return value meaning
=============== ========================================================
``'Halted'`` Machine is not active.
``'Transient'`` Machine is running, but does not have :program:`guid`
or :program:`qrexec` available.
``'Running'`` Machine is ready and running.
``'Paused'`` Machine is paused.
``'Suspended'`` Machine is S3-suspended.
``'Halting'`` Machine is in process of shutting down.
        ``'Dying'``     Machine is in process of shutting down (cleanup).
``'Crashed'`` Machine crashed and is unusable, probably because of
bug in dom0.
``'NA'`` Machine is in unknown state (most likely libvirt domain
is undefined).
=============== ========================================================
FIXME: graph below may be incomplete and wrong. Click on method name to
see its documentation.
.. graphviz::
digraph {
node [fontname="sans-serif"];
edge [fontname="mono"];
Halted;
NA;
Dying;
Crashed;
Transient;
Halting;
Running;
Paused [color=gray75 fontcolor=gray75];
Suspended;
NA -> Halted;
Halted -> NA [constraint=false];
Halted -> Transient
[xlabel="start()" URL="#qubes.vm.qubesvm.QubesVM.start"];
Transient -> Running;
Running -> Halting
[xlabel="shutdown()"
URL="#qubes.vm.qubesvm.QubesVM.shutdown"
constraint=false];
Halting -> Dying -> Halted [constraint=false];
/* cosmetic, invisible edges to put rank constraint */
Dying -> Halting [style="invis"];
Halting -> Transient [style="invis"];
Running -> Halted
[label="force_shutdown()"
URL="#qubes.vm.qubesvm.QubesVM.force_shutdown"
constraint=false];
Running -> Crashed [constraint=false];
Crashed -> Halted [constraint=false];
Running -> Paused
[label="pause()" URL="#qubes.vm.qubesvm.QubesVM.pause"
color=gray75 fontcolor=gray75];
Running -> Suspended
[label="suspend()" URL="#qubes.vm.qubesvm.QubesVM.suspend"
color=gray50 fontcolor=gray50];
Paused -> Running
[label="unpause()" URL="#qubes.vm.qubesvm.QubesVM.unpause"
color=gray75 fontcolor=gray75];
Suspended -> Running
[label="resume()" URL="#qubes.vm.qubesvm.QubesVM.resume"
color=gray50 fontcolor=gray50];
Running -> Suspended
[label="suspend()" URL="#qubes.vm.qubesvm.QubesVM.suspend"];
Suspended -> Running
[label="resume()" URL="#qubes.vm.qubesvm.QubesVM.resume"];
{ rank=source; Halted NA };
{ rank=same; Transient Halting };
{ rank=same; Crashed Dying };
{ rank=sink; Paused Suspended };
}
.. seealso::
http://wiki.libvirt.org/page/VM_lifecycle
Description of VM life cycle from the point of view of libvirt.
https://libvirt.org/html/libvirt-libvirt-domain.html#virDomainState
Libvirt's enum describing precise state of a domain.
''' # pylint: disable=too-many-return-statements
# don't try to define libvirt domain, if it isn't there, VM surely
# isn't running
# reason for this "if": allow vm.is_running() in PCI (or other
# device) extension while constructing libvirt XML
if self.app.vmm.offline_mode:
return 'Halted'
if self._libvirt_domain is None:
try:
self._libvirt_domain = self.app.vmm.libvirt_conn.lookupByUUID(
self.uuid.bytes)
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
return 'Halted'
else:
raise
libvirt_domain = self.libvirt_domain
if libvirt_domain is None:
return 'Halted'
try:
if libvirt_domain.isActive():
# pylint: disable=line-too-long
if libvirt_domain.state()[0] == libvirt.VIR_DOMAIN_PAUSED:
return "Paused"
elif libvirt_domain.state()[0] == libvirt.VIR_DOMAIN_CRASHED:
return "Crashed"
elif libvirt_domain.state()[0] == libvirt.VIR_DOMAIN_SHUTDOWN:
return "Halting"
elif libvirt_domain.state()[0] == libvirt.VIR_DOMAIN_SHUTOFF:
return "Dying"
elif libvirt_domain.state()[0] == libvirt.VIR_DOMAIN_PMSUSPENDED: # nopep8
return "Suspended"
else:
if not self.is_fully_usable():
return "Transient"
return "Running"
return 'Halted'
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
return 'Halted'
raise
assert False
def is_halted(self):
''' Check whether this domain's state is 'Halted'
:returns: :py:obj:`True` if this domain is halted, \
:py:obj:`False` otherwise.
:rtype: bool
'''
return self.get_power_state() == 'Halted'
def is_running(self):
'''Check whether this domain is running.
:returns: :py:obj:`True` if this domain is started, \
:py:obj:`False` otherwise.
:rtype: bool
'''
if self.app.vmm.offline_mode:
return False
# don't try to define libvirt domain, if it isn't there, VM surely
# isn't running
# reason for this "if": allow vm.is_running() in PCI (or other
# device) extension while constructing libvirt XML
if self._libvirt_domain is None:
try:
self._libvirt_domain = self.app.vmm.libvirt_conn.lookupByUUID(
self.uuid.bytes)
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
return False
else:
raise
return self.libvirt_domain.isActive()
def is_paused(self):
'''Check whether this domain is paused.
:returns: :py:obj:`True` if this domain is paused, \
:py:obj:`False` otherwise.
:rtype: bool
'''
return self.libvirt_domain \
and self.libvirt_domain.state()[0] == libvirt.VIR_DOMAIN_PAUSED
def is_qrexec_running(self):
'''Check whether qrexec for this domain is available.
:returns: :py:obj:`True` if qrexec is running, \
:py:obj:`False` otherwise.
:rtype: bool
'''
if self.xid < 0:
return False
return os.path.exists('/var/run/qubes/qrexec.%s' % self.name)
def is_fully_usable(self):
return all(self.fire_event('domain-is-fully-usable'))
@qubes.events.handler('domain-is-fully-usable')
def on_domain_is_fully_usable(self, event):
'''Check whether domain is running and sane.
Currently this checks for running qrexec.
''' # pylint: disable=unused-argument
# Running gui-daemon implies also VM running
if not self.is_qrexec_running():
yield False
# memory and disk
def get_mem(self):
'''Get current memory usage from VM.
:returns: Memory usage [FIXME unit].
:rtype: FIXME
'''
if self.libvirt_domain is None:
return 0
try:
if not self.libvirt_domain.isActive():
return 0
return self.libvirt_domain.info()[1]
except libvirt.libvirtError as e:
if e.get_error_code() in (
# qube no longer exists
libvirt.VIR_ERR_NO_DOMAIN,
# libxl_domain_info failed (race condition from isActive)
libvirt.VIR_ERR_INTERNAL_ERROR):
return 0
else:
self.log.exception(
'libvirt error code: {!r}'.format(e.get_error_code()))
raise
def get_mem_static_max(self):
'''Get maximum memory available to VM.
:returns: Memory limit [FIXME unit].
:rtype: FIXME
'''
if self.libvirt_domain is None:
return 0
try:
return self.libvirt_domain.maxMemory()
except libvirt.libvirtError as e:
if e.get_error_code() in (
# qube no longer exists
libvirt.VIR_ERR_NO_DOMAIN,
# libxl_domain_info failed (race condition from isActive)
libvirt.VIR_ERR_INTERNAL_ERROR):
return 0
else:
self.log.exception(
'libvirt error code: {!r}'.format(e.get_error_code()))
raise
def get_cputime(self):
'''Get total CPU time burned by this domain since start.
:returns: CPU time usage [FIXME unit].
:rtype: FIXME
'''
if self.libvirt_domain is None:
return 0
try:
if not self.libvirt_domain.isActive():
return 0
# this does not work, because libvirt
# return self.libvirt_domain.getCPUStats(
# libvirt.VIR_NODE_CPU_STATS_ALL_CPUS, 0)[0]['cpu_time']/10**9
return self.libvirt_domain.info()[4]
except libvirt.libvirtError as e:
if e.get_error_code() in (
# qube no longer exists
libvirt.VIR_ERR_NO_DOMAIN,
# libxl_domain_info failed (race condition from isActive)
libvirt.VIR_ERR_INTERNAL_ERROR):
return 0
else:
self.log.exception(
'libvirt error code: {!r}'.format(e.get_error_code()))
raise
# miscellanous
def get_start_time(self):
'''Tell when machine was started.
:rtype: datetime.datetime
'''
if not self.is_running():
return None
# TODO shouldn't this be qubesdb?
start_time = self.app.vmm.xs.read('',
'/vm/{}/start_time'.format(self.uuid))
if start_time != '':
return datetime.datetime.fromtimestamp(float(start_time))
return None
#
# helper methods
#
def relative_path(self, path):
'''Return path relative to py:attr:`dir_path`.
:param str path: Path in question.
:returns: Relative path.
'''
return os.path.relpath(path, self.dir_path)
def create_qdb_entries(self):
'''Create entries in Qubes DB.
'''
# pylint: disable=no-member
self.untrusted_qdb.write('/name', self.name)
self.untrusted_qdb.write('/type', self.__class__.__name__)
self.untrusted_qdb.write('/qubes-vm-updateable', str(self.updateable))
self.untrusted_qdb.write('/qubes-vm-persistence',
'full' if self.updateable else 'rw-only')
self.untrusted_qdb.write('/qubes-debug-mode', str(int(self.debug)))
try:
self.untrusted_qdb.write('/qubes-base-template', self.template.name)
except AttributeError:
self.untrusted_qdb.write('/qubes-base-template', '')
self.untrusted_qdb.write('/qubes-random-seed',
base64.b64encode(qubes.utils.urandom(64)))
if self.provides_network:
# '/qubes-netvm-network' value is only checked for being non empty
self.untrusted_qdb.write('/qubes-netvm-network', self.gateway)
self.untrusted_qdb.write('/qubes-netvm-gateway', self.gateway)
self.untrusted_qdb.write('/qubes-netvm-netmask', self.netmask)
for i, addr in zip(('primary', 'secondary'), self.dns):
self.untrusted_qdb.write('/qubes-netvm-{}-dns'.format(i), addr)
if self.netvm is not None:
self.untrusted_qdb.write('/qubes-ip', self.visible_ip)
self.untrusted_qdb.write('/qubes-netmask', self.visible_netmask)
self.untrusted_qdb.write('/qubes-gateway', self.visible_gateway)
for i, addr in zip(('primary', 'secondary'), self.dns):
self.untrusted_qdb.write('/qubes-{}-dns'.format(i), addr)
tzname = qubes.utils.get_timezone()
if tzname:
self.untrusted_qdb.write('/qubes-timezone', tzname)
self.untrusted_qdb.write('/qubes-block-devices', '')
self.untrusted_qdb.write('/qubes-usb-devices', '')
# TODO: Currently the whole qmemman is quite Xen-specific, so stay with
# xenstore for it until decided otherwise
if qmemman_present:
self.app.vmm.xs.set_permissions('',
'/local/domain/{}/memory'.format(self.xid),
[{'dom': self.xid}])
self.fire_event('domain-qdb-create')
self.start_qdb_watch(self.name)
# TODO async; update this in constructor
def _update_libvirt_domain(self):
'''Re-initialise :py:attr:`libvirt_domain`.'''
domain_config = self.create_config_file()
try:
self._libvirt_domain = self.app.vmm.libvirt_conn.defineXML(
domain_config)
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_OS_TYPE \
and e.get_str2() == 'hvm':
raise qubes.exc.QubesVMError(self,
'HVM qubes are not supported on this machine. '
'Check BIOS settings for VT-x/AMD-V extensions.')
else:
raise
#
# workshop -- those are to be reworked later
#
def get_prefmem(self):
# TODO: qmemman is still xen specific
untrusted_meminfo_key = self.app.vmm.xs.read('',
'/local/domain/{}/memory/meminfo'.format(self.xid))
if untrusted_meminfo_key is None or untrusted_meminfo_key == '':
return 0
domain = qubes.qmemman.DomainState(self.xid)
qubes.qmemman.algo.refresh_meminfo_for_domain(
domain, untrusted_meminfo_key)
if domain.mem_used is None:
# apparently invalid xenstore content
return 0
domain.memory_maximum = self.get_mem_static_max() * 1024
return qubes.qmemman.algo.prefmem(domain) / 1024
def _clean_volume_config(config):
common_attributes = ['name', 'pool', 'size',
'revisions_to_keep', 'rw', 'snap_on_start',
'save_on_stop', 'source']
return {k: v for k, v in config.items() if k in common_attributes}
def _patch_pool_config(config, pool=None, pools=None):
assert pool is not None or pools is not None
is_snapshot = config['snap_on_start']
is_rw = config['rw']
name = config['name']
if pool and not is_snapshot and is_rw:
config['pool'] = str(pool)
elif pool:
pass
elif pools and name in pools.keys():
if not is_snapshot:
config['pool'] = str(pools[name])
else:
msg = "Can't clone a snapshot volume {!s} to pool {!s} " \
.format(name, pools[name])
raise qubes.exc.QubesException(msg)
return config
def _patch_volume_config(volume_config, pool=None, pools=None):
assert not (pool and pools), \
'You can not pass pool & pools parameter at same time'
assert pool or pools
result = {}
for name, config in volume_config.items():
# copy only the subset of volume_config key/values
dst_config = _clean_volume_config(config)
if pool is not None or pools is not None:
dst_config = _patch_pool_config(dst_config, pool, pools)
result[name] = dst_config
return result
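# Illustrative sketch (not part of the original module) of how the volume-config
# helpers above combine; the volume names, pool names and sizes are hypothetical.
#
#     volume_config = {
#         'root':    {'name': 'root',    'pool': 'default', 'size': 10 * 2**30,
#                     'snap_on_start': True,  'save_on_stop': False, 'rw': False},
#         'private': {'name': 'private', 'pool': 'default', 'size': 2 * 2**30,
#                     'snap_on_start': False, 'save_on_stop': True,  'rw': True},
#     }
#     # route every writable, non-snapshot volume to a single pool ...
#     _patch_volume_config(volume_config, pool='lvm-ssd')
#     # ... or pick pools per volume name:
#     _patch_volume_config(volume_config, pools={'private': 'lvm-hdd'})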
| gpl-2.0 | 8,375,534,680,518,389,000 | 34.243521 | 91 | 0.570728 | false |
pestefo/viz-youtube | data/check_id_in_different_days.py | 1 | 1547 | import csv
from collections import defaultdict
file1 = '/Users/pestefo/Sync/projects/information-visualization-course/proyecto/data/data.csv'
file2 = '/Users/pestefo/Sync/projects/information-visualization-course/proyecto/data/302.csv'
columns1 = defaultdict(list)
columns2 = defaultdict(list)
with open(file1, 'rU') as f:
reader = csv.DictReader(f)
for row in reader:
for (k,v) in row.items():
columns1[k].append(v)
with open(file2, 'rU') as f:
reader = csv.DictReader(f)
for row in reader:
for (k,v) in row.items():
columns2[k].append(v)
# related = set(columns1['related_1'])
# related.update(columns1['related_2'])
# related.update(columns1['related_3'])
# related.update(columns1['related_4'])
# related.update(columns1['related_5'])
# related.update(columns1['related_6'])
# related.update(columns1['related_7'])
# related.update(columns1['related_8'])
# related.update(columns1['related_9'])
# related.update(columns1['related_10'])
# related.update(columns1['related_11'])
# related.update(columns1['related_12'])
# related.update(columns1['related_13'])
# related.update(columns1['related_14'])
# related.update(columns1['related_15'])
# related.update(columns1['related_16'])
# related.update(columns1['related_17'])
# related.update(columns1['related_18'])
# related.update(columns1['related_19'])
# related.update(columns1['related_20'])
related = set(columns1['id'])
interseccion = related.intersection(set(columns2['id']))
union = related.union(set(columns2['id']))
print len(interseccion)
print len(union) | mit | 8,330,908,455,000,977,000 | 29.96 | 94 | 0.725921 | false |
suizokukan/getpurejinmeiyo | purejinmeiyo.py | 1 | 4310 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
################################################################################
# purejinmeiyo Copyright (C) 2012 Suizokukan
# Contact: suizokukan _A.T._ orange dot fr
#
# This file is part of purejinmeiyo.
# purejinmeiyo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# purejinmeiyo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with purejinmeiyo. If not, see <http://www.gnu.org/licenses/>.
################################################################################
"""
purejinmeiyo : getting a list of the 人名用漢字 (jinmeiyō kanji) not part
of the 常用漢字 (jōyō kanji) extracted from Wikipedia
A quick and dirty script to get all (pure) jinmeiyō kanjis and export
them.
________________________________________________________________________
2014_01_14 : how the data are stored on the Wikipedia page at
http://en.wikipedia.org/wiki/Jinmeiy%C5%8D_kanji :
"KKKKK(V)KKKKK(V)K..." where K=kanji, V = variant
"""
data = "丑丞乃之乎也云亘(亙)些亦亥亨亮仔伊伍伽佃佑伶侃侑俄俠俣俐倭俱倦倖偲傭儲允兎兜其冴凌凜(凛)凧凪凰凱函劉劫勁勺勿匁匡廿卜卯卿厨厩叉叡叢叶只吾吞吻哉哨啄哩喬喧喰喋嘩嘉嘗噌噂圃圭坐尭(堯)坦埴堰堺堵塙壕壬夷奄奎套娃姪姥娩嬉孟宏宋宕宥寅寓寵尖尤屑峨峻崚嵯嵩嶺巌(巖)已巳巴巷巽帖幌幡庄庇庚庵廟廻弘弛彗彦彪彬徠忽怜恢恰恕悌惟惚悉惇惹惺惣慧憐戊或戟托按挺挽掬捲捷捺捧掠揃(摑)摺撒撰撞播撫擢孜敦斐斡斧斯於旭昂昊昏昌昴晏晃(晄)晒晋晟晦晨智暉暢曙曝曳朋朔杏杖杜李杭杵杷枇柑柴柘柊柏柾柚桧(檜)栞桔桂栖桐栗梧梓梢梛梯桶梶椛梁棲椋椀楯楚楕椿楠楓椰楢楊榎樺榊榛槙(槇)槍槌樫槻樟樋橘樽橙檎檀櫂櫛櫓欣欽歎此殆毅毘毬汀汝汐汲沌沓沫洸洲洵洛浩浬淵淳渚(渚)淀淋渥湘湊湛溢滉溜漱漕漣澪濡瀕灘灸灼烏焰焚煌煤煉熙燕燎燦燭燿爾牒牟牡牽犀狼猪(猪)獅玖珂珈珊珀玲琢(琢)琉瑛琥琶琵琳瑚瑞瑶瑳瓜瓢甥甫畠畢疋疏皐皓眸瞥矩砦砥砧硯碓碗碩碧磐磯祇祢(禰)祐(祐)祷(禱)禄(祿)禎(禎)禽禾秦秤稀稔稟稜穣(穰)穹穿窄窪窺竣竪竺竿笈笹笙笠筈筑箕箔篇篠簞簾籾粥粟糊紘紗紐絃紬絆絢綺綜綴緋綾綸縞徽繫繡纂纏羚翔翠耀而耶耽聡肇肋肴胤胡脩腔脹膏臥舜舵芥芹芭芙芦苑茄苔苺茅茉茸茜莞荻莫莉菅菫菖萄菩萌(萠)萊菱葦葵萱葺萩董葡蓑蒔蒐蒼蒲蒙蓉蓮蔭蔣蔦蓬蔓蕎蕨蕉蕃蕪薙蕾蕗藁薩蘇蘭蝦蝶螺蟬蟹蠟衿袈袴裡裟裳襖訊訣註詢詫誼諏諄諒謂諺讃豹貰賑赳跨蹄蹟輔輯輿轟辰辻迂迄辿迪迦這逞逗逢遥(遙)遁遼邑祁郁鄭酉醇醐醍醬釉釘釧銑鋒鋸錘錐錆錫鍬鎧閃閏閤阿陀隈隼雀雁雛雫霞靖鞄鞍鞘鞠鞭頁頌頗顚颯饗馨馴馳駕駿驍魁魯鮎鯉鯛鰯鱒鱗鳩鳶鳳鴨鴻鵜鵬鷗鷲鷺鷹麒麟麿黎黛鼎"
# [ [kanji, None],
# [kanji, variant],
# ...
# ]
kanjis = []
last_last_kanji = None
last_kanji = None
for char in data:
if char == '(':
pass
elif char == ')':
variant = kanjis.pop()[0]
kanjis[-1][1] = variant
else:
kanjis.append( [char, None] )
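# For example (illustrative), feeding the fragment "亘(亙)些" through the loop
# above yields kanjis == [['亘', '亙'], ['些', None]]: a parenthesised kanji is
# popped off again and stored as the variant of the kanji preceding it.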
format_str1 = " '{0}' : ('{0}',),"
format_str2 = " '{0}' : ('{0}', '{1}'),"
for num, kanji in enumerate(kanjis):
if kanji[1] is None:
# no variant
print( " # jinmeiyō kanji #"+str(num+1))
print( format_str1.format(kanji[0]))
else:
# one variant :
print( " # jinmeiyō kanji #"+str(num+1))
print( format_str2.format(kanji[0], kanji[1]))
| gpl-3.0 | -904,835,057,305,429,100 | 42.304348 | 696 | 0.626841 | false |
Petr-By/qtpyvis | qtgui/panels/logging.py | 1 | 14855 | """
File: logging.py
Author: Ulf Krumnack
Email: [email protected]
Github: https://github.com/krumnack
"""
# standard imports
import logging
# Qt imports
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QWidget, QLabel, QPushButton
from PyQt5.QtWidgets import QCheckBox, QRadioButton, QButtonGroup
from PyQt5.QtWidgets import QListWidget, QListWidgetItem, QComboBox
from PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout, QGroupBox
# toolbox imports
from dltb.util.logging import RecorderHandler
from toolbox import Toolbox
# GUI imports
from .panel import Panel
from ..utils import protect
from ..widgets.logging import QLogHandler, QExceptionView
# logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class LoggingPanel(Panel):
"""A panel containing elements to log messages.
Attributes
----------
_log_handler: QLogHandler
A widget to display log messages
"""
_levels = {
"Fatal": logging.FATAL,
"Error": logging.ERROR,
"Warning": logging.WARNING,
"Info": logging.INFO,
"Debug": logging.DEBUG
}
def __init__(self, toolbox: Toolbox=None, **kwargs):
"""Initialization of the LoggingPael.
Parameters
----------
parent : QWidget
The parent argument is sent to the QWidget constructor.
"""
super().__init__(**kwargs)
self._loggingRecorder = None
self._toolbox = None
self._initUI()
self.setToolbox(toolbox)
def _initUI(self):
"""Add the UI elements
* The ``QLogHandler`` showing the log messages
"""
#
# Controls
#
self._log_handler = QLogHandler()
self._log_handler._message_signal.connect(self._new_message)
self._total = QLabel()
self._updateLogViewButton = QPushButton("Update")
self._updateLogViewButton.clicked.connect(self._onUpdateLogView)
self._updateLogViewButton.setEnabled(self._loggingRecorder is not None)
self._clearLogViewButton = QPushButton("Clear")
self._clearLogViewButton.clicked.connect(self._onClearLogView)
self._modules = QListWidget()
self._checkProcess = QCheckBox("Process")
self._checkProcess.clicked.connect(self._updateFormatter)
self._checkThread = QCheckBox("Thread")
self._checkThread.clicked.connect(self._updateFormatter)
self._checkName = QCheckBox("Name")
self._checkName.clicked.connect(self._updateFormatter)
self._checkModule = QCheckBox("Module")
self._checkModule.clicked.connect(self._updateFormatter)
self._checkFile = QCheckBox("File")
self._checkFile.clicked.connect(self._updateFormatter)
self._checkLevel = QCheckBox("Level")
self._checkLevel.clicked.connect(self._updateFormatter)
self._radio = {}
self._levelButtonGroup = QButtonGroup()
for label in self._levels.keys():
self._radio[label] = QRadioButton(label)
self._radio[label].clicked.connect(self._onLoggerLevelClicked)
self._levelButtonGroup.addButton(self._radio[label])
self._checkLoggerEnabled = QCheckBox("enabled")
self._checkLoggerEnabled.clicked.connect(self._onLoggerEnabledClicked)
self._buttonLoggerClearLevel = QPushButton("Clear Level")
self._buttonLoggerClearLevel.clicked.connect(self._onClearLevel)
self._effectiveLevel = QLabel()
self._loggerList = QListWidget()
self._loggerList.setSortingEnabled(True)
self._loggerList.currentItemChanged.connect(self._onCurrentLoggerChanged)
self._loggerList_refresh = QPushButton("Refresh")
self._loggerList_refresh.clicked.connect(self._updateLoggerList)
self._rootLoggerLevel = QComboBox()
for name, level in self._levels.items():
self._rootLoggerLevel.addItem(name, level)
self._rootLoggerLevel.currentIndexChanged.connect(self._onRootLevelChanged)
self._exceptionPanel = QExceptionPanel()
self._updateLoggerList()
self._layoutComponents()
def _layoutComponents(self):
"""Layout the UI elements.
* The ``QLogHandler`` displaying the log messages
"""
layout = QVBoxLayout()
row = QHBoxLayout()
row.addWidget(self._log_handler)
row.addWidget(self._exceptionPanel)
layout.addLayout(row)
row = QHBoxLayout()
text = QHBoxLayout()
text.addWidget(QLabel("Messages: "))
text.addWidget(self._total)
row.addLayout(text)
row.addWidget(self._updateLogViewButton)
row.addWidget(self._clearLogViewButton)
row.addWidget(self._checkProcess)
row.addWidget(self._checkThread)
row.addWidget(self._checkName)
row.addWidget(self._checkModule)
row.addWidget(self._checkFile)
row.addWidget(self._checkLevel)
row.addStretch()
layout.addLayout(row)
row = QHBoxLayout()
column = QVBoxLayout()
column.addWidget(self._loggerList)
column.addWidget(self._loggerList_refresh)
row.addLayout(column)
column = QVBoxLayout()
box = QGroupBox("Root Logger")
boxLayout = QVBoxLayout()
boxLayout.addWidget(self._rootLoggerLevel)
box.setLayout(boxLayout)
column.addWidget(box)
box = QGroupBox("Logger Details")
boxLayout = QVBoxLayout()
boxLayout.addWidget(self._checkLoggerEnabled)
line = QHBoxLayout()
line.addWidget(QLabel("Effective Level: "))
line.addWidget(self._effectiveLevel)
boxLayout.addLayout(line)
for button in self._radio.values():
boxLayout.addWidget(button)
boxLayout.addWidget(self._buttonLoggerClearLevel)
box.setLayout(boxLayout)
column.addWidget(box)
column.addStretch()
row.addLayout(column)
row.addWidget(self._modules)
layout.addLayout(row)
self.setLayout(layout)
def setToolbox(self, toolbox: Toolbox=None) -> None:
self._exceptionPanel.setToolbox(toolbox)
def addLogger(self, logger):
"""Add a logger to this :py:class:LoggingPanel.
LogRecords emitted by that logger will be processed.
"""
logger.addHandler(self._log_handler)
if self._loggingRecorder is not None:
logger.addHandler(self._loggingRecorder)
def removeLogger(self, logger):
"""Remove a logger from this :py:class:LoggingPanel.
LogRecords emitted by that logger will no longer be processed.
"""
logger.removeHandler(self._log_handler)
if self._loggingRecorder is not None:
logger.removeHandler(self._loggingRecorder)
def setLoggingRecorder(self, recorder: RecorderHandler) -> None:
"""Set a logging recorder for this :py:class:LoggingPanel.
Having a logging recorder allows to replay the log messages
recorded by that recorder.
"""
self._loggingRecorder = recorder
self._onUpdateLogView()
self._updateLogViewButton.setEnabled(recorder is not None)
def _new_message(self, message):
total = str(len(self._log_handler))
if self._loggingRecorder is not None:
total += "/" + str(len(self._loggingRecorder))
self._total.setText(total)
def _updateFormatter(self):
format = ""
if self._checkProcess.isChecked():
format += "[%(processName)s] "
if self._checkThread.isChecked():
format += "[%(threadName)s] "
if self._checkName.isChecked():
format += "(%(name)s) "
if self._checkModule.isChecked():
format += "%(module)s "
if self._checkFile.isChecked():
format += "%(filename)s:%(lineno)d: "
if self._checkLevel.isChecked():
format += "%(levelname)s: "
format += "%(message)s"
formatter = logging.Formatter(fmt=format, datefmt="%(asctime)s")
self._log_handler.setFormatter(formatter)
def _onClearLogView(self):
"""Update the log view.
"""
self._log_handler.clear()
def _onUpdateLogView(self):
"""Update the log view.
"""
if self._loggingRecorder is not None:
self._loggingRecorder.replay(self._log_handler)
def _decorateLoggerItem(self, item: QListWidgetItem,
logger: logging.Logger) -> None:
"""Decorate an entry in the logger list reflecting the properties
of the logger.
"""
item.setForeground(self._colorForLogLevel(logger.getEffectiveLevel()))
font = item.font()
font.setBold(bool(logger.level))
item.setFont(font)
item.setBackground(Qt.lightGray if logger.disabled else Qt.white)
def _updateLoggerList(self):
self._loggerList.clear()
self._updateLogger(None)
        # Iterate over a snapshot of the logger dict: iterating over the live
        # dict may raise "RuntimeError: dictionary changed size during
        # iteration" if another thread creates a logger meanwhile.
        for name, logger in list(logging.Logger.manager.loggerDict.items()):
if not isinstance(logger, logging.Logger):
continue
level = logger.getEffectiveLevel()
item = QListWidgetItem(name)
self._decorateLoggerItem(item, logger)
self._loggerList.addItem(item)
index = self._rootLoggerLevel.findData(logging.Logger.root.level)
self._rootLoggerLevel.setCurrentIndex(index)
def _onCurrentLoggerChanged(self, item: QListWidgetItem,
previous: QListWidgetItem) -> None:
"""A logger was selected in the logger list.
"""
logger = (None if item is None else
logging.Logger.manager.loggerDict[item.text()])
self._updateLogger(logger)
def _onRootLevelChanged(self, index: int) -> None:
logging.Logger.root.setLevel(self._rootLoggerLevel.currentData())
self._updateLoggerList()
def _updateLogger(self, logger: logging.Logger):
"""Update the logger group to reflect the currently selected
logger. If ther is no current logger (logger is None), then
the logger group is cleared and disabled.
"""
if logger is None or not logger.level:
checked = self._levelButtonGroup.checkedButton()
if checked is not None:
self._levelButtonGroup.setExclusive(False)
checked.setChecked(False)
self._levelButtonGroup.setExclusive(True)
self._checkLoggerEnabled.setCheckable(logger is not None)
for button in self._levelButtonGroup.buttons():
button.setCheckable(logger is not None)
if logger is None:
self._effectiveLevel.setText("")
else:
self._checkLoggerEnabled.setChecked(not logger.disabled)
self._effectiveLevel.setText(str(logger.getEffectiveLevel()))
if logger.level:
button = self._buttonForForLogLevel(logger.level)
if button is not None:
button.setChecked(True)
def _onLoggerEnabledClicked(self, checked: bool) -> None:
"""A logger enable/disable button was pressed.
"""
for item in self._loggerList.selectedItems():
logger = logging.Logger.manager.loggerDict[item.text()]
logger.disabled = not checked
self._decorateLoggerItem(item, logger)
def _onLoggerLevelClicked(self, checked: bool) -> None:
"""A logger level radio button was pressed.
"""
checked = self._levelButtonGroup.checkedButton()
level = 0 if checked is None else self._levels[checked.text()]
for item in self._loggerList.selectedItems():
logger = logging.Logger.manager.loggerDict[item.text()]
logger.setLevel(level)
self._decorateLoggerItem(item, logger)
def _onClearLevel(self) -> None:
"""Clear the individual log level of the current logger.
"""
logger = None
for item in self._loggerList.selectedItems():
logger = logging.Logger.manager.loggerDict[item.text()]
logger.setLevel(0)
self._decorateLoggerItem(item, logger)
self._updateLogger(logger)
def _buttonForForLogLevel(self, level):
for label, _level in self._levels.items():
if level == _level:
return self._radio[label]
return None
def _colorForLogLevel(self, level):
if level <= logging.DEBUG: return Qt.blue
if level <= logging.INFO: return Qt.green
if level <= logging.WARNING: return Qt.darkYellow
if level <= logging.ERROR: return Qt.red
if level <= logging.FATAL: return Qt.magenta
return Qt.black
from PyQt5.QtWidgets import QPlainTextEdit, QListWidget, QPushButton
class QExceptionPanel(QWidget):
"""
"""
def __init__(self, toolbox: Toolbox=None, **kwargs):
super().__init__(**kwargs)
self._toolbox = None
self._initUI()
self._layoutComponents()
def _initUI(self):
self._exceptionList = QListWidget()
self._exceptionList.currentItemChanged.\
connect(self._onCurrentExceptionChanged)
self._exceptionView = QExceptionView()
self._exceptionButton = QPushButton("Raise Test Exception")
self._exceptionButton.clicked.connect(self._onButtonClicked)
def _layoutComponents(self):
row = QHBoxLayout()
column = QVBoxLayout()
column.addWidget(self._exceptionList)
column.addWidget(self._exceptionButton)
row.addLayout(column)
row.addWidget(self._exceptionView)
self.setLayout(row)
def setToolbox(self, toolbox: Toolbox=None) -> None:
if self._toolbox is not None:
self._toolbox.remove_exception_handler(self.handleException)
self._toolbox = toolbox
if self._toolbox is not None:
self._toolbox.add_exception_handler(self.handleException)
def handleException(self, exception: BaseException) -> None:
self._exceptionView.setException(exception)
@protect
def _onCurrentExceptionChanged(self, item: QListWidgetItem,
previous: QListWidgetItem) -> None:
"""An exception was selected in the exception list.
"""
print(f"FIXME[todo]: exception changed: {item}, {previous}")
@protect
def _onButtonClicked(self, checked: bool) -> None:
"""The raise exceptoin button was pressed.
"""
raise RuntimeError("Just a test error.")
| mit | 4,858,607,966,636,792,000 | 34.538278 | 83 | 0.629754 | false |
adobe-research/spark-gpu | data/generate_kmeans.py | 1 | 1745 | #!/home/ec2-user/anaconda/bin/python
###########################################################################
##
## Copyright (c) 2015 Adobe Systems Incorporated. All rights reserved.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###########################################################################
import sys, getopt
import numpy as np
def kmeansGenerate(k, filename):
data = ""
for i in range(k):
floatlist = list(np.random.uniform(low=0.1, high=10, size=(3)))
floatlist = " ".join(map(str, floatlist)) + '\n'
data = data + floatlist
target = open(filename, 'w')
target.write(str(data))
target.close()
def bayesGenerate(k, filename):
data = ""
for i in range(k):
nplist = list(np.random.uniform(low=0.1, high=10, size=(4)))
intlist = [int(x) for x in nplist]
intlist = " ".join(map(str, intlist)) + '\n'
data = data + intlist
target = open(filename, 'w')
target.write(str(data))
target.close()
def main():
if len(sys.argv) != 3:
print >> sys.stderr, "Usage: generate <length> <file>"
exit(-1)
kmeansGenerate(int(sys.argv[1]),sys.argv[2])
#bayesGenerate(int(sys.argv[1]),sys.argv[2])
if __name__ == "__main__":
main()
| apache-2.0 | 4,764,295,083,545,138,000 | 30.727273 | 75 | 0.597708 | false |
h3llrais3r/Auto-Subliminal | tests/server/api/test_api_movies.py | 1 | 4716 | # coding=utf-8
from autosubliminal.core.movie import MovieDetails, MovieSettings
from autosubliminal.core.subtitle import Subtitle, EXTERNAL
from autosubliminal.db import FailedMoviesDb, MovieDetailsDb, MovieSettingsDb
from autosubliminal.server.api.movies import MoviesApi
from tests.server.api.test_api import pickle_api_result
movie_details_1 = MovieDetails(path='/path/to/movie1/movie1.mkv', imdb_id='tt1', title='title1', year=2018,
overview='overview1', poster='poster1.jpg', missing_languages=['en'],
subtitles=[Subtitle(type=EXTERNAL, language='nl', path='/path/to/movie1/subtitle1.srt')])
movie_details_2 = MovieDetails(path='/path/to/movie2/movie2.mkv', imdb_id='tt2', title='title2', year=2019,
overview='overview2', poster='poster2.jpg', missing_languages=['nl', 'en'], subtitles=[])
movie_settings_1 = MovieSettings(imdb_id='tt1', wanted_languages=['en', 'nl'], refine=True, hearing_impaired=False,
utf8_encoding=True)
movie_settings_2 = MovieSettings(imdb_id='tt2', wanted_languages=['en', 'nl'], refine=True, hearing_impaired=False,
utf8_encoding=True)
movies_json = '[{"imdb_id": "tt1", ' \
'"overview": "overview1", "path": "/path/to/movie1", "poster": true, "settings": ' \
'{"hearing_impaired": false, "refine": true, "utf8_encoding": true, "wanted_languages": ["en", "nl"]}, ' \
'"title": "title1", ' \
'"total_subtitles_available": 1, "total_subtitles_missing": 1, "total_subtitles_wanted": 2, ' \
'"year": 2018}, ' \
'{"imdb_id": "tt2", ' \
'"overview": "overview2", "path": "/path/to/movie2", "poster": true, "settings": ' \
'{"hearing_impaired": false, "refine": true, "utf8_encoding": true, "wanted_languages": ["en", "nl"]}, ' \
'"title": "title2", ' \
'"total_subtitles_available": 0, "total_subtitles_missing": 2, "total_subtitles_wanted": 2, ' \
'"year": 2019}]'
movie_1_json = '{"files": [{"embedded_languages": [], "filename": "movie1.mkv", "hardcoded_languages": [], ' \
'"type": "video"}, {"filename": "subtitle1.srt", "language": "nl", "type": "subtitle"}], ' \
'"imdb_id": "tt1", "overview": "overview1", "path": "/path/to/movie1", "poster": true, ' \
'"settings": {"hearing_impaired": false, "refine": true, "utf8_encoding": true, ' \
'"wanted_languages": ["en", "nl"]}, ' \
'"title": "title1", "total_subtitles_available": 1, "total_subtitles_missing": 1, ' \
'"total_subtitles_wanted": 2, "year": 2018}'
movie_settings_1_json = '{"hearing_impaired": false, "refine": true, "utf8_encoding": true, ' \
'"wanted_languages": ["en", "nl"]}'
def test_get_movies(mocker):
mocker.patch.object(MovieDetailsDb, 'get_all_movies', return_value=[movie_details_1, movie_details_2])
mocker.patch.object(MovieSettingsDb, 'get_movie_settings', side_effect=[movie_settings_1, movie_settings_2])
assert movies_json == pickle_api_result(MoviesApi().get())
def test_get_movie(mocker):
mocker.patch.object(MovieDetailsDb, 'get_movie', return_value=movie_details_1)
mocker.patch('os.path.exists', return_value=True)
mocker.patch('autosubliminal.server.api.movies.MoviesApi._get_movie_files',
return_value=[
{'filename': 'movie1.mkv', 'type': 'video', 'embedded_languages': [], 'hardcoded_languages': []},
{'filename': 'subtitle1.srt', 'type': 'subtitle', 'language': 'nl'}])
mocker.patch.object(MovieSettingsDb, 'get_movie_settings', return_value=movie_settings_1)
assert movie_1_json == pickle_api_result(MoviesApi().get('tt1'))
def test_get_movie_settings(mocker):
mocker.patch.object(MovieSettingsDb, 'get_movie_settings', return_value=movie_settings_1)
assert movie_settings_1_json == pickle_api_result(MoviesApi().settings.get('tt1'))
def test_get_movies_overview(mocker):
mocker.patch.object(FailedMoviesDb, 'get_failed_movies', return_value=['/path/to/failed/movie'])
mocker.patch.object(MovieDetailsDb, 'get_all_movies', return_value=[movie_details_1])
mocker.patch.object(MovieSettingsDb, 'get_movie_settings', side_effect=[movie_settings_1, movie_settings_2])
overview_json = '{"failed_movies": ["/path/to/failed/movie"], "total_movies": 1, "total_subtitles_available": 1, ' \
'"total_subtitles_missing": 1, "total_subtitles_wanted": 2}'
assert overview_json == pickle_api_result(MoviesApi().overview.get())
| gpl-3.0 | 8,911,283,726,948,796,000 | 61.052632 | 120 | 0.616412 | false |
Cosiroc/bleau-database | Triangulation/test-triangulation.py | 2 | 5970 | ####################################################################################################
#
# Bleau Database - A database of the bouldering area of Fontainebleau
# Copyright (C) Salvaire Fabrice 2016
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
####################################################################################################
####################################################################################################
from Triangulation.Vector import Vector
from Triangulation import TriangulationGeometry, TriangulationForce
####################################################################################################
geometry = TriangulationGeometry(anchor_distance=50,
anchor_angle=30,
webbing_length=150,
webbing_ratio=.45)
triangulation = TriangulationForce(geometry=geometry,
weight=100,
deviation=0)
anchor1 = geometry.anchor1
anchor2 = geometry.anchor2
node_point = geometry.node_point
weight_force = triangulation.weight_force
force1 = triangulation.force1
force2 = triangulation.force2
orientation1 = force1.orientation()
orientation2 = force2.orientation()
print("Anchor2 : {} {}".format(anchor2.x, anchor2.y))
print("Node : {} {}".format(node_point.x, node_point.y))
print("Weight : {} {}".format(weight_force.x, weight_force.y))
print("Force1 : {} {}".format(force1.x, force1.y))
print("Force2 : {} {}".format(force2.x, force2.y))
force_point = node_point + force1
weight_point = node_point + weight_force
####################################################################################################
import numpy as np
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
figure, axes = plt.subplots()
points = np.array((anchor1, anchor2,
node_point, force_point, weight_point))
x_min = np.min(points[:,0])
x_max = np.max(points[:,0])
y_min = np.min(points[:,1])
y_max = np.max(points[:,1])
x_margin = (x_max - x_min) * .1
y_margin = (y_max - y_min) * .1
x_min = x_min - x_margin
x_max = x_max + x_margin
y_min = y_min - y_margin
y_max = y_max + y_margin
axes.axis('equal')
axes.set_xlim(x_min, x_max)
axes.set_ylim(y_min, y_max)
wedge1 = mpatches.Wedge(node_point, triangulation.weight, -180, orientation2, color='red', alpha=.1)
wedge2 = mpatches.Wedge(node_point, triangulation.weight, orientation1, 0, color='red', alpha=.1)
wedge3 = mpatches.Wedge(node_point, triangulation.weight, orientation2, orientation1, color='green', alpha=.1)
for wedge in wedge1, wedge2, wedge3:
axes.add_patch(wedge)
for point in anchor1, node_point: # , force_point, weight_point
axes.axvline(point.x)
axes.axhline(point.y)
line = mlines.Line2D((force_point.x, force_point.x),
(node_point.y, weight_point.y))
axes.add_line(line)
# line = mlines.Line2D((weight_point.x, weight_point.x),
# (node_point.y, weight_point.y))
# axes.add_line(line)
# line = mlines.Line2D((node_point.x, force_point.x),
# (force_point.y, force_point.y))
# axes.add_line(line)
line = mlines.Line2D((node_point.x, force_point.x),
(weight_point.y, weight_point.y))
axes.add_line(line)
# Draw force 1
force_line = mlines.Line2D(np.array((node_point.x, force_point.x)),
np.array((node_point.y, force_point.y)),
color='orange', linewidth=2)
axes.add_line(force_line)
# Draw force 2
force_line = mlines.Line2D(np.array((force_point.x, weight_point.x)),
np.array((force_point.y, weight_point.y)),
color='magenta', linewidth=2)
axes.add_line(force_line)
# Draw weight
weight_line = mlines.Line2D(np.array((node_point.x, weight_point.x)),
np.array((node_point.y, weight_point.y)),
color='red', linewidth=3)
axes.add_line(weight_line)
# Draw webbing
geometry_line = mlines.Line2D((0, anchor2.x),
(0, anchor2.y),
color='black')
axes.add_line(geometry_line)
geometry_line = mlines.Line2D((0, node_point.x, anchor2.x),
(0, node_point.y, anchor2.y),
color='black', marker='o', linewidth=3)
axes.add_line(geometry_line)
plt.annotate('P1', xy=anchor1, xytext=anchor1 + Vector.from_polar_coordinate(135, 5), horizontalalignment='right')
plt.annotate('P2', xy=anchor2, xytext=anchor2 + Vector.from_polar_coordinate(45, 5))
plt.annotate('N', xy=node_point, xytext=node_point + Vector.from_polar_coordinate(45, 5))
Tp = (node_point + weight_point + weight_point + Vector(-weight_force.x, 0)) / 3
T1 = (node_point + force_point + force_point + Vector(0, -force1.y)) / 3
T2 = (weight_point + force_point + force_point + Vector(0, force2.y)) / 3
Tf = (node_point + force_point + weight_point) / 3
plt.annotate('Tp', xy=node_point, xytext=Tp, horizontalalignment='center')
plt.annotate('T1', xy=node_point, xytext=T1, horizontalalignment='center')
plt.annotate('T2', xy=node_point, xytext=T2, horizontalalignment='center')
plt.annotate('Tf', xy=node_point, xytext=Tf, horizontalalignment='center')
plt.show()
| agpl-3.0 | -3,093,722,702,365,640,000 | 39.337838 | 114 | 0.595477 | false |
saevarom/django-startappextracontext | startappextracontext/management/commands/startappextra.py | 1 | 2456 | from django.core.management.base import CommandError
from django.core.management.templates import TemplateCommand
from django.utils.importlib import import_module
from optparse import Option
# http://djangosnippets.org/snippets/1617/
class DictOption(Option):
"""
A parseopt option that let's me define a dictionary through
the commandline.
optparse example:
parser.add_option(DictOption("-p","--passwords",dest="passwords",type="string",action="dic"))
Commandline usage:
--passwords=[localhost]value,[slicehost]whatever
Commandline, if spaces are needed:
--passwords="[localhost]my Password,[slicehost]Anot erPassword"
This would be defined in the final options dictionary as another dictionary:
example 1: { 'passwords':{'localhost':'value' } }
example 2: { 'passwords':{'localhost':'my Password', 'slicehost':'Anot erPassword' } }
"""
ACTIONS = Option.ACTIONS + ("dic",)
STORE_ACTIONS = Option.STORE_ACTIONS + ("dic",)
TYPED_ACTIONS = Option.TYPED_ACTIONS + ("dic",)
ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("dic",)
def take_action(self,action,dest,opt,value,values,parser):
if action=="dic":
vals=value.split(",")
d={}
for val in vals:
p=val.split("]")
k=p[0][1:]
v=p[1]
d[k]=v
setattr(values,dest,d)
else: Option.take_action(self, action, dest, opt, value, values, parser)
class Command(TemplateCommand):
help = ("Creates a Django app directory structure for the given app "
"name in the current directory or optionally in the given "
"directory. Additionally takes extra context and passes it to"
"the app template.")
option_list = TemplateCommand.option_list + (
DictOption('--extra-context',
dest='extra_context', help='Extra context in dictionary form. Example:'
' --extra-context=[key]value,[key2]value2'
'Use double quotes around argument if spaces are needed.',
type='string', action='dic'),
)
def handle(self, app_name=None, target=None, **options):
extra_context = options.pop('extra_context', None)
        if extra_context is not None:
options.update(extra_context)
super(Command, self).handle('app', app_name, target, **options)
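# Illustrative command-line usage (not part of the original code); the app name
# and context keys are hypothetical:
#
#     python manage.py startappextra myapp --extra-context=[author]Jane,[license]MIT
#
# The DictOption turns the argument into {'author': 'Jane', 'license': 'MIT'},
# which handle() merges into the options passed to the app template.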
| mit | -6,838,444,731,547,849,000 | 37.984127 | 97 | 0.622964 | false |
lamazavr/gpio | wh1602.py | 1 | 1351 | from gpio import Gpio
from time import sleep
class Wh1602:
def __init__(self):
self.reserve_gpios()
self.rw.set_value(0)
sleep(0.05)
def __del__(self):
pass
def reserve_gpios(self):
self.rs = Gpio(2, "out")
self.rw = Gpio(3, "out")
self.e = Gpio(4, "out")
self.d = [Gpio(17, "out"), Gpio(27, "out"),
Gpio(22, "out"), Gpio(23, "out")]
def lcd_write_nibble(self, val):
for i, p in enumerate(self.d):
p.set_value(0 if (val & 1 << i) == 0 else 1)
self.e.set_value(1)
sleep(0.02)
self.e.set_value(0)
def lcd_write_data(self, data):
self.lcd_write_nibble(data >> 4)
self.lcd_write_nibble(data & 0xF)
def init_lcd(self):
self.rs.set_value(0)
sleep(0.2)
self.lcd_write_nibble(0x03)
sleep(0.05)
self.lcd_write_nibble(0x03)
sleep(0.05)
self.lcd_write_nibble(0x02)
sleep(0.02)
self.lcd_write_data(0x08)
sleep(0.02)
self.lcd_write_data(0x01)
sleep(0.02)
self.lcd_write_data(0x06)
sleep(0.02)
self.lcd_write_data(0x0D)
sleep(0.02)
self.rs.set_value(1)
    def lcd_write_string(self, text):
        for char in text:
            self.lcd_write_data(ord(char))
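# Illustrative usage sketch (not part of the original code); assumes the GPIO
# wiring from reserve_gpios() and an HD44780-compatible 16x2 display:
#
#     lcd = Wh1602()
#     lcd.init_lcd()
#     lcd.lcd_write_string("Hello")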
| mit | 1,486,276,355,248,256,500 | 21.147541 | 56 | 0.511473 | false |
wgwoods/fedup2 | fedup2/plymouth.py | 1 | 1182 | from subprocess import call
__all__ = [
'PlymouthOutput','message','progress','set_mode','ping'
]
PLYMOUTH = '/usr/bin/plymouth'
def message(msg):
return call([PLYMOUTH, "display-message", "--text", msg]) == 0
def progress(percent):
return call([PLYMOUTH, "system-update", "--progress", str(percent)]) == 0
def set_mode(mode):
return call([PLYMOUTH, "change-mode", "--"+mode]) == 0
def ping():
return call([PLYMOUTH, "--ping"]) == 0
class _PlymouthOutput(object):
def __init__(self):
self.msg = ""
self.mode = ""
self.percent = -1
self.alive = ping()
def ping(self):
self.alive = ping()
return self.alive
def message(self, msg):
if msg != self.msg:
self.alive = message(msg)
self.msg = msg
def set_mode(self, mode):
if mode != self.mode:
self.alive = set_mode(mode)
self.mode = mode
def progress(self, percent):
if percent != self.percent:
self.alive = progress(percent)
self.percent = percent
_PlymouthSingleton = _PlymouthOutput()
def PlymouthOutput():
return _PlymouthSingleton
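# Illustrative usage sketch (not part of the original module):
#
#     plymouth = PlymouthOutput()
#     if plymouth.ping():
#         plymouth.progress(42)
#         plymouth.message("Upgrading packages...")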
| gpl-2.0 | -5,460,564,796,527,295,000 | 22.64 | 77 | 0.573604 | false |
iris-edu/ispaq | ispaq/crossCorrelation_metrics.py | 1 | 15971 | """
ISPAQ Business Logic for Cross-Correlation Metrics.
:copyright:
Mazama Science
:license:
GNU Lesser General Public License, Version 3
(http://www.gnu.org/copyleft/lesser.html)
"""
# output is polarity_check metric
from __future__ import (absolute_import, division, print_function)
import math
import numpy as np
import pandas as pd
import obspy
from obspy import UTCDateTime
from obspy import geodetics
from obspy import taup
from obspy.taup import TauPyModel
model = TauPyModel(model="iasp91")
from .concierge import NoAvailableDataError
from . import utils
from . import irisseismic
from . import irismustangmetrics
def crossCorrelation_metrics(concierge):
"""
Generate *crossCorrelation* metrics.
:type concierge: :class:`~ispaq.concierge.Concierge`
    :param concierge: Data access expediter.
:rtype: pandas dataframe
    :return: Dataframe of crossCorrelation (polarity_check) metrics.
.. rubric:: Example
TODO: doctest examples
"""
# Get the logger from the concierge
logger = concierge.logger
# Default parameters from IRISMustangUtils::generateMetrics_crossCorrelation or crossCorrelationMetrics_exec.R
channelFilter = "BH[0-9ENZRT]|CH[0-9ENZRT]|DH[0-9ENZRT]|FH[0-9ENZRT]|HH[0-9ENZRT]|LH[0-9ENZRT]|MH[0-9ENZRT]|BX[12Z]|HX[12Z]"
logger.debug("channelFilter %s" % channelFilter)
minmag = 6.5
eventMinradius = 15
eventMaxradius = 90
snclMinradius = 0
snclMaxradius = 15
windowSecs = 600
maxLagSecs = 10
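    # maxLagSecs bounds the lag search in the cross-correlation metric call made below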
# Sanity check for metadata
if concierge.station_url is None:
logger.warning('No station metadata found for crossCorrelation metrics')
return None
# Get the seismic events in this time period
events = concierge.get_event(minmag=minmag)
# Sanity check
if events is None or events.shape[0] == 0:
logger.info('No events found for crossCorrelation metrics.')
return None
# Container for all of the metrics dataframes generated
dataframes = []
#############################################################
## Loop through each event.
#############################################################
logger.info('Calculating crossCorrelation metrics for %d events' % events.shape[0])
for (index, event) in events.iterrows():
logger.info('%03d Magnitude %3.1f event: %s %s' % (int(index), event.magnitude, event.eventLocationName, event.time.strftime("%Y-%m-%dT%H:%M:%S")))
# Sanity check
if pd.isnull(event.latitude) or pd.isnull(event.longitude):
logger.info('Skipping event because of missing longitude or latitude')
continue
# Sanity check
if pd.isnull(event.depth):
logger.info('Skipping event because of missing depth')
continue
# Get the data availability around this event
# NOTE: Get availability from 2 minutes before event until 28 minutes after
# Get the data availability using spatial search parameters
halfHourStart = event.time - 60 * 2
halfHourEnd = event.time + 60 * 28
logger.debug("Looking for metadata from %s to %s" % (halfHourStart,halfHourEnd))
try:
availability = concierge.get_availability(starttime=halfHourStart, endtime=halfHourEnd,
longitude=event.longitude, latitude=event.latitude,
minradius=eventMinradius, maxradius=eventMaxradius)
except NoAvailableDataError as e:
logger.info('Skipping event with no available data')
continue
except Exception as e:
logger.warning('Skipping event %s %s because concierge.get_availability failed: %s' % (event.magnitude, event.eventLocationName, e))
continue
if availability is None:
logger.info("Skipping event with no available data")
continue
# Apply the channelFilter
availability = availability[availability.channel.str.contains(channelFilter)]
# ----- All available SNCLs -------------------------------------------------
# function metadata dictionary
function_metadata = concierge.function_by_logic['crossCorrelation']
# Loop over rows of the availability dataframe
for (index, av1) in availability.iterrows():
if math.isnan(av1.latitude) or math.isnan(av1.longitude):
logger.info("No metadata for " + av1.snclId + ": skipping")
continue
snclId = av1.snclId
logger.debug('Working on %s' % (snclId))
# Get data in a window centered on the event's arrival at station #1
dist = obspy.geodetics.base.locations2degrees(event.latitude, event.longitude, av1.latitude, av1.longitude)
arrivals = model.get_travel_times(source_depth_in_km=event.depth,distance_in_degree=dist)
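            # Center the window on the earliest predicted arrival (first phase)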
            tt = min(arrivals, key=lambda x: x.time).time
windowStart = event.time + tt - windowSecs/2.0
windowEnd = event.time + tt + windowSecs/2.0
logger.debug("Looking for data for %s from %s to %s" % (av1.snclId, windowStart, windowEnd))
try:
r_stream1 = concierge.get_dataselect(av1.network, av1.station, av1.location, av1.channel, windowStart, windowEnd)
except Exception as e:
if str(e).lower().find('no data') > -1:
logger.info('No data available for %s' % (av1.snclId))
                elif str(e).lower().find('multiple epochs') > -1:
logger.info('Skipping %s because multiple metadata epochs found' % (av1.snclId))
else:
logger.warning('No data available for %s from %s: %s' % (av1.snclId, concierge.dataselect_url, e))
continue
# No metric calculation possible if SNCL has more than one trace
if len(utils.get_slot(r_stream1, 'traces')) > 1 :
logger.info('Skipping %s because it has gaps' % (av1.snclId))
continue
# If metadata indicates reversed polarity (dip>0), invert the amplitudes
if av1.channel[2] == 'Z' and av1.dip > 0:
r_stream1 = irisseismic.multiplyBy(r_stream1, -1.0)
# ----- Now query again to find ANY SNCL near the SNCL of interest ---------
# Create the regex for channel matching - must be same channel type
sncl1ch1 = snclId.split('.')[-1][0]
sncl1ch2 = snclId.split('.')[-1][1]
channelString = "%s%s?" % (sncl1ch1,sncl1ch2)
logger.debug("Looking for metadata for %s to %s within radius %s-%s degrees" % (halfHourStart, halfHourEnd, snclMinradius, snclMaxradius))
# Get the data availability using spatial search parameters
try:
availability2 = concierge.get_availability(network='*', station='*', location='*', channel=channelString,
starttime=halfHourStart, endtime=halfHourEnd,
longitude=av1.longitude, latitude=av1.latitude,
minradius=snclMinradius, maxradius=snclMaxradius)
except Exception as e:
logger.warning('Skipping %s because get_availability failed for nearby stations: %s' % (av1.snclId, e))
continue
if availability2 is None:
logger.info("Skipping %s with no available stations" % (av1.snclId))
continue
# Sanity check that some SNCLs exist
if availability2.shape[0] == 0:
logger.info('Skipping %s with no available stations' % (av1.snclId))
continue
# Not this station
stationMask = availability2.station != av1.station
availability2 = availability2[stationMask].reset_index()
logger.debug('Found %d nearby SNCLs' % (availability2.shape[0]))
# Create masks to find any other SNCLs against which we want to cross-correlate
# We only want to include those sncls that have sample rate information
            metaMask = ~availability2.samplerate.isnull().values
availability2 = availability2[metaMask].reset_index()
# Sample rate compatibility, sample rates must be multiples of each other (assumes sample rate >= 1, pracma::rem requires integer values)
# FutureWarning: in the future, np.full(3, 40) will return an array of dtype('int64')
a = availability2.samplerate.apply(lambda x: int(x))
b = pd.Series(np.full(len(a),int(av1.samplerate)))
sampleRateMask = (a >= np.ones(len(a))) & ( (a % b == 0) | (b % a == 0) )
# Channel compatibility
if av1.channel[2] == 'Z':
# For Z channels, any matching channel is compatible
channelMask = availability2.channel == av1.channel
else:
# For horizontal channels, find all non-Z channel with an azimuth within 5 degrees of av1
ch = av1.channel[0:2]
chMask = availability2.channel.str.contains(ch)
                nonZMask = ~availability2.channel.str.contains('Z')
azimuthAngle = abs(av1.azimuth - availability2.azimuth) * math.pi/180.0
maxAzimuthAngle = 5.0 * math.pi/180.0
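                # Comparing cosines keeps the 5-degree test valid across the 0/360 wrap-around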
azimuthMask = azimuthAngle.apply(math.cos) >= math.cos(maxAzimuthAngle)
channelMask = chMask & nonZMask & azimuthMask
# Bitwise AND to get the final mask
mask = channelMask & sampleRateMask
if not any(mask):
logger.info('Skipping %s with no compatible stations' % (av1.snclId))
continue
else:
avCompatible = availability2[mask].reset_index(drop=True)
# To find the closest SNCL -- order rows by distance and take the first row
#avCompatible['dist'] = pd.Series(irisseismic.surfaceDistance(av1.latitude, av1.longitude, avCompatible.latitude, avCompatible.longitude))
dist2 = pd.Series()
for i in range(0,avCompatible.shape[0]):
dist2.set_value(i,value=obspy.geodetics.base.locations2degrees(av1.latitude, av1.longitude,avCompatible.latitude.iloc[i],avCompatible.longitude.iloc[i]))
avCompatible['dist'] = dist2
avCompatible = avCompatible.sort_values('dist', ascending=True)
# ----- Compatible SNCLs found. Find the closest one with data ------------
for (index2, av2) in avCompatible.iterrows():
if math.isnan(av2.latitude) or math.isnan(av2.longitude):
logger.debug("No metadata for " + av2.snclId + ": skipping")
continue
lastsncl = avCompatible.snclId[-1:].to_string(index=False)
testx = 0
r_stream2 = None
# Get data in a window centered on the event's arrival at station #2
try:
tt = irisseismic.getTraveltime(event.latitude, event.longitude, event.depth,
av2.latitude, av2.longitude)
except Exception as e:
logger.warning('Skipping %s:%s because getTravelTime failed: %s' % (av1.snclId, av2.snclId, e))
                    if av2.snclId == lastsncl:
testx = 1
continue
windowStart2 = event.time + min(tt.travelTime) - windowSecs/2.0
windowEnd2 = event.time + min(tt.travelTime) + windowSecs/2.0
logger.debug("Looking for near neighbor station %s from %s to %s" % (av2.snclId, windowStart, windowEnd))
try:
r_stream2 = concierge.get_dataselect(av2.network, av2.station, av2.location, av2.channel, windowStart2, windowEnd2)
except Exception as e:
if str(e).lower().find('no data') > -1:
logger.debug('No data available for %s' % (av2.snclId))
                    elif str(e).lower().find('multiple epochs') > -1:
logger.info('Skipping %s because multiple metadata epochs are found' % (av2.snclId))
else:
logger.warning('No data available for %s from %s: %s' % (av2.snclId, concierge.dataselect_url, e))
                    if av2.snclId == lastsncl:
testx = 1
continue
# Check for actual sample rate compatibility
sampler1 = utils.get_slot(r_stream1,'sampling_rate')
sampler2 = utils.get_slot(r_stream2,'sampling_rate')
if sampler1 >= 1 and sampler2 >= 1:
sr1 = int(round(sampler1,1))
sr2 = int(round(sampler2,1))
if (sr1 % sr2 != 0 ) and (sr2 % sr1 != 0):
logger.debug('Skipping %s:%s because actual sample rates are not compatible, %s:%s' % (av1.snclId, av2.snclId, sr1, sr2))
if av2.snclId == lastsncl:
testx = 1
continue
# NOTE: This check is missing from IRISMustangUtils/R/generateMetrics_crossCorrelation.R
# No metric calculation possible if SNCL has more than one trace
if len(utils.get_slot(r_stream2, 'traces')) > 1:
logger.debug('Skipping %s because it has gaps' % (av2.snclId))
                    if av2.snclId == lastsncl:
testx = 1
continue
else:
# Found everything we need so end the loop
break
# ----- Second SNCL found. Now on to calculate cross-correlation ----------
# if last avCompatible snclid doesn't pass checks it will end up here.
if testx == 1:
logger.info('Skipping %s because no compatible stations found' % (av1.snclId))
continue
# Calculate the cross-correlation metrics and append them to the list
            if r_stream2 is not None:
logger.info('%03d Calculating polarityCheck metrics for %s:%s' % (index, av1.snclId, av2.snclId))
try:
df = irismustangmetrics.apply_correlation_metric(r_stream1, r_stream2, 'crossCorrelation', maxLagSecs)
dataframes.append(df)
except Exception as e:
logger.warning('"polarityCheck" metric calculation failed for %s:%s: %s' % (av1.snclId, av2.snclId, e))
# END of SNCL loop
# END of event loop
# Concatenate and filter dataframes before returning -----------------------
# Create a boolean mask for filtering the dataframe
def valid_metric(x):
return x in concierge.metric_names
if len(dataframes) == 0:
logger.warning('"cross_correlation" metric calculation generated zero metrics')
return None
else:
result = pd.concat(dataframes, ignore_index=True)
mask = result.metricName.apply(valid_metric)
result = result[(mask)]
result.reset_index(drop=True, inplace=True)
return(result)
# ------------------------------------------------------------------------------
if __name__ == '__main__':
import doctest
doctest.testmod(exclude_empty=True)
| lgpl-3.0 | -6,340,471,296,524,402,000 | 43.736695 | 173 | 0.568906 | false |
cloud-engineering/xfc-email-notifier | config.py | 1 | 1460 | class launcher(object):
LOOP_INTERVAL = 10.00 # (seconds)
# Controls how often the Launcher checks for terminated sub-processes
# to restart them.
RUN_DELAY = 1.00 # (seconds)
# A delay inserted into the program after a sub-process has been
# started. This prevents servers and modules from starting at the same
# time.
MODULES = [
#{'args': ['./data_acquisition.py'], 'skip': False},
{'args': ['./main.py'], 'skip': False}]
# A list of module configurations. The Launcher will start each of the
# listed modules as a separate sub-process. `MODULE` directive supports
class general(object):
MAIN_LOOP_TIME = 5.00 # (seconds)
# configures how often the mail check service looks for new emails.
LOG_LEVEL = 20
# Messages with log level lower than the configured level will be
# suppressed. The relevant log levels are:
# 50 - CRITICAL/FATAL
# 40 - ERROR
# 30 - WARNING
# 20 - INFO
# 10 - DEBUG
class db(object):
''' Configures the database interface (SQLAlchemy). '''
DEBUG = False
# Enables/disables the database debugging messages. Should not be left
# enabled unless required as it slows down the database interface.
PATH = 'db.sqlite'
# Name of the SQLite database file to use, relative to the application
# directory.
| mit | 6,143,968,750,684,580,000 | 36.435897 | 79 | 0.615068 | false |
Hardtack/TypeConverter | typeconverter.py | 1 | 3756 | # -*- encoding: utf-8 -*-
""":mod:`typeconverter`
~~~~~~~~~~~~~~~~~~~~~~~~
Converts object into specified type.
"""
from __future__ import unicode_literals
from functools import wraps
__version__ = '0.1.0'
__license__ = 'MIT License'
__author__ = 'Geonu Choi'
__email__ = '[email protected]'
class Handler(object):
"""Converting handler base.
"""
def __init__(self, fn, domain=()):
super(Handler, self).__init__()
self.fn = fn
self.domain = domain
self.handlable = self.default_handlable
def matching_type(self, obj):
"""Returns matching type in `domain` if exists, or :const:`None`
:rtype: type
"""
for t in self.domain:
if isinstance(obj, t):
return t
return None
def default_handlable(self, obj):
"""Default handlability checker. Just check type of instance.
:rtype: bool
"""
if self.matching_type(obj) is None:
return False
return True
def __call__(self, obj):
return self.fn(obj)
def check_handlable(self, fn):
"""Decorator for function that indicates the handler can handle object.
"""
self.handlable = fn
return fn
def can_handle(self, obj):
return self.handlable(obj)
def _default_handler(obj):
    """Default convert handler.
    It just raises :class:`TypeError`
    """
    raise TypeError('Cannot convert object of {0}'.format(type(obj)))
class Converter(object):
"""Converts object into specified types."""
def __init__(self, range):
super(Converter, self).__init__()
if isinstance(range, type):
range = [range]
self.range = range
self.handlers = []
self.default_handler = _default_handler
def assert_type(self, obj):
"""Asserts if type of `obj` is in range of the converter."""
for t in self.range:
if isinstance(obj, t):
return
assert False, "{0!r} is not in range".format(obj)
def inrange(self, obj):
"""Checks if `obj` is in range of the conveter.
:rtype: bool
"""
try:
self.assert_type(obj)
except AssertionError:
return False
return True
def add_handler(self, handler):
self.handlers.append(handler)
def handle(self, *types):
"""Decorator for function that converts type.
"""
def decorator(fn):
handler = Handler(fn, types)
wraps(fn)(handler)
self.add_handler(handler)
return handler
return decorator
def default(self, fn):
"""Decorator that changes default handler."""
self.default_handler = fn
return fn
def find_handler(self, obj):
"""Finds best matching handler.
        Returns the handler whose matching type is the most derived (deepest)
        subclass in the class hierarchy.
"""
candidates = [handler for handler in
self.handlers if
handler.can_handle(obj)]
if not candidates:
return None
best = candidates.pop(0)
while candidates:
handler = candidates.pop(0)
best_type = best.matching_type(obj)
t = handler.matching_type(obj)
if issubclass(t, best_type):
best = handler
return best
def convert(self, obj):
"""Convert `obj` until it is in `range`."""
while not self.inrange(obj):
handler = self.find_handler(obj)
if handler is None:
handler = self.default_handler
obj = handler(obj)
return obj
| mit | 3,227,461,768,443,383,300 | 24.208054 | 79 | 0.552183 | false |
ghoshbishakh/filedrop | tests/tcpchat.py | 1 | 1725 | #! /usr/bin/python
import sys,socket
from threading import Thread
PORT=50607
USAGE="\n \t usage: tcpchat.py server|client <ip address of server>"
if len(sys.argv)==1:
print USAGE
else:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def ClientRecv(sock):
while True:
data = sock.recv(1024)
if not data: sys.exit(0)
if str(data)=="stop":
sys.exit(0)
print data, "\n"
def ClientSend(sock):
while 1:
message = raw_input(">>>")
str(message)
sock.sendall(message)
print "\n \t Welcome to TCP chat"
if sys.argv[1]=="server":
if len(sys.argv)<3:
print "\n \t Please specify your IP address"
print USAGE
else:
HOST=sys.argv[2]
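            # SO_REUSEADDR lets the server rebind the port right after a restart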
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((HOST, PORT))
s.listen(1)
while True:
SocketAddr=s.getsockname()
print "listening at ", SocketAddr
client, ClientAddr = s.accept()
print "\n Accepted connection from", ClientAddr
print "\n Connected is establishde between ", client.getsockname(), "and", client.getpeername()
message = client.recv(16)
print "Client's 16 bit message is", repr(message)
client.sendall("\nClosing Connection")
message = client.recv(16)
print "Client's 16 bit message is", repr(message)
client.close()
print "Socket Closed"
elif sys.argv[1]=="client":
if len(sys.argv)<3:
print "\n \t Please specify your IP address"
print USAGE
else:
HOST=sys.argv[2]
s.connect((HOST,PORT))
print "\n Connected"
ClientAddr=s.getsockname()
print "\nclient has been assigned the address ", ClientAddr
Thread(target=ClientRecv,args=(s,)).start()
ClientSend(s)
            # ClientSend() above never returns; note that threading.Thread has no stop() method
else:
print USAGE | gpl-2.0 | 1,582,692,535,708,819,500 | 23.657143 | 99 | 0.666667 | false |
tofler/toflerdb | toflerdb/apiserver/handlers/uploadfactshandler.py | 1 | 1296 | import json
import traceback
# from toflerdb.utils.common import Common
from toflerdb.core import api as gcc_api
from toflerdb.utils import exceptions
from basehandler import BaseHandler
class UploadFactsHandler(BaseHandler):
def post(self):
request_body = self.request.body
if request_body is None:
return []
response = []
        try:
            request_body = json.loads(request_body)
        except (ValueError, TypeError):
            print "Error processing request"
            # Invalid JSON: report the failure and stop instead of calling .get() on a raw string
            self.apiwrite('Error processing request', status=False)
            return
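        # The JSON body may carry: fact_tuples, file_text, author, ignore_duplicate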
fact_tuples = request_body.get('fact_tuples', None)
file_text = request_body.get('file_text', None)
ignore_duplicate = request_body.get('ignore_duplicate', True)
author = request_body.get('author', None)
try:
response = gcc_api.insert_facts(
fact_tuples=fact_tuples, file_text=file_text, author=author,
ignore_duplicate=ignore_duplicate)
self.apiwrite(response)
except exceptions.ToflerDBException, e:
print traceback.format_exc()
self.apiwrite(str(e), status=False)
except Exception, e:
print traceback.format_exc()
# Common.get_logger().error(str(e))
self.apiwrite('Something went wrong', status=False)
| agpl-3.0 | -4,751,077,594,835,138,000 | 35 | 76 | 0.618056 | false |