repo_name (stringlengths 5 to 92) | path (stringlengths 4 to 232) | copies (stringclasses, 19 values) | size (stringlengths 4 to 7) | content (stringlengths 721 to 1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51 to 99.9) | line_max (int64, 15 to 997) | alpha_frac (float64, 0.25 to 0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
rocktavious/DevToolsLib | DTL/maya/vertexColorUtils.py | 1 | 8982 | import os, sys, traceback
import maya.cmds as cmds
from functools import partial
#Needs refactoring
from ..utils.funcs import selection
from DTL.api import Safe
"""
#------------------------------------------------------------
def buildChannelMatrixFromUI():
'''Helper Function to build the channel matrix from the UI'''
channelMatrix = []
redMix = (cmds.floatField('CM_red_red',q=1,v=1),cmds.floatField('CM_red_green',q=1,v=1),cmds.floatField('CM_red_blue',q=1,v=1),cmds.floatField('CM_red_alpha',q=1,v=1))
greenMix = (cmds.floatField('CM_green_red',q=1,v=1),cmds.floatField('CM_green_green',q=1,v=1),cmds.floatField('CM_green_blue',q=1,v=1),cmds.floatField('CM_green_alpha',q=1,v=1))
blueMix = (cmds.floatField('CM_blue_red',q=1,v=1),cmds.floatField('CM_blue_green',q=1,v=1),cmds.floatField('CM_blue_blue',q=1,v=1),cmds.floatField('CM_blue_alpha',q=1,v=1))
alphaMix = (cmds.floatField('CM_alpha_red',q=1,v=1),cmds.floatField('CM_alpha_green',q=1,v=1),cmds.floatField('CM_alpha_blue',q=1,v=1),cmds.floatField('CM_alpha_alpha',q=1,v=1))
channelMatrix = [redMix,greenMix,blueMix,alphaMix]
return channelMatrix
"""
#------------------------------------------------------------
def vertColorAction(action='apply',rgba=[1,1,1,1],channelMatrix=[],blendMix=None,sel=None):
'''Wrapper Function to aid in vertex color actions - handles selection data for you to get around memory leak'''
#cmds.progressWindow( title='Coloring Verts',progress=0, status='Processing:',isInterruptable=False )
cmds.undoInfo(openChunk=True)
if sel == None :
sel = selection()
try:
for obj in sel.selection.keys():
vertDict = sel.selection[obj][5]
cmds.polyOptions(obj, cs=1, cm='none')
progressCount = 1
#Added the plus one so the progress dialog shown to the user never reaches full - it's a perception thing
#cmds.progressWindow(edit=True,max=len(vertDict.keys())+1)
for colorKey, vertFaceList in vertDict.items():
#cmds.progressWindow( edit=True, progress=progressCount, status=('Processing - ' + str(len(vertFaceList)) + ' - Vertex Faces'))
if action == 'apply':
vertexColorApply(vertFaceList,rgba[0],rgba[1],rgba[2],rgba[3])
if action == 'add':
vertexColorAdd(vertFaceList,colorKey,rgba[0],rgba[1],rgba[2])
if action == 'tint':
vertexColorTint(vertFaceList,colorKey,rgba[0],rgba[1],rgba[2],rgba[3])
if action == 'gamma':
vertexColorGamma(vertFaceList,colorKey,rgba[0],rgba[1],rgba[2])
if action == 'blend':
if blendMix == None:
blendMix = cmds.floatSliderGrp('blendMixSlider',q=1,v=1)
vertexColorBlend(vertFaceList,colorKey,rgba[0],rgba[1],rgba[2],blendMix)
if action == 'average':
vertexColorAvg(vertFaceList,colorKey,vertDict.keys())
if action == 'channel':
vertexColorChannelMix(vertFaceList,colorKey,channelMatrix)
if action == 'channelAlpha':
vertexColorChannelMixAlpha(vertFaceList,colorKey,channelMatrix)
progressCount = progressCount + 1
cmds.delete(obj,ch=1)
except Exception:
traceback.print_exc()
finally:
cmds.undoInfo(closeChunk=True)
#cmds.progressWindow(endProgress=1)
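#Example usage (hypothetical values; operates on the current Maya selection via selection()):
#vertColorAction(action='tint', rgba=[0.5, 0.5, 0.5, 1.0]) #darken the current vertex colors
#vertColorAction(action='blend', rgba=[1, 0, 0, 1], blendMix=0.25) #blend 25% toward red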
#------------------------------------------------------------
@Safe
def vertexColorApply(vertList=None, red=1, green=1, blue=1, alpha=1 ):
'''Straight Color/Alpha Apply'''
if vertList == None or vertList == []:
return
bufferSize = 2000
for begin in xrange(0, len(vertList), bufferSize):
vertBatch = vertList[begin: begin+bufferSize]
cmds.polyColorPerVertex(vertBatch, r=red, g=green, b=blue, a=alpha)
#------------------------------------------------------------
def vertexColorAdd(vertList=None, currentRGBA=None, red=0, green=0, blue=0 ):
'''Add New Color to Current Color - Alpha Excluded'''
if currentRGBA == None:
return
newR = currentRGBA[0] + red
newG = currentRGBA[1] + green
newB = currentRGBA[2] + blue
vertexColorApply(vertList,newR,newG,newB,currentRGBA[3])
#------------------------------------------------------------
def vertexColorTint(vertList=None, currentRGBA=None, red=1, green=1, blue=1, alpha=1 ):
'''Multiply New Color to Current Color - Alpha Included'''
if currentRGBA == None:
return
newR = currentRGBA[0]*red
newG = currentRGBA[1]*green
newB = currentRGBA[2]*blue
newA = currentRGBA[3]*alpha
vertexColorApply(vertList,newR,newG,newB,newA)
#------------------------------------------------------------
def vertexColorGamma(vertList=None, currentRGBA=None, red=2, green=2, blue=2 ):
'''Multiply New Color Exponentially to Current Color - Alpha Excluded'''
if currentRGBA == None:
return
newR = currentRGBA[0] ** red
newG = currentRGBA[1] ** green
newB = currentRGBA[2] ** blue
vertexColorApply(vertList,newR,newG,newB,currentRGBA[3])
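#Worked example (illustrative): a channel value of 0.5 raised to the default exponent of 2
#becomes 0.25, so exponents above 1 darken mid-tones and exponents below 1 brighten them.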
#------------------------------------------------------------
def vertexColorBlend(vertList=None, currentRGBA=None, red=1, green=1, blue=1, mix=0.5 ):
'''Blend New Color with Current Color - Alpha Excluded'''
if currentRGBA == None:
return
newR = currentRGBA[0]*(1-mix) + red*mix
newG = currentRGBA[1]*(1-mix) + green*mix
newB = currentRGBA[2]*(1-mix) + blue*mix
vertexColorApply(vertList,newR,newG,newB,currentRGBA[3])
#------------------------------------------------------------
def vertexColorAvg(vertList=None, currentRGBA=None, colorKeyList=None):
'''Average the Color of the vert list based on the entire obj - Alpha Excluded'''
if currentRGBA == None:
return
if colorKeyList == None:
return
vertColorAvg = [0,0,0]
for colorKey in colorKeyList:
vertColorAvg[0] += colorKey[0]
vertColorAvg[1] += colorKey[1]
vertColorAvg[2] += colorKey[2]
colorKeyCount = len(colorKeyList)
newR = vertColorAvg[0]/colorKeyCount
newG = vertColorAvg[1]/colorKeyCount
newB = vertColorAvg[2]/colorKeyCount
vertexColorApply(vertList,newR,newG,newB,currentRGBA[3])
#------------------------------------------------------------
def vertexColorChannelMix(vertList=None, currentRGBA=None, channelMatrix=[[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]):
'''Channel Mixes Current Color - Alpha Excluded'''
if currentRGBA == None:
return
try:
redMix, greenMix, blueMix, alphaMix = channelMatrix
except:
raise Exception("Unable to unpack channelMatrix")
if len(redMix) != 4:
raise Exception("Must pass a 4-tuple as redMix")
if len(greenMix) != 4:
raise Exception("Must pass a 4-tuple as greenMix")
if len(blueMix) != 4:
raise Exception("Must pass a 4-tuple as blueMix")
newR = currentRGBA[0]*redMix[0] + currentRGBA[1]*redMix[1] + currentRGBA[2]*redMix[2]
newG = currentRGBA[0]*greenMix[0] + currentRGBA[1]*greenMix[1] + currentRGBA[2]*greenMix[2]
newB = currentRGBA[0]*blueMix[0] + currentRGBA[1]*blueMix[1] + currentRGBA[2]*blueMix[2]
vertexColorApply(vertList,newR,newG,newB,currentRGBA[3])
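#Worked example (illustrative): with currentRGBA = (0.2, 0.4, 0.6, 1.0) and
#channelMatrix = [[0,1,0,0],[1,0,0,0],[0,0,1,0],[0,0,0,1]] the result is
#newR = 0.4, newG = 0.2, newB = 0.6, i.e. the red and green channels are swapped
#while blue and alpha are left untouched.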
#------------------------------------------------------------
def vertexColorChannelMixAlpha(vertList=None, currentRGBA=None, channelMatrix=[[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]] ):
'''Channel Mixes Current Color - Alpha Included'''
if currentRGBA == None:
return
try:
redMix, greenMix, blueMix, alphaMix = channelMatrix
except:
raise Exception("Unable to unpack channelMatrix")
if len(redMix) != 4:
raise Exception("Must pass a 4-tuple as redMix")
if len(greenMix) != 4:
raise Exception("Must pass a 4-tuple as greenMix")
if len(blueMix) != 4:
raise Exception("Must pass a 4-tuple as blueMix")
if len(alphaMix) != 4:
raise Exception("Must pass a 4-tuple as alphaMix")
newR = currentRGBA[0]*redMix[0] + currentRGBA[1]*redMix[1] + currentRGBA[2]*redMix[2] + currentRGBA[3]*redMix[3]
newG = currentRGBA[0]*greenMix[0] + currentRGBA[1]*greenMix[1] + currentRGBA[2]*greenMix[2] + currentRGBA[3]*greenMix[3]
newB = currentRGBA[0]*blueMix[0] + currentRGBA[1]*blueMix[1] + currentRGBA[2]*blueMix[2] + currentRGBA[3]*blueMix[3]
newA = currentRGBA[0]*alphaMix[0] + currentRGBA[1]*alphaMix[1] + currentRGBA[2]*alphaMix[2] + currentRGBA[3]*alphaMix[3]
vertexColorApply(vertList,newR,newG,newB,newA)
#------------------------------------------------------------
def toggleVertColor():
'''Util for toggling the vertex color per obj selected'''
sel = selection()
for obj in sel.selection.keys():
cmds.polyOptions(obj,cs=1-cmds.polyOptions(obj,q=1,cs=1)[0],cm='none') | mit | 5,149,193,529,652,118,000 | 42.606796 | 181 | 0.599978 | false |
MapofLife/MOL | earthengine/google-api-python-client/samples/oauth2/dailymotion/main.py | 1 | 3069 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = '[email protected] (Joe Gregorio)'
import httplib2
import logging
import os
import pickle
from oauth2client.appengine import CredentialsProperty
from oauth2client.appengine import StorageByKeyName
from oauth2client.client import OAuth2WebServerFlow
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp.util import login_required
FLOW = OAuth2WebServerFlow(
client_id='2ad565600216d25d9cde',
client_secret='03b56df2949a520be6049ff98b89813f17b467dc',
scope='read',
user_agent='oauth2client-sample/1.0',
auth_uri='https://api.dailymotion.com/oauth/authorize',
token_uri='https://api.dailymotion.com/oauth/token'
)
class Credentials(db.Model):
credentials = CredentialsProperty()
class MainHandler(webapp.RequestHandler):
@login_required
def get(self):
user = users.get_current_user()
credentials = StorageByKeyName(
Credentials, user.user_id(), 'credentials').get()
if credentials is None or credentials.invalid == True:
callback = self.request.relative_url('/auth_return')
authorize_url = FLOW.step1_get_authorize_url(callback)
memcache.set(user.user_id(), pickle.dumps(FLOW))
self.redirect(authorize_url)
else:
http = httplib2.Http()
http = credentials.authorize(http)
resp, content = http.request('https://api.dailymotion.com/me')
path = os.path.join(os.path.dirname(__file__), 'welcome.html')
logout = users.create_logout_url('/')
variables = {
'content': content,
'logout': logout
}
self.response.out.write(template.render(path, variables))
class OAuthHandler(webapp.RequestHandler):
@login_required
def get(self):
user = users.get_current_user()
flow = pickle.loads(memcache.get(user.user_id()))
if flow:
credentials = flow.step2_exchange(self.request.params)
StorageByKeyName(
Credentials, user.user_id(), 'credentials').put(credentials)
self.redirect("/")
else:
pass
def main():
application = webapp.WSGIApplication(
[
('/', MainHandler),
('/auth_return', OAuthHandler)
],
debug=True)
util.run_wsgi_app(application)
if __name__ == '__main__':
main()
| bsd-3-clause | -1,209,884,518,423,326,000 | 27.95283 | 74 | 0.706419 | false |
danakj/chromium | third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py | 1 | 11415 | # Copyright (C) 2010, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for printing.py."""
import StringIO
import optparse
import sys
import unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system import logtesting
from webkitpy.layout_tests import port
from webkitpy.layout_tests.controllers import manager
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.views import printing
def get_options(args):
print_options = printing.print_options()
option_parser = optparse.OptionParser(option_list=print_options)
return option_parser.parse_args(args)
class TestUtilityFunctions(unittest.TestCase):
def test_print_options(self):
options, _ = get_options([])
self.assertIsNotNone(options)
class FakeRunResults(object):
def __init__(self, total=1, expected=1, unexpected=0, fake_results=None):
fake_results = fake_results or []
self.total = total
self.expected = expected
self.expected_failures = 0
self.unexpected = unexpected
self.expected_skips = 0
self.results_by_name = {}
total_run_time = 0
for result in fake_results:
self.results_by_name[result.shard_name] = result
total_run_time += result.total_run_time
self.run_time = total_run_time + 1
class FakeShard(object):
def __init__(self, shard_name, total_run_time):
self.shard_name = shard_name
self.total_run_time = total_run_time
class Testprinter(unittest.TestCase):
def assertEmpty(self, stream):
self.assertFalse(stream.getvalue())
def assertNotEmpty(self, stream):
self.assertTrue(stream.getvalue())
def assertWritten(self, stream, contents):
self.assertEqual(stream.buflist, contents)
def reset(self, stream):
stream.buflist = []
stream.buf = ''
def get_printer(self, args=None):
args = args or []
printing_options = printing.print_options()
option_parser = optparse.OptionParser(option_list=printing_options)
options, args = option_parser.parse_args(args)
host = MockHost()
self._port = host.port_factory.get('test', options)
regular_output = StringIO.StringIO()
printer = printing.Printer(self._port, options, regular_output)
return printer, regular_output
def get_result(self, test_name, result_type=test_expectations.PASS, run_time=0):
failures = []
if result_type == test_expectations.TIMEOUT:
failures = [test_failures.FailureTimeout()]
elif result_type == test_expectations.CRASH:
failures = [test_failures.FailureCrash()]
return test_results.TestResult(test_name, failures=failures, test_run_time=run_time)
def test_configure_and_cleanup(self):
# This test verifies that calling cleanup repeatedly and deleting
# the object is safe.
printer, _ = self.get_printer()
printer.cleanup()
printer.cleanup()
printer = None
def test_print_config(self):
printer, err = self.get_printer()
# FIXME: it's lame that i have to set these options directly.
printer._options.pixel_tests = True
printer._options.new_baseline = True
printer._options.time_out_ms = 6000
printer._options.slow_time_out_ms = 12000
printer.print_config('/tmp')
self.assertIn("Using port 'test-mac-mac10.10'", err.getvalue())
self.assertIn('Test configuration: <mac10.10, x86, release>', err.getvalue())
self.assertIn('View the test results at file:///tmp', err.getvalue())
self.assertIn('View the archived results dashboard at file:///tmp', err.getvalue())
self.assertIn('Baseline search path: test-mac-mac10.10 -> test-mac-mac10.11 -> generic', err.getvalue())
self.assertIn('Using Release build', err.getvalue())
self.assertIn('Pixel tests enabled', err.getvalue())
self.assertIn('Command line:', err.getvalue())
self.assertIn('Regular timeout: ', err.getvalue())
self.reset(err)
printer._options.quiet = True
printer.print_config('/tmp')
self.assertNotIn('Baseline search path: test-mac-mac10.10 -> test-mac-mac10.11 -> generic', err.getvalue())
def test_print_directory_timings(self):
printer, err = self.get_printer()
printer._options.debug_rwt_logging = True
run_results = FakeRunResults()
run_results.results_by_name = {
"slowShard": FakeShard("slowShard", 16),
"borderlineShard": FakeShard("borderlineShard", 15),
"fastShard": FakeShard("fastShard", 1),
}
printer._print_directory_timings(run_results)
self.assertWritten(err, ['Time to process slowest subdirectories:\n',
' slowShard took 16.0 seconds to run 1 tests.\n', '\n'])
printer, err = self.get_printer()
printer._options.debug_rwt_logging = True
run_results.results_by_name = {
"borderlineShard": FakeShard("borderlineShard", 15),
"fastShard": FakeShard("fastShard", 1),
}
printer._print_directory_timings(run_results)
self.assertWritten(err, [])
def test_print_one_line_summary(self):
def run_test(total, exp, unexp, shards, result):
printer, err = self.get_printer(['--timing'] if shards else None)
fake_results = FakeRunResults(total, exp, unexp, shards)
total_time = fake_results.run_time + 1
printer._print_one_line_summary(total_time, fake_results)
self.assertWritten(err, result)
# Without times:
run_test(1, 1, 0, [], ["The test ran as expected.\n", "\n"])
run_test(2, 1, 1, [], ["\n", "1 test ran as expected, 1 didn't:\n", "\n"])
run_test(3, 2, 1, [], ["\n", "2 tests ran as expected, 1 didn't:\n", "\n"])
run_test(3, 2, 0, [], ["\n", "2 tests ran as expected (1 didn't run).\n", "\n"])
# With times:
fake_shards = [FakeShard("foo", 1), FakeShard("bar", 2)]
run_test(1, 1, 0, fake_shards, ["The test ran as expected in 5.00s (2.00s in rwt, 1x).\n", "\n"])
run_test(2, 1, 1, fake_shards, ["\n", "1 test ran as expected, 1 didn't in 5.00s (2.00s in rwt, 1x):\n", "\n"])
run_test(3, 2, 1, fake_shards, ["\n", "2 tests ran as expected, 1 didn't in 5.00s (2.00s in rwt, 1x):\n", "\n"])
run_test(3, 2, 0, fake_shards, ["\n", "2 tests ran as expected (1 didn't run) in 5.00s (2.00s in rwt, 1x).\n", "\n"])
def test_test_status_line(self):
printer, _ = self.get_printer()
printer._meter.number_of_columns = lambda: 80
actual = printer._test_status_line(
'fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
self.assertEqual(80, len(actual))
self.assertEqual(actual, '[0/0] fast/dom/HTMLFormElement/associa...after-index-assertion-fail1.html passed')
printer._meter.number_of_columns = lambda: 89
actual = printer._test_status_line(
'fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
self.assertEqual(89, len(actual))
self.assertEqual(actual, '[0/0] fast/dom/HTMLFormElement/associated-...ents-after-index-assertion-fail1.html passed')
printer._meter.number_of_columns = lambda: sys.maxsize
actual = printer._test_status_line(
'fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
self.assertEqual(90, len(actual))
self.assertEqual(actual, '[0/0] fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html passed')
printer._meter.number_of_columns = lambda: 18
actual = printer._test_status_line(
'fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
self.assertEqual(18, len(actual))
self.assertEqual(actual, '[0/0] f...l passed')
printer._meter.number_of_columns = lambda: 10
actual = printer._test_status_line(
'fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
self.assertEqual(actual, '[0/0] associated-elements-after-index-assertion-fail1.html passed')
def test_details(self):
printer, err = self.get_printer(['--details'])
result = self.get_result('passes/image.html')
printer.print_started_test('passes/image.html')
printer.print_finished_test(result, expected=False, exp_str='', got_str='')
self.assertNotEmpty(err)
def test_print_found(self):
printer, err = self.get_printer()
printer.print_found(100, 10, 1, 1)
self.assertWritten(err, ["Found 100 tests; running 10, skipping 90.\n"])
self.reset(err)
printer.print_found(100, 10, 2, 3)
self.assertWritten(err, ["Found 100 tests; running 10 (6 times each: --repeat-each=2 --iterations=3), skipping 90.\n"])
def test_debug_rwt_logging_is_throttled(self):
printer, err = self.get_printer(['--debug-rwt-logging'])
result = self.get_result('passes/image.html')
printer.print_started_test('passes/image.html')
printer.print_finished_test(result, expected=True, exp_str='', got_str='')
printer.print_started_test('passes/text.html')
result = self.get_result('passes/text.html')
printer.print_finished_test(result, expected=True, exp_str='', got_str='')
# Only the first test's start should be printed.
lines = err.buflist
self.assertEqual(len(lines), 1)
self.assertTrue(lines[0].endswith('passes/image.html\n'))
| bsd-3-clause | -3,636,279,563,733,310,500 | 43.073359 | 127 | 0.657556 | false |
fmetzger/videostreaming-bufferemulation | ytdl.py | 1 | 15813 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
ytdl_refactored.py
Required python packages:
python-gdata
python-matplotlib
python-numpy
"""
import urllib2
import urllib
import os
import subprocess
import sys
import string
import re
import socket
import datetime
from datetime import datetime
import gdata.youtube
import gdata.youtube.service
print "OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO"
print "OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO"
print "OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO"
class Tools:
"""
The class Tools contains several @classmethod functions.
You can call these functions without initializing a Tools object, as
simply as Tools.write("filename", "Hello Tools").
The private __helper function works like a wrapper for subprocess.Popen().
It returns the process's stdout.
"""
def __init__(self):
# ?
pass
@classmethod
def chdir(self, directory):
if os.access(directory, os.F_OK) is False:
os.mkdir(directory)
os.chdir(directory)
@classmethod
def pwd(self):
return os.getcwd()
@classmethod
def __helper(self, pstring):
run = subprocess.Popen(pstring, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return run.stdout.read()
@classmethod
def traceroute(self, ip, interface=None):
if interface is not None:
return self.__helper("traceroute -i " + interface + " " + ip)
else:
return self.__helper("traceroute " + ip)
@classmethod
def lft(self, ip, opt=""):
return self.__helper("lft " + opt + " " + ip)
@classmethod
def ping(self, ip, interface=None):
if interface is not None:
return self.__helper("ping -c 10 -I " + interface + " " + ip)
else:
return self.__helper("ping -c 10 " + ip)
@classmethod
def whob(self, ip, opt=""):
return self.__helper("whob " + opt + " " + ip)
@classmethod
def mediainfo(self, mfile):
return self.__helper("mediainfo " + mfile)
@classmethod
def mplayer(self, mfile):
# remove all vstats file beforehand
filelist = os.listdir(".")
for vfile in filelist:
if "vstats_" in vfile:
os.remove(vfile)
os.system("mplayer " + mfile)
@classmethod
def curl(self, url, user_agent, interface=None):
download_start = datetime.now()
url = str(url)
user_agent = str(user_agent)
print "url is " + url
if interface is not None:
os.system("curl \"" + url + "\" " + "--interface " + interface + " --location --retry 10 --retry-delay 1 --user-agent \"" + user_agent + "\" --trace-time --trace-ascii curltrace > curlout")
else:
print "foo"
os.system("curl \"" + url + "\" --location --retry 10 --retry-delay 1 --user-agent \"" + user_agent + "\" --trace-time --trace-ascii curltrace > curlout")
print "bar"
download_end = datetime.now()
return download_end - download_start
@classmethod
def tcpdump(self, hostname, interface=None):
if interface is not None:
args = ["tcpdump", "-i", interface, "-w", "capture.log", "host", hostname]
else:
#args = ["tcpdump", "-w", "capture.log", "host", hostname]args = ["tcpdump", "-w", "capture.log", "host", hostname]
#dont filter for hostname at the moment
args = ["tcpdump", "-w", "capture.log"]
return subprocess.Popen(args)
@classmethod
def getIPAddrsList(self, hostname):
(hostnamelist, aliaslist, ipaddrslist) = socket.gethostbyname_ex(hostname)
return ipaddrslist
@classmethod
def ytBrowser(self, video_id):
write = ""
# gather data from gdata API
yt_service = gdata.youtube.service.YouTubeService()
yt_service.ssl = False
entry = yt_service.GetYouTubeVideoEntry(video_id=video_id)
write += "video_title: " + entry.media.title.text + "\n"
vc = '0'
if hasattr(entry.statistics, 'view_count'):
vc = entry.statistics.view_count
write += "video_viewcount: " + vc + "\n"
vr = 'N/A'
if hasattr(entry, 'rating') and hasattr(entry.rating, 'average'):
vr = entry.rating.average
write += "video_rating: " + vr + "\n"
write += "video_url: " + entry.media.player.url + "\n"
return write
@classmethod
def write(self, fname, write):
f_out = open(fname, 'w')
f_out.write(write)
f_out.close()
class Video:
"""
The video class represents a YouTube video.
It is created using a YouTube video ID and an user agent string.
On initialization, the Video object loads the HTML source and
saves all found URLs and hostnames in private fields.
You can call several functions on a video object,
i.e. get it's YouTube URL, it's YouTube ID, etc.
The agent argument stands for a user agent string used to request the
HTML source code of the webpage containing the YouTube video.
Some example user agent strings:
"""
def __init__(self, video_id, agent='Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_7; de-de) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27'):
self.user_agent = agent
self.vid = video_id
self.siteString = self.__getHTMLString()
self.urls = self.__getURLs()
self.hostnames = self.__getHostnames()
def getVideoID(self):
return self.vid
def getHTMLString(self):
return self.siteString
def __getHTMLString(self):
headers = { 'User-Agent':self.user_agent }
request = urllib2.Request(self.getURL(), None, headers)
response = urllib2.urlopen(request)
return response.read()
def getURL(self):
# get the full url of the video
# returns http://www.youtube.com/watch?v=<video_id>
return "http://www.youtube.com/watch?v=" + self.vid
def getURLs(self):
return self.urls
"""
def __getURLs(self):
# find all video urls
u = []
# format changed Q2/Q3 2011
strings = string.split(self.siteString, "flashvars=\"")[1]
strings = string.split(strings,"\"")[0]
# strings = string.split(strings,'\"};')[0]
strings = string.split(strings,"&")
for s in strings:
if "url_encoded_fmt_stream_map" in s: # previously was fmt_stream_map
# s = re.split(": \"\d\d|", s)[1]
s = string.split(s, "url%3D")
for rawurl in s:
if "http" in rawurl:
url = urllib.unquote(rawurl)
url = urllib2.unquote(url).replace("%3A",":").replace("%2F","/").replace("%3D","=").replace("%26","&").replace("%3F", "?").replace("%2C", ",")
url = url.rstrip(",")
print url
u.append(url)
return u
"""
def __getURLs(self):
# find all video urls
u = []
strings = string.split(self.siteString,"PLAYER_CONFIG")[1] #extract the swfConfig first
strings = string.split(strings,"});")[0]
a_strings = string.split(strings,'url=')
for i in range(len(a_strings)):
if i != 0:
index = a_strings[i].index('fallback_host')
strings = a_strings[i][0:index-6] #index-6 = strip the trailing \u0026 (ampersand)
url = urllib.url2pathname(strings).replace('\/','/').replace("\u0026","&")
#print i,url
u.append(url)
return u
def getHostnames(self):
return self.hostnames
def __getHostnames(self):
hostnames = []
for s in self.urls:
hostname = s.split("//")[1].split("/")[0]
hostnames.append(hostname)
return hostnames
def saveHTML(self):
Tools.write("video.html", self.siteString)
class ExperimentManager:
"""
The ExperimentManager manages everything for you.
Just give him your preferred video id and your running network interfaces
and the ExperimentManager will perform some serious measurements
covering downloading, tracerouting, media analysis 'n stuff.
Example user agent strings (captured and/or from en.wikipedia.org/ ):
user_agent = 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0)'
user_agent = 'Mozilla/5.0 (X11; Linux x86_64; rv:2.0.0) Gecko/20110214 Firefox/4.0.0'
user_agent = 'Mozilla/5.0 (X11; U; Linux i586; en-US; rv:1.7.3) Gecko/20040924 Epiphany/1.4.4 (Ubuntu)'
user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10'
user_agent = 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_7; de-de) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27'
user_agent = 'Mozilla/5.0 (Ubuntu; X11; Linux x86_64; rv:8.0) Gecko/20100101 Firefox/8.0'
user_agent = 'Mozilla/5.0 (Windows NT 6.1; rv:8.0) Gecko/20100101 Firefox/8.0'
"""
def __init__(self, video_id, user_agent = 'Mozilla/5.0 (Windows NT 6.1; rv:8.0) Gecko/20100101 Firefox/8.0', interface=None):
self.user_agent = user_agent
self.video = Video(video_id, self.user_agent)
self.interface = interface
self.__run()
def __curlOK(self):
"""
Here happens some renaming action after a successful download.
Because we don't need every bit of information, we filter it a bit.
"""
write = ""
pattern = re.compile('^\d\d:\d\d:')
f_in = open("curltrace", 'r')
for line in f_in:
if pattern.match(line):
write += line
return write
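# Because curl is run with --trace-time, every kept line starts with a timestamp,
# e.g. (illustrative): 12:34:56.789012 == Info: Connected to <host> port 80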
def __run(self):
"""
The »run the experiment« function
Here happens the magic:
- get urls and hostnames from video
- measure download time
- get additional info using mediaplayer, mediainfo
- measurements using ping, traceroute, whob
perform all steps on each given networking interface
"""
urls = self.video.getURLs()
hostnames = self.video.getHostnames()
Tools.chdir(self.video.getVideoID())
path = Tools.pwd()
Tools.chdir(path)
if self.interface is not None:
print "---> using interface " + self.interface + "\n"
Tools.chdir(self.interface)
# do this for every URL
for u in urls:
print "---> using URL " + u + "\n"
#host = re.search('http\:\/\/([a-zA-Z0-9\.]*)\/',u).group(1)
host = re.search('http\:\/\/([a-zA-Z0-9\.-]*)\/',u)
if host is not None:
host = host.group(1)
print host
prefix = str(urls.index(u))
# run tcpdump
#tcpdump = Tools.tcpdump(i, host)
tcpdump = Tools.tcpdump(host, self.interface)
# download using curl
download_duration = Tools.curl(u, self.user_agent, self.interface)
# stop tcpdump again
tcpdump.terminate()
# generic log file with additional data from gdata API
write = "url : " + u + "\n"
write += "file_size: " + str(os.path.getsize("curlout")) + "\n"
write += "download_duration: " + str(float(download_duration.seconds) + float(download_duration.microseconds) / 1000000) + "\n"
write += Tools.ytBrowser(self.video.getVideoID())
Tools.write(prefix + ".log", write)
# fs = os.path.getsize("curlout")
if os.path.getsize("curlout") is not 0:
# print "---> Logfile saved"
# write downloadlog
Tools.write(prefix + ".downloadlog", self.__curlOK())
# print "---> Downloadlog saved"
# generate framelog
Tools.mplayer("curlout -lavdopts vstats -vo null -ao null -speed 10")
# assume that the vstats_* file is the one we want
filelist = os.listdir(".")
for vfile in filelist:
if "vstats_" in vfile:
os.rename(vfile, prefix + ".framelog")
if "capture.log" in vfile:
os.rename(vfile, prefix + ".dump")
# print "---> mediaplayer logfile saved"
# ask mediainfo for extended information
Tools.write(prefix + ".mediainfo", Tools.mediainfo("-f curlout"))
# print "---> mediainfo logfile saved"
# check for 302's (redirects)
# store all additional URLs and hostnames from 302's
f_302check = open(prefix + ".downloadlog",'r')
for line in f_302check:
if "== Info: Issue another request to this URL:" in line:
url302 = line.split(": \'")[1].rstrip("\'")
urls.append(url302)
hostname302 = url302.split("//")[1].split("/")[0]
hostnames.append(hostname302)
# self.video.hostnames.append(hostname302)
print "Got redirected to " + url302
print "Redirection hostname " + hostname302
# TODO: delete remnant files (curlout, curltrace)
else:
os.rename("curltrace",prefix+".downloaderrorlog")
print "Download resulted in a zero size file, check the error log for details.\n\n"
# check every hostname in hostnamelist
# run traceroute, ping, whob for every ip we find
# save results in files
for hn in hostnames:
str_traceroute = ""
str_ping = ""
str_whob = ""
prefix = str(hostnames.index(hn))
for ip in Tools.getIPAddrsList(hn):
# traceroute
str_traceroute += Tools.traceroute(ip, self.interface) + "\n\n"
# ping
str_ping += Tools.ping(ip, self.interface) + "\n\n"
# whob
str_whob += Tools.whob(ip) + "\n\n"
# lft
# Tools.lft(ip, "-D " + i))
Tools.write(prefix + ".traceroute", str_traceroute)
print str_traceroute
Tools.write(prefix + ".ping", str_ping)
print str_ping
Tools.write(prefix + ".whob", str_whob)
print str_whob
video_id = sys.argv[1]
iList = sys.argv[2:]
# use the first interface given on the command line, if any
em = ExperimentManager(video_id, interface=iList[0] if iList else None)
# TODO: command line options for tcpdump, modules for other streamers, default option with no given interface
| unlicense | 7,069,916,604,199,555,000 | 34.450673 | 201 | 0.537411 | false |
atheiste/django-bit-category | bitcategory/tests.py | 1 | 6753 | from __future__ import absolute_import
from django.test import TestCase
from .models import Category
class UnitTests(TestCase):
def test_bit_mask(self):
"""We can't use HierarchicalModel here.
Since Django 1.6, one can't instantiate an abstract model with a foreign key.
"""
hm = Category(parent=None, level=1, name="dummy")
self.assertEqual(hm._mask_for(1), 0b11111000000000000000000000000000)
self.assertEqual(hm._mask_for(2), 0b11111111110000000000000000000000)
self.assertEqual(hm._mask_for(3), 0b11111111111111100000000000000000)
self.assertEqual(hm._mask_for(4), 0b11111111111111111111000000000000)
self.assertEqual(hm._mask_for(5), 0b11111111111111111111111110000000)
self.assertEqual(hm._mask_for(6), 0b11111111111111111111111111111100)
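# The masks above reflect the id layout used by these models: each hierarchy level
# owns 5 bits of a 32-bit id, so level N is selected by the top 5*N bits and the
# scheme allows 6 levels with up to 31 children per node.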
def test_bit_offset(self):
hm = Category(parent=None, level=1, name="dummy")
self.assertEqual(hm._get_left_offset(), 5)
self.assertEqual(hm._get_left_offset(1), 5)
self.assertEqual(hm._get_left_offset(2), 10)
self.assertEqual(hm._get_right_offset(), 27)
self.assertEqual(hm._get_right_offset(2), 22)
def test_min_max(self):
hm = Category(parent=None, level=1, name="dummy")
self.assertEqual(hm.min, 0b00001000000000000000000000000000)
self.assertEqual(hm.max, 0b11111000000000000000000000000000)
class ModelTest(TestCase):
def test_get_free_id(self):
Category.objects.all().delete() # WTF why!?
# empty db
cat1 = Category(parent=None, level=1, name="cat1")
self.assertEqual(cat1.get_free_id(), 0b00001000000000000000000000000000)
cat1.save()
# root
cat2 = Category(parent=None, level=1, name="cat2")
self.assertEqual(cat2.get_free_id(), 0b00010000000000000000000000000000)
cat2.save()
cat3 = Category(parent=None, level=1, name="cat3")
self.assertEqual(cat3.get_free_id(), 0b00011000000000000000000000000000)
cat3.save()
# non-root
cat21 = Category(parent=cat2, level=2, name="cat21")
self.assertEqual(cat21.get_free_id(), 0b00010000010000000000000000000000)
cat21.save()
cat22 = Category(parent=cat2, level=2, name="cat22")
self.assertEqual(cat22.get_free_id(), 0b00010000100000000000000000000000)
cat22.save()
# counts
self.assertEqual(cat22.descendants.count(), 1) # itself
self.assertEqual(cat22.ancestors.count(), 2) # itself, root
self.assertEqual(cat22.ancestors[0], cat2) # the first one has to be the root
self.assertEqual(cat22.ancestors[1], cat22) # the last one is the node itself
self.assertEqual(cat22.root, cat2) # root resolves to the top-level ancestor
# spaces in ids line
cat23 = Category(parent=cat2, level=2, name="cat23")
self.assertEqual(cat23.get_free_id(), 0b00010000110000000000000000000000)
cat23.save()
cat22.delete()
self.assertFalse(Category.objects.filter(id=0b00010000100000000000000000000000).exists())
cat22 = Category(parent=cat2, level=2, name="cat22")
self.assertEqual(cat22.get_free_id(), 0b00010000100000000000000000000000)
cat22.save()
# counts
cat221 = Category(parent=cat22, level=3, name="cat221")
self.assertEqual(cat221.get_free_id(), 0b00010000100000100000000000000000)
cat221.save()
self.assertEqual(cat22.descendants.count(), 2) # itself and its child
self.assertEqual(cat22.descendants[0], cat22) # the first one is the node itself
self.assertEqual(cat22.descendants[1], cat221) # followed by its descendants
self.assertEqual(cat22.ancestors.count(), 2) # itself, root
self.assertEqual(cat22.ancestors[0], cat2) # the first one has to be the root
self.assertEqual(cat22.ancestors[1], cat22) # the last one is the node itself
self.assertEqual(cat22.root, cat2) # root resolves to the top-level ancestor
def test_gt(self):
hm1 = Category(parent=None, level=1, name="cat1")
hm1.save()
hm11 = Category(parent=hm1, level=2, name="cat11")
hm11.save()
hm2 = Category(parent=None, level=1, name="cat2")
hm2.save()
self.assertTrue(hm1 > hm11)
self.assertTrue(hm1 > hm1) # this might feel akward
self.assertFalse(hm1 > hm2)
self.assertFalse(hm11 > hm1)
def test_contains(self):
hm1 = Category(parent=None, level=1, name="cat1")
hm1.save()
hm11 = Category(parent=hm1, level=2, name="cat11")
hm11.save()
hm12 = Category(parent=hm1, level=2, name="cat12")
hm12.save()
hm2 = Category(parent=None, level=1, name="cat2")
hm2.save()
hm22 = Category(parent=hm2, level=2, name="cat22")
hm22.save()
hm121 = Category(parent=hm12, level=3, name="cat121")
hm121.save()
self.assertTrue(hm1 in hm1)
self.assertTrue(hm11 in hm1, "{:b} in {:b} when mask {:b}".format(hm11.id, hm1.id, hm1._mask_for(hm1.level)))
self.assertTrue(hm12 in hm1)
self.assertTrue(hm121 in hm1)
self.assertTrue(hm22 in hm2)
self.assertTrue(hm2 in hm2)
self.assertFalse(hm1 in hm2)
self.assertFalse(hm1 in hm11)
self.assertFalse(hm1 in hm12)
self.assertFalse(hm1 in hm121)
self.assertFalse(hm22 in hm1)
def test_relations(self):
Category.objects.all().delete() # WTF why!?
cat1 = Category.objects.create(parent=None, name="cat1")
cat2 = Category.objects.create(parent=None, name="cat2")
cat3 = Category.objects.create(parent=None, name="cat3")
cat21 = Category.objects.create(parent=cat2, name="cat21")
cat22 = Category.objects.create(parent=cat2, name="cat22")
cat23 = Category.objects.create(parent=cat2, name="cat23")
cat24 = Category.objects.create(parent=cat2, name="cat24")
cat31 = Category.objects.create(parent=cat3, name="cat31")
cat32 = Category.objects.create(parent=cat3, name="cat32")
cat221 = Category.objects.create(parent=cat22, name="cat221")
cat222 = Category.objects.create(parent=cat22, name="cat222")
self.assertEqual(cat1.neighbours.count(), 3)
self.assertEqual(cat21.neighbours.count(), 4)
self.assertEqual(cat22.neighbours.count(), 4)
self.assertEqual(cat23.neighbours.count(), 4)
self.assertEqual(cat24.neighbours.count(), 4)
self.assertEqual(cat31.neighbours.count(), 2)
self.assertEqual(cat32.neighbours.count(), 2)
self.assertEqual(cat221.neighbours.count(), 2)
self.assertEqual(cat222.neighbours.count(), 2)
| bsd-3-clause | 3,041,693,801,767,563,300 | 44.322148 | 117 | 0.651118 | false |
quisas/albus | cli_tools/openpyxl/reader/comments.py | 1 | 3069 | # Copyright (c) 2010-2014 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: see AUTHORS file
from os import path
from openpyxl.comments import Comment
from openpyxl.shared.ooxml import PACKAGE_WORKSHEET_RELS, PACKAGE_WORKSHEETS, \
SHEET_MAIN_NS, COMMENTS_NS
from openpyxl.shared.xmltools import fromstring
def _get_author_list(root):
author_subtree = root.find('{%s}authors' % SHEET_MAIN_NS)
return [author.text for author in author_subtree]
def read_comments(ws, xml_source):
"""Given a worksheet and the XML of its comments file, assigns comments to cells"""
root = fromstring(xml_source)
authors = _get_author_list(root)
comment_nodes = root.iter('{%s}comment' % SHEET_MAIN_NS)
for node in comment_nodes:
author = authors[int(node.attrib['authorId'])]
cell = node.attrib['ref']
text_node = node.find('{%s}text' % SHEET_MAIN_NS)
text = ''
substrs = []
for run in text_node.findall('{%s}r' % SHEET_MAIN_NS):
runtext = ''.join([t.text for t in run.findall('{%s}t' % SHEET_MAIN_NS)])
substrs.append(runtext)
comment_text = ''.join(substrs)
comment = Comment(comment_text, author)
ws.cell(coordinate=cell).comment = comment
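# Abridged shape of the comments XML handled above (inferred from the parsing;
# all element names live in the SHEET_MAIN_NS namespace):
# <comments>
# <authors><author>Jane Doe</author></authors>
# <comment ref="A1" authorId="0"><text><r><t>comment text</t></r></text></comment>
# </comments>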
def get_comments_file(sheet_codename, archive, valid_files):
"""Returns the XML filename in the archive which contains the comments for
the spreadsheet with codename sheet_codename. Returns None if there is no
such file"""
rels_file = PACKAGE_WORKSHEET_RELS + '/' + sheet_codename + '.rels'
if rels_file not in valid_files:
return None
rels_source = archive.read(rels_file)
root = fromstring(rels_source)
for i in root:
if i.attrib['Type'] == COMMENTS_NS:
comments_file = path.normpath(PACKAGE_WORKSHEETS + '/' + i.attrib['Target'])
if comments_file in valid_files:
return comments_file
return None
| agpl-3.0 | 6,218,087,853,442,427,000 | 44.132353 | 88 | 0.69306 | false |
wimglenn/argboss | test_override_kwargs.py | 1 | 1470 | from override_kwargs import override_kwargs
from other_module import delegating_function, function
from datetime import datetime
from unittest import TestCase
def function_in_this_module(x=123):
"""hello I'm a docstring"""
return x
def MyClass(object):
def method_in_this_module(x=123):
return x
with override_kwargs('__main__', 'function_in_this_module', {'x': 69}) as f:
assert function_in_this_module() == 69
assert function_in_this_module.__doc__ == f.__doc__
assert function_in_this_module.__name__ == f.__name__
assert function_in_this_module() == 123
# with override_kwargs('__main__', 'MyClass.method_in_this_module', {'x': 69}) as f:
# assert method_in_this_module() == 69 == f()
# assert method_in_this_module.__doc__ == f.__doc__
# assert method_in_this_module.__name__ == f.__name__
# assert method_in_this_module() == 123
with override_kwargs('__main__', 'function', {'x': 69}):
assert function() == 69
assert function() == 123
with override_kwargs('other_module', 'ClassInOtherModule.method', {'x': 69}):
assert delegating_function() == 69
assert delegating_function() == 123
with override_kwargs('other_module', 'another_module.another_function', {0: 69}):
assert delegating_function() == 69
assert delegating_function() == 123
then = datetime(year=1982, month=3, day=19)
with override_kwargs('__main__', 'datetime', {'year': 1982}):
assert datetime(year=2014, month=3, day=19) == then
| mit | 6,979,320,661,274,152,000 | 34.853659 | 84 | 0.662585 | false |
mckinseyacademy/xblock-poll | tests/integration/test_functions.py | 1 | 8200 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 McKinsey Academy
#
# Authors:
# Jonathan Piacenti <[email protected]>
#
# This software's license gives you freedom; you can copy, convey,
# propagate, redistribute and/or modify this program under the terms of
# the GNU Affero General Public License (AGPL) as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version of the AGPL published by the FSF.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program in a file in the toplevel directory called
# "AGPLv3". If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests a realistic, configured Poll to make sure that everything works as it
should.
"""
from .base_test import PollBaseTest
ANSWER_SELECTOR = 'label.poll-answer-text'
class TestPollFunctions(PollBaseTest):
def test_first_load(self):
"""
Checks first load.
Verify that the poll loads with the expected choices, that feedback is
not showing, and that the submit button is disabled.
"""
self.go_to_page('Poll Functions')
answer_elements = self.browser.find_elements_by_css_selector(ANSWER_SELECTOR)
answers = [element.text for element in answer_elements]
self.assertEqual(['A very long time', 'Not very long', 'I shall not say', 'Longer than you'], answers)
self.assertFalse(self.browser.find_element_by_css_selector('.poll-feedback').is_displayed())
submit_button = self.get_submit()
self.assertFalse(submit_button.is_enabled())
def test_submit_enabled(self):
"""
Makes sure the submit button is enabled when selecting an answer.
"""
self.go_to_page('Poll Functions')
answer_elements = self.browser.find_elements_by_css_selector(ANSWER_SELECTOR)
answer_elements[0].click()
# When an answer is selected, make sure submit is enabled.
self.wait_until_exists('input[name=poll-submit]:enabled')
def test_poll_submission(self):
"""
Verify that the user can submit his or her vote and that the vote counts.
Also check that feedback is displayed afterward.
"""
self.go_to_page('Poll Functions')
answer_elements = self.browser.find_elements_by_css_selector(ANSWER_SELECTOR)
# 'Not very long'
answer_elements[1].click()
self.get_submit().click()
self.wait_until_exists('.poll-footnote')
self.assertTrue(self.browser.find_element_by_css_selector('.poll-feedback').text,
"Thank you\nfor being a valued student.")
self.assertEqual(self.browser.find_element_by_css_selector('.poll-footnote').text,
'Results gathered from 100 respondents.')
self.assertFalse(self.browser.find_element_by_css_selector('input[name=poll-submit]').is_enabled())
def test_submit_not_enabled_on_revisit(self):
"""
Verify that revisiting the page post-vote does not re-enable the submit button.
"""
self.go_to_page('Poll Functions')
answer_elements = self.browser.find_elements_by_css_selector(ANSWER_SELECTOR)
# Not very long
answer_elements[1].click()
self.get_submit().click()
# Button will be replaced with a new disabled copy, not just disabled.
self.wait_until_exists('input[name=poll-submit]:disabled')
self.go_to_page('Poll Functions')
self.assertFalse(self.get_submit().is_enabled())
class TestSurveyFunctions(PollBaseTest):
@staticmethod
def chunk_list(chunkable, max_size):
"""
Subdivides a list into several smaller lists.
"""
result = []
in_list = False
for index, item in enumerate(chunkable, start=1):
if not in_list:
result.append([])
in_list = True
result[-1].append(item)
if not index % max_size:
in_list = False
return result
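# Illustrative example: chunk_list([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]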
def test_first_load(self):
"""
Checks the first load of the survey.
Verifies that the poll loads with the expected questions,
that the answers are shown in the expected order, that feedback is
not showing, and that the submit button is disabled.
"""
self.go_to_page('Survey Functions')
self.assertEqual(
[element.text for element in self.browser.find_elements_by_css_selector('.survey-question')],
[
"I feel like this test will pass.", "I like testing software", "Testing is not necessary",
"I would fake a test result to get software deployed."
]
)
self.assertEqual(
[element.text for element in self.browser.find_elements_by_css_selector('.survey-answer')],
[
"Strongly Agree", "Agree", "Neutral", "Disagree", "Strongly Disagree"
]
)
self.assertFalse(self.browser.find_element_by_css_selector('.poll-feedback').is_displayed())
submit_button = self.get_submit()
self.assertFalse(submit_button.is_enabled())
def fill_survey(self, assert_submit=False):
"""
Fills out the survey. Optionally checks if the submit button is
in the right state along the way.
"""
elements = self.browser.find_elements_by_css_selector('.survey-option input[type=radio]')
# Answers should be in sets of five.
questions = self.chunk_list(elements, 5)
# Disabled to start...
submit_button = self.get_submit()
if assert_submit:
self.assertFalse(submit_button.is_enabled())
# Strongly Agree: I feel like this test will pass.
questions[0][0].click()
if assert_submit:
self.assertFalse(submit_button.is_enabled())
# Disagree: Testing is not necessary
questions[2][3].click()
if assert_submit:
self.assertFalse(submit_button.is_enabled())
# Agree: I like testing software
questions[1][1].click()
if assert_submit:
self.assertFalse(submit_button.is_enabled())
# Strongly Disagree: I would fake a test result to get software deployed.
questions[3][4].click()
if assert_submit:
# Submit button should now be enabled!
self.assertTrue(submit_button.is_enabled())
def test_submit_enabled(self):
"""
Verify that the submit button is enabled only when every question
has an answer.
"""
self.go_to_page('Survey Functions')
self.fill_survey(assert_submit=True)
def test_survey_submission(self):
"""
Verify that the user can submit his or her vote and that the vote counts.
Also check that feedback is displayed afterward.
"""
self.go_to_page('Survey Functions')
self.fill_survey()
self.get_submit().click()
self.wait_until_exists('.poll-footnote')
self.assertEqual(self.browser.find_element_by_css_selector('.poll-footnote').text,
'Results gathered from 21 respondents.')
self.assertTrue(self.browser.find_element_by_css_selector('.poll-feedback').text,
"Thank you\nfor running the tests.")
def test_submit_not_enabled_on_revisit(self):
"""
Verify that revisiting the page post-vote does not re-enable the submit button.
"""
self.go_to_page('Survey Functions')
self.fill_survey()
self.get_submit().click()
# Button will be replaced with a new disabled copy, not just disabled.
self.wait_until_exists('input[name=poll-submit]:disabled')
self.go_to_page('Poll Functions')
self.assertFalse(self.get_submit().is_enabled())
| agpl-3.0 | 5,463,858,232,789,444,000 | 34.964912 | 110 | 0.630732 | false |
Fluent-networks/floranet | floranet/models/application.py | 1 | 1851 | from twisted.internet.defer import inlineCallbacks, returnValue
from floranet.models.model import Model
from floranet.models.appinterface import AppInterface
class Application(Model):
"""LoRa application class
Model representing a LoRa application.
Attributes:
name (str): a user friendly name for the application
domain (str): optional customer domain string
appeui (int): global application ID (IEEE EUI64)
appnonce (int): A unique ID provided by the network server
appkey (int): AES-128 application secret key
fport (int): Port field used for this application
"""
TABLENAME = 'applications'
BELONGSTO = [{'name': 'appinterface', 'class_name': 'AppInterface'}]
HASMANY = [{'name': 'properties', 'class_name': 'AppProperty'}]
@inlineCallbacks
def valid(self):
"""Validate an application object.
Returns:
valid (bool), message(dict): (True, empty) on success,
(False, error message dict) otherwise.
"""
messages = {}
# Check for unique appkeys
duplicate = yield Application.exists(where=['appkey = ? AND appeui != ?',
self.appkey, self.appeui])
if duplicate:
messages['appkey'] = "Duplicate application key exists: appkey " \
"must be unique."
# Check the app interface exists
if self.appinterface_id:
exists = yield AppInterface.exists(where=['id = ?', self.appinterface_id])
if not exists:
messages['appinterface_id'] = "Application interface {} does not " \
"exist.".format(self.appinterface_id)
valid = not any(messages)
returnValue((valid, messages))
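# Typical call site (illustrative, from within a Twisted inlineCallbacks function):
# valid, errors = yield application.valid()
# if not valid: report the per-field messages in errors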
| mit | -2,307,838,893,648,743,000 | 35.313725 | 86 | 0.593193 | false |
collective/collective.anonfeedback | src/collective/anonfeedback/tests/test_views.py | 1 | 2732 | import unittest2 as unittest
from plone.testing.z2 import Browser
from Products.CMFCore.utils import getToolByName
from plone.app.testing import SITE_OWNER_NAME
from plone.app.testing import SITE_OWNER_PASSWORD
from plone.app.testing import login
from collective.anonfeedback.testing import\
COLLECTIVE_ANONFEEDBACK_FUNCTIONAL_TESTING
class TestInstalled(unittest.TestCase):
layer = COLLECTIVE_ANONFEEDBACK_FUNCTIONAL_TESTING
def setUp(self):
self.app = self.layer['app']
self.portal = self.layer['portal']
self.qi_tool = getToolByName(self.portal, 'portal_quickinstaller')
def get_browser(self, username=None, password=None):
browser = Browser(self.app)
browser.handleErrors = False
portalURL = self.portal.absolute_url()
if username:
browser.open(portalURL + '/login_form')
browser.getControl(name='__ac_name').value = username
browser.getControl(name='__ac_password').value = password
browser.getControl(name='submit').click()
return browser
def test_views(self):
""" Validate that our products GS profile has been run and the product
installed
"""
browser = self.get_browser()
portalURL = self.portal.absolute_url()
browser.open(portalURL)
browser.getLink('Give Feedback').click()
form = browser.getForm(name='feedback')
# Submit an incomplete form
form.getControl('Subject').value = 'Test subject'
form.getControl('Submit').click()
self.assertIn('You must enter a subject and some feedback text.', browser.contents)
# The filled in value remains
form = browser.getForm(name='feedback')
self.assertEqual(form.getControl('Subject').value, 'Test subject')
# Complete the form
form.getControl('Feedback').value = 'Test\nmessage.'
form.getControl('Submit').click()
# It worked.
self.assertIn('Your feedback has been submitted.', browser.contents)
# Fields should now be empty.
form = browser.getForm(name='feedback')
self.assertEqual(form.getControl('Subject').value, '')
# Anonymous people can't view the feedback.
self.assertNotIn('View Feedback', browser.contents)
# Login
browser = self.get_browser(SITE_OWNER_NAME, SITE_OWNER_PASSWORD)
browser.open(portalURL)
# Admin *can* see the feedback.
self.assertIn('View Feedback', browser.contents)
browser.getLink('View Feedback').click()
self.assertIn('<h3>Test subject</h3>', browser.contents)
| mit | -8,225,146,587,051,691,000 | 36.438356 | 91 | 0.639092 | false |
elliterate/capybara.py | capybara/tests/session/test_has_text.py | 1 | 10818 | # coding=utf-8
from __future__ import unicode_literals
import pytest
import re
import capybara
class TestHasText:
def test_is_true_if_the_given_text_is_on_the_page_at_least_once(self, session):
session.visit("/with_html")
assert session.has_text("est")
assert session.has_text("Lorem")
assert session.has_text("Redirect")
def test_ignores_tags(self, session):
session.visit("/with_html")
assert not session.has_text("""exercitation <a href="/foo">ullamco</a> laboris""")
assert session.has_text("exercitation ullamco laboris")
def test_ignores_extra_whitespace_and_newlines(self, session):
session.visit("/with_html")
assert session.has_text("text with whitespace")
def test_ignores_white_space_and_newlines_in_the_search_string(self, session):
session.visit("/with_html")
assert session.has_text("text with \n\n whitespace")
def test_is_false_if_the_given_text_is_not_on_the_page(self, session):
session.visit("/with_html")
assert not session.has_text("xxxxyzzz")
assert not session.has_text("monkey")
def test_is_true_if_the_given_unicode_text_is_on_the_page(self, session):
session.visit("/with_html")
assert session.has_text("이름")
def test_is_false_if_the_given_unicode_text_is_not_on_the_page(self, session):
session.visit("/with_html")
assert not session.has_text("论坛")
def test_handles_single_quotes_in_the_text(self, session):
session.visit("/with-quotes")
assert session.has_text("can't")
def test_handles_double_quotes_in_the_text(self, session):
session.visit("/with-quotes")
assert session.has_text("\"No,\" he said")
def test_handles_mixed_single_and_double_quotes_in_the_text(self, session):
session.visit("/with-quotes")
assert session.has_text("\"you can't do that.\"")
def test_is_false_if_text_is_in_the_title_tag_in_the_head(self, session):
session.visit("/with_js")
assert not session.has_text("with_js")
def test_is_false_if_text_is_inside_a_script_tag_in_the_body(self, session):
session.visit("/with_js")
assert not session.has_text("a javascript comment")
assert not session.has_text("aVar")
def test_is_false_if_the_given_text_is_on_the_page_but_not_visible(self, session):
session.visit("/with_html")
assert not session.has_text("Inside element with hidden ancestor")
def test_is_true_if_all_given_and_text_is_invisible(self, session):
session.visit("/with_html")
assert session.has_text("all", "Some of this text is hidden!")
def test_is_true_if_capybara_ignore_hidden_elements_is_false_and_text_is_invisible(self, session):
capybara.ignore_hidden_elements = False
session.visit("/with_html")
assert session.has_text("Some of this text is hidden!")
def test_is_true_if_the_text_in_the_page_matches_given_regex(self, session):
session.visit("/with_html")
assert session.has_text(re.compile(r"Lorem"))
def test_is_false_if_the_text_in_the_page_does_not_match_given_regex(self, session):
session.visit("/with_html")
assert not session.has_text(re.compile(r"xxxxyzzz"))
def test_is_true_if_text_matches_exact_text_exactly(self, session):
session.visit("/with_html")
assert session.find("id", "h2one").has_text("Header Class Test One", exact_text=True)
def test_is_false_if_text_does_not_match_exact_text_exactly(self, session):
session.visit("/with_html")
assert not session.find("id", "h2one").has_text("Header Class Test On", exact_text=True)
def test_escapes_any_characters_that_would_have_special_meaning_in_a_regex(self, session):
session.visit("/with_html")
assert not session.has_text(".orem")
def test_accepts_non_string_parameters(self, session):
session.visit("/with_html")
assert session.has_text(42)
def test_is_true_when_passed_none(self, session):
session.visit("/with_html")
assert session.has_text(None)
@pytest.mark.requires("js")
def test_waits_for_text_to_appear(self, session):
session.visit("/with_js")
session.click_link("Click me")
assert session.has_text("Has been clicked")
def test_is_true_if_the_text_occurs_within_the_range_given(self, session):
session.visit("/with_count")
assert session.has_text("count", between=range(1, 4))
assert session.has_text(re.compile(r"count"), between=range(2, 3))
def test_is_false_if_the_text_occurs_more_or_fewer_times_than_range(self, session):
session.visit("/with_count")
assert not session.has_text("count", between=range(0, 2))
assert not session.has_text("count", between=range(3, 11))
assert not session.has_text(re.compile(r"count"), between=range(2, 2))
def test_is_true_if_the_text_occurs_the_given_number_of_times(self, session):
session.visit("/with_count")
assert session.has_text("count", count=2)
def test_is_false_if_the_text_occurs_a_different_number_of_times_than_given(self, session):
session.visit("/with_count")
assert not session.has_text("count", count=0)
assert not session.has_text("count", count=1)
assert not session.has_text(re.compile(r"count"), count=3)
def test_coerces_count_to_an_integer(self, session):
session.visit("/with_count")
assert session.has_text("count", count="2")
assert not session.has_text("count", count="3")
def test_is_true_when_text_occurs_same_or_fewer_times_than_given(self, session):
session.visit("/with_count")
assert session.has_text("count", maximum=2)
assert session.has_text("count", maximum=3)
def test_is_false_when_text_occurs_more_times_than_given(self, session):
session.visit("/with_count")
assert not session.has_text("count", maximum=1)
assert not session.has_text(re.compile(r"count"), maximum=0)
def test_coerces_maximum_to_an_integer(self, session):
session.visit("/with_count")
assert session.has_text("count", maximum="2")
assert not session.has_text("count", maximum="1")
def test_is_true_when_text_occurs_same_or_more_times_than_given(self, session):
session.visit("/with_count")
assert session.has_text("count", minimum=2)
assert session.has_text(re.compile(r"count"), minimum=0)
def test_is_false_when_text_occurs_fewer_times_than_given(self, session):
session.visit("/with_count")
assert not session.has_text("count", minimum=3)
def test_coerces_minimum_to_an_integer(self, session):
session.visit("/with_count")
assert session.has_text("count", minimum="2")
assert not session.has_text("count", minimum="3")
@pytest.mark.requires("js")
def test_finds_element_if_it_appears_before_given_wait_duration(self, session):
with capybara.using_wait_time(0.1):
session.visit("/with_js")
session.click_link("Click me")
assert session.has_text("Has been clicked", wait=0.9)
class TestHasNoText:
def test_is_false_if_the_given_text_is_on_the_page_at_least_once(self, session):
session.visit("/with_html")
assert not session.has_no_text("est")
assert not session.has_no_text("Lorem")
assert not session.has_no_text("Redirect")
def test_is_false_if_scoped_to_an_element_which_has_the_text(self, session):
session.visit("/with_html")
with session.scope("//a[@title='awesome title']"):
assert not session.has_no_text("labore")
def test_is_true_if_scoped_to_an_element_which_does_not_have_the_text(self, session):
session.visit("/with_html")
with session.scope("//a[@title='awesome title']"):
assert session.has_no_text("monkey")
def test_ignores_tags(self, session):
session.visit("/with_html")
assert session.has_no_text("""exercitation <a href="/foo" id="foo">ullamco</a> laboris""")
assert not session.has_no_text("exercitation ullamco laboris")
def test_is_true_if_the_given_text_is_not_on_the_page(self, session):
session.visit("/with_html")
assert session.has_no_text("xxxxyzzz")
assert session.has_no_text("monkey")
def test_handles_single_quotes_in_the_text(self, session):
session.visit("/with-quotes")
assert not session.has_no_text("can't")
def test_handles_double_quotes_in_the_text(self, session):
session.visit("/with-quotes")
assert not session.has_no_text("\"you can't do that.\"")
def test_is_true_if_text_is_in_the_title_tag_in_the_head(self, session):
session.visit("/with_js")
assert session.has_no_text("with_js")
def test_is_true_if_text_is_inside_a_script_tag_in_the_body(self, session):
session.visit("/with_js")
assert session.has_no_text("a javascript comment")
assert session.has_no_text("aVar")
def test_is_true_if_the_given_text_is_on_the_page_but_not_visible(self, session):
session.visit("/with_html")
assert session.has_no_text("Inside element with hidden ancestor")
def test_is_false_if_all_given_and_text_is_invisible(self, session):
session.visit("/with_html")
assert not session.has_no_text("all", "Some of this text is hidden!")
def test_is_false_if_capybara_ignore_hidden_elements_is_false_and_text_is_invisible(self, session):
capybara.ignore_hidden_elements = False
session.visit("/with_html")
assert not session.has_no_text("Some of this text is hidden!")
def test_is_true_if_the_text_in_the_page_does_not_match_given_regex(self, session):
session.visit("/with_html")
assert session.has_no_text(re.compile(r"xxxxyzzz"))
def test_is_false_if_the_text_in_the_page_matches_given_regex(self, session):
session.visit("/with_html")
assert not session.has_no_text(re.compile(r"Lorem"))
def test_escapes_any_characters_that_would_have_special_meaning_in_a_regex(self, session):
session.visit("/with_html")
assert session.has_no_text(".orem")
@pytest.mark.requires("js")
def test_waits_for_text_to_disappear(self, session):
session.visit("/with_js")
session.click_link("Click me")
assert session.has_no_text("I changed it")
@pytest.mark.requires("js")
def test_does_not_find_element_if_it_appears_after_given_wait_duration(self, session):
session.visit("/with_js")
session.click_link("Click me")
assert session.has_no_text("Has been clicked", wait=0.1)
| mit | -3,833,025,502,180,976,600 | 41.727273 | 103 | 0.655412 | false |
jgrillo/zoonomia | test/test_solution.py | 1 | 1543 | import unittest
from zoonomia.solution import (
verify_closure_property, BasisOperator, TerminalOperator, OperatorSet,
Objective, Fitness, Solution
)
class TestVerifyClosureProperty(unittest.TestCase):
def test_verify_closure_property(self):
def add(left, right): return left + right
int_basis = BasisOperator(func=add, signature=(int, int), dtype=int)
float_basis = BasisOperator(
func=add, signature=(float, float), dtype=float
)
int_terminal = TerminalOperator(source=xrange(666), dtype=int)
float_terminal = TerminalOperator(
source=(float(i) for i in xrange(666)), dtype=float
)
        basis_set = OperatorSet(operators=(int_basis, float_basis))
        terminal_set = OperatorSet(operators=(int_terminal, float_terminal))
        # NOTE: the arguments below are assumed, not the verified zoonomia API;
        # check verify_closure_property's actual signature before relying on this.
        result = verify_closure_property(
            basis_set, terminal_set
        )
class TestBasisOperator(unittest.TestCase):
def test_basis_operator(self):
raise NotImplementedError() # FIXME
class TestTerminalOperator(unittest.TestCase):
def test_terminal_operator(self):
raise NotImplementedError() # FIXME
class TestOperatorSet(unittest.TestCase):
def test_operator_set(self):
raise NotImplementedError() # FIXME
class TestObjective(unittest.TestCase):
def test_evaluate(self):
raise NotImplementedError() # FIXME
class TestFitness(unittest.TestCase):
def test_equals(self):
raise NotImplementedError() # FIXME
class TestSolution(unittest.TestCase):
def test_solution(self):
raise NotImplementedError() # FIXME
| mit | -1,835,718,517,750,096,600 | 23.109375 | 76 | 0.685029 | false |
kylef/goji | goji/client.py | 1 | 7210 | import datetime
import mimetypes
import os
import pickle
from typing import Any, List, Optional
import click
import requests
from requests.auth import AuthBase, HTTPBasicAuth
from requests.compat import urljoin
from goji.models import Attachment, Comment, Issue, Sprint, Transition, User
class JIRAException(click.ClickException):
def __init__(self, error_messages: List[str], errors):
self.error_messages = error_messages
self.errors = errors
def show(self):
for error in self.error_messages:
click.echo(error)
for (key, error) in self.errors.items():
click.echo('- {}: {}'.format(key, error))
class NoneAuth(AuthBase):
"""
Creates a "None" auth type as if actual None is set as auth and a netrc
credentials are found, python-requests will use them instead.
"""
def __call__(self, request):
return request
class JIRAAuth(HTTPBasicAuth):
def __call__(self, request):
if 'Cookie' in request.headers:
# Prevent authorization headers when cookies are present as it
# causes silent authentication errors on the JIRA instance if
# cookies are used and invalid authorization headers are sent
# (although request succeeds)
if (
'atlassian.xsrf.token' in request.headers['Cookie']
and len(request.headers['Cookie'].split('=')) == 2
):
# continue if the cookie is ONLY the xsrf token
# check is very naive as to not get into cookie parsing
# ensure that we check only for key=value (once) being xsrf
return super(JIRAAuth, self).__call__(request)
return request
return super(JIRAAuth, self).__call__(request)
class JIRAClient(object):
def __init__(self, base_url: str, auth=None):
self.session = requests.Session()
self.base_url = base_url
self.rest_base_url = urljoin(self.base_url, 'rest/api/2/')
if auth:
self.session.auth = JIRAAuth(auth[0], auth[1])
else:
self.session.auth = NoneAuth()
self.load_cookies()
# Persistent Cookie
@property
def cookie_path(self) -> str:
return os.path.expanduser('~/.goji/cookies')
def load_cookies(self) -> None:
if os.path.exists(self.cookie_path):
try:
with open(self.cookie_path, 'rb') as fp:
self.session.cookies = pickle.load(fp)
except Exception as e:
                print('warning: Could not load cookies from disk: {}'.format(e))
def save_cookies(self) -> None:
cookies = self.session.cookies.keys()
cookies.remove('atlassian.xsrf.token')
if len(cookies) > 0:
os.makedirs(os.path.expanduser('~/.goji'), exist_ok=True)
with open(self.cookie_path, 'wb') as fp:
pickle.dump(self.session.cookies, fp)
elif os.path.exists(self.cookie_path):
os.remove(self.cookie_path)
# Methods
def validate_response(self, response: requests.Response) -> None:
if response.status_code >= 400 and 'application/json' in response.headers.get(
'Content-Type', ''
):
error = response.json()
raise JIRAException(error.get('errorMessages', []), error.get('errors', {}))
def get(self, path: str, **kwargs) -> requests.Response:
url = urljoin(self.rest_base_url, path)
response = self.session.get(url, **kwargs)
self.validate_response(response)
return response
def post(self, path: str, json) -> requests.Response:
url = urljoin(self.rest_base_url, path)
response = self.session.post(url, json=json)
self.validate_response(response)
return response
def put(self, path: str, json) -> requests.Response:
url = urljoin(self.rest_base_url, path)
response = self.session.put(url, json=json)
self.validate_response(response)
return response
@property
def username(self) -> Optional[str]:
if self.session.auth and isinstance(self.session.auth, JIRAAuth):
return self.session.auth.username
return None
def get_user(self) -> Optional[User]:
response = self.get('myself', allow_redirects=False)
response.raise_for_status()
return User.from_json(response.json())
def get_issue(self, issue_key: str) -> Issue:
response = self.get('issue/%s' % issue_key)
response.raise_for_status()
return Issue.from_json(response.json())
def get_issue_transitions(self, issue_key: str) -> List[Transition]:
response = self.get('issue/%s/transitions' % issue_key)
response.raise_for_status()
return list(map(Transition.from_json, response.json()['transitions']))
def change_status(self, issue_key: str, transition_id: str) -> None:
data = {'transition': {'id': transition_id}}
self.post('issue/%s/transitions' % issue_key, data)
def edit_issue(self, issue_key: str, updated_fields) -> None:
data = {'fields': updated_fields}
self.put('issue/%s' % issue_key, data)
def attach(self, issue_key: str, attachment) -> List[Attachment]:
media_type = mimetypes.guess_type(attachment.name)
files = {
'file': (attachment.name, attachment, media_type[0]),
}
url = urljoin(self.rest_base_url, f'issue/{issue_key}/attachments')
response = self.session.post(
url,
headers={'X-Atlassian-Token': 'no-check'},
files=files,
)
self.validate_response(response)
return list(map(Attachment.from_json, response.json()))
def create_issue(self, fields) -> Issue:
response = self.post('issue', {'fields': fields})
return Issue.from_json(response.json())
def assign(self, issue_key: str, name: Optional[str]) -> None:
response = self.put('issue/%s/assignee' % issue_key, {'name': name})
response.raise_for_status()
def comment(self, issue_key: str, comment: str) -> Comment:
response = self.post('issue/%s/comment' % issue_key, {'body': comment})
return Comment.from_json(response.json())
def search(self, query: str) -> List[Issue]:
response = self.post('search', {'jql': query})
response.raise_for_status()
return list(map(Issue.from_json, response.json()['issues']))
def create_sprint(
self,
board_id: int,
name: str,
start_date: Optional[datetime.datetime] = None,
end_date: Optional[datetime.datetime] = None,
) -> Sprint:
payload = {
'originBoardId': board_id,
'name': name,
}
if start_date:
payload['startDate'] = start_date.isoformat()
if end_date:
payload['endDate'] = end_date.isoformat()
url = urljoin(self.base_url, 'rest/agile/1.0/sprint')
response = self.session.post(url, json=payload)
self.validate_response(response)
return Sprint.from_json(response.json())
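# Minimal usage sketch (illustrative only; the server URL, credentials, issue
# key and JQL below are assumptions, not part of this module):
#
#     client = JIRAClient('https://jira.example.com/', auth=('user', 'secret'))
#     me = client.get_user()
#     issue = client.get_issue('GOJI-1')
#     results = client.search('assignee = currentUser() ORDER BY updated DESC')
#
# Each call raises JIRAException with the server-reported messages whenever
# the response carries a JSON error payload (see validate_response above).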
| bsd-2-clause | 8,375,053,474,429,902,000 | 33.663462 | 88 | 0.603745 | false |
tomazc/orange-bio | orangecontrib/bio/widgets/OWGeneNetwork.py | 1 | 16713 | from collections import namedtuple
from PyQt4.QtCore import QTimer, QThread, pyqtSlot as Slot
import Orange.data
import Orange.feature
import Orange.network
from Orange.orng.orngDataCaching import data_hints
from Orange.OrangeWidgets import OWWidget
from Orange.OrangeWidgets import OWGUI
from Orange.OrangeWidgets import OWItemModels
from Orange.OrangeWidgets.OWConcurrent import ThreadExecutor, Task, methodinvoke
from .. import ppi, taxonomy, gene
NAME = "Gene Network"
DESCRIPTION = "Extract a gene network for a set of genes."
ICON = "icons/GeneNetwork.svg"
INPUTS = [("Data", Orange.data.Table, "set_data")]
OUTPUTS = [("Network", Orange.network.Graph)]
Source = namedtuple(
"Source",
["name", "constructor", "tax_mapping", "sf_domain", "sf_filename",
"score_filter"]
)
SOURCES = [
Source("BioGRID", ppi.BioGRID, ppi.BioGRID.TAXID_MAP,
"PPI", ppi.BioGRID.SERVER_FILE, False),
Source("STRING", ppi.STRING, ppi.STRING.TAXID_MAP,
"PPI", ppi.STRING.FILENAME, True)
]
class OWGeneNetwork(OWWidget.OWWidget):
settingsList = ["taxid", "use_attr_names", "network_source",
"include_neighborhood", "min_score"]
contextHandlers = {
"": OWWidget.DomainContextHandler(
"", ["taxid", "gene_var_index", "use_attr_names"]
)
}
def __init__(self, parent=None, signalManager=None, title="Gene Network"):
super(OWGeneNetwork, self).__init__(
parent, signalManager, title, wantMainArea=False,
resizingEnabled=False
)
self.taxid = "9606"
self.gene_var_index = -1
self.use_attr_names = False
self.network_source = 1
self.include_neighborhood = True
self.autocommit = False
self.min_score = 0.9
self.loadSettings()
self.taxids = taxonomy.common_taxids()
self.current_taxid_index = self.taxids.index(self.taxid)
self.data = None
self.geneinfo = None
self.nettask = None
self._invalidated = False
box = OWGUI.widgetBox(self.controlArea, "Info")
self.info = OWGUI.widgetLabel(box, "No data on input\n")
box = OWGUI.widgetBox(self.controlArea, "Organism")
self.organism_cb = OWGUI.comboBox(
box, self, "current_taxid_index",
items=map(taxonomy.name, self.taxids),
callback=self._update_organism
)
box = OWGUI.widgetBox(self.controlArea, "Genes")
self.genes_cb = OWGUI.comboBox(
box, self, "gene_var_index", callback=self._update_query_genes
)
self.varmodel = OWItemModels.VariableListModel()
self.genes_cb.setModel(self.varmodel)
OWGUI.checkBox(
box, self, "use_attr_names",
"Use attribute names",
callback=self._update_query_genes
)
box = OWGUI.widgetBox(self.controlArea, "Network")
OWGUI.comboBox(
box, self, "network_source",
items=[s.name for s in SOURCES],
callback=self._on_source_db_changed
)
OWGUI.checkBox(
box, self, "include_neighborhood",
"Include immediate gene neighbors",
callback=self.invalidate
)
self.score_spin = OWGUI.doubleSpin(
box, self, "min_score", 0.0, 1.0, step=0.001,
label="Minimal edge score",
callback=self.invalidate
)
self.score_spin.setEnabled(SOURCES[self.network_source].score_filter)
box = OWGUI.widgetBox(self.controlArea, "Commit")
OWGUI.button(box, self, "Commit", callback=self.commit, default=True)
self.executor = ThreadExecutor()
def set_data(self, data):
self.closeContext()
self.data = data
if data is not None:
self.varmodel[:] = string_variables(data.domain)
taxid = data_hints.get_hint(data, "taxid", default=self.taxid)
if taxid in self.taxids:
self.set_organism(self.taxids.index(taxid))
self.use_attr_names = data_hints.get_hint(
data, "genesinrows", default=self.use_attr_names
)
if not (0 <= self.gene_var_index < len(self.varmodel)):
self.gene_var_index = len(self.varmodel) - 1
self.openContext("", data)
self.invalidate()
self.commit()
else:
self.varmodel[:] = []
self.send("Network", None)
def set_source_db(self, dbindex):
self.network_source = dbindex
self.invalidate()
def set_organism(self, index):
self.current_taxid_index = index
self.taxid = self.taxids[index]
self.invalidate()
def set_gene_var(self, index):
self.gene_var_index = index
self.invalidate()
def query_genes(self):
if self.use_attr_names:
if self.data is not None:
return [var.name for var in self.data.domain.attributes]
else:
return []
elif self.gene_var_index >= 0:
var = self.varmodel[self.gene_var_index]
genes = [str(inst[var]) for inst in self.data
if not inst[var].isSpecial()]
return list(unique(genes))
else:
return []
def invalidate(self):
self._invalidated = True
if self.nettask is not None:
self.nettask.finished.disconnect(self._on_result_ready)
self.nettask.future().cancel()
self.nettask = None
if self.autocommit:
QTimer.singleShot(10, self._maybe_commit)
@Slot()
def _maybe_commit(self):
if self._invalidated:
self.commit()
@Slot()
def advance(self):
self.progressBarValue = (self.progressBarValue + 1) % 100
@Slot(float)
def set_progress(self, value):
self.progressBarValue = value
def commit(self):
include_neighborhood = self.include_neighborhood
query_genes = self.query_genes()
source = SOURCES[self.network_source]
if source.score_filter:
min_score = self.min_score
assert source.name == "STRING"
min_score = min_score * 1000
else:
min_score = None
taxid = self.taxid
progress = methodinvoke(self, "advance")
if self.geneinfo is None:
self.geneinfo = self.executor.submit(
fetch_ncbi_geneinfo, taxid, progress
)
geneinfo_f = self.geneinfo
taxmap = source.tax_mapping
db_taxid = taxmap.get(taxid, taxid)
if db_taxid is None:
raise ValueError("invalid taxid for this network")
def fetch_network():
geneinfo = geneinfo_f.result()
ppidb = fetch_ppidb(source, db_taxid, progress)
return get_gene_network(ppidb, geneinfo, db_taxid, query_genes,
include_neighborhood=include_neighborhood,
min_score=min_score,
progress=methodinvoke(self, "set_progress", (float,)))
self.nettask = Task(function=fetch_network)
self.nettask.finished.connect(self._on_result_ready)
self.executor.submit(self.nettask)
self.setBlocking(True)
self.setEnabled(False)
self.progressBarInit()
self._invalidated = False
self._update_info()
@Slot(object)
def _on_result_ready(self,):
self.progressBarFinished()
self.setBlocking(False)
self.setEnabled(True)
net = self.nettask.result()
self._update_info()
self.send("Network", net)
def _on_source_db_changed(self):
source = SOURCES[self.network_source]
self.score_spin.setEnabled(source.score_filter)
self.invalidate()
def _update_organism(self):
self.taxid = self.taxids[self.current_taxid_index]
if self.geneinfo is not None:
self.geneinfo.cancel()
self.geneinfo = None
self.invalidate()
def _update_query_genes(self):
self.invalidate()
def _update_info(self):
if self.data is None:
self.info.setText("No data on input\n")
else:
names = self.query_genes()
lines = ["%i unique genes on input" % len(set(names))]
if self.nettask is not None:
if not self.nettask.future().done():
lines.append("Retrieving ...")
else:
net = self.nettask.result()
lines.append("%i nodes %i edges" %
(len(net.nodes()), len(net.edges())))
else:
lines.append("")
self.info.setText("\n".join(lines))
def unique(seq):
seen = set()
for el in seq:
if el not in seen:
seen.add(el)
yield el
def string_variables(domain):
variables = domain.variables + domain.getmetas().values()
return [v for v in variables if isinstance(v, Orange.feature.String)]
def multimap_inverse(multimap):
"""
Return a multimap inverse relation.
"""
d = {}
for key, values in multimap.iteritems():
for v in values:
d.setdefault(v, []).append(key)
return d
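# Illustrative example (made-up keys and synonyms, for clarity only):
#
#     multimap_inverse({"id1": ["TP53", "p53"], "id2": ["TP53"]})
#     => {"TP53": ["id1", "id2"], "p53": ["id1"]}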
def ppidb_synonym_mapping(ppidb, taxid):
keys = ppidb.ids(taxid)
mapping = {key: ppidb.synonyms(key) for key in keys}
return multimap_inverse(mapping)
def taxonomy_match(query_taxids, target_taxids):
taxid_mapping = {}
target_taxids = set(target_taxids)
for taxid in query_taxids:
mapped = taxid_map(taxid, target_taxids)
taxid_mapping[taxid] = mapped
return taxid_mapping
def taxid_map(query, targets):
if query in targets:
return query
lineage = taxonomy.lineage(query)
if any(tid in targets for tid in lineage):
return set(lineage).intersection(targets).pop()
else:
return None
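# Illustrative examples (the taxids are assumptions, for clarity only).
# taxid_map returns the query taxid when it is covered directly, otherwise the
# closest taxid from its NCBI lineage that is covered, or None:
#
#     taxid_map("9606", {"9606", "10090"})  => "9606"   (exact match)
#     taxid_map("559292", {"4932"})         => "4932"   (via lineage, if present)
#     taxid_map("9606", {"10090"})          => None     (no match at all)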
from Orange.utils import serverfiles as sf
def fetch_ppidb(ppisource, taxid, progress=None):
fname = ppisource.sf_filename
if "{taxid}" in fname:
if taxid in ppisource.tax_mapping:
taxid_m = ppisource.tax_mapping[taxid]
if taxid_m is None:
raise ValueError(taxid)
taxid = taxid_m
fname = fname.format(taxid=taxid)
constructor = lambda: ppisource.constructor(taxid)
else:
constructor = ppisource.constructor
sf.localpath_download(
ppisource.sf_domain, fname, callback=progress, verbose=True
)
return constructor()
def fetch_ncbi_geneinfo(taxid, progress=None):
taxid = gene.NCBIGeneInfo.TAX_MAP.get(taxid, taxid)
sf.localpath_download(
"NCBI_geneinfo", "gene_info.{taxid}.db".format(taxid=taxid),
callback=progress, verbose=True,
)
return gene.NCBIGeneInfo(taxid)
def get_gene_network(ppidb, geneinfo, taxid, query_genes,
include_neighborhood=True, min_score=None,
progress=None):
if progress is not None:
progress(1.0)
# Normalize the names to ppidb primary keys
matcher = geneinfo.matcher
query_genes = zip(query_genes, map(matcher.umatch, query_genes))
synonyms = ppidb_synonym_mapping(ppidb, taxid)
query_genes = [(query_gene, geneid,
synonyms.get(query_gene, synonyms.get(geneid)))
for query_gene, geneid in query_genes]
query = [(syn[0], query_gene)
for query_gene, _, syn in query_genes if syn]
net = extract_network(ppidb, dict(query), geneinfo, include_neighborhood,
min_score, progress=progress)
return net
def extract_network(ppidb, query, geneinfo, include_neighborhood=True,
min_score=None, progress=None):
"""
include neighborhood
"""
from functools import partial
from collections import defaultdict
from itertools import count
if not isinstance(query, dict):
query = {name: name for name in query}
report_weights = True
if isinstance(ppidb, ppi.BioGRID):
# BioGRID scores are not comparable (they can be p values,
# confidence scores, ..., i.e. whatever was reported in the source
# publication)
report_weights = False
if min_score is not None:
raise ValueError("min_score used with BioGrid")
graph = Orange.network.Graph()
# node ids in Orange.network.Graph need to be in [0 .. n-1]
nodeids = defaultdict(partial(next, count()))
def gi_info(names):
mapping = [(name, geneinfo.matcher.umatch(name)) for name in names]
mapping = [(name, match) for name, match in mapping if match]
entries = [(name, geneinfo[match]) for name, match in mapping]
if len(entries) > 1:
# try to resolve conflicts by prioritizing entries whose
# symbol/gene_id/locus_tag exactly matches the synonym name.
entries_ = [(name, entry) for name, entry in entries
if name in [entry.gene_id, entry.symbol, entry.locus_tag]]
if len(entries_) == 1:
entries = entries_
if len(entries) == 0:
return None
elif len(entries) >= 1:
# Need to report multiple mappings
return entries[0][1]
# Add query nodes.
for key, query_name in query.items():
nodeid = nodeids[key]
synonyms = ppidb.synonyms(key)
entry = gi_info(synonyms)
graph.add_node(
nodeid,
key=key,
synonyms=synonyms,
query_name=query_name,
symbol=entry.symbol if entry is not None else ""
)
if include_neighborhood:
        # extend the set of nodes in the network with immediate neighbors
edges_iter = (edge for key in query for edge in ppidb.edges(key))
for id1, id2, score in edges_iter:
if min_score is None or score >= min_score:
nodeid1 = nodeids[id1]
nodeid2 = nodeids[id2]
if nodeid1 not in graph:
synonyms1 = ppidb.synonyms(id1)
entry1 = gi_info(synonyms1)
symbol1 = entry1.symbol if entry1 is not None else ""
graph.add_node(
nodeid1, key=id1, synonyms=synonyms1,
symbol=symbol1
)
if nodeid2 not in graph:
synonyms2 = ppidb.synonyms(id2)
entry2 = gi_info(synonyms2)
symbol2 = entry2.symbol if entry2 is not None else ""
graph.add_node(
nodeid2, key=id2, synonyms=synonyms2,
symbol=symbol2
)
# add edges between nodes
for i, id1 in enumerate(nodeids.keys()):
if progress is not None:
progress(100.0 * i / len(nodeids))
for _, id2, score in ppidb.edges(id1):
if id2 in nodeids and (min_score is None or score >= min_score):
nodeid1 = nodeids[id1]
nodeid2 = nodeids[id2]
assert nodeid1 in graph and nodeid2 in graph
if score is not None and report_weights:
graph.add_edge(nodeid1, nodeid2, weight=score)
else:
graph.add_edge(nodeid1, nodeid2)
nodedomain = Orange.data.Domain(
[Orange.feature.String("Query name"), # if applicable
Orange.feature.String("id"), # ppidb primary key
Orange.feature.String("Synonyms"), # ppidb synonyms
Orange.feature.String("Symbol"), # ncbi gene name ??
Orange.feature.Discrete("source", values=["false", "true"])],
None
)
node_items = sorted(graph.node.items(), key=lambda t: nodeids[t[0]])
nodeitems = Orange.data.Table(
nodedomain,
[[str(node.get("query_name", "")),
str(node.get("key", "")),
str(", ".join(node.get("synonyms", []))),
str(node.get("symbol", nodeid)),
"true" if "query_name" in node else "false"]
for nodeid, node in node_items]
)
graph.set_items(nodeitems)
return graph
def main():
from PyQt4.QtGui import QApplication
app = QApplication([])
w = OWGeneNetwork()
brown = Orange.data.Table("brown-selected")
w.set_data(Orange.data.Table(brown[:5]))
w.show()
app.exec_()
w.saveSettings()
return 0
if __name__ == "__main__":
main()
| gpl-3.0 | 4,020,125,862,851,062,000 | 31.389535 | 90 | 0.579609 | false |
SkyTruth/pelagos-data | setup.py | 1 | 2925 | #!/usr/bin/env python
# This document is part of FS_Nav
# https://github.com/geowurster/FS_Nav
# =================================================================================== #
#
# New BSD License
#
# Copyright (c) 2014, Kevin D. Wurster
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * The names of its contributors may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# =================================================================================== #
"""
Setup script for PelagosProcessing
"""
from glob import glob
from setuptools import setup, find_packages
import pelagos_processing
setup(
name='PelagosProcessing',
version=pelagos_processing.__version__,
author=pelagos_processing.__author__,
author_email=pelagos_processing.__author_email__,
description=pelagos_processing.__doc__,
long_description=pelagos_processing.__doc__,
license=pelagos_processing.__license__,
url=pelagos_processing.__source__,
packages=find_packages(),
scripts=glob('bin/*.py'),
include_package_data=True,
classifiers=[
'Topic :: Terminals',
'Topic :: Utilities',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: PyPy'
]
)
| mit | -8,811,666,424,475,370,000 | 35.5625 | 87 | 0.674188 | false |
openstack/designate | designate/tests/unit/producer/test_service.py | 1 | 2947 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Federico Ceratto <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit-test Producer service
"""
from unittest import mock
import oslotest.base
from oslo_config import cfg
from oslo_config import fixture as cfg_fixture
from designate.producer import service
import designate.service
from designate.tests import fixtures
from designate.tests.unit import RoObject
CONF = cfg.CONF
@mock.patch.object(service.rpcapi.CentralAPI, 'get_instance', mock.Mock())
class ProducerTest(oslotest.base.BaseTestCase):
def setUp(self):
conf = self.useFixture(cfg_fixture.Config(CONF))
conf.conf([], project='designate')
service.CONF = RoObject({
'service:producer': RoObject({
'enabled_tasks': None, # enable all tasks
}),
'producer_task:zone_purge': '',
})
super(ProducerTest, self).setUp()
self.stdlog = fixtures.StandardLogging()
self.useFixture(self.stdlog)
self.service = service.Service()
self.service.rpc_server = mock.Mock()
self.service._storage = mock.Mock()
self.service._quota = mock.Mock()
self.service._quota.limit_check = mock.Mock()
@mock.patch.object(service.tasks, 'PeriodicTask')
@mock.patch.object(service.coordination, 'Partitioner')
@mock.patch.object(designate.service.RPCService, 'start')
def test_service_start(self, mock_rpc_start, mock_partitioner,
mock_periodic_task):
self.service.coordination = mock.Mock()
self.service.start()
self.assertTrue(mock_rpc_start.called)
def test_service_stop(self):
self.service.coordination.stop = mock.Mock()
self.service.stop()
self.assertTrue(self.service.coordination.stop.called)
self.assertIn('Stopping producer service', self.stdlog.logger.output)
def test_service_name(self):
self.assertEqual('producer', self.service.service_name)
def test_producer_rpc_topic(self):
CONF.set_override('topic', 'test-topic', 'service:producer')
self.service = service.Service()
self.assertEqual('test-topic', self.service.rpc_topic)
self.assertEqual('producer', self.service.service_name)
def test_central_api(self):
self.assertIsInstance(self.service.central_api, mock.Mock)
| apache-2.0 | -7,713,076,138,659,402,000 | 32.11236 | 77 | 0.686121 | false |
KRHS-GameProgramming-2016/Memefinity | Wall.py | 1 | 1721 | import pygame, sys, math
class Wall(pygame.sprite.Sprite):
def __init__(self, pos=[0,0], size=None):
pygame.sprite.Sprite.__init__(self, self.containers)
self.image = pygame.image.load("rsc/wall/wall.png")
if size:
self.image = pygame.transform.scale(self.image, [size,size])
self.rect = self.image.get_rect(center = pos)
def shiftX(self, amount):
self.rect.x += amount
class Wall_5x5(pygame.sprite.Sprite):
def __init__(self, pos=[0,0], size=None):
pygame.sprite.Sprite.__init__(self, self.containers)
self.image = pygame.image.load("rsc/wall/wall.png")
if size:
self.image = pygame.transform.scale(self.image, [size*5,size*5])
self.rect = self.image.get_rect(center = pos)
def shiftX(self, amount):
self.rect.x += amount
class Ground(pygame.sprite.Sprite):
def __init__(self, pos=[0,0], size=None):
pygame.sprite.Sprite.__init__(self, self.containers)
self.image = pygame.image.load("rsc/wall/ground.png")
if size:
self.image = pygame.transform.scale(self.image, [size,size])
self.rect = self.image.get_rect(center = pos)
def shiftX(self, amount):
self.rect.x += amount
class Background(pygame.sprite.Sprite):
def __init__(self, image, size = None):
pygame.sprite.Sprite.__init__(self, self.containers)
self.image = pygame.image.load("rsc/ball/"+image)
if size:
self.image = pygame.transform.scale(self.image, size)
self.rect = self.image.get_rect()
def shiftX(self, amount):
self.rect.x += amount
print "shifting"
| mit | 727,751,280,407,669,500 | 35.617021 | 76 | 0.595003 | false |
project-generator/project_generator | project_generator/commands/build.py | 1 | 3506 | # Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
from ..tools_supported import ToolsSupported
from ..generate import Generator
from ..settings import ProjectSettings
from . import argparse_filestring_type, argparse_string_type, split_options
help = 'Build a project'
def run(args):
# Export if we know how, otherwise return
combined_projects = args.projects + args.project or ['']
kwargs = split_options(args.options)
generator = Generator(args.file)
any_build_failed = False
any_export_failed = False
for project_name in combined_projects:
for project in generator.generate(project_name):
clean_failed = False
if args.clean and project.clean(args.tool) == -1:
clean_failed = True # So we don't attempt to generate or build this project.
any_build_failed = True
if not clean_failed:
if project.generate(args.tool, args.copy) == -1:
any_export_failed = True
if project.build(args.tool, jobs=args.jobs, **kwargs) == -1:
any_build_failed = True
if args.stop_on_failure and (any_build_failed or any_export_failed):
break
if any_build_failed or any_export_failed:
return -1
else:
return 0
def setup(subparser):
subparser.add_argument('-v', dest='verbosity', action='count', default=0,
help='Increase the verbosity of the output (repeat for more verbose output)')
subparser.add_argument('-q', dest='quietness', action='count', default=0,
help='Decrease the verbosity of the output (repeat for less verbose output)')
subparser.add_argument(
"-f", "--file", help="YAML projects file", default='projects.yaml',
type=argparse_filestring_type)
subparser.add_argument(
"-p", "--project", dest="projects", action='append', default=[], help="Name of the project to build")
subparser.add_argument(
"-t", "--tool", help="Build a project files for provided tool",
type=argparse_string_type(str.lower, False), choices=list(ToolsSupported.TOOLS_DICT.keys()) + list(ToolsSupported.TOOLS_ALIAS.keys()))
subparser.add_argument(
"-c", "--copy", action="store_true", help="Copy all files to the exported directory")
subparser.add_argument(
"-k", "--clean", action="store_true", help="Clean project before building")
subparser.add_argument(
"-o", "--options", action="append", help="Toolchain options")
subparser.add_argument(
"-x", "--stop-on-failure", action="store_true", help="Stop on first failure")
subparser.add_argument(
"-j", "--jobs", action="store", type=int, default=1,
help="Number of concurrent build jobs (not supported by all tools)")
subparser.add_argument("project", nargs='*',
help="Specify projects to be generated and built")
| apache-2.0 | -4,970,498,958,962,731,000 | 45.746667 | 142 | 0.653451 | false |
kfarr2/django-local-settings | local_settings/__init__.py | 1 | 3356 | import json
import os
import sys
from .color_printer import ColorPrinter
from .checker import Checker
from .exc import LocalSettingsError, SettingsFileNotFoundError
from .loader import Loader
from .types import LocalSetting, SecretSetting
from .util import NO_DEFAULT, get_file_name
from .__main__ import make_local_settings
def load_and_check_settings(base_settings, file_name=None, section=None, base_path=None,
quiet=NO_DEFAULT):
"""Merge local settings from file with base settings, then check.
Returns a new OrderedDict containing the base settings and the
loaded settings. Ordering is:
- base settings
- settings from extended file(s), if any
- settings from file
When a setting is overridden, it gets moved to the end.
Settings loaded from the specified file will override base settings,
then the settings will be checked to ensure that all required local
settings have been set.
If a file name is passed: if the file exists, local settings will be
loaded from it and any missing settings will be appended to it; if
the file does not exist, it will be created and all settings will be
added to it.
If a file name isn't passed: if the ``LOCAL_SETTINGS_FILE_NAME``
environment variable is set, the specified file will be used;
otherwise ``{base_path}/local.cfg`` will be used.
``base_path`` is used when ``file_name`` is relative; if it's not
passed, it will be set to the current working directory.
When ``quiet`` is ``True``, informational messages will not be
    printed. The ``LOCAL_SETTINGS_CONFIG_QUIET`` environment variable can be
    used to set ``quiet`` (use a JSON value like 'true', '1', 'false', or '0').
See :meth:`.Loader.load` and :meth:`.Checker.check` for more info.
"""
if quiet is NO_DEFAULT:
quiet = json.loads(os.environ.get('LOCAL_SETTINGS_CONFIG_QUIET', 'false'))
if not quiet:
printer = ColorPrinter()
key = 'LOCAL_SETTINGS_DISABLE'
disable_local_settings = os.environ.get(key, base_settings.get(key, False))
if disable_local_settings:
if not quiet:
printer.print_warning('Loading of local settings disabled')
return
else:
if file_name is None:
file_name = get_file_name()
if not os.path.isabs(file_name):
base_path = base_path or os.getcwd()
file_name = os.path.normpath(os.path.join(base_path, file_name))
try:
try:
loader = Loader(file_name, section)
settings = loader.load(base_settings)
registry = loader.registry
except SettingsFileNotFoundError:
registry = None
checker = Checker(file_name, section, registry=registry)
success = checker.check(settings)
except KeyboardInterrupt:
# Loading/checking of local settings was aborted with Ctrl-C.
# This isn't an error, but we don't want to continue.
if not quiet:
printer.print_warning('\nAborted loading/checking of local settings')
sys.exit(0)
if not success:
raise LocalSettingsError(
'Could not load local settings from {0}'.format(file_name))
if not quiet:
printer.print_success('Settings loaded successfully from {0}'.format(file_name))
return settings
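# Minimal usage sketch (assumed integration pattern; the setting names below
# are illustrative and not part of this package):
#
#     # in a Django-style settings module
#     from local_settings import LocalSetting, SecretSetting, load_and_check_settings
#
#     DEBUG = LocalSetting(default=False)
#     SECRET_KEY = SecretSetting()
#
#     # Merge values from the local settings file over this module's settings,
#     # prompt for anything still missing, then inject the merged result.
#     globals().update(load_and_check_settings(globals()))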
| mit | -7,191,827,870,245,881,000 | 38.023256 | 89 | 0.666865 | false |
emilybache/texttest-runner | src/main/python/lib/default/gtkgui/testtree.py | 1 | 37187 |
"""
Code associated with the left-hand tree view for tests
"""
import gtk, gobject, pango, guiutils, plugins, logging
from ordereddict import OrderedDict
class TestColumnGUI(guiutils.SubGUI):
def __init__(self, dynamic, testCount):
guiutils.SubGUI.__init__(self)
self.addedCount = 0
self.totalNofTests = testCount
self.totalNofDistinctTests = testCount
self.nofSelectedTests = 0
self.nofDistinctSelectedTests = 0
self.totalNofTestsShown = 0
self.versionString = ""
self.column = None
self.dynamic = dynamic
self.testSuiteSelection = False
self.diag = logging.getLogger("Test Column GUI")
self.allSuites = []
def addSuites(self, suites):
self.allSuites = suites
def createView(self):
testRenderer = gtk.CellRendererText()
self.column = gtk.TreeViewColumn(self.getTitle(), testRenderer, text=0, background=1, foreground=7)
self.column.set_data("name", "Test Name") # Not a widget, so we can't set a name, do this instead
self.column.set_resizable(True)
self.column.set_cell_data_func(testRenderer, self.renderSuitesBold)
if not self.dynamic:
self.column.set_clickable(True)
self.column.connect("clicked", self.columnClicked)
if guiutils.guiConfig.getValue("auto_sort_test_suites") == 1:
self.column.set_sort_indicator(True)
self.column.set_sort_order(gtk.SORT_ASCENDING)
elif guiutils.guiConfig.getValue("auto_sort_test_suites") == -1:
self.column.set_sort_indicator(True)
self.column.set_sort_order(gtk.SORT_DESCENDING)
return self.column
def renderSuitesBold(self, dummy, cell, model, iter):
if model.get_value(iter, 2)[0].classId() == "test-case":
cell.set_property('font', "")
else:
cell.set_property('font', "bold")
def columnClicked(self, *args):
if not self.column.get_sort_indicator():
self.column.set_sort_indicator(True)
self.column.set_sort_order(gtk.SORT_ASCENDING)
order = 1
else:
order = self.column.get_sort_order()
if order == gtk.SORT_ASCENDING:
self.column.set_sort_order(gtk.SORT_DESCENDING)
order = -1
else:
self.column.set_sort_indicator(False)
order = 0
self.notify("ActionStart")
self.setSortingOrder(order)
if order == 1:
self.notify("Status", "Tests sorted in alphabetical order.")
elif order == -1:
self.notify("Status", "Tests sorted in descending alphabetical order.")
else:
self.notify("Status", "Tests sorted according to testsuite file.")
self.notify("RefreshTestSelection")
self.notify("ActionStop")
def setSortingOrder(self, order, suite = None):
if not suite:
for suite in self.allSuites:
self.setSortingOrder(order, suite)
else:
self.notify("Status", "Sorting suite " + suite.name + " ...")
self.notify("ActionProgress")
suite.autoSortOrder = order
suite.updateOrder()
for test in suite.testcases:
if test.classId() == "test-suite":
self.setSortingOrder(order, test)
def getTitle(self):
title = "Tests: "
if self.versionString and len(self.versionString) > 40:
reducedVersionString = self.versionString[:40] + "..."
else:
reducedVersionString = self.versionString
if self.testSuiteSelection:
# We don't care about totals with test suites
title += plugins.pluralise(self.nofSelectedTests, "suite") + " selected"
if self.versionString:
title += ", " + reducedVersionString
elif self.nofDistinctSelectedTests != self.nofSelectedTests:
title += ", " + str(self.nofDistinctSelectedTests) + " distinct"
return title
if self.nofSelectedTests == self.totalNofTests:
title += "All " + str(self.totalNofTests) + " selected"
else:
title += str(self.nofSelectedTests) + "/" + str(self.totalNofTests) + " selected"
if not self.dynamic:
if self.versionString:
title += ", " + reducedVersionString
elif self.totalNofDistinctTests != self.totalNofTests:
if self.nofDistinctSelectedTests == self.totalNofDistinctTests:
title += ", all " + str(self.totalNofDistinctTests) + " distinct"
else:
title += ", " + str(self.nofDistinctSelectedTests) + "/" + str(self.totalNofDistinctTests) + " distinct"
if self.totalNofTestsShown == self.totalNofTests:
if self.dynamic and self.totalNofTests > 0:
title += ", none hidden"
elif self.totalNofTestsShown == 0:
title += ", all hidden"
else:
title += ", " + str(self.totalNofTests - self.totalNofTestsShown) + " hidden"
return title
def updateTitle(self, initial=False):
if self.column:
self.column.set_title(self.getTitle())
def notifyTestTreeCounters(self, totalDelta, totalShownDelta, totalRowsDelta, initial=False):
self.addedCount += totalDelta
if not initial or self.totalNofTests < self.addedCount:
self.totalNofTests += totalDelta
self.totalNofDistinctTests += totalRowsDelta
self.totalNofTestsShown += totalShownDelta
self.updateTitle(initial)
def notifyAllRead(self):
if self.addedCount != self.totalNofTests:
self.totalNofTests = self.addedCount
self.updateTitle()
def countTests(self, tests):
if self.dynamic:
return len(tests), False
testCount, suiteCount = 0, 0
for test in tests:
if test.classId() == "test-case":
testCount += 1
else:
suiteCount += 1
if suiteCount and not testCount:
return suiteCount, True
else:
return testCount, False
def getVersionString(self, tests, distinctTestCount):
if not self.dynamic and distinctTestCount == 1 and self.totalNofTests != self.totalNofDistinctTests:
versions = [ test.app.getFullVersion().replace("_", "__") or "<default>" for test in tests ]
return "version" + ("s" if len(versions) > 1 else "") + " " + ",".join(versions)
else:
return ""
def notifyNewTestSelection(self, tests, dummyApps, distinctTestCount, *args, **kw):
self.updateTestInfo(tests, distinctTestCount)
def updateTestInfo(self, tests, distinctTestCount):
newCount, suitesOnly = self.countTests(tests)
if distinctTestCount > newCount:
distinctTestCount = newCount
newVersionStr = self.getVersionString(tests, distinctTestCount)
if self.nofSelectedTests != newCount or newVersionStr != self.versionString or \
self.nofDistinctSelectedTests != distinctTestCount or suitesOnly != self.testSuiteSelection:
self.diag.info("New selection count = " + repr(newCount) + ", distinct = " + str(distinctTestCount) + ", test suites only = " + repr(suitesOnly))
self.nofSelectedTests = newCount
self.nofDistinctSelectedTests = distinctTestCount
self.testSuiteSelection = suitesOnly
self.versionString = newVersionStr
self.updateTitle()
def notifyVisibility(self, tests, newValue):
testCount = sum((int(test.classId() == "test-case") for test in tests))
if newValue:
self.totalNofTestsShown += testCount
else:
self.totalNofTestsShown -= testCount
self.updateTitle()
class TestIteratorMap:
def __init__(self, dynamic, allApps):
self.dict = OrderedDict()
self.dynamic = dynamic
self.parentApps = {}
for app in allApps:
for extra in [ app ] + app.extras:
self.parentApps[extra] = app
def getKey(self, test):
if self.dynamic:
return test
elif test is not None:
return self.parentApps.get(test.app, test.app), test.getRelPath()
def store(self, test, iter):
self.dict[self.getKey(test)] = iter
def updateIterator(self, test, oldRelPath):
# relative path of test has changed
key = self.parentApps.get(test.app, test.app), oldRelPath
iter = self.dict.get(key)
if iter is not None:
self.store(test, iter)
del self.dict[key]
return iter
else:
return self.getIterator(test)
def getIterator(self, test):
return self.dict.get(self.getKey(test))
def remove(self, test):
key = self.getKey(test)
if self.dict.has_key(key):
del self.dict[key]
class TestTreeGUI(guiutils.ContainerGUI):
def __init__(self, dynamic, allApps, popupGUI, subGUI):
guiutils.ContainerGUI.__init__(self, [ subGUI ])
self.model = gtk.TreeStore(gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_PYOBJECT,\
gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_BOOLEAN, \
gobject.TYPE_STRING, gobject.TYPE_STRING)
self.popupGUI = popupGUI
self.itermap = TestIteratorMap(dynamic, allApps)
self.selection = None
self.selecting = False
self.selectedTests = []
self.clipboardTests = set()
self.dynamic = dynamic
self.collapseStatic = self.getCollapseStatic()
self.successPerSuite = {} # map from suite to tests succeeded
self.collapsedRows = {}
self.filteredModel = None
self.treeView = None
self.newTestsVisible = guiutils.guiConfig.showCategoryByDefault("not_started")
self.diag = logging.getLogger("Test Tree")
self.longActionRunning = False
self.recreateOnActionStop = False
def notifyDefaultVisibility(self, newValue):
self.newTestsVisible = newValue
def isExpanded(self, iter):
parentIter = self.filteredModel.iter_parent(iter)
return not parentIter or self.treeView.row_expanded(self.filteredModel.get_path(parentIter))
def getCollapseStatic(self):
if self.dynamic:
return False
else:
return guiutils.guiConfig.getValue("static_collapse_suites")
def notifyAllRead(self, *args):
if self.dynamic:
self.filteredModel.connect('row-inserted', self.rowInserted)
else:
self.newTestsVisible = True
self.model.foreach(self.makeRowVisible)
if self.collapseStatic:
self.expandLevel(self.treeView, self.filteredModel.get_iter_root())
else:
self.treeView.expand_all()
self.notify("AllRead")
def makeRowVisible(self, model, dummyPath, iter):
model.set_value(iter, 5, True)
def getNodeName(self, suite, parent):
nodeName = suite.name
if parent == None:
appName = suite.app.name + suite.app.versionSuffix()
if appName != nodeName:
nodeName += " (" + appName + ")"
return nodeName
def addSuiteWithParent(self, suite, parent, follower=None):
nodeName = self.getNodeName(suite, parent)
self.diag.info("Adding node with name " + nodeName)
colour = guiutils.guiConfig.getTestColour("not_started")
row = [ nodeName, colour, [ suite ], "", colour, self.newTestsVisible, "", "black" ]
iter = self.model.insert_before(parent, follower, row)
storeIter = iter.copy()
self.itermap.store(suite, storeIter)
path = self.model.get_path(iter)
if self.newTestsVisible and parent is not None:
filterPath = self.filteredModel.convert_child_path_to_path(path)
self.treeView.expand_to_path(filterPath)
return iter
def createView(self):
self.filteredModel = self.model.filter_new()
self.filteredModel.set_visible_column(5)
self.treeView = gtk.TreeView(self.filteredModel)
self.treeView.set_search_column(0)
self.treeView.set_name("Test Tree")
self.treeView.expand_all()
self.selection = self.treeView.get_selection()
self.selection.set_mode(gtk.SELECTION_MULTIPLE)
if self.dynamic:
self.selection.set_select_function(self.canSelect)
testsColumn = self.subguis[0].createView()
self.treeView.append_column(testsColumn)
if self.dynamic:
detailsRenderer = gtk.CellRendererText()
detailsRenderer.set_property('wrap-width', 350)
detailsRenderer.set_property('wrap-mode', pango.WRAP_WORD_CHAR)
recalcRenderer = gtk.CellRendererPixbuf()
detailsColumn = gtk.TreeViewColumn("Details")
detailsColumn.pack_start(detailsRenderer, expand=True)
detailsColumn.pack_start(recalcRenderer, expand=False)
detailsColumn.add_attribute(detailsRenderer, 'text', 3)
detailsColumn.add_attribute(detailsRenderer, 'background', 4)
detailsColumn.add_attribute(recalcRenderer, 'stock_id', 6)
detailsColumn.set_resizable(True)
guiutils.addRefreshTips(self.treeView, "test", recalcRenderer, detailsColumn, 6)
self.treeView.append_column(detailsColumn)
self.treeView.connect('row-expanded', self.rowExpanded)
self.expandLevel(self.treeView, self.filteredModel.get_iter_root())
self.treeView.connect("button_press_event", self.popupGUI.showMenu)
self.selection.connect("changed", self.userChangedSelection)
self.treeView.show()
self.popupGUI.createView()
return self.addScrollBars(self.treeView, hpolicy=gtk.POLICY_NEVER)
def notifyTopWindow(self, *args):
# avoid the quit button getting initial focus, give it to the tree view (why not?)
self.treeView.grab_focus()
def canSelect(self, path):
pathIter = self.filteredModel.get_iter(path)
test = self.filteredModel.get_value(pathIter, 2)[0]
return test.classId() == "test-case"
def rowExpanded(self, treeview, iter, path):
if self.dynamic:
realPath = self.filteredModel.convert_path_to_child_path(path)
if self.collapsedRows.has_key(realPath):
del self.collapsedRows[realPath]
self.expandLevel(treeview, self.filteredModel.iter_children(iter), not self.collapseStatic)
def rowInserted(self, model, dummy, iter):
self.expandRow(model.iter_parent(iter), False)
def expandRow(self, iter, recurse):
if iter == None:
return
path = self.filteredModel.get_path(iter)
realPath = self.filteredModel.convert_path_to_child_path(path)
# Iterate over children, call self if they have children
if not self.collapsedRows.has_key(realPath):
self.diag.info("Expanding path at " + repr(realPath))
self.treeView.expand_row(path, open_all=False)
if recurse:
childIter = self.filteredModel.iter_children(iter)
while (childIter != None):
if self.filteredModel.iter_has_child(childIter):
self.expandRow(childIter, True)
childIter = self.filteredModel.iter_next(childIter)
def collapseRow(self, iter):
# To make sure that the path is marked as 'collapsed' even if the row cannot be collapsed
        # (if the suite is empty, or not shown at all), we set self.collapsedRows manually, instead of
# waiting for rowCollapsed() to do it at the 'row-collapsed' signal (which will not be emitted
# in the above cases)
path = self.model.get_path(iter)
self.diag.info("Collapsed path " + repr(path))
self.collapsedRows[path] = 1
# Collapsing rows can cause indirect changes of selection, make sure we indicate this.
self.selecting = True
filterPath = self.filteredModel.convert_child_path_to_path(path)
if filterPath is not None: # don't collapse if it's already hidden
self.selection.get_tree_view().collapse_row(filterPath)
self.selecting = False
self.selectionChanged(direct=False)
def userChangedSelection(self, *args):
if not self.selecting and not hasattr(self.selection, "unseen_changes"):
self.selectionChanged(direct=True)
def selectionChanged(self, direct):
newSelection = self.getSelected()
if newSelection != self.selectedTests:
self.sendSelectionNotification(newSelection, direct)
if self.dynamic and direct:
self.selection.selected_foreach(self.updateRecalculationMarker)
def notifyRefreshTestSelection(self):
# The selection hasn't changed, but we want to e.g.
        # recalculate the action sensitivity and make sure we can still see the selected tests.
self.sendSelectionNotification(self.selectedTests)
self.scrollToFirstTest()
def notifyRecomputed(self, test):
iter = self.itermap.getIterator(test)
# If we've recomputed, clear the recalculation icons
self.setNewRecalculationStatus(iter, test, [])
if test.stateInGui.hasFailed():
self.removeFromSuiteSuccess(test)
def getSortedSelectedTests(self, suite):
appTests = filter(lambda test: test.app is suite.app, self.selectedTests)
allTests = suite.allTestsAndSuites()
appTests.sort(key=allTests.index)
return appTests
def notifyWriteTestsIfSelected(self, suite, file):
for test in self.getSortedSelectedTests(suite):
self.writeSelectedTest(test, file)
def shouldListSubTests(self, test):
if test.parent is None or not all((self.isVisible(test) for test in test.testCaseList())):
return True
filters = test.app.getFilterList([test])
return len(filters) > 0
def writeSelectedTest(self, test, file):
if test.classId() == "test-suite":
if self.shouldListSubTests(test):
for subTest in test.testcases:
if self.isVisible(subTest):
self.writeSelectedTest(subTest, file)
return
file.write(test.getRelPath() + "\n")
def updateRecalculationMarker(self, model, dummy, iter):
tests = model.get_value(iter, 2)
if not tests[0].stateInGui.isComplete():
return
recalcComparisons = tests[0].stateInGui.getComparisonsForRecalculation()
childIter = self.filteredModel.convert_iter_to_child_iter(iter)
self.setNewRecalculationStatus(childIter, tests[0], recalcComparisons)
def setNewRecalculationStatus(self, iter, test, recalcComparisons):
oldVal = self.model.get_value(iter, 6)
newVal = self.getRecalculationIcon(recalcComparisons)
if newVal != oldVal:
self.model.set_value(iter, 6, newVal)
self.notify("Recalculation", test, recalcComparisons, newVal)
def getRecalculationIcon(self, recalc):
if recalc:
return "gtk-refresh"
else:
return ""
def checkRelatedForRecalculation(self, test):
self.filteredModel.foreach(self.checkRecalculationIfMatches, test)
def checkRecalculationIfMatches(self, model, path, iter, test):
tests = model.get_value(iter, 2)
if tests[0] is not test and tests[0].getRelPath() == test.getRelPath():
self.updateRecalculationMarker(model, path, iter)
def getSelectedApps(self, tests):
apps = []
for test in tests:
if test.app not in apps:
apps.append(test.app)
return apps
def notifyActionStart(self, foreground=True):
if not foreground:
self.longActionRunning = True
def notifyActionStop(self, foreground=True):
if not foreground:
if self.longActionRunning and self.recreateOnActionStop:
self.sendActualSelectionNotification(direct=False)
self.longActionRunning = False
self.recreateOnActionStop = False
def sendActualSelectionNotification(self, direct):
apps = self.getSelectedApps(self.selectedTests)
self.notify("NewTestSelection", self.selectedTests, apps, self.selection.count_selected_rows(), direct)
def sendSelectionNotification(self, tests, direct=True):
if len(tests) < 10:
self.diag.info("Selection now changed to " + repr(tests))
else:
self.diag.info("Selection now of size " + str(len(tests)))
self.selectedTests = tests
if self.longActionRunning:
self.recreateOnActionStop = True
self.subguis[0].updateTestInfo(tests, self.selection.count_selected_rows())
else:
self.sendActualSelectionNotification(direct)
def getSelected(self):
allSelected = []
prevSelected = set(self.selectedTests)
def addSelTest(model, dummy, iter, selected):
selected += self.getNewSelected(model.get_value(iter, 2), prevSelected)
self.selection.selected_foreach(addSelTest, allSelected)
return allSelected
def getNewSelected(self, tests, prevSelected):
intersection = prevSelected.intersection(set(tests))
if len(intersection) == 0 or len(intersection) == len(tests) or len(intersection) == len(prevSelected):
return tests
else:
return list(intersection)
def findIter(self, test):
try:
childIter = self.itermap.getIterator(test)
if childIter:
return self.filteredModel.convert_child_iter_to_iter(childIter)
except RuntimeError:
            # convert_child_iter_to_iter throws RuntimeError if the row is hidden in the TreeModelFilter
self.diag.info("Could not find iterator for " + repr(test) + ", possibly row is hidden.")
def notifySetTestSelection(self, selTests, criteria="", selectCollapsed=True, direct=False):
actualSelection = self.selectTestRows(selTests, selectCollapsed)
# Here it's been set via some indirect mechanism, might want to behave differently
self.sendSelectionNotification(actualSelection, direct=direct)
def selectTestRows(self, selTests, selectCollapsed=True):
self.selecting = True # don't respond to each individual programmatic change here
self.selection.unselect_all()
treeView = self.selection.get_tree_view()
firstPath = None
actuallySelected = []
for test in selTests:
iter = self.findIter(test)
if not iter or (not selectCollapsed and not self.isExpanded(iter)):
continue
actuallySelected.append(test)
path = self.filteredModel.get_path(iter)
if not firstPath:
firstPath = path
if selectCollapsed:
treeView.expand_to_path(path)
self.selection.select_iter(iter)
treeView.grab_focus()
if firstPath is not None and treeView.get_property("visible"):
self.scrollToPath(firstPath)
self.selecting = False
return actuallySelected
def scrollToFirstTest(self):
if len(self.selectedTests) > 0:
test = self.selectedTests[0]
iter = self.findIter(test)
path = self.filteredModel.get_path(iter)
self.scrollToPath(path)
def scrollToPath(self, path):
treeView = self.selection.get_tree_view()
cellArea = treeView.get_cell_area(path, treeView.get_columns()[0])
visibleArea = treeView.get_visible_rect()
if cellArea.y < 0 or cellArea.y > visibleArea.height:
treeView.scroll_to_cell(path, use_align=True, row_align=0.1)
def expandLevel(self, view, iter, recursive=True):
# Make sure expanding expands everything, better than just one level as default...
# Avoid using view.expand_row(path, open_all=True), as the open_all flag
# doesn't seem to send the correct 'row-expanded' signal for all rows ...
# This way, the signals are generated one at a time and we call back into here.
model = view.get_model()
while (iter != None):
if recursive:
view.expand_row(model.get_path(iter), open_all=False)
iter = view.get_model().iter_next(iter)
def notifyTestAppearance(self, test, detailText, colour1, colour2, updateSuccess, saved):
iter = self.itermap.getIterator(test)
self.model.set_value(iter, 1, colour1)
self.model.set_value(iter, 3, detailText)
self.model.set_value(iter, 4, colour2)
if updateSuccess:
self.updateSuiteSuccess(test, colour1)
if saved:
self.checkRelatedForRecalculation(test)
def notifyLifecycleChange(self, test, *args):
if test in self.selectedTests:
self.notify("LifecycleChange", test, *args)
def notifyFileChange(self, test, *args):
if test in self.selectedTests:
self.notify("FileChange", test, *args)
def notifyDescriptionChange(self, test, *args):
if test in self.selectedTests:
self.notify("DescriptionChange", test, *args)
def notifyRefreshFilePreviews(self, test, *args):
if test in self.selectedTests:
self.notify("RefreshFilePreviews", test, *args)
def updateSuiteSuccess(self, test, colour):
suite = test.parent
if not suite:
return
self.successPerSuite.setdefault(suite, set()).add(test)
successCount = len(self.successPerSuite.get(suite))
suiteSize = len(filter(lambda subtest: not subtest.isEmpty(), suite.testcases))
if successCount == suiteSize:
self.setAllSucceeded(suite, colour)
self.updateSuiteSuccess(suite, colour)
def removeFromSuiteSuccess(self, test):
suite = test.parent
if suite and suite in self.successPerSuite and test in self.successPerSuite[suite]:
self.successPerSuite[suite].remove(test)
self.clearAllSucceeded(suite)
self.removeFromSuiteSuccess(suite)
def setAllSucceeded(self, suite, colour):
# Print how many tests succeeded, color details column in success color,
# collapse row, and try to collapse parent suite.
detailText = "All " + str(suite.size()) + " tests successful"
iter = self.itermap.getIterator(suite)
self.model.set_value(iter, 3, detailText)
self.model.set_value(iter, 4, colour)
if guiutils.guiConfig.getValue("auto_collapse_successful") == 1:
self.collapseRow(iter)
def clearAllSucceeded(self, suite):
iter = self.itermap.getIterator(suite)
self.model.set_value(iter, 3, "")
self.model.set_value(iter, 4, "white")
if guiutils.guiConfig.getValue("auto_collapse_successful") == 1:
path = self.model.get_path(iter)
if self.collapsedRows.has_key(path):
del self.collapsedRows[path]
filteredPath = self.filteredModel.convert_child_path_to_path(path)
self.treeView.expand_row(filteredPath, open_all=False)
def isVisible(self, test):
filteredIter = self.findIter(test)
if filteredIter:
filteredPath = self.filteredModel.get_path(self.filteredModel.iter_parent(filteredIter))
path = self.filteredModel.convert_path_to_child_path(filteredPath)
return not self.collapsedRows.has_key(path)
else:
self.diag.info("No iterator found for " + repr(test))
return False
def findAllTests(self):
tests = []
self.model.foreach(self.appendTest, tests)
return tests
def appendTest(self, model, dummy, iter, tests):
for test in model.get_value(iter, 2):
if test.classId() == "test-case":
tests.append(test)
def getTestForAutoSelect(self):
allTests = self.findAllTests()
if len(allTests) == 1:
test = allTests[0]
if self.isVisible(test):
return test
def notifyAllComplete(self):
# Window may already have been closed...
if self.selection.get_tree_view():
test = self.getTestForAutoSelect()
if test:
actualSelection = self.selectTestRows([ test ])
self.sendSelectionNotification(actualSelection)
def notifyAdd(self, test, initial):
if test.classId() == "test-case":
self.notify("TestTreeCounters", initial=initial, totalDelta=1,
totalShownDelta=self.getTotalShownDelta(), totalRowsDelta=self.getTotalRowsDelta(test))
elif self.dynamic and test.isEmpty():
return # don't show empty suites in the dynamic GUI
self.diag.info("Adding test " + repr(test))
self.tryAddTest(test, initial)
if test.parent is None and not initial:
# We've added a new suite, we should also select it as it's likely the user wants to add stuff under it
# Also include the knock-on effects, i.e. selecting the test tab etc
self.notifySetTestSelection([test], direct=True)
def notifyClipboard(self, tests, cut=False):
if cut:
colourKey = "clipboard_cut"
else:
colourKey = "clipboard_copy"
colour = guiutils.guiConfig.getTestColour(colourKey)
toRemove = self.clipboardTests.difference(set(tests))
self.clipboardTests = set(tests)
for test in tests:
iter = self.itermap.getIterator(test)
self.model.set_value(iter, 7, colour)
for test in toRemove:
iter = self.itermap.getIterator(test)
if iter:
self.model.set_value(iter, 7, "black")
def getTotalRowsDelta(self, test):
if self.itermap.getIterator(test):
return 0
else:
return 1
def getTotalShownDelta(self):
if self.dynamic:
return int(self.newTestsVisible)
else:
return 1 # we hide them temporarily for performance reasons, so can't do as above
def tryAddTest(self, test, initial=False):
iter = self.itermap.getIterator(test)
if iter:
self.addAdditional(iter, test)
return iter
suite = test.parent
suiteIter = None
if suite:
suiteIter = self.tryAddTest(suite, initial)
followIter = self.findFollowIter(suite, test, initial)
return self.addSuiteWithParent(test, suiteIter, followIter)
def findFollowIter(self, suite, test, initial):
if not initial and suite:
follower = suite.getFollower(test)
if follower:
return self.itermap.getIterator(follower)
def addAdditional(self, iter, test):
currTests = self.model.get_value(iter, 2)
if not test in currTests:
self.diag.info("Adding additional test to node " + self.model.get_value(iter, 0))
currTests.append(test)
def notifyRemove(self, test):
delta = -test.size()
iter = self.itermap.getIterator(test)
allTests = self.model.get_value(iter, 2)
if len(allTests) == 1:
self.notify("TestTreeCounters", totalDelta=delta, totalShownDelta=delta, totalRowsDelta=delta)
self.removeTest(test, iter)
else:
self.notify("TestTreeCounters", totalDelta=delta, totalShownDelta=delta, totalRowsDelta=0)
allTests.remove(test)
def removeTest(self, test, iter):
filteredIter = self.findIter(test)
self.selecting = True
if self.selection.iter_is_selected(filteredIter):
self.selection.unselect_iter(filteredIter)
self.selecting = False
self.selectionChanged(direct=False)
self.model.remove(iter)
self.itermap.remove(test)
def notifyNameChange(self, test, origRelPath):
iter = self.itermap.updateIterator(test, origRelPath)
oldName = self.model.get_value(iter, 0)
if test.name != oldName:
self.model.set_value(iter, 0, test.name)
filteredIter = self.filteredModel.convert_child_iter_to_iter(iter)
if self.selection.iter_is_selected(filteredIter):
self.notify("NameChange", test, origRelPath)
def notifyContentChange(self, suite):
suiteIter = self.itermap.getIterator(suite)
newOrder = self.findNewOrder(suiteIter)
self.model.reorder(suiteIter, newOrder)
def findNewOrder(self, suiteIter):
child = self.model.iter_children(suiteIter)
index = 0
posMap = {}
while (child != None):
subTestName = self.model.get_value(child, 0)
posMap[subTestName] = index
child = self.model.iter_next(child)
index += 1
newOrder = []
for currSuite in self.model.get_value(suiteIter, 2):
for subTest in currSuite.testcases:
oldIndex = posMap.get(subTest.name)
if oldIndex not in newOrder:
newOrder.append(oldIndex)
return newOrder
def notifyVisibility(self, tests, newValue):
self.diag.info("Visibility change for " + repr(tests) + " to " + repr(newValue))
if not newValue:
self.selecting = True
changedTests = []
for test in tests:
if self.updateVisibilityWithParents(test, newValue):
changedTests.append(test)
self.selecting = False
if len(changedTests) > 0:
self.diag.info("Actually changed tests " + repr(changedTests))
self.notify("Visibility", changedTests, newValue)
if self.treeView:
self.updateVisibilityInViews(newValue)
def updateVisibilityInViews(self, newValue):
if newValue: # if things have become visible, expand everything
rootIter = self.filteredModel.get_iter_root()
while rootIter != None:
self.expandRow(rootIter, True)
rootIter = self.filteredModel.iter_next(rootIter)
gobject.idle_add(self.scrollToFirstTest)
else:
self.selectionChanged(direct=False)
def updateVisibilityWithParents(self, test, newValue):
changed = False
if test.parent and newValue:
changed |= self.updateVisibilityWithParents(test.parent, newValue)
changed |= self.updateVisibilityInModel(test, newValue)
if test.parent and not newValue and not self.hasVisibleChildren(test.parent):
self.diag.info("No visible children : hiding parent " + repr(test.parent))
changed |= self.updateVisibilityWithParents(test.parent, newValue)
return changed
def isMarkedVisible(self, test):
testIter = self.itermap.getIterator(test)
# Can get None here when using queue systems, so that some tests in a suite
# start processing when others have not yet notified the GUI that they have been read.
return testIter is not None and self.model.get_value(testIter, 5) and test in self.model.get_value(testIter, 2)
def updateVisibilityInModel(self, test, newValue):
testIter = self.itermap.getIterator(test)
if testIter is None:
# Tests are not necessarily loaded yet in the GUI (for example if we do show only selected), don't stacktrace
return False
visibleTests = self.model.get_value(testIter, 2)
isVisible = test in visibleTests
changed = False
if newValue and not isVisible:
visibleTests.append(test)
changed = True
elif not newValue and isVisible:
visibleTests.remove(test)
changed = True
if (newValue and len(visibleTests) > 1) or (not newValue and len(visibleTests) > 0):
self.diag.info("No row visibility change : " + repr(test))
return changed
else:
return self.setVisibility(testIter, newValue)
def setVisibility(self, iter, newValue):
oldValue = self.model.get_value(iter, 5)
if oldValue == newValue:
return False
self.model.set_value(iter, 5, newValue)
return True
def hasVisibleChildren(self, suite):
return any((self.isMarkedVisible(test) for test in suite.testcases))
| mit | -9,205,779,399,166,659,000 | 41.066742 | 157 | 0.624708 | false |
line/line-bot-sdk-python | tests/models/test_base.py | 1 | 2959 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals, absolute_import
import json
import unittest
from linebot.models import Base
class Hoge(Base):
def __init__(self, title=None, content=None, hoge_bar=None, **kwargs):
super(Hoge, self).__init__(**kwargs)
self.title = title
self.content = content
self.hoge_bar = hoge_bar
class TestBase(unittest.TestCase):
def test_as_json_string(self):
self.assertEqual(
Hoge().as_json_string(),
'{}')
self.assertEqual(
Hoge(title='title').as_json_string(),
'{"title": "title"}')
self.assertEqual(
Hoge(title='title', content='content').as_json_string(),
'{"content": "content", "title": "title"}')
self.assertEqual(
Hoge(title='title', content={"hoge": "hoge"}).as_json_string(),
'{"content": {"hoge": "hoge"}, "title": "title"}')
self.assertEqual(
Hoge(title=[1, 2]).as_json_string(),
'{"title": [1, 2]}')
self.assertEqual(
Hoge(hoge_bar='hoge_bar').as_json_string(),
'{"hogeBar": "hoge_bar"}')
def test_as_json_dict(self):
self.assertEqual(
Hoge().as_json_dict(),
{})
self.assertEqual(
Hoge(title='title').as_json_dict(),
{'title': 'title'})
self.assertEqual(
Hoge(title='title', content='content').as_json_dict(),
{'content': 'content', 'title': 'title'})
self.assertEqual(
Hoge(title='title', content={"hoge": "hoge"}).as_json_dict(),
{'content': {'hoge': 'hoge'}, 'title': 'title'})
self.assertEqual(
Hoge(title=[1, 2]).as_json_dict(),
{'title': [1, 2]})
def test_new_from_json_dict(self):
self.assertEqual(
Hoge.new_from_json_dict({"title": "title"}),
Hoge(title='title'))
self.assertEqual(
Hoge.new_from_json_dict(json.loads('{"title": "title"}')),
Hoge(title='title'))
self.assertEqual(
Hoge.new_from_json_dict({"hoge_bar": "hoge_bar"}),
Hoge(hoge_bar='hoge_bar'))
self.assertEqual(
Hoge.new_from_json_dict({"hogeBar": "hoge_bar"}),
Hoge(hoge_bar='hoge_bar'))
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 4,919,719,617,394,605,000 | 33.406977 | 76 | 0.560324 | false |
brianlions/python-nebula | nebula/log.py | 1 | 13537 | #!/usr/bin/env python3
#
# Copyright (c) 2012 Brian Yi ZHANG <brianlions at gmail dot com>
#
# This file is part of pynebula.
#
# pynebula is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pynebula is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pynebula. If not, see <http://www.gnu.org/licenses/>.
#
import time
import traceback
import os
import sys
class Logger(object):
'''
'''
EMERG, ALERT, CRIT, ERR, WARNING, NOTICE, INFO, DEBUG = range(0, 8)
LOG_LEVELS = frozenset((EMERG, ALERT, CRIT, ERR, WARNING, NOTICE, INFO, DEBUG))
__level_names = {
EMERG: ('eme', 'emerg'),
ALERT: ('ale', 'alert'),
CRIT: ('cri', 'crit'),
ERR: ('err', 'err'),
WARNING: ('war', 'warning'),
NOTICE: ('not', 'notice'),
INFO: ('inf', 'info'),
DEBUG: ('deb', 'debug'),
}
@classmethod
def log_mask(cls, level):
'''Returns log mask for the specified log level.
Args:
level: one of the constants in Logger.LOG_LEVELS.
Returns:
An integer which can be passed to set_log_mask() etc.
'''
if level not in cls.__level_names:
raise ValueError("invalid log level: {:d}".format(level))
return (1 << level)
@classmethod
def mask_upto(cls, level):
'''Returns log mask for all levels through level.
Args:
level: one of the constants in Logger.LOG_LEVELS.
Returns:
An integer which can be passed to set_log_mask() etc.
'''
if level not in cls.__level_names:
raise ValueError("invalid log level: {:d}".format(level))
return (1 << (level + 1)) - 1
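    # Illustrative sketch (values follow directly from the constants above,
    # shown for convenience; they are not taken from any caller):
    #
    #   Logger.log_mask(Logger.WARNING)   # -> 1 << 4        == 16
    #   Logger.mask_upto(Logger.WARNING)  # -> (1 << 5) - 1  == 31   EMERG..WARNING
    #   Logger.mask_upto(Logger.DEBUG)    # -> 255                   all levels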
@classmethod
def level_name(cls, level, abbr = False):
'''Returns name of the specified log level.
Args:
level: one of the constants in Logger.LOG_LEVELS.
abbr: whether to use the abbreviated name or not.
Returns:
Human-readable string representation of the log level.'''
if level not in cls.__level_names:
raise ValueError("invalid log level: {:d}".format(level))
return cls.__level_names[level][(not abbr) and 1 or 0]
@classmethod
def timestamp_str(cls, now = None, use_gmtime = False, show_timezone = False):
'''Format and return current date and time.
Args:
now: seconds (as float) since the unix epoch, use current
time stamp if value is false.
use_gmtime: whether to use GMT time or not.
show_timezone: whether to display the time zone or not.
Returns:
String representation of date & time, the format of the returned
value is "YYYY.mm.dd-HH:MM:SS.ssssss-ZZZ".
'''
if not now:
now = time.time()
if show_timezone:
tz_format = use_gmtime and '-GMT' or '-%Z'
else:
tz_format = ''
return time.strftime('%Y.%m.%d-%H:%M:%S' + ('.%06d' % ((now - int(now)) * 1000000)) + tz_format,
use_gmtime and time.gmtime(now) or time.localtime(now))
def __init__(self, log_mask = None, use_gmtime = False, show_timezone = True):
self.__log_mask = log_mask and log_mask or self.mask_upto(self.INFO)
self.__use_gmtime = use_gmtime and True or False
self.__show_timezone = show_timezone and True or False
def set_log_mask(self, new_mask):
'''Set log mask, and return previous log mask.
Args:
new_mask: the new log mask to be set to.
Returns:
Previous log mask (as integer).
'''
if new_mask < self.mask_upto(self.EMERG) or new_mask > self.mask_upto(self.DEBUG):
raise ValueError("invalid log mask: {:d}".format(new_mask))
old_mask = self.__log_mask
self.__log_mask = new_mask
return old_mask
def set_max_level(self, max_level):
'''Log all messages through max_level.
Args:
max_level: one of the constants in Logger.LOG_LEVELS.
Returns:
Previous log mask (as integer).
'''
return self.set_log_mask(Logger.mask_upto(max_level))
def is_use_gmtime(self):
'''Whether we are using GMT time representation of not.
Returns:
True if using GMT, False otherwise.
'''
return self.__use_gmtime
def is_show_timezone(self):
'''Whether we are printing the time zone of not.
Returns:
True if printing time zone, False otherwise.
'''
return self.__show_timezone
def log(self, level, msg, use_gmtime = None, show_timezone = None,
stack_limit = 2):
'''Generate one log message.
Args:
level: level of the message
msg: string message to be logged
use_gmtime: whether to use GMT or not, if value is None, use the
value passed to __init__()
show_timezone: whether to log time zone or not, if value is None, use
the value passed to __init__()
stack_limit: passed to traceback.extract_stack(), in order to get
the correct file name, line number, and method name.
Returns:
True if the message was logged, False otherwise.
'''
if self.log_mask(level) & self.__log_mask:
file_name, line_num, func_name = traceback.extract_stack(limit = stack_limit)[0][:3]
# remove current working directory if it is prefix of the file name
cwd = os.getcwd() + os.path.sep
if file_name.startswith(cwd):
file_name = '.' + os.path.sep + file_name[len(cwd):]
if use_gmtime is None:
use_gmtime = self.is_use_gmtime()
if show_timezone is None:
show_timezone = self.is_show_timezone()
self.output_message(level, msg, file_name, line_num, func_name,
use_gmtime = use_gmtime,
show_timezone = show_timezone)
return True
else:
return False
def debug(self, msg, stack_limit = 3):
return self.log(self.DEBUG, msg, use_gmtime = self.__use_gmtime,
show_timezone = self.__show_timezone,
stack_limit = stack_limit)
def info(self, msg, stack_limit = 3):
return self.log(self.INFO, msg, use_gmtime = self.__use_gmtime,
show_timezone = self.__show_timezone,
stack_limit = stack_limit)
def notice(self, msg, stack_limit = 3):
return self.log(self.NOTICE, msg, use_gmtime = self.__use_gmtime,
show_timezone = self.__show_timezone,
stack_limit = stack_limit)
def warning(self, msg, stack_limit = 3):
return self.log(self.WARNING, msg, use_gmtime = self.__use_gmtime,
show_timezone = self.__show_timezone,
stack_limit = stack_limit)
def err(self, msg, stack_limit = 3):
return self.log(self.ERR, msg, use_gmtime = self.__use_gmtime,
show_timezone = self.__show_timezone,
stack_limit = stack_limit)
def crit(self, msg, stack_limit = 3):
return self.log(self.CRIT, msg, use_gmtime = self.__use_gmtime,
show_timezone = self.__show_timezone,
stack_limit = stack_limit)
def alert(self, msg, stack_limit = 3):
return self.log(self.ALERT, msg, use_gmtime = self.__use_gmtime,
show_timezone = self.__show_timezone,
stack_limit = stack_limit)
def emerg(self, msg, stack_limit = 3):
return self.log(self.EMERG, msg, use_gmtime = self.__use_gmtime,
show_timezone = self.__show_timezone,
stack_limit = stack_limit)
def output_message(self, level, msg, file_name, line_num, func_name,
use_gmtime = None, show_timezone = None):
'''Method subclass MUST implement.
Args:
level: (int) level of the message
msg: (str) message to be logged
file_name: (str) in which file the message was generated
line_num: (int) at which line the message was generated
func_name: (str) in which method (or function) the message was
generated
use_gmtime: (bool) whether to use GMT or not
show_timezone: (bool) whether to log the time zone or not
Returns:
(not required)
'''
raise NotImplementedError("{:s}.{:s}: output_message() not implemented".format(self.__class__.__module__,
self.__class__.__name__))
#-------------------------------------------------------------------------------
class ConsoleLogger(Logger):
'''Logger which log messages to console (stdout).'''
def __init__(self, *args, **kwargs):
super(ConsoleLogger, self).__init__(*args, **kwargs)
def output_message(self, level, msg, file_name, line_num, func_name,
use_gmtime = None, show_timezone = None):
'''Implements the abstract method defined in parent class.'''
if use_gmtime is None:
use_gmtime = self.is_use_gmtime()
if show_timezone is None:
show_timezone = self.is_show_timezone()
# time, log level, file name, line number, method name, log message
print("[{:s} {:s} {:s}:{:d}:{:s}] {:s}".format(self.timestamp_str(use_gmtime, show_timezone),
self.level_name(level, abbr = True),
file_name, line_num, func_name, msg))
sys.stdout.flush()
#-------------------------------------------------------------------------------
class WrappedLogger(object):
def __init__(self, log_handle = None):
self.__log_handle = None
self.set_log_handle(log_handle)
def set_log_handle(self, log_handle):
'''Set new log handle to be used.
Args:
log_handle: new log handle to be used
Returns:
Previous log handle, value might be None.
'''
if (log_handle is not None) and (not isinstance(log_handle, Logger)):
raise TypeError("log_handle {:s} is not an instance of {:s}.Logger".format(repr(log_handle),
self.__class__.__module__))
prev_handle = self.__log_handle
self.__log_handle = log_handle
return prev_handle
def get_log_handle(self):
'''Get current log handle current in use.
Returns:
Current log handle in use, value might be None.
'''
return self.__log_handle
def log_debug(self, msg):
if self.__log_handle:
self.__log_handle.debug(msg, stack_limit = 4)
def log_info(self, msg):
if self.__log_handle:
self.__log_handle.info(msg, stack_limit = 4)
def log_notice(self, msg):
if self.__log_handle:
self.__log_handle.notice(msg, stack_limit = 4)
def log_warning(self, msg):
if self.__log_handle:
self.__log_handle.warning(msg, stack_limit = 4)
def log_err(self, msg):
if self.__log_handle:
self.__log_handle.err(msg, stack_limit = 4)
def log_crit(self, msg):
if self.__log_handle:
self.__log_handle.crit(msg, stack_limit = 4)
def log_alert(self, msg):
if self.__log_handle:
self.__log_handle.alert(msg, stack_limit = 4)
def log_emerg(self, msg):
if self.__log_handle:
self.__log_handle.emerg(msg, stack_limit = 4)
#-------------------------------------------------------------------------------
def demo():
logger = ConsoleLogger(show_timezone = True)
for max_level in (Logger.DEBUG, Logger.INFO, Logger.NOTICE, Logger.WARNING, Logger.ERR):
print("max log level: %s" % Logger.level_name(max_level))
logger.set_log_mask(Logger.mask_upto(max_level))
for level in (Logger.DEBUG, Logger.INFO, Logger.NOTICE, Logger.WARNING, Logger.ERR):
logger.log(level, "message level %s" % Logger.level_name(level, abbr = False))
print()
print("max log level: %s" % Logger.level_name(Logger.DEBUG))
logger.set_log_mask(Logger.mask_upto(logger.DEBUG))
logger.debug("debug()")
logger.info("info()")
logger.notice("notice()")
logger.warning("wanring()")
logger.err("err()")
if __name__ == '__main__':
demo()
| gpl-3.0 | -4,693,924,603,498,929,000 | 35.292225 | 114 | 0.540149 | false |
tommo/gii | lib/mock/asset/AnimatorAsset.py | 1 | 1686 | import os.path
import json
from gii.core import *
from gii.qt.dialogs import requestString, alertMessage
from mock import _MOCK
##----------------------------------------------------------------##
class AnimatorDataCreator(AssetCreator):
def getAssetType( self ):
return 'animator_data'
def getLabel( self ):
return 'Animator Data'
def createAsset( self, name, contextNode, assetType ):
ext = '.animator_data'
filename = name + ext
if contextNode.isType('folder'):
nodepath = contextNode.getChildPath( filename )
else:
nodepath = contextNode.getSiblingPath( filename )
fullpath = AssetLibrary.get().getAbsPath( nodepath )
modelName = _MOCK.Model.findName( 'AnimatorData' )
assert( modelName )
_MOCK.createEmptySerialization( fullpath, modelName )
return nodepath
##----------------------------------------------------------------##
class AnimatorDataAssetManager( AssetManager ):
def getName( self ):
return 'asset_manager.animator_data'
def acceptAssetFile(self, filepath):
if not os.path.isfile(filepath): return False
if not filepath.endswith( '.animator_data' ): return False
return True
def importAsset(self, node, reload = False ):
node.assetType = 'animator_data'
node.setObjectFile( 'data', node.getFilePath() )
# def editAsset(self, node):
# editor = app.getModule( 'animator' )
# if not editor:
# return alertMessage( 'Designer not load', 'AnimatorData Designer not found!' )
# editor.openAsset( node )
##----------------------------------------------------------------##
AnimatorDataAssetManager().register()
AnimatorDataCreator().register()
AssetLibrary.get().setAssetIcon( 'animator_data', 'clip' )
| mit | -5,049,870,976,723,200,000 | 29.107143 | 83 | 0.641756 | false |
happyleavesaoc/home-assistant | homeassistant/components/notify/smtp.py | 1 | 9080 | """
Mail (SMTP) notification service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.smtp/
"""
import logging
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.application import MIMEApplication
import email.utils
import os
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
from homeassistant.components.notify import (
ATTR_TITLE, ATTR_TITLE_DEFAULT, ATTR_DATA, PLATFORM_SCHEMA,
BaseNotificationService)
from homeassistant.const import (
CONF_USERNAME, CONF_PASSWORD, CONF_PORT, CONF_TIMEOUT,
CONF_SENDER, CONF_RECIPIENT)
_LOGGER = logging.getLogger(__name__)
ATTR_IMAGES = 'images' # optional embedded image file attachments
ATTR_HTML = 'html'
CONF_ENCRYPTION = 'encryption'
CONF_DEBUG = 'debug'
CONF_SERVER = 'server'
CONF_SENDER_NAME = 'sender_name'
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 465
DEFAULT_TIMEOUT = 5
DEFAULT_DEBUG = False
DEFAULT_ENCRYPTION = 'tls'
ENCRYPTION_OPTIONS = ['tls', 'starttls', 'none']
# pylint: disable=no-value-for-parameter
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_RECIPIENT): vol.All(cv.ensure_list, [vol.Email()]),
vol.Required(CONF_SENDER): vol.Email(),
vol.Optional(CONF_SERVER, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Optional(CONF_ENCRYPTION, default=DEFAULT_ENCRYPTION):
vol.In(ENCRYPTION_OPTIONS),
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_SENDER_NAME): cv.string,
vol.Optional(CONF_DEBUG, default=DEFAULT_DEBUG): cv.boolean,
})
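# A minimal configuration sketch matching the schema above; the host name,
# addresses and credentials below are placeholders, not values from any real
# installation:
#
#   notify:
#     - platform: smtp
#       sender: [email protected]
#       recipient: [email protected]
#       server: smtp.example.com
#       port: 465
#       encryption: tls
#       username: [email protected]
#       password: YOUR_PASSWORD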
def get_service(hass, config, discovery_info=None):
"""Get the mail notification service."""
mail_service = MailNotificationService(
config.get(CONF_SERVER),
config.get(CONF_PORT),
config.get(CONF_TIMEOUT),
config.get(CONF_SENDER),
config.get(CONF_ENCRYPTION),
config.get(CONF_USERNAME),
config.get(CONF_PASSWORD),
config.get(CONF_RECIPIENT),
config.get(CONF_SENDER_NAME),
config.get(CONF_DEBUG))
if mail_service.connection_is_valid():
return mail_service
else:
return None
class MailNotificationService(BaseNotificationService):
"""Implement the notification service for E-mail messages."""
def __init__(self, server, port, timeout, sender, encryption, username,
password, recipients, sender_name, debug):
"""Initialize the SMTP service."""
self._server = server
self._port = port
self._timeout = timeout
self._sender = sender
self.encryption = encryption
self.username = username
self.password = password
self.recipients = recipients
self._sender_name = sender_name
self.debug = debug
self.tries = 2
def connect(self):
"""Connect/authenticate to SMTP Server."""
if self.encryption == "tls":
mail = smtplib.SMTP_SSL(
self._server, self._port, timeout=self._timeout)
else:
mail = smtplib.SMTP(
self._server, self._port, timeout=self._timeout)
mail.set_debuglevel(self.debug)
mail.ehlo_or_helo_if_needed()
if self.encryption == "starttls":
mail.starttls()
mail.ehlo()
if self.username and self.password:
mail.login(self.username, self.password)
return mail
def connection_is_valid(self):
"""Check for valid config, verify connectivity."""
server = None
try:
server = self.connect()
except smtplib.socket.gaierror:
_LOGGER.exception(
"SMTP server not found (%s:%s). "
"Please check the IP address or hostname of your SMTP server",
self._server, self._port)
return False
except (smtplib.SMTPAuthenticationError, ConnectionRefusedError):
_LOGGER.exception(
"Login not possible. "
"Please check your setting and/or your credentials")
return False
finally:
if server:
server.quit()
return True
def send_message(self, message="", **kwargs):
"""
Build and send a message to a user.
Will send plain text normally, or will build a multipart HTML message
with inline image attachments if images config is defined, or will
build a multipart HTML if html config is defined.
"""
subject = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
data = kwargs.get(ATTR_DATA)
if data:
if ATTR_HTML in data:
msg = _build_html_msg(
message, data[ATTR_HTML], images=data.get(ATTR_IMAGES))
else:
msg = _build_multipart_msg(
message, images=data.get(ATTR_IMAGES))
else:
msg = _build_text_msg(message)
msg['Subject'] = subject
msg['To'] = ','.join(self.recipients)
if self._sender_name:
msg['From'] = '{} <{}>'.format(self._sender_name, self._sender)
else:
msg['From'] = self._sender
msg['X-Mailer'] = 'HomeAssistant'
msg['Date'] = email.utils.format_datetime(dt_util.now())
msg['Message-Id'] = email.utils.make_msgid()
return self._send_email(msg)
def _send_email(self, msg):
"""Send the message."""
mail = self.connect()
for _ in range(self.tries):
try:
mail.sendmail(self._sender, self.recipients, msg.as_string())
break
except smtplib.SMTPServerDisconnected:
_LOGGER.warning(
"SMTPServerDisconnected sending mail: retrying connection")
mail.quit()
mail = self.connect()
except smtplib.SMTPException:
_LOGGER.warning(
"SMTPException sending mail: retrying connection")
mail.quit()
mail = self.connect()
mail.quit()
def _build_text_msg(message):
"""Build plaintext email."""
_LOGGER.debug("Building plain text email")
return MIMEText(message)
def _build_multipart_msg(message, images):
"""Build Multipart message with in-line images."""
_LOGGER.debug("Building multipart email with embedded attachment(s)")
msg = MIMEMultipart('related')
msg_alt = MIMEMultipart('alternative')
msg.attach(msg_alt)
body_txt = MIMEText(message)
msg_alt.attach(body_txt)
body_text = ['<p>{}</p><br>'.format(message)]
for atch_num, atch_name in enumerate(images):
cid = 'image{}'.format(atch_num)
body_text.append('<img src="cid:{}"><br>'.format(cid))
try:
with open(atch_name, 'rb') as attachment_file:
file_bytes = attachment_file.read()
try:
attachment = MIMEImage(file_bytes)
msg.attach(attachment)
attachment.add_header('Content-ID', '<{}>'.format(cid))
except TypeError:
_LOGGER.warning("Attachment %s has an unknown MIME type. "
"Falling back to file", atch_name)
attachment = MIMEApplication(file_bytes, Name=atch_name)
attachment['Content-Disposition'] = ('attachment; '
'filename="%s"' %
atch_name)
msg.attach(attachment)
except FileNotFoundError:
_LOGGER.warning("Attachment %s not found. Skipping", atch_name)
body_html = MIMEText(''.join(body_text), 'html')
msg_alt.attach(body_html)
return msg
def _build_html_msg(text, html, images):
"""Build Multipart message with in-line images and rich HTML (UTF-8)."""
_LOGGER.debug("Building HTML rich email")
msg = MIMEMultipart('related')
alternative = MIMEMultipart('alternative')
alternative.attach(MIMEText(text, _charset='utf-8'))
alternative.attach(MIMEText(html, ATTR_HTML, _charset='utf-8'))
msg.attach(alternative)
for atch_num, atch_name in enumerate(images):
name = os.path.basename(atch_name)
try:
with open(atch_name, 'rb') as attachment_file:
attachment = MIMEImage(attachment_file.read(), filename=name)
msg.attach(attachment)
attachment.add_header('Content-ID', '<{}>'.format(name))
except FileNotFoundError:
_LOGGER.warning("Attachment %s [#%s] not found. Skipping",
atch_name, atch_num)
return msg
| apache-2.0 | 7,774,232,293,811,562,000 | 34.748031 | 79 | 0.601322 | false |
lutraconsulting/qgis-constraint-checker-plugin | ConstraintChecker/constraintcheckerdialog.py | 1 | 1610 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Constraint Checker
A QGIS plugin
Generate reports of constraints (e.g. planning constraints) applicable to an area of interest.
-------------------
begin : 2014-03-19
copyright : (C) 2014 by Lutra Consulting for Dartmoor National Park Authority
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os
from qgis.PyQt import uic
from qgis.PyQt.QtWidgets import QDialog
ui_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ui', 'ui_constraintchecker.ui')
# create the dialog for the Constraint Checker
class ConstraintCheckerDialog(QDialog):
def __init__(self):
QDialog.__init__(self)
# Set up the user interface from Designer.
self.ui = uic.loadUi(ui_file, self)
| gpl-2.0 | -157,576,542,083,008,830 | 47.787879 | 100 | 0.42236 | false |
levilucio/SyVOLT | UMLRT2Kiltera_MM/Properties/from_thesis/HMM8_then1_ConnectedLHS.py | 1 | 2650 | from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HMM8_then1_ConnectedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HMM8_then1_ConnectedLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HMM8_then1_ConnectedLHS, self).__init__(name='HMM8_then1_ConnectedLHS', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'MM8_then1')
# Set the node attributes
# Nodes that represent the edges of the property.
# Add the edges
self.add_edges([
])
# Add the attribute equations
self["equations"] = []
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
| mit | 4,038,207,856,086,871,600 | 42.442623 | 125 | 0.47434 | false |
weka511/bioinformatics | phylogeny.py | 1 | 34566 | # Copyright (C) 2020-2021 Greenweaves Software Limited
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Phylogeny -- http://rosalind.info/problems/topics/phylogeny/
import re
from rosalind import LabelledTree
from random import randrange
from newick import newick_to_adjacency_list
import numpy as np
from numpy import argmin,argmax
from fasta import FastaContent
from helpers import flatten
# tree -- Completing a Tree
#
# Given: A positive integer n (n<=1000) and an adjacency list corresponding to a graph on n nodes that contains no cycles.
#
# Return: The minimum number of edges that can be added to the graph to produce a tree.
# This is the number of independent components - 1
def CompleteTree(n,adj):
# create_twigs
#
    # Build a dictionary to show which nodes each node is linked to
def create_twigs():
twigs = {i:set() for i in range(1,n+1)}
for a,b in adj:
twigs[a].add(b)
twigs[b].add(a)
return twigs
# find_component
#
# Find one component of graph
def find_component(start):
component = [] # The component being built
todo = set() # Nodes being considered for inclusion
todo.add(start)
while len(todo)>0:
current = todo.pop()
component.append(current)
for node in twigs[current]:
if node not in component:
todo.add(node)
for c in component:
del twigs[c]
return component
twigs = create_twigs()
components = []
while len(twigs)>0:
components.append(find_component(list(twigs.keys())[0]))
return len(components)-1
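# Illustrative sketch (toy input, not problem data): five nodes with edges
# (1,2) and (3,4) form three components {1,2}, {3,4} and {5}, so two more
# edges are needed to join them into a single tree:
#
#    CompleteTree(5, [(1, 2), (3, 4)])   # -> 2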
def chbp(species,character_table):
pass
# cstr
#
# Creating a Character Table from Genetic Strings http://rosalind.info/problems/cstr/
def cstr(strings):
def trivial(split):
if len(split)<2: return True
for k,v in split.items():
if v<2: return True
return False
choices = [[] for s in strings[0]]
counts = [{} for s in strings[0]]
for i in range(len(strings[0])):
for s in strings:
if not s[i] in choices[i]:
choices[i].append(s[i])
if s[i] in counts[i]:
counts[i][s[i]] += 1
else:
counts[i][s[i]] = 1
splits=[]
for i in range(len(strings[0])):
split = {}
for c in choices[i]:
split[c] = 0
for s in strings:
for c in choices[i]:
if s[i]==c:
split[c]+=1
splits.append(split)
result=[]
for i in range(len(strings[0])):
character = []
split = splits[i]
if not trivial(split):
chs = list(split.keys())
for s in strings:
character.append('0' if s[i]==chs[0] else '1')
result.append(''.join(character))
return result
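# Illustrative sketch (toy strings chosen for brevity): positions 0 and 2 each
# split the four strings 2/2, while position 1 is constant and therefore trivial:
#
#    cstr(['ATG', 'ATG', 'CTC', 'CTC'])   # -> ['0011', '0011']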
# ctbl Creating a Character Table http://rosalind.info/problems/ctbl/
def CharacterTable(tree):
def create_character(split_species):
character=[]
for s in species:
character.append(1 if s in split_species else 0)
return ''.join([str(c) for c in character])
species=[spec.name for spec in tree.find_elements(terminal=True)]
species.sort()
clades=[clade for clade in tree.find_clades(terminal=False)]
# we iterate over all Clades except the root
return [create_character([spec.name for spec in split.find_elements(terminal=True)]) for split in clades[1:]]
# NumberBinaryTrees
#
# cunr Counting Unrooted Binary Trees
# root Counting Rooted Binary Trees
# See http://carrot.mcb.uconn.edu/~olgazh/bioinf2010/class16.html
def NumberBinaryTrees(n,rooted=True):
N = 1
m = 2*n-3 if rooted else 2*n-5
while m>1:
N *=m
m -= 2
return N
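# Illustrative sketch (standard double-factorial values, not problem data):
#
#    NumberBinaryTrees(4, rooted=False)   # -> 3   unrooted trees on 4 taxa
#    NumberBinaryTrees(4, rooted=True)    # -> 15  rooted trees on 4 taxa
#    NumberBinaryTrees(5, rooted=False)   # -> 15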
class UnrootedBinaryTree:
@classmethod
# EnumerateUnrootedBinaryTrees
#
# Given: A collection of species names representing n taxa.
#
# Return: A list containing all unrooted binary trees whose leaves are these n
# taxa. Trees should be given in Newick format, with one tree on each line;
# the order of the trees is unimportant.
#
    # Idea: all unrooted binary trees with a given number of leaves are isomorphic if we
    # ignore the labels of the leaves and nodes. Therefore it is enough to
# build a tree with 3 leaves, and keep adding one leaf at a time in all available positions.
def Enumerate(cls,species):
def enumerate(n):
if n==3:
return [cls({0:[species[0], species[1], species[2]]})]
else:
return [cls.insert(species[n-1],edge,graph) for graph in enumerate(n-1) for edge in graph.Edges()]
return enumerate(len(species))
# insert
#
    # Create a new tree by adding one new internal node and a leaf to a specified edge
@classmethod
def insert(cls,species,edge,graph):
nextNode = max(list(graph.adj.keys())) + 1
n1,n2 = edge
adj = {nextNode: [species,n2]}
for node,links in graph.adj.items():
adj[node] = [nextNode if ll==n2 else ll for ll in links] if node==n1 else links
return cls(adj)
def __init__(self,adj):
self.adj = adj
def __str__(self):
return self.bfs_newick()
# bfs_newick
#
    # Create Newick representation by traversing the tree recursively
def bfs_newick(self,node=0):
newick = []
for child in self.adj[node]:
if type(child)==int:
newick.append(self.bfs_newick(node=child))
else:
newick.append(child)
representation = ','.join(newick)
return f'({representation})'
def Edges(self):
for a,b in self.adj.items():
for c in b:
yield a,c
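# Illustrative sketch (species names assumed): four taxa admit exactly three
# unrooted binary topologies, one for each edge of the three-leaf starting tree:
#
#    [str(t) for t in UnrootedBinaryTree.Enumerate(['a', 'b', 'c', 'd'])]
#    # -> ['((d,a),b,c)', '(a,(d,b),c)', '(a,b,(d,c))']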
# qrt Incomplete Characters
#
# Given: A partial character table C
#
# Return: The collection of all quartets that can be inferred from the splits corresponding to the underlying characters of C
def qrt(taxa,characters):
def tuples(n):
for i in range(n):
for j in range(n):
if i==j: continue
for k in range(n):
if k in [i,j]: continue
for l in range(n):
if l in [i,j,k]: continue
if i<j and k<l and i<k:
yield i,j,k,l
def isConsistent(selector):
for char in characters:
character = [char[i] for i in selector]
if any(c is None for c in character): continue
if character[0]==character[1] and character[2]==character[3] and character[0]!=character[2]: return True
return False
for (i,j,k,l) in tuples(len(taxa)):
selector = [i,j,k,l]
if isConsistent(selector):
yield [taxa[m] for m in selector]
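# Illustrative sketch (toy character table): the single character 0 0 0 1 1 on
# taxa a..e supports every quartet that pairs two of {a,b,c} against {d,e}:
#
#    list(qrt(['a', 'b', 'c', 'd', 'e'], [[0, 0, 0, 1, 1]]))
#    # -> [['a','b','d','e'], ['a','c','d','e'], ['b','c','d','e']]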
# snarfed from https://stackoverflow.com/questions/51373300/how-to-convert-newick-tree-format-to-a-tree-like-hierarchical-object
def parse(newick,start=0):
tokens = re.findall(r"([^:;,()\s]*)(?:\s*:\s*([\d.]+)\s*)?([,);])|(\S)", newick+";")
def recurse(nextid = start, parentid = -1): # one node
thisid = nextid;
children = []
name, length, delim, ch = tokens.pop(0)
if ch == "(":
while ch in "(,":
node, ch, nextid = recurse(nextid+1, thisid)
children.append(node)
name, length, delim, ch = tokens.pop(0)
return {"id": thisid, "name": name, "length": float(length) if length else None,
"parentid": parentid, "children": children}, delim, nextid
return recurse()[0]
def create_adj(tree):
adj = {}
def dfs(tree):
id = tree['id']
name = tree['name']
children = tree['children']
parentid = tree['parentid']
if len(name)==0:
adj[id]=[]
if parentid>-1:
adj[parentid].append(id if len(name)==0 else name)
for child in children:
dfs(child)
dfs(tree)
return adj
# SPTD Phylogeny Comparison with Split Distance
def sptd(species,newick1,newick2):
def replace_leaves(adj):
return {parent:sorted([seiceps[child] if child in seiceps else child for child in children]) for parent,children in adj.items() }
def edges(adj):
for parent,children in adj.items():
for child in children:
if child >= n:
yield parent,child
def splits(adj,min_size=2):
def find_leaves(node,path=[]):
for child in adj[node]:
if child<n:
path.append(child)
else:
find_leaves(child,path=path)
for parent,child in edges(adj):
s1 = []
            find_leaves(child, s1)
if len(s1)<min_size: continue
s2 = [leaf for leaf in range(n) if not leaf in s1]
yield sorted(s1),sorted(s2)
def ds(adj1,adj2):
shared = 0
splits1 = sorted([s for s,_ in splits(adj1)])
splits2 = sorted([s for s,_ in splits(adj2)])
k1 = 0
k2 = 0
i1 = splits1[k1]
i2 = splits2[k2]
while k1<len(splits1) and k2<len(splits2):
if i1==i2:
shared += 1
k1 += 1
k2 += 1
if k1<len(splits1) and k2<len(splits2):
i1 = splits1[k1]
i2 = splits2[k2]
elif i1<i2:
k1+=1
if k1<len(splits1):
i1 = splits1[k1]
else:
k2+=1
if k2<len(splits2):
i2 = splits2[k2]
return 2*(n-3)- 2* shared
n = len(species)
seiceps = {species[i]:i for i in range(n)}
return ds(replace_leaves(create_adj(parse(newick1,start=n))),
replace_leaves(create_adj(parse(newick2,start=n))))
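# Illustrative sketch (toy quartets, trifurcating Newick as used by Rosalind):
# identical topologies share their single nontrivial split, disagreeing ones
# share none, and the distance is bounded by 2*(n-3):
#
#    sptd(['a','b','c','d'], '(a,b,(c,d));', '(a,b,(c,d));')   # -> 0
#    sptd(['a','b','c','d'], '(a,b,(c,d));', '(a,c,(b,d));')   # -> 2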
# MEND Inferring Genotype from a Pedigree
#
# Given: A rooted binary tree T in Newick format encoding an individual's pedigree
# for a Mendelian factor whose alleles are A (dominant) and a (recessive).
#
# Return: Three numbers between 0 and 1, corresponding to the respective probabilities
# that the individual at the root of T will exhibit the "AA", "Aa" and "aa" genotypes.
def mend(node):
# combine
#
# Combine two genomes with known probabilities - work out proabilites in next generation
#
# NB: the tree is a pedigree, not a phylogeny: the root is the descendent!
def combine(f1,f2):
return np.sum([[f*f1[i]*f2[j] for f in factors[i][j]] for i in range(n) for j in range(n)],
axis=0)
# Probability of each combination in the initial generation, when we know the genome
frequencies = {
'aa': (0,0,1),
'Aa': (0,1,0),
'AA': (1,0,0)
}
# Probabilty of each combination when we combine two genomes
factors=[# AA Aa/aA aa
[ [1.0, 0.0, 0.0], [0.50, 0.50, 0.00], [0.0, 1.0, 0.0] ], #AA
[ [0.5, 0.5, 0.0], [0.25, 0.50, 0.25], [0.0, 0.5, 0.5] ], #Aa/aA
[ [0.0, 1.0, 0.0], [0.00, 0.50, 0.50], [0.0, 0.0, 1.0] ] #aa
]
n = len(frequencies) # Number of combinations
# If we are at a leaf, we have a known ancestor
if len(node.nodes)==0:
try:
return frequencies['Aa' if node.name=='aA' else node.name]
except KeyError:
return (0,0)
parent_freqs = [mend(parent) for parent in node.nodes]
parent_freqs = [pp for pp in parent_freqs if len(pp)==n]
return combine(parent_freqs[0],parent_freqs[1])
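# Illustrative sketch (assuming the pedigree '(Aa,Aa);' has been parsed into the
# node structure expected above): an Aa x Aa cross gives the familiar Mendelian
# ratios, which is exactly factors[1][1]:
#
#    mend(root_of_Aa_x_Aa_pedigree)   # -> (0.25, 0.5, 0.25) for (AA, Aa, aa)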
# SmallParsimony
#
# Find the most parsimonious labeling of the internal nodes of a rooted tree.
#
# Given: An integer n followed by an adjacency list for a rooted binary tree with n leaves labeled by DNA strings.
#
# Return: The minimum parsimony score of this tree, followed by the adjacency list of the tree
# corresponding to labeling internal nodes by DNA strings in order to minimize the parsimony score of the tree.
def SmallParsimony(T,alphabet='ATGC'):
# SmallParsimonyC Solve small parsimony for one character
def SmallParsimonyC(Character):
# get_ripe
#
        # Returns: a node that is ready for processing
def get_ripe():
for v in T.get_nodes():
if not processed[v] and v in T.edges:
for e,_ in T.edges[v]:
if e>v: continue
if not processed[e]: break
return v
return None
# calculate_s
# Calculate score if node v is set to a specified symbol
# Parameters:
# symbol The symbol, e.g. 'A', not the index in alphabet
# v The node
def calculate_s(symbol,v):
# delta
#
# Complement of Kronecker delta
def delta(i):
return 0 if symbol==alphabet[i] else 1
def get_min(e):
return min(s[e][i]+delta(i) for i in range(len(alphabet)))
return sum([get_min(e) for e,_ in T.edges[v]])
# update_assignments
#
# Parameters:
# v
# s
def update_assignments(v,s):
if not v in assignments.labels:
assignments.labels[v]=''
index = 0
min_s = float('inf')
for i in range(len(s)):
if s[i] < min_s:
min_s = s[i]
index = i
assignments.set_label(v,assignments.labels[v]+alphabet[index])
return alphabet[index]
# backtrack
#
# Process internal node of tree top down, starting from root
def backtrack(v, current_assignment):
for v_next,_ in T.edges[v]:
if T.is_leaf(v_next): continue
if not v_next in assignments.labels:
assignments.labels[v_next]=''
min_score = min([s[v_next][i] for i in range(len(alphabet))])
indices = [i for i in range(len(alphabet)) if s[v_next][i]==min_score ]
matched = False
for i in indices:
if alphabet[i]==current_assignment:
matched = True
assignments.set_label(v_next,assignments.labels[v_next]+current_assignment)
backtrack(v_next,current_assignment)
if not matched:
# Black magic alert: I am not clear why the introduction of random numbers
                    # helps here. Maybe it stops the tree being biased towards the first strings
# in the alphabet.
next_assignment = alphabet[indices[randrange(0,(len(indices)))]]
assignments.set_label(v_next,assignments.labels[v_next]+next_assignment)
backtrack(v_next,next_assignment)
processed = {}
s = {}
# Compute scores for a leaves, and mark internal notes unprocessed
for v in T.get_nodes():
if T.is_leaf(v):
processed[v]=True
s[v] = [0 if symbol==Character[v] else float('inf') for symbol in alphabet]
else:
processed[v]=False
# Process ripe (unprocessed, but whose children have been processed)
# until there are none left
# Keep track of last node as we will use it to start backtracking
v = get_ripe()
while not v == None:
processed[v] = True
s[v] = [calculate_s(symbol,v) for symbol in alphabet ]
v_last = v
v = get_ripe()
backtrack(v_last,update_assignments(v_last,s[v_last]))
return min([s[v_last][c] for c in range(len(alphabet))])
assignments = LabelledTree(T.N)
assignments.initialize_from(T)
return sum([SmallParsimonyC([v[i] for l,v in T.labels.items()]) for i in range(len(T.labels[0]))]),assignments
# alph
#
# Given: A rooted binary tree T on n species, given in Newick format, followed by a multiple alignment of m
# augmented DNA strings having the same length (at most 300 bp) corresponding to the species
# and given in FASTA format.
#
# Return: The minimum possible value of dH(T), followed by a collection of DNA strings to be
# assigned to the internal nodes of T that will minimize dH(T).
def alph(T,Alignment,Alphabet=['A','T','C','G','-']):
# create_fixed_alignments
#
# Extract dictionary of leaves from Alignment
#
# Returns: length of any string in alignment, plus dictionary of leaves
def create_fixed_alignments():
Leaves = {}
k = None
for i in range(0,len(Alignment),2):
Leaves[Alignment[i]] = Alignment[i+1]
if k==None:
k = len(Alignment[i+1])
else:
assert k==len(Alignment[i+1]),f'Alignments should all have same length.'
return k,Leaves
# SmallParsimony
#
    # This is the Small Parsimony algorithm from Pevzner and Compeau, which
# processes a single character
#
# Parameters:
# l Index of character in Alignment
# Returns: Score of best assignment, plus an assignment of character that provides this score
def SmallParsimony(l):
# is_ripe
#
# Determine whether now is ready for processing
# A ripe node is one that hasn't been processed,
# but its children have
def is_ripe(v):
for child in Adj[v]:
if not Tag[child]: return False
return True
# find_ripe
#
# Find list of nodes that are ready to be processed
#
# Input: A list of nodes
# Returns: Two lists, those ready for processing, and those which are not
def find_ripe(Nodes):
Ripe = []
Unripe = []
for v in Nodes:
if is_ripe(v):
Ripe.append(v)
else:
Unripe.append(v)
return Ripe,Unripe
# delta
#
# The delta function from Pevzner and Compeau: not the Kronecker delta
def delta(i,j):
return 0 if i==j else 1
# get_distance
#
        # Get total distance of node from its children assuming one trial assignment
#
# Parameters:
# v Current node
# k Index of character for trial
def get_distance(v,k):
# best_alignment
#
# Find best alignment with child (measured by varying child's index) given
# the current choice of character in this node
#
# Parameters:
            #            k     Trial assignment (character index) for this node
def best_alignment(child):
return min([s[child][i] + delta(i,k) for i in range(len(Alphabet))])
return sum([best_alignment(child) for child in Adj[v]])
# backtrack
#
        # Perform a depth first search through all nodes to determine the assignment.
# Parameters:
# root Root node
# s Scores for all possible best assignments to all nodes
# Returns:
# score Score of best assignment,
# ks For each node the assignment of character that provides this score
# represented an an index into alphabet
#
#
# Comment by Robert Goldberg-Alberts.
# The Backtrack portion of the code consists of a breath first tracking through the tree from
# the root in a left to right fashion through the nodes (sons and daughters)
# row after row until you finally reach the leaves. The leaves already have values assigned to them from the data
# At the root, determine whether the value of the node is A, C, T, G by taking the minimum value of the
# four numbers created for the root set. Break ties by selecting from the ties at random.
# After that, for subsequent nodes take the minimum of each value at a node and determine if
# there are ties at the minimum. Check to see if the ancestor parent of that node has a value
# that is contained in the eligible nucleotides from the node. If it IS contained there force the
# ancestor value for that node.
# Continue in that fashion until all the internal nodes have values assigned to them.
def backtrack(root,s):
def dfs(node,k,parent_score):
def match(i,j,child_scores):
return parent_score == child_scores[0][i] + child_scores[1][j]
if len(Adj[node])==0: return
children = Adj[node]
child_scores_delta = [[s[child][i] + delta(i,k) for i in range(len(Alphabet))] for child in children]
child_scores_raw = [[s[child][i] for i in range(len(Alphabet))] for child in children]
candidates = [(i,j,child_scores_raw) for i in range(len(Alphabet)) for j in range(len(Alphabet)) \
if match(i,j,child_scores_delta)]
selection = candidates[randrange(len(candidates))]
scores_children = [selection[2][i][selection[i]] for i in range(len(children))]
for i in range(len(children)):
ks[children[i]] = selection[i]
for i in range(len(children)):
dfs(children[i],ks[children[i]],scores_children[i])
ks = {}
index = argmin(s[root])
score = s[root][index]
ks[root] = index
dfs(root,index,score)
return score, ks
s = {} # Scores for nodes
Tag = {} # Nodes that have been processed
ToBeProcessed = [] # Nodes that have yet to be processed
# Partition nodes into two groups: leaves are easily processed,
# the others are all marked as unprocessed
for v in Adj.keys():
if v in Leaves:
char = Leaves[v][l]
s[v] = [0 if Alphabet[k]==char else float('inf') for k in range(len(Alphabet))]
Tag[v] = True
else:
Tag[v] = False
ToBeProcessed.append(v)
Ripe,ToBeProcessed = find_ripe(ToBeProcessed)
while len(Ripe)>0:
for v in Ripe:
s[v] = [get_distance(v,k) for k in range(len(Alphabet))]
Tag[v] = True
Ripe,ToBeProcessed = find_ripe(ToBeProcessed)
assert len(ToBeProcessed)==0,'If there are no ripe nodes, ToBeProcessed should be exhausted'
return backtrack(v,s)
Adj = newick_to_adjacency_list(T)
L,Leaves = create_fixed_alignments()
Assignment = {a:[] for a in Adj.keys()}
d = 0
assert len([node for node,value in Adj.items() if len(value)==0 and node not in Leaves])==0,\
f'Some nodes are leaves, but have no strings in alignment'
for l in range(L):
score,ks = SmallParsimony(l)
d += score
for v,index in ks.items():
Assignment[v].append(Alphabet[index])
return d,[(f'{a}',''.join(b)) for a,b in Assignment.items() if len(Adj[a])!=0]
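# Illustrative sketch (not part of the original solution): a tiny call to alph.
# The Newick string and alignment below are made up, and this assumes that
# newick_to_adjacency_list (defined elsewhere in this module) keys leaf nodes by
# their names exactly as they appear in the FASTA-style alignment list.
def _demo_alph():
    tree = '(dog,cat)root;'
    alignment = ['dog', 'ATG',
                 'cat', 'ATT']
    return alph(tree, alignment)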
# chbp Character-Based Phylogeny
#
# Strategy: sort character table on entropy, then use each character to divide clades into two.
def chbp(species,character_table):
# Clade
#
# This class represents one clade or taxon
class Clade:
def __init__(self,taxa):
self.taxa = [s for s in taxa]
def is_trivial(self):
return len(self.taxa)==0
def is_singleton(self):
return len(self.taxa)==1
# newick
#
# Convert to string in Newick format
def newick(self):
def conv(taxon):
if type(taxon)==int:
return species[taxon]
else:
return taxon.newick()
if self.is_singleton():
return conv(self.taxa[0])
else:
return '(' + ','.join(conv(taxon) for taxon in self.taxa) +')'
# split
#
# Split clade in two using character: list of taxa is replaced by two clades
#
# Returns True if clade has been split into two non-trivial clades
# False if at least one clade would be trivial--in which case clade is unchanged
#
def split(self,character):
left = []
right = []
for i in self.taxa:
if character[i]==0:
left.append(i)
else:
right.append(i)
leftTaxon = Clade(left)
rightTaxon = Clade(right)
if leftTaxon.is_trivial(): return False
if rightTaxon.is_trivial(): return False
self.taxa = [leftTaxon,rightTaxon]
return True
# splitAll
#
# Split clade using character table
def splitAll(self,characters,depth=0):
if depth<len(characters):
if self.split(characters[depth]):
for taxon in self.taxa:
taxon.splitAll(characters,depth+1)
else:
self.splitAll(characters,depth+1)
# Calculate entropy of a single character
def get_entropy(freq):
if freq==0 or freq==n: return 0
p1 = freq/n
p2 = 1-p1
return - p1 *np.log(p1) - p2 * np.log(p2)
n = len(species)
entropies = [get_entropy(sum(char)) for char in character_table]
entropy_indices = np.argsort(entropies)
characters = [character_table[i] for i in entropy_indices[::-1]]
indices = list(range(len(species)))
root = Clade(indices)
root.splitAll(characters)
return f'{root.newick()};'
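# Illustrative sketch (not part of the original solution): chbp takes a list of
# species names and a 0/1 character table (one row per character, one column per
# species) and returns a Newick string; the data below is made up.
def _demo_chbp():
    species = ['cat', 'dog', 'elephant', 'mouse']
    character_table = [[0, 0, 1, 1],
                       [1, 0, 1, 0]]
    return chbp(species, character_table)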
# RSUB Identifying Reversing Substitutions
#
# Given: A rooted binary tree T with labeled nodes in Newick format, followed by a collection of at most
# 100 DNA strings in FASTA format whose labels correspond to the labels of T.
#
# We will assume that the DNA strings have the same length, which does not exceed 400 bp).
#
# Return: A list of all reversing substitutions in T (in any order), with each substitution encoded by the following three items:
#
# the name of the species in which the symbol is first changed, followed by the name of the species in which it changes back to its original state
# the position in the string at which the reversing substitution occurs; and
# the reversing substitution in the form original_symbol->substituted_symbol->reverted_symbol.
def rsub(T,Assignments):
# find_path
#
# Find path from the root down to a specified leaf
def find_path(leaf):
Path = [leaf]
parent = Parents[leaf]
while len(parent)>0:
Path.append(parent)
if parent in Parents:
parent = Parents[parent]
else:
break
return Path[::-1]
# FindReversingSubstitutions
#
    # Find reversing substitutions in one specified path through the tree,
# affecting a specified position in the strings
#
# Parameters: Path Path to be searched
    #                  pos      position in the strings
# Strategy: build up history of changes, and search back whenever a change is detected.
def FindReversingSubstitutions(Path,pos):
History = [Characters[Path[0]][pos]]
Names = Path[0:1]
Reverses = []
for taxon in Path[1:]:
current = Characters[taxon][pos]
if current==History[-1]: continue
History.append(current)
Names.append(taxon)
if len(History)>2 and History[-3]==History[-1]: # we have a reverse
Reverses.append((Names[-2],Names[-1],pos+1,History[-3],History[-2],History[-1]))
return Reverses
# create_parents
    # Invert adjacency list so we have the parent of each child
def create_parents(Adj):
Product = {node:[] for node in flatten(Adj.values())}
for parent,children in Adj.items():
for child in children:
Product[child] = parent
return Product
# get_unique
#
# Convert list of lists into a single list and remove duplicate elements
def get_unique(list_of_lists):
return list(set(flatten(list_of_lists)))
Adj,root = newick_to_adjacency_list(T,return_root=True)
fc = FastaContent(Assignments)
Characters = fc.to_dict() # So we can find character for each species
_,string = fc[0]
m = len(string)
Parents = create_parents(Adj)
Paths = [find_path(node) for node in flatten(Adj.values()) if len(Adj[node])==0]
# Build list of unique reversals.
return get_unique([subst for subst in [FindReversingSubstitutions(path,pos) for path in Paths for pos in range(m)] if len(subst)>0])
# cset A submatrix of a matrix M is a matrix formed by selecting rows and columns from M and
# taking only those entries found at the intersections of the selected rows and columns.
# We may also think of a submatrix as formed by deleting the remaining rows and columns from M
#
# Given: An inconsistent character table C on at most 100 taxa.
#
# Return: A submatrix of C representing a consistent character table on the same taxa
# and formed by deleting a single row of C.
def cset(table):
# get_split
#
# Used to split indices of character (row) into two groups, one for each allele
# First we yield all indices corresponding to 0, then those to 1
def get_splits(character):
for allele in [0,1]:
yield set(i for i, c in enumerate(character) if c == allele)
# conflicts_with
#
# Determine whether two characters are in conflict
# We iterate through all the splits of each character.
# If any pair of splits consists of two disjoint subsets,
# the characters are compatible.
def conflicts_with(c1, c2):
for part1 in get_splits(c1):
for part2 in get_splits(c2):
if len(part1.intersection(part2)) == 0: return False
return True
n = len(table)
Conflicts = [0 for _ in range(n)] # Count number of times each row conflicts with another
for i in range(n):
for j in range(i+1,n):
if conflicts_with(table[i],table[j]):
Conflicts[i] += 1
Conflicts[j] += 1
return [table[row] for row in range(n) if row!=argmax(Conflicts)]
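# Illustrative sketch (not part of the original solution): a made-up 3-row table
# whose middle row conflicts with both others, so cset drops it and returns a
# consistent 2-row submatrix (argmax is assumed to come from numpy, as above).
def _demo_cset():
    table = [[1, 1, 0, 0],
             [1, 0, 1, 0],
             [0, 0, 1, 1]]
    return cset(table)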
# cntq Counting Quartets
def cntq(n,newick):
def create_adj(tree):
adj = {}
def bfs(tree):
id = tree['id']
name = tree['name']
children = tree['children']
parentid = tree['parentid']
if len(name)==0:
adj[id]=[]
if parentid>-1:
adj[parentid].append(id if len(name)==0 else name)
for child in children:
bfs(child)
bfs(tree)
return adj
def bfs(subtree,leaves):
for node in adj[subtree]:
if type(node)==str:
leaves.append(node)
else:
bfs(node,leaves)
def pair(leaves):
for i in range(len(leaves)):
for j in range(i+1,len(leaves)):
yield [leaves[i],leaves[j]] if leaves[i]<leaves[j] else [leaves[j],leaves[i]]
adj = create_adj(parse(newick))
taxa = [leaf for children in adj.values() for leaf in children if type(leaf)==str]
splitting_edges = [(key,child) for key,value in adj.items() for child in value if type(child)==int]
Quartets = []
for _,node in splitting_edges:
leaves = []
bfs(node,leaves)
other_leaves = [leaf for leaf in taxa if leaf not in leaves]
for pair1 in pair(leaves):
for pair2 in pair(other_leaves):
quartet = pair1 + pair2 if pair1[0]<pair2[0] else pair2 + pair1
Quartets.append(quartet)
Quartets.sort()
Unique =[Quartets[0]]
for i in range(1,len(Quartets)):
if Quartets[i]!=Unique[-1]:
Unique.append(Quartets[i])
return len(Unique),Unique | gpl-3.0 | -8,262,652,753,592,696,000 | 35.424658 | 149 | 0.549615 | false |
lebek/reversible-raytracer | util.py | 1 | 1864 | import numpy as np
import theano
import theano.tensor as T
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from scipy.misc import imsave
def initialize_weight(n_vis, n_hid, W_name, numpy_rng, rng_dist):
if 'uniform' in rng_dist:
W = numpy_rng.uniform(low=-np.sqrt(6. / (n_vis + n_hid)),\
high=np.sqrt(6. / (n_vis + n_hid)),
size=(n_vis, n_hid)).astype(theano.config.floatX)
elif rng_dist == 'normal':
W = 0.01 * numpy_rng.normal(size=(n_vis, n_hid)).astype(theano.config.floatX)
return theano.shared(value = W, name=W_name, borrow=True)
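# Illustrative usage (not in the original file): build a 784x256 weight matrix,
# uniformly initialized, as a theano shared variable.
def _demo_initialize_weight():
    rng = np.random.RandomState(1234)
    return initialize_weight(784, 256, 'W_hidden', rng, 'uniform')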
'''decaying learning rate'''
def get_epsilon(epsilon, n, i):
return float(epsilon / ( 1 + i/float(n)))
def broadcasted_switch(a, b, c):
return T.switch(a.dimshuffle(0, 1, 'x'), b, c)
def transNorm(transM, vec):
transN = T.zeros_like(vec)
transN = T.set_subtensor(transN[:,:,0], vec[:,:,0] * transM[0][0] \
+ vec[:,:,1] * transM[1][0] + vec[:,:,2] * transM[2][0])
transN = T.set_subtensor(transN[:,:,1], vec[:,:,0] * transM[0][1] \
+ vec[:,:,1] * transM[1][1] + vec[:,:,2] * transM[2][1])
transN = T.set_subtensor(transN[:,:,2], vec[:,:,0] * transM[0][2] \
+ vec[:,:,1] * transM[1][2] + vec[:,:,2] * transM[2][2])
return transN
def drawWithMarkers(fname, im):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(im, interpolation='nearest')
ax.add_patch(plt.Rectangle((85-3, 90-3), 6, 6, color='red',
linewidth=2, fill=False))
ax.add_patch(plt.Rectangle((90-3, 50-3), 6, 6, color='red',
linewidth=2, fill=False))
fig.savefig(fname, bbox_inches='tight', pad_inches=0)
def draw(fname, im):
imsave(fname, im)
| mit | -7,149,291,079,279,311,000 | 32.285714 | 88 | 0.551502 | false |
macrman/PyramidScheme | PyramidScheme/GeneratedParser.py | 1 | 11482 | from .Nodes import *
# Begin -- grammar generated by Yapps
import sys, re
from yapps import runtime
class R7RSScanner(runtime.Scanner):
patterns = [
('"do"', re.compile('do')),
('"if"', re.compile('if')),
('"set!"', re.compile('set!')),
('""', re.compile('')),
('"\\."', re.compile('\\.')),
('"lambda"', re.compile('lambda')),
('\\s+', re.compile('\\s+')),
('OPAREN', re.compile('\\(')),
('CPAREN', re.compile('\\)')),
('NUMBER', re.compile('[0-9]+')),
('BOOLEAN', re.compile('#t|#f|#true|#false')),
('IDENTIFIER', re.compile('[-+.><?*/!@%^&=a-zA-Z0-9_]+')),
('STRING', re.compile('"(.*?)"')),
]
def __init__(self, str,*args,**kw):
runtime.Scanner.__init__(self,None,{'\\s+':None,},str,*args,**kw)
class R7RS(runtime.Parser):
Context = runtime.Context
def identifier(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'identifier', [])
IDENTIFIER = self._scan('IDENTIFIER', context=_context)
return Id(IDENTIFIER)
def expression(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'expression', [])
_token = self._peek('IDENTIFIER', 'OPAREN', 'BOOLEAN', 'NUMBER', 'STRING', context=_context)
if _token == 'IDENTIFIER':
identifier = self.identifier(_context)
return identifier
elif _token != 'OPAREN':
literal = self.literal(_context)
return literal
else: # == 'OPAREN'
OPAREN = self._scan('OPAREN', context=_context)
structured_form = self.structured_form(_context)
CPAREN = self._scan('CPAREN', context=_context)
return structured_form
def structured_form(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'structured_form', [])
_token = self._peek('"set!"', '"if"', '"lambda"', 'IDENTIFIER', 'OPAREN', '"do"', 'BOOLEAN', 'NUMBER', 'STRING', context=_context)
if _token == '"set!"':
assignment = self.assignment(_context)
return assignment
elif _token not in ['"if"', '"lambda"', '"do"']:
procedure_call = self.procedure_call(_context)
return procedure_call
elif _token == '"if"':
conditional = self.conditional(_context)
return conditional
elif _token == '"lambda"':
lambda_expression = self.lambda_expression(_context)
return lambda_expression
else: # == '"do"'
derived_expression = self.derived_expression(_context)
return derived_expression
def lambda_expression(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'lambda_expression', [])
self._scan('"lambda"', context=_context)
formals = self.formals(_context)
body = self.body(_context)
return Lambda(formals, body)
def formals(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'formals', [])
_token = self._peek('IDENTIFIER', 'OPAREN', context=_context)
if _token == 'IDENTIFIER':
identifier = self.identifier(_context)
return Arguments([identifier], [])
else: # == 'OPAREN'
OPAREN = self._scan('OPAREN', context=_context)
paren_formals = self.paren_formals(_context)
CPAREN = self._scan('CPAREN', context=_context)
return Arguments(paren_formals[0], paren_formals[1])
def paren_formals(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'paren_formals', [])
_token = self._peek('IDENTIFIER', '""', context=_context)
if _token == 'IDENTIFIER':
args = []
while 1:
identifier = self.identifier(_context)
args.append(identifier)
if self._peek('IDENTIFIER', '"\\."', 'CPAREN', context=_context) != 'IDENTIFIER': break
optional_args = None
if self._peek('"\\."', 'CPAREN', context=_context) == '"\\."':
optional_args = self.optional_args(_context)
return [args, optional_args]
else: # == '""'
epsilon = self.epsilon(_context)
return ([],[])
def optional_args(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'optional_args', [])
self._scan('"\\."', context=_context)
identifier = self.identifier(_context)
return identifier
def body(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'body', [])
sequence = self.sequence(_context)
return sequence
def sequence(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'sequence', [])
seq = []
while 1:
expression = self.expression(_context)
seq.append(expression)
if self._peek('IDENTIFIER', 'OPAREN', 'BOOLEAN', 'NUMBER', 'STRING', 'CPAREN', context=_context) not in ['IDENTIFIER', 'OPAREN', 'BOOLEAN', 'NUMBER', 'STRING']: break
return seq
def program(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'program', [])
p = []
while 1:
expression = self.expression(_context)
p.append(expression)
if self._peek('IDENTIFIER', 'OPAREN', 'BOOLEAN', 'NUMBER', 'STRING', '""', context=_context) not in ['IDENTIFIER', 'OPAREN', 'BOOLEAN', 'NUMBER', 'STRING']: break
epsilon = self.epsilon(_context)
return Program(p)
def epsilon(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'epsilon', [])
self._scan('""', context=_context)
return None
def literal(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'literal', [])
self_evaluating = self.self_evaluating(_context)
return self_evaluating
def self_evaluating(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'self_evaluating', [])
_token = self._peek('BOOLEAN', 'NUMBER', 'STRING', context=_context)
if _token == 'BOOLEAN':
BOOLEAN = self._scan('BOOLEAN', context=_context)
boolean = BOOLEAN
return Bool(True if (boolean == "#t" or boolean == "#true") else False)
elif _token == 'NUMBER':
NUMBER = self._scan('NUMBER', context=_context)
return Num(int(NUMBER))
else: # == 'STRING'
STRING = self._scan('STRING', context=_context)
return Str(STRING[1:-1])
def assignment(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'assignment', [])
self._scan('"set!"', context=_context)
identifier = self.identifier(_context)
expression = self.expression(_context)
return Assign(identifier, expression)
def procedure_call(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'procedure_call', [])
operands = []
operator = self.operator(_context)
while self._peek('IDENTIFIER', 'OPAREN', 'BOOLEAN', 'NUMBER', 'STRING', 'CPAREN', context=_context) != 'CPAREN':
operand = self.operand(_context)
operands.append(operand)
return Call(operator, operands)
def operand(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'operand', [])
expression = self.expression(_context)
return expression
def operator(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'operator', [])
expression = self.expression(_context)
return expression
def conditional(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'conditional', [])
self._scan('"if"', context=_context)
test = self.test(_context)
consequent = self.consequent(_context)
alternate = self.alternate(_context)
return Conditional(test, consequent, alternate)
def test(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'test', [])
expression = self.expression(_context)
return expression
def consequent(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'consequent', [])
expression = self.expression(_context)
return expression
def alternate(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'alternate', [])
_token = self._peek('IDENTIFIER', 'OPAREN', '""', 'BOOLEAN', 'NUMBER', 'STRING', context=_context)
if _token != '""':
expression = self.expression(_context)
return expression
else: # == '""'
epsilon = self.epsilon(_context)
return None
def derived_expression(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'derived_expression', [])
do_expr = self.do_expr(_context)
return do_expr
def do_expr(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'do_expr', [])
self._scan('"do"', context=_context)
OPAREN = self._scan('OPAREN', context=_context)
iter_specs = []
while self._peek('CPAREN', 'OPAREN', context=_context) == 'OPAREN':
iteration_spec = self.iteration_spec(_context)
iter_specs.append(iteration_spec)
CPAREN = self._scan('CPAREN', context=_context)
OPAREN = self._scan('OPAREN', context=_context)
test = self.test(_context)
do_result = self.do_result(_context)
CPAREN = self._scan('CPAREN', context=_context)
cmds = []
while self._peek('IDENTIFIER', 'OPAREN', 'BOOLEAN', 'NUMBER', 'STRING', 'CPAREN', context=_context) != 'CPAREN':
expression = self.expression(_context)
cmds.append(expression)
return Do(iter_specs, test, do_result, cmds)
def iteration_spec(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'iteration_spec', [])
step = None
OPAREN = self._scan('OPAREN', context=_context)
identifier = self.identifier(_context)
init = self.init(_context)
if self._peek('CPAREN', 'IDENTIFIER', 'OPAREN', 'BOOLEAN', 'NUMBER', 'STRING', context=_context) != 'CPAREN':
step = self.step(_context)
CPAREN = self._scan('CPAREN', context=_context)
return IterSpec(identifier, init, step)
def init(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'init', [])
expression = self.expression(_context)
return expression
def step(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'step', [])
expression = self.expression(_context)
return expression
def do_result(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'do_result', [])
_token = self._peek('IDENTIFIER', 'OPAREN', '""', 'BOOLEAN', 'NUMBER', 'STRING', context=_context)
if _token != '""':
sequence = self.sequence(_context)
return sequence
else: # == '""'
epsilon = self.epsilon(_context)
return None
def parse(rule, text):
P = R7RS(R7RSScanner(text))
return runtime.wrap_error_reporter(P, rule)
# End -- grammar generated by Yapps
| bsd-2-clause | -5,070,462,504,728,422,000 | 41.058608 | 178 | 0.57159 | false |
srinath-chakravarthy/ovito | tests/scripts/test_suite/cluster_analysis_modifier.py | 1 | 1067 | from ovito import *
from ovito.io import *
from ovito.modifiers import *
import numpy as np
node = import_file("../../files/CFG/shear.void.120.cfg")
node.add_to_scene()
node.modifiers.append(SliceModifier(
distance = -12,
inverse = True,
slice_width = 18.0
))
node.modifiers.append(SliceModifier(
distance = 12,
inverse = True,
slice_width = 18.0
))
modifier = ClusterAnalysisModifier()
node.modifiers.append(modifier)
print("Parameter defaults:")
print(" cutoff: {}".format(modifier.cutoff))
modifier.cutoff = 2.8
print(" sort_by_size: {}".format(modifier.sort_by_size))
modifier.sort_by_size = False
node.compute()
print("Output:")
print("Number of clusters: {}".format(node.output.attributes['ClusterAnalysis.cluster_count']))
assert(node.output.attributes['ClusterAnalysis.cluster_count'] == 2)
print(node.output.cluster)
print(node.output.cluster.array)
modifier.sort_by_size = True
node.compute()
print(node.output.attributes['ClusterAnalysis.largest_size'])
assert(node.output.attributes['ClusterAnalysis.largest_size'] >= 1) | gpl-3.0 | 5,575,231,833,728,878,000 | 24.428571 | 95 | 0.730084 | false |
tensorflow/lucid | lucid/modelzoo/caffe_models/others.py | 1 | 5766 | # Copyright 2018 The Lucid Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import, division, print_function
from lucid.modelzoo.vision_base import Model, _layers_from_list_of_dicts, IMAGENET_MEAN_BGR
class CaffeNet_caffe(Model):
"""CaffeNet (AlexNet variant included in Caffe)
CaffeNet is a slight variant on AlexNet, described here:
https://github.com/BVLC/caffe/tree/master/models/bvlc_reference_caffenet
"""
model_path = 'gs://modelzoo/vision/caffe_models/CaffeNet.pb'
labels_path = 'gs://modelzoo/labels/ImageNet_standard.txt'
synsets_path = 'gs://modelzoo/labels/ImageNet_standard_synsets.txt'
dataset = 'ImageNet'
image_shape = [227, 227, 3]
is_BGR = True
image_value_range = (-IMAGENET_MEAN_BGR, 255-IMAGENET_MEAN_BGR)
input_name = 'data'
CaffeNet_caffe.layers = _layers_from_list_of_dicts(CaffeNet_caffe(), [
{'tags': ['conv'], 'name': 'conv5/concat', 'depth': 256} ,
{'tags': ['conv'], 'name': 'conv5/conv5', 'depth': 256} ,
{'tags': ['dense'], 'name': 'fc6/fc6', 'depth': 4096} ,
{'tags': ['dense'], 'name': 'fc7/fc7', 'depth': 4096} ,
{'tags': ['dense'], 'name': 'prob', 'depth': 1000} ,
])
class VGG16_caffe(Model):
"""VGG16 model used in ImageNet ILSVRC-2014, ported from caffe.
VGG16 was introduced by Simonyan & Zisserman (2014):
https://arxiv.org/pdf/1409.1556.pdf
http://www.robots.ox.ac.uk/~vgg/research/very_deep/
as the Oxford Visual Geometry Group's submission for the ImageNet ILSVRC-2014
contest. We download their caffe trained model from
https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md
and convert it with caffe-tensorflow.
"""
model_path = 'gs://modelzoo/vision/caffe_models/VGG16.pb'
labels_path = 'gs://modelzoo/labels/ImageNet_standard.txt'
synsets_path = 'gs://modelzoo/labels/ImageNet_standard_synsets.txt'
dataset = 'ImageNet'
image_shape = [224, 224, 3]
is_BGR = True
image_value_range = (-IMAGENET_MEAN_BGR, 255-IMAGENET_MEAN_BGR)
input_name = 'input'
VGG16_caffe.layers = _layers_from_list_of_dicts(VGG16_caffe(), [
{'tags': ['conv'], 'name': 'conv1_1/conv1_1', 'depth': 64},
{'tags': ['conv'], 'name': 'conv1_2/conv1_2', 'depth': 64},
{'tags': ['conv'], 'name': 'conv2_1/conv2_1', 'depth': 128},
{'tags': ['conv'], 'name': 'conv2_2/conv2_2', 'depth': 128},
{'tags': ['conv'], 'name': 'conv3_1/conv3_1', 'depth': 256},
{'tags': ['conv'], 'name': 'conv3_2/conv3_2', 'depth': 256},
{'tags': ['conv'], 'name': 'conv3_3/conv3_3', 'depth': 256},
{'tags': ['conv'], 'name': 'conv4_1/conv4_1', 'depth': 512},
{'tags': ['conv'], 'name': 'conv4_2/conv4_2', 'depth': 512},
{'tags': ['conv'], 'name': 'conv4_3/conv4_3', 'depth': 512},
{'tags': ['conv'], 'name': 'conv5_1/conv5_1', 'depth': 512},
{'tags': ['conv'], 'name': 'conv5_2/conv5_2', 'depth': 512},
{'tags': ['conv'], 'name': 'conv5_3/conv5_3', 'depth': 512},
{'tags': ['dense'], 'name': 'fc6/fc6', 'depth': 4096},
{'tags': ['dense'], 'name': 'fc7/fc7', 'depth': 4096},
{'tags': ['dense'], 'name': 'prob', 'depth': 1000},
])
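# Illustrative sketch (not part of the original module): render a feature
# visualization for one of the layers listed above. The layer/channel choice is
# arbitrary and this assumes lucid's optvis API is available in this install.
def _demo_vgg16_render():
  import lucid.optvis.render as render
  model = VGG16_caffe()
  model.load_graphdef()
  return render.render_vis(model, 'conv5_3/conv5_3:0')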
class VGG19_caffe(Model):
"""VGG16 model used in ImageNet ILSVRC-2014, ported from caffe.
VGG19 was introduced by Simonyan & Zisserman (2014):
https://arxiv.org/pdf/1409.1556.pdf
http://www.robots.ox.ac.uk/~vgg/research/very_deep/
as the Oxford Visual Geometry Group's submission for the ImageNet ILSVRC-2014
contest. We download their caffe trained model from
https://gist.github.com/ksimonyan/3785162f95cd2d5fee77#file-readme-md
and convert it with caffe-tensorflow.
"""
model_path = 'gs://modelzoo/vision/caffe_models/VGG19.pb'
labels_path = 'gs://modelzoo/labels/ImageNet_standard.txt'
synsets_path = 'gs://modelzoo/labels/ImageNet_standard_synsets.txt'
dataset = 'ImageNet'
image_shape = [224, 224, 3]
is_BGR = True
image_value_range = (-IMAGENET_MEAN_BGR, 255-IMAGENET_MEAN_BGR)
input_name = 'input'
VGG19_caffe.layers = _layers_from_list_of_dicts(VGG19_caffe(), [
{'tags': ['conv'], 'name': 'conv1_1/conv1_1', 'depth': 64},
{'tags': ['conv'], 'name': 'conv1_2/conv1_2', 'depth': 64},
{'tags': ['conv'], 'name': 'conv2_1/conv2_1', 'depth': 128},
{'tags': ['conv'], 'name': 'conv2_2/conv2_2', 'depth': 128},
{'tags': ['conv'], 'name': 'conv3_1/conv3_1', 'depth': 256},
{'tags': ['conv'], 'name': 'conv3_2/conv3_2', 'depth': 256},
{'tags': ['conv'], 'name': 'conv3_3/conv3_3', 'depth': 256},
{'tags': ['conv'], 'name': 'conv3_4/conv3_4', 'depth': 256},
{'tags': ['conv'], 'name': 'conv4_1/conv4_1', 'depth': 512},
{'tags': ['conv'], 'name': 'conv4_2/conv4_2', 'depth': 512},
{'tags': ['conv'], 'name': 'conv4_3/conv4_3', 'depth': 512},
{'tags': ['conv'], 'name': 'conv4_4/conv4_4', 'depth': 512},
{'tags': ['conv'], 'name': 'conv5_1/conv5_1', 'depth': 512},
{'tags': ['conv'], 'name': 'conv5_2/conv5_2', 'depth': 512},
{'tags': ['conv'], 'name': 'conv5_3/conv5_3', 'depth': 512},
{'tags': ['conv'], 'name': 'conv5_4/conv5_4', 'depth': 512},
{'tags': ['dense'], 'name': 'fc6/fc6', 'depth': 4096},
{'tags': ['dense'], 'name': 'fc7/fc7', 'depth': 4096},
{'tags': ['dense'], 'name': 'prob', 'depth': 1000},
])
| apache-2.0 | 5,251,851,677,240,020,000 | 45.5 | 91 | 0.626084 | false |
Joergen/zamboni | apps/market/tests/test_cron.py | 1 | 1471 | from datetime import datetime, timedelta
import time
import mock
from nose.tools import eq_
import amo
import amo.tests
from devhub.models import ActivityLog
from market.cron import mkt_gc
from users.models import UserProfile
class TestGarbage(amo.tests.TestCase):
def setUp(self):
self.user = UserProfile.objects.create(
email='[email protected]', name='gc_test')
amo.log(amo.LOG.CUSTOM_TEXT, 'testing', user=self.user,
created=datetime(2001, 1, 1))
def test_garbage_collection(self):
eq_(ActivityLog.objects.all().count(), 1)
mkt_gc()
eq_(ActivityLog.objects.all().count(), 0)
@mock.patch('os.stat')
@mock.patch('os.listdir')
@mock.patch('os.remove')
def test_dump_delete(self, rm_mock, ls_mock, stat_mock):
ls_mock.return_value = ['lol']
stat_mock.return_value = StatMock(days_ago=1000)
mkt_gc()
assert rm_mock.call_args_list[0][0][0].endswith('lol')
@mock.patch('os.stat')
@mock.patch('os.listdir')
@mock.patch('os.remove')
def test_new_no_delete(self, rm_mock, ls_mock, stat_mock):
ls_mock.return_value = ['lol']
stat_mock.return_value = StatMock(days_ago=1)
mkt_gc()
assert not rm_mock.called
class StatMock(object):
def __init__(self, days_ago):
self.st_mtime = time.mktime(
(datetime.now() - timedelta(days_ago)).timetuple())
self.st_size = 100
| bsd-3-clause | 24,974,685,451,685,576 | 27.288462 | 63 | 0.626105 | false |
seoweon/narajangteo | narajangteo_crawling.py | 1 | 9596 |
# coding: utf-8
# # <center> Crawling 나라장터 (Korea's public e-procurement portal) bid notices with Python3</center>
#
# # A simple program built to monitor bid notices posted on 나라장터: given a list of search keywords, it fetches the bid notices published over the last 7 days and organizes them into an Excel file. It is a crawling program, but it does not use BeautifulSoup.
# In[18]:
import pandas as pd
import numpy as np
import requests
import os
import datetime, time
import string
from time import localtime, strftime
from datetime import timedelta
from tqdm import tqdm
from xlsxwriter.utility import xl_col_to_name, xl_range
from lxml import html
# In[6]:
class KoreaPageScraper(object):
def __init__(self):
pass
def request_url(self,cat):
'''returns url for a category'''
d = datetime.date.today()
fromtd = d - timedelta(days=7)
start_date = str(fromtd.strftime("%Y/%m/%d"))
end_date =str(d.strftime("%Y/%m/%d"))
fromBidDt = requests.utils.quote(start_date, safe='')
toBidDt = requests.utils.quote(end_date, safe='')
bidNm = requests.utils.quote(cat.encode('euc-kr'))
url = "http://www.g2b.go.kr:8101/ep/tbid/tbidList.do?taskClCds=&bidNm=" + bidNm + "&searchDtType=1&fromBidDt=" + fromBidDt + "&toBidDt=" + toBidDt + "&fromOpenBidDt=&toOpenBidDt=&radOrgan=1&instNm=&exceptEnd=Y&area=®Yn=Y&bidSearchType=1&searchType=1&recordCountPerPage=1000"
return url
def scrape_cat(self,cat):
'''searches for each category'''
cat_url = self.request_url(cat)
df = pd.read_html(cat_url)[0]
df['search_term']=cat
return df
def get_bidurl(self,bidnum):
'''gets the bid url based on the bid registration number
        (ones that do not have a proper bid registration number usually don't have a corresponding link and instead ask the user to check the organization website for more information)'''
num_split = str(bidnum).split(sep='-')
bidno = num_split[0]
if len(bidno) == 11:
bidseq = num_split[-1]
bidurl = "http://www.g2b.go.kr:8081/ep/invitation/publish/bidInfoDtl.do?bidno="+bidno+"&bidseq="+bidseq
return bidurl
else:
return "Check organization website (공고기관) for details"
def scrape_categories(self, categories):
'''scrapes each keyword and compiles it into a list.
There is a 1 second delay between each search term to prevent getting blocked out of the site'''
appended_df = []
for category in tqdm(categories):
one_df = self.scrape_cat(category)
appended_df.append(one_df)
time.sleep(1)
appended_df = pd.concat(appended_df, axis = 0)
urlist=[]
for index,row in appended_df.iterrows():
urlist.append(self.get_bidurl(row['공고번호-차수']))
appended_df['url']=urlist
return appended_df
# In[7]:
#function to read txt files and parse the list
def txt_reader(name):
with open(name+".txt",'rb') as f:
line = f.readline()
return line.decode('utf-8').split('/')
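# Illustrative sketch (not in the original notebook): txt_reader expects a UTF-8
# file named <name>.txt with keywords separated by '/'. The file name and
# keywords below are made up.
def _demo_txt_reader():
    with open('category_demo.txt', 'wb') as f:
        f.write('software/system/data'.encode('utf-8'))
    return txt_reader('category_demo')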
# In[8]:
#load the categories with the txt_reader function
category_list = txt_reader('category')
print("Getting the list of given keywords: " +str(category_list).replace('[','').replace(']','').replace("'",""))
#scrape with the "KoreaPageScraper" class
myscraper = KoreaPageScraper()
df = myscraper.scrape_categories(category_list)
# In[42]:
print(str(len(df))+" results have been found. ")
# In[11]:
#Load the excluding keywords
with open('exclude.txt','rb') as f:
line = f.readline()
contains_excluding = line.decode('utf-8').replace('/','|')
# In[40]:
print("Excluding the list of given keywords: "+str(txt_reader('exclude')).replace('[','').replace(']','').replace("'",""))
# In[43]:
#Deleting the excluding keywords and informing how many lines were deleted.
og = len(df)
df = df[-df.공고명.str.contains(contains_excluding).fillna(True)]
print("Deleted "+str(og-len(df))+" entries with keywords to exclude. (Currently at "+str(len(df))+" entries)")
# In[53]:
def clean_up(df):
#Delete duplicates (more than two keywords together)
og2 = len(df)
df = df[~df.duplicated(['공고명'])].copy()
print(str(og2-len(df))+" duplicates were found and deleted (Currently at "+str(len(df))+" entries)")
#Divide the register date and due date
df['register_date'],df['duedate'] = df['입력일시(입찰마감일시)'].str.split('(', 1).str
df['duedate']=df['duedate'].str.replace(')','').replace('-','')
df = df.drop('입력일시(입찰마감일시)',axis=1)
#Sort the values by duedate. To sort with a different value, change the following line's 'duedate' with the column name you desire to sort it by.
column_sort = 'duedate'
df = df.sort_values(by=column_sort,ascending=False)
print("Values are sorted by the column '"+column_sort+"'. To change this, please talk to the tool owner. ")
return df
# In[45]:
def filter_prioritize(df,filter_list,column):
new_df = df[df[column].isin(filter_list)].copy()
new_df[str(column+"_sorted")] = pd.Categorical(new_df[column],categories=filter_list,ordered=True)
new_df = new_df.sort_values(column+"_sorted")
return new_df
# In[54]:
#Cleaning up the df to make more sense
clean_df = clean_up(df)
# In[55]:
#Get the target organization list
org_list = txt_reader('orgs')
print("Getting the entries from target organization list: "+str(org_list).replace('[','').replace(']','').replace("'",""))
org_df = filter_prioritize(clean_df,org_list,'공고기관')
# In[56]:
class create_excel(object):
def get_length(self,column):
##
##This line is the problem!!
##
valueex = column[~column.isnull()].reset_index(drop=True)[0]
if type(valueex) == str:
if valueex.startswith('=HYPERLINK'):
return len('Click link')
else:
len_list = list(column.dropna().apply(lambda x: len(str(x))))
maxlen = max(len_list)
medlen = np.median(len_list)
meanlen = np.mean(len_list)
diff = maxlen-medlen
stdlen = np.std(len_list)
#min(A,B+C*numchars)
if maxlen < 10:
return maxlen+5
elif diff > 50:
if medlen == 0:
return min(55,meanlen+5)
return medlen
elif maxlen < 50:
return meanlen+15
else:
return 50
else:
return 5
def to_excel(self,df,name):
#Next step, format the excel file
print("saving the "+name+" list...")
docname = "나라장터_입찰공고-"+name+"-"+str(strftime("%y%m%d(%H%M%S)", localtime()))+".xlsx"
#make the destination directory, but guard against race condition
if not os.path.exists(name):
try:
os.makedirs(name)
except OSError as exc:
print(exc)
raise Exception('something failed')
writer = pd.ExcelWriter("%s/%s"%(name,docname), engine='xlsxwriter')
df.to_excel(writer,index=False,sheet_name='Sheet1')
workbook = writer.book
worksheet = writer.sheets['Sheet1']
tablerange = xl_range(0,0,len(df),len(df.columns)-1)
headerrange = xl_range(0,0,0,len(df.columns)-1)
contentrange = xl_range(1,0,len(df),len(df.columns)-1)
#Formatting headers
header_format = workbook.add_format({'bg_color':'black'})
column_format = workbook.add_format({'bottom':True,'bg_color':'white'})
link_format = workbook.add_format({'font_color':'#157993','underline':True})
# Set the column width and format.
columns = []
widths = []
for i in range(0,len(df.columns)):
a = xl_col_to_name(i)+":"+xl_col_to_name(i)
columns.append(a)
widths.append(self.get_length(df[df.columns[i]]))
for c,w in zip(columns,widths):
worksheet.set_column(c, w)
worksheet.conditional_format(contentrange,{'type':'no_errors',
'format':column_format})
worksheet.conditional_format(headerrange,{'type':'no_errors',
'format':header_format})
worksheet.conditional_format(tablerange,{'type':'text',
'criteria':'containing',
'value':'Click link',
'format':link_format})
#Formatting for putting in the header titles
table_headers = [{'header':c} for c in df.columns]
#Create a table with the data
worksheet.add_table(tablerange,{'columns' : table_headers})
writer.save()
return
# In[57]:
go_to_excel = create_excel()
# In[58]:
go_to_excel.to_excel(clean_df,'full')
# In[59]:
go_to_excel.to_excel(org_df,'orgs')
# In[60]:
print ('All done! Please hit Enter to exit this command prompt. ')
input()
# In[ ]:
| mit | 7,930,596,444,500,421,000 | 32.516245 | 285 | 0.588216 | false |
lahosken/pants | contrib/go/src/python/pants/contrib/go/tasks/go_checkstyle.py | 1 | 1900 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import subprocess
from pants.base.exceptions import TaskError
from pants.contrib.go.tasks.go_workspace_task import GoWorkspaceTask
class GoFmt(GoWorkspaceTask):
"""Checks Go code matches gofmt style."""
@classmethod
def register_options(cls, register):
super(GoFmt, cls).register_options(register)
register('--skip', type=bool, fingerprint=True, help='Skip checkstyle.')
_GO_SOURCE_EXTENSION = '.go'
def _is_checked(self, target):
return target.has_sources(self._GO_SOURCE_EXTENSION) and not target.is_synthetic
def execute(self):
if self.get_options().skip:
return
targets = self.context.targets(self._is_checked)
with self.invalidated(targets) as invalidation_check:
invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]
sources = self.calculate_sources(invalid_targets)
if sources:
args = [os.path.join(self.go_dist.goroot, 'bin', 'gofmt'), '-l'] + list(sources)
try:
output = subprocess.check_output(args)
except subprocess.CalledProcessError as e:
raise TaskError('{} failed with exit code {}'.format(' '.join(args), e.returncode),
exit_code=e.returncode)
if output:
raise TaskError('gofmt command {} failed with output {}'.format(' '.join(args), output))
def calculate_sources(self, targets):
sources = set()
for target in targets:
sources.update(source for source in target.sources_relative_to_buildroot()
if source.endswith(self._GO_SOURCE_EXTENSION))
return sources
| apache-2.0 | 7,952,592,629,862,693,000 | 36.254902 | 98 | 0.678947 | false |
lucidfrontier45/RethinkPool | tests/test_pool.py | 1 | 1063 | import rethinkdb as r
from future.moves.queue import Empty
from nose.tools import assert_raises
from rethinkpool import RethinkPool
def test_pool_create():
max_conns = 50
initial_conns = 10
rp = RethinkPool(max_conns=max_conns, initial_conns=initial_conns)
assert rp.current_conns == initial_conns
def test_create_connection():
initial_conns = 0
rp = RethinkPool(max_conns=10, initial_conns=initial_conns)
res = rp.get_resource()
assert rp.current_conns == (initial_conns + 1)
assert rp._queue.empty()
res.release()
assert not rp._queue.empty()
rp.get_resource()
assert not rp._queue.empty()
def test_pool_full():
n_conns = 10
rp = RethinkPool(max_conns=n_conns, initial_conns=n_conns, get_timeout=0.5)
assert rp._queue.full()
bussy_resources = [rp.get_resource() for _ in range(n_conns)]
assert rp._queue.empty()
with assert_raises(Empty):
res = rp.get_resource()
bussy_resources[0].release()
rp.get_resource()
[res.release() for res in bussy_resources]
| apache-2.0 | 2,208,437,086,947,595,500 | 24.309524 | 79 | 0.672625 | false |
dmanatunga/uAMP-sim | trace_reader.py | 1 | 4325 | import dateutil.parser
import events
from sim_interface import TraceReader
import json
import pickle
import gzip
class JsonTraceReader(TraceReader):
def __init__(self, filename):
self.trace_filename = filename
self.trace_logs = None
self.trace_pos = 0
self.start_time = None
self.end_time = None
def build(self):
if self.trace_filename.endswith('.json'):
with open(self.trace_filename, 'r') as fp:
trace_data = json.load(fp, object_hook=events.json_decode_event)
elif self.trace_filename.endswith('.json.gz'):
with gzip.open(self.trace_filename, 'rt') as fp:
trace_data = json.load(fp, object_hook=events.json_decode_event)
else:
raise Exception('Invalid JSON file type. Expected .json or .json.gz')
# Identify start and end time of trace
self.start_time = dateutil.parser.parse(trace_data['start_time'])
self.end_time = dateutil.parser.parse(trace_data['end_time'])
# Get the list of logs in the trace
self.trace_logs = trace_data['logs']
def finish(self):
pass
def get_event(self):
if self.end_of_trace():
return None
event = self.trace_logs[self.trace_pos]
self.trace_pos += 1
return event
def peek_event(self):
if self.end_of_trace():
return None
event = self.trace_logs[self.trace_pos]
return event
def end_of_trace(self):
return self.trace_pos >= len(self.trace_logs)
def get_events(self, count):
events_list = []
for i in range(count):
event = self.get_event()
if event:
events_list.append(event)
else:
break
return events_list
def get_start_time(self):
return self.start_time
def get_end_time(self):
return self.end_time
class PickleTraceReader(TraceReader):
def __init__(self, filename):
self.trace_filename = filename
self.trace_logs = None
self.trace_pos = 0
self.start_time = None
self.end_time = None
def build(self):
if self.trace_filename.endswith('.pkl'):
            with open(self.trace_filename, 'rb') as fp:
trace_data = pickle.load(fp)
elif self.trace_filename.endswith('.pkl.gz'):
with gzip.open(self.trace_filename, 'rb') as fp:
trace_data = pickle.load(fp)
else:
            raise Exception('Invalid pickle file type. Expected .pkl or .pkl.gz')
# Identify start and end time of trace
self.start_time = trace_data['start_time']
self.end_time = trace_data['end_time']
# Get the list of logs in the trace
self.trace_logs = trace_data['logs']
def finish(self):
pass
def get_event(self):
if self.end_of_trace():
return None
event = self.trace_logs[self.trace_pos]
self.trace_pos += 1
return event
def peek_event(self):
if self.end_of_trace():
return None
event = self.trace_logs[self.trace_pos]
return event
def end_of_trace(self):
return self.trace_pos >= len(self.trace_logs)
def get_events(self, count):
events_list = []
for i in range(count):
event = self.get_event()
if event:
events_list.append(event)
else:
break
return events_list
def get_start_time(self):
return self.start_time
def get_end_time(self):
return self.end_time
def get_trace_reader(filename, trace_type=None):
if trace_type:
if trace_type == 'json':
return JsonTraceReader(filename=filename)
elif trace_type == 'pickle':
return PickleTraceReader(filename=filename)
else:
raise Exception("Invalid Trace File Type")
else:
if filename.endswith('.json') or filename.endswith('.json.gz'):
return JsonTraceReader(filename=filename)
elif filename.endswith('.pkl') or filename.endswith('.pkl.gz'):
return PickleTraceReader(filename=filename)
else:
raise Exception("Invalid Trace File Type")
| mit | -3,759,522,340,739,681,000 | 27.833333 | 81 | 0.578497 | false |
bt3gl/Numerical-Methods-for-Physics | homework3_linear_algebra_FFT/condition_number/gaussElimination.py | 1 | 2113 | """
    This module solves a linear system by Gaussian elimination with pivoting.
    Almost a copy of Mike Zingale's code, spring 2013.
"""
import numpy as npy
def gaussElim(A, b):
""" perform gaussian elimination with pivoting, solving A x = b A
is an NxN matrix, x and b are an N-element vectors. Note: A
and b are changed upon exit to be in upper triangular (row
echelon) form """
# b is a vector
if not b.ndim == 1:
print "ERROR: b should be a vector"
return None
N = len(b)
# A is square, with each dimension of length N
if not (A.shape[0] == N and A.shape[1] == N):
print "ERROR: A should be square with each dim of same length as b"
return None
# allocation the solution array
x = npy.zeros((N), dtype=A.dtype)
# find the scale factors for each row -- this is used when pivoting
scales = npy.max(npy.abs(A), 1)
# keep track of the number of times we swapped rows
numRowSwap = 0
# main loop over rows
for k in range(N):
# find the pivot row based on the size of column k -- only consider
# the rows beyond the current row
rowMax = npy.argmax(A[k:, k]/scales[k:])
if (k > 0): rowMax += k # we sliced A from k:, correct for total rows
# swap the row with the largest scaled element in the current column
# with the current row (pivot) -- do this with b too!
if not rowMax == k:
A[[k, rowMax],:] = A[[rowMax, k],:]
b[[k, rowMax]] = b[[rowMax, k]]
numRowSwap += 1
# do the forward-elimination for all rows below the current
for i in range(k+1, N):
coeff = A[i,k]/A[k,k]
for j in range(k+1, N):
A[i,j] += -A[k,j]*coeff
A[i,k] = 0.0
b[i] += -coeff*b[k]
# last solution is easy
x[N-1] = b[N-1]/A[N-1,N-1]
for i in reversed(range(N-1)):
isum = b[i]
for j in range(i+1,N):
isum += -A[i,j]*x[j]
x[i] = isum/A[i,i]
return x
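# Illustrative usage (not in the original module): solve a small 2x2 system;
# the expected solution is x = [1, 1].
def _demo_gaussElim():
    A = npy.array([[3.0, 2.0], [1.0, -1.0]])
    b = npy.array([5.0, 0.0])
    return gaussElim(A, b)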
| apache-2.0 | -48,295,339,072,040,350 | 26.802632 | 81 | 0.550402 | false |
seem-sky/newspaper | newspaper/nlp.py | 1 | 5105 | # -*- coding: utf-8 -*-
"""
Anything natural language related should be abstracted into this file.
"""
__title__ = 'newspaper'
__author__ = 'Lucas Ou-Yang'
__license__ = 'MIT'
__copyright__ = 'Copyright 2014, Lucas Ou-Yang'
import re
import math
import operator
from collections import Counter
from . import settings
with open(settings.NLP_STOPWORDS_EN, 'r') as f:
stopwords = set([w.strip() for w in f.readlines()])
ideal = 20.0
def summarize(url='', title='', text=''):
if (text == '' or title == ''):
return []
if isinstance(title, unicode):
title = title.encode('utf-8', 'ignore')
if isinstance(text, unicode):
text = text.encode('utf-8', 'ignore')
summaries = []
sentences = split_sentences(text)
keys = keywords(text)
titleWords = split_words(title)
    # Score sentences, and use the top 5 sentences
ranks = score(sentences, titleWords, keys).most_common(5)
for rank in ranks:
summaries.append(rank[0])
return summaries
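# Illustrative sketch (not part of the original module): summarizing a made-up
# article body; nltk's punkt tokenizer must be installed for split_sentences.
def _demo_summarize():
    title = 'Example article about summarization'
    text = ('This example article explains summarization in a few sentences. '
            'Each sentence is scored against the title words and the keywords. '
            'The highest scoring sentences are returned as the summary.')
    return summarize(title=title, text=text)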
def score(sentences, titleWords, keywords):
"""Score sentences based on different features
"""
senSize = len(sentences)
ranks = Counter()
for i, s in enumerate(sentences):
sentence = split_words(s)
titleFeature = title_score(titleWords, sentence)
sentenceLength = length_score(len(sentence))
sentencePosition = sentence_position(i+1, senSize)
sbsFeature = sbs(sentence, keywords)
dbsFeature = dbs(sentence, keywords)
frequency = (sbsFeature + dbsFeature) / 2.0 * 10.0
# Weighted average of scores from four categories
totalScore = (titleFeature*1.5 + frequency*2.0 +
sentenceLength*1.0 + sentencePosition*1.0)/4.0
ranks[s] = totalScore
return ranks
def sbs(words, keywords):
score = 0.0
if (len(words) == 0):
return 0
for word in words:
if word in keywords:
score += keywords[word]
return (1.0 / math.fabs(len(words)) * score)/10.0
def dbs(words, keywords):
if (len(words) == 0):
return 0
summ = 0
first = []
second = []
for i, word in enumerate(words):
if word in keywords:
score = keywords[word]
if first == []:
first = [i, score]
else:
second = first
first = [i, score]
dif = first[0] - second[0]
summ += (first[1]*second[1]) / (dif ** 2)
# Number of intersections
k = len(set(keywords.keys()).intersection(set(words)))+1
return (1/(k*(k+1.0))*summ)
def split_words(text):
"""Split a string into array of words
"""
try:
text = re.sub(r'[^\w ]', '', text) # strip special chars
return [x.strip('.').lower() for x in text.split()]
except TypeError:
return None
def keywords(text):
"""Get the top 10 keywords and their frequency scores ignores blacklisted
words in stopwords, counts the number of occurrences of each word, and
sorts them in reverse natural order (so descending) by number of
occurrences.
"""
text = split_words(text)
# of words before removing blacklist words
num_words = len(text)
text = [x for x in text if x not in stopwords]
freq = Counter()
for word in text:
freq[word] += 1
min_size = min(10, len(freq))
keywords = tuple(freq.most_common(min_size))
keywords = dict((x, y) for x, y in keywords)
for k in keywords:
articleScore = keywords[k]*1.0 / max(num_words, 1)
keywords[k] = articleScore * 1.5 + 1
keywords = sorted(keywords.iteritems(), key=operator.itemgetter(1))
keywords.reverse()
return dict(keywords)
def split_sentences(text):
"""Split a large string into sentences
"""
import nltk.data
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
sentences = tokenizer.tokenize(text)
sentences = [x.replace('\n', '') for x in sentences if len(x) > 10]
return sentences
def length_score(sentence_len):
return 1 - math.fabs(ideal - sentence_len) / ideal
def title_score(title, sentence):
title = [x for x in title if x not in stopwords]
count = 0.0
for word in sentence:
if (word not in stopwords and word in title):
count += 1.0
return count / max(len(title), 1)
def sentence_position(i, size):
"""Different sentence positions indicate different
probability of being an important sentence.
"""
normalized = i * 1.0 / size
if (normalized > 1.0):
return 0
elif (normalized > 0.9):
return 0.15
elif (normalized > 0.8):
return 0.04
elif (normalized > 0.7):
return 0.04
elif (normalized > 0.6):
return 0.06
elif (normalized > 0.5):
return 0.04
elif (normalized > 0.4):
return 0.05
elif (normalized > 0.3):
return 0.08
elif (normalized > 0.2):
return 0.14
elif (normalized > 0.1):
return 0.23
elif (normalized > 0):
return 0.17
else:
return 0
| mit | 8,296,216,312,425,050,000 | 26.446237 | 77 | 0.59667 | false |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/idlelib/MultiCall.py | 1 | 14654 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: MultiCall.py
"""
MultiCall - a class which inherits its methods from a Tkinter widget (Text, for
example), but enables multiple calls of functions per virtual event - all
matching events will be called, not only the most specific one. This is done
by wrapping the event functions - event_add, event_delete and event_info.
MultiCall recognizes only a subset of legal event sequences. Sequences which
are not recognized are treated by the original Tk handling mechanism. A
more-specific event will be called before a less-specific event.
The recognized sequences are complete one-event sequences (no emacs-style
Ctrl-X Ctrl-C, no shortcuts like <3>), for all types of events.
Key/Button Press/Release events can have modifiers.
The recognized modifiers are Shift, Control, Option and Command for Mac, and
Control, Alt, Shift, Meta/M for other platforms.
For all events which were handled by MultiCall, a new member is added to the
event instance passed to the binded functions - mc_type. This is one of the
event type constants defined in this module (such as MC_KEYPRESS).
For Key/Button events (which are handled by MultiCall and may receive
modifiers), another member is added - mc_state. This member gives the state
of the recognized modifiers, as a combination of the modifier constants
also defined in this module (for example, MC_SHIFT).
Using these members is absolutely portable.
The order by which events are called is defined by these rules:
1. A more-specific event will be called before a less-specific event.
2. A recently-binded event will be called before a previously-binded event,
unless this conflicts with the first rule.
Each function will be called at most once for each event.
"""
import sys
import string
import re
import Tkinter
from idlelib import macosxSupport
MC_KEYPRESS = 0
MC_KEYRELEASE = 1
MC_BUTTONPRESS = 2
MC_BUTTONRELEASE = 3
MC_ACTIVATE = 4
MC_CIRCULATE = 5
MC_COLORMAP = 6
MC_CONFIGURE = 7
MC_DEACTIVATE = 8
MC_DESTROY = 9
MC_ENTER = 10
MC_EXPOSE = 11
MC_FOCUSIN = 12
MC_FOCUSOUT = 13
MC_GRAVITY = 14
MC_LEAVE = 15
MC_MAP = 16
MC_MOTION = 17
MC_MOUSEWHEEL = 18
MC_PROPERTY = 19
MC_REPARENT = 20
MC_UNMAP = 21
MC_VISIBILITY = 22
MC_SHIFT = 1
MC_CONTROL = 4
MC_ALT = 8
MC_META = 32
MC_OPTION = 64
MC_COMMAND = 128
if macosxSupport.runningAsOSXApp():
_modifiers = (
('Shift', ), ('Control', ), ('Option', ), ('Command', ))
_modifier_masks = (MC_SHIFT, MC_CONTROL, MC_OPTION, MC_COMMAND)
else:
_modifiers = (
('Control', ), ('Alt', ), ('Shift', ), ('Meta', 'M'))
_modifier_masks = (MC_CONTROL, MC_ALT, MC_SHIFT, MC_META)
_modifier_names = dict([ (name, number) for number in range(len(_modifiers)) for name in _modifiers[number]
])
class _SimpleBinder:
def __init__(self, type, widget, widgetinst):
self.type = type
self.sequence = '<' + _types[type][0] + '>'
self.widget = widget
self.widgetinst = widgetinst
self.bindedfuncs = []
self.handlerid = None
return
def bind(self, triplet, func):
if not self.handlerid:
def handler(event, l=self.bindedfuncs, mc_type=self.type):
event.mc_type = mc_type
wascalled = {}
for i in range(len(l) - 1, -1, -1):
func = l[i]
if func not in wascalled:
wascalled[func] = True
r = func(event)
if r:
return r
self.handlerid = self.widget.bind(self.widgetinst, self.sequence, handler)
self.bindedfuncs.append(func)
def unbind(self, triplet, func):
self.bindedfuncs.remove(func)
if not self.bindedfuncs:
self.widget.unbind(self.widgetinst, self.sequence, self.handlerid)
self.handlerid = None
return
def __del__(self):
if self.handlerid:
self.widget.unbind(self.widgetinst, self.sequence, self.handlerid)
_states = range(1 << len(_modifiers))
_state_names = [ ''.join((m[0] + '-' for i, m in enumerate(_modifiers) if 1 << i & s))
for s in _states
]
def expand_substates(states):
"""For each item of states return a list containing all combinations of
that item with individual bits reset, sorted by the number of set bits.
"""
def nbits(n):
"""number of bits set in n base 2"""
nb = 0
while n:
n, rem = divmod(n, 2)
nb += rem
return nb
statelist = []
for state in states:
substates = list(set((state & x for x in states)))
substates.sort(key=nbits, reverse=True)
statelist.append(substates)
return statelist
_state_subsets = expand_substates(_states)
_state_codes = []
for s in _states:
r = 0
for i in range(len(_modifiers)):
if 1 << i & s:
r |= _modifier_masks[i]
_state_codes.append(r)
class _ComplexBinder:
def __create_handler(self, lists, mc_type, mc_state):
def handler(event, lists=lists, mc_type=mc_type, mc_state=mc_state, ishandlerrunning=self.ishandlerrunning, doafterhandler=self.doafterhandler):
ishandlerrunning[:] = [
True]
event.mc_type = mc_type
event.mc_state = mc_state
wascalled = {}
r = None
for l in lists:
for i in range(len(l) - 1, -1, -1):
func = l[i]
if func not in wascalled:
wascalled[func] = True
r = l[i](event)
if r:
break
if r:
break
ishandlerrunning[:] = []
while doafterhandler:
doafterhandler.pop()()
if r:
return r
else:
return
return handler
def __init__(self, type, widget, widgetinst):
self.type = type
self.typename = _types[type][0]
self.widget = widget
self.widgetinst = widgetinst
self.bindedfuncs = {None: [ [] for s in _states ]}
self.handlerids = []
self.ishandlerrunning = []
self.doafterhandler = []
for s in _states:
lists = [ self.bindedfuncs[None][i] for i in _state_subsets[s] ]
handler = self.__create_handler(lists, type, _state_codes[s])
seq = '<' + _state_names[s] + self.typename + '>'
self.handlerids.append((seq,
self.widget.bind(self.widgetinst, seq, handler)))
return
def bind(self, triplet, func):
if triplet[2] not in self.bindedfuncs:
self.bindedfuncs[triplet[2]] = [ [] for s in _states ]
for s in _states:
lists = [ self.bindedfuncs[detail][i] for detail in (
triplet[2], None) for i in _state_subsets[s] ]
handler = self.__create_handler(lists, self.type, _state_codes[s])
seq = '<%s%s-%s>' % (_state_names[s], self.typename, triplet[2])
self.handlerids.append((seq,
self.widget.bind(self.widgetinst, seq, handler)))
doit = lambda : self.bindedfuncs[triplet[2]][triplet[0]].append(func)
if not self.ishandlerrunning:
doit()
else:
self.doafterhandler.append(doit)
return
def unbind(self, triplet, func):
doit = lambda : self.bindedfuncs[triplet[2]][triplet[0]].remove(func)
if not self.ishandlerrunning:
doit()
else:
self.doafterhandler.append(doit)
def __del__(self):
for seq, id in self.handlerids:
self.widget.unbind(self.widgetinst, seq, id)
_types = (
('KeyPress', 'Key'), ('KeyRelease', ), ('ButtonPress', 'Button'),
('ButtonRelease', ), ('Activate', ), ('Circulate', ), ('Colormap', ),
('Configure', ), ('Deactivate', ), ('Destroy', ), ('Enter', ), ('Expose', ),
('FocusIn', ), ('FocusOut', ), ('Gravity', ), ('Leave', ), ('Map', ),
('Motion', ), ('MouseWheel', ), ('Property', ), ('Reparent', ), ('Unmap', ),
('Visibility', ))
_binder_classes = (
_ComplexBinder,) * 4 + (_SimpleBinder,) * (len(_types) - 4)
_type_names = dict([ (name, number) for number in range(len(_types)) for name in _types[number]
])
_keysym_re = re.compile('^\\w+$')
_button_re = re.compile('^[1-5]$')
def _parse_sequence(sequence):
"""Get a string which should describe an event sequence. If it is
successfully parsed as one, return a tuple containing the state (as an int),
the event type (as an index of _types), and the detail - None if none, or a
string if there is one. If the parsing is unsuccessful, return None.
"""
if not sequence or sequence[0] != '<' or sequence[-1] != '>':
return
else:
words = string.split(sequence[1:-1], '-')
modifiers = 0
while words and words[0] in _modifier_names:
modifiers |= 1 << _modifier_names[words[0]]
del words[0]
if words and words[0] in _type_names:
type = _type_names[words[0]]
del words[0]
else:
return
if _binder_classes[type] is _SimpleBinder:
if modifiers or words:
return
detail = None
else:
if type in [ _type_names[s] for s in ('KeyPress', 'KeyRelease') ]:
type_re = _keysym_re
else:
type_re = _button_re
if not words:
detail = None
elif len(words) == 1 and type_re.match(words[0]):
detail = words[0]
else:
return
return (
modifiers, type, detail)
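# Example (illustrative): _parse_sequence('<Key-a>') returns
# (0, _type_names['Key'], 'a') -- no modifiers, the KeyPress type index and
# the detail 'a' -- while a string without the '<...>' wrapper returns None.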
def _triplet_to_sequence(triplet):
if triplet[2]:
return '<' + _state_names[triplet[0]] + _types[triplet[1]][0] + '-' + triplet[2] + '>'
else:
return '<' + _state_names[triplet[0]] + _types[triplet[1]][0] + '>'
_multicall_dict = {}
def MultiCallCreator(widget):
"""Return a MultiCall class which inherits its methods from the
given widget class (for example, Tkinter.Text). This is used
instead of a templating mechanism.
"""
if widget in _multicall_dict:
return _multicall_dict[widget]
class MultiCall(widget):
def __init__(self, *args, **kwargs):
widget.__init__(self, *args, **kwargs)
self.__eventinfo = {}
self.__binders = [ _binder_classes[i](i, widget, self) for i in range(len(_types))
]
def bind(self, sequence=None, func=None, add=None):
if type(sequence) is str and len(sequence) > 2 and sequence[:2] == '<<' and sequence[-2:] == '>>':
if sequence in self.__eventinfo:
ei = self.__eventinfo[sequence]
if ei[0] is not None:
for triplet in ei[1]:
self.__binders[triplet[1]].unbind(triplet, ei[0])
ei[0] = func
if ei[0] is not None:
for triplet in ei[1]:
self.__binders[triplet[1]].bind(triplet, func)
else:
self.__eventinfo[sequence] = [
func, []]
return widget.bind(self, sequence, func, add)
def unbind(self, sequence, funcid=None):
if type(sequence) is str and len(sequence) > 2 and sequence[:2] == '<<' and sequence[-2:] == '>>' and sequence in self.__eventinfo:
func, triplets = self.__eventinfo[sequence]
if func is not None:
for triplet in triplets:
self.__binders[triplet[1]].unbind(triplet, func)
self.__eventinfo[sequence][0] = None
return widget.unbind(self, sequence, funcid)
def event_add(self, virtual, *sequences):
if virtual not in self.__eventinfo:
self.__eventinfo[virtual] = [
None, []]
func, triplets = self.__eventinfo[virtual]
for seq in sequences:
triplet = _parse_sequence(seq)
if triplet is None:
widget.event_add(self, virtual, seq)
else:
if func is not None:
self.__binders[triplet[1]].bind(triplet, func)
triplets.append(triplet)
return
def event_delete(self, virtual, *sequences):
if virtual not in self.__eventinfo:
return
else:
func, triplets = self.__eventinfo[virtual]
for seq in sequences:
triplet = _parse_sequence(seq)
if triplet is None:
widget.event_delete(self, virtual, seq)
else:
if func is not None:
self.__binders[triplet[1]].unbind(triplet, func)
triplets.remove(triplet)
return
def event_info(self, virtual=None):
if virtual is None or virtual not in self.__eventinfo:
return widget.event_info(self, virtual)
else:
return tuple(map(_triplet_to_sequence, self.__eventinfo[virtual][1])) + widget.event_info(self, virtual)
return
def __del__(self):
for virtual in self.__eventinfo:
func, triplets = self.__eventinfo[virtual]
if func:
for triplet in triplets:
self.__binders[triplet[1]].unbind(triplet, func)
_multicall_dict[widget] = MultiCall
return MultiCall
if __name__ == '__main__':
root = Tkinter.Tk()
text = MultiCallCreator(Tkinter.Text)(root)
text.pack()
def bindseq(seq, n=[0]):
def handler(event):
print seq
text.bind('<<handler%d>>' % n[0], handler)
text.event_add('<<handler%d>>' % n[0], seq)
n[0] += 1
bindseq('<Key>')
bindseq('<Control-Key>')
bindseq('<Alt-Key-a>')
bindseq('<Control-Key-a>')
bindseq('<Alt-Control-Key-a>')
bindseq('<Key-b>')
bindseq('<Control-Button-1>')
bindseq('<Alt-Button-1>')
bindseq('<FocusOut>')
bindseq('<Enter>')
bindseq('<Leave>')
root.mainloop() | unlicense | 1,765,385,453,857,526,300 | 33.64539 | 152 | 0.556435 | false |
d3QUone/wmfe_backend | blueprints/tests.py | 1 | 1650 |
# TODO:
"""
# register
curl -v -X POST -d 'vkid=3sfddd&recovery_code=12o_pda_iod' http://127.0.0.1:8080/register_user
# register with list of vk-friends
curl -v -X POST -d 'vkid=id0013j&recovery_code=abc-check-reg&fr=id9616&fr=id7987&fr=id6530' http://127.0.0.1:8080/register_user
# create post
curl -v -X POST --cookie 'auth=wmfe-013443b2-da9b-464c-8e74-dadd1ffef53a' -d 'vkid=3sfddd&food=iiko877&food=iiko653333&text="I like foo"' http://127.0.0.1:8080/create_post
curl -v -X POST --cookie 'auth=wmfe-013443b2-da9b-464c-8e74-dadd1ffef53a' -d 'vkid=3sfddd&food=iiko1&food=iiko32x&text="I like food, Piter food is cool!!!"' http://127.0.0.1:8080/create_post
# get personal feed
curl -v -X GET --cookie 'auth=wmfe-013443b2-da9b-464c-8e74-dadd1ffef53a' http://127.0.0.1:8080/get_feed?vkid=3sfddd
curl -v -X GET --cookie 'auth=wmfe-013443b2-da9b-464c-8e74-dadd1ffef53a' 'http://127.0.0.1:8080/get_feed?vkid=3sfddd&order=likes'
curl -v -X GET --cookie 'auth=wmfe-013443b2-da9b-464c-8e74-dadd1ffef53a' 'http://127.0.0.1:8080/get_feed?vkid=3sfddd&order=date'
# like
curl -v -X POST --cookie 'auth=wmfe-013443b2-da9b-464c-8e74-dadd1ffef53a' -d 'vkid=3sfddd&post_id=1' http://127.0.0.1:8080/like_post
# dislike
curl -v -X POST --cookie 'auth=wmfe-013443b2-da9b-464c-8e74-dadd1ffef53a' -d 'vkid=3sfddd&post_id=3' http://127.0.0.1:8080/dislike_post
# get detailed likes
curl -v -X GET --cookie 'auth=wmfe-013443b2-da9b-464c-8e74-dadd1ffef53a' -d 'vkid=3sfddd' http://127.0.0.1:8080/get_likes_to_post?post_id=1
# get global feed
curl -v -X GET --cookie 'auth=wmfe-013443b2-da9b-464c-8e74-dadd1ffef53a' http://127.0.0.1:8080/global_feed?vkid=3sfddd
"""
| mit | -2,636,871,834,832,690,700 | 47.529412 | 190 | 0.724848 | false |
ilveroluca/seal | seal/dist_bcl2qseq.py | 1 | 9909 | # Copyright (C) 2011-2012 CRS4.
#
# This file is part of Seal.
#
# Seal is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# Seal is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with Seal. If not, see <http://www.gnu.org/licenses/>.
"""
pydoop script to drive Illumina's bclToQseq program and
convert BCL files to Qseq.
Works in tandem with automator.bcl2qseq_mr. This program *needs direct
access to the sequencer's run directory*. It will generate a file listing all the
tiles to be converted, with relative file paths. In turn, this file will be
processed by the distributed component that runs on Hadoop.
"""
import argparse
import logging
import os
import subprocess
import sys
import tempfile
import urlparse
import seal.lib.illumina_run_dir as ill
import seal.bcl2qseq_mr as bcl2qseq_mr
import pydoop.hdfs as hdfs
def serialize_cmd_data(cmd_dict):
def serialize_item(k,v):
# replace None values with empty strings
k = k or ''
v = v or ''
# if there are "illegal" characters raise an exception
        if ':' in k or ';' in k or ':' in v or ';' in v:
raise RuntimeError("datum '%s' with : or ;. Can't serialize!" % (k + ' ' + v))
return "%s:%s;" % (k,v)
return ''.join(serialize_item(k,v) for k,v in cmd_dict.iteritems())
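# Example (illustrative): serialize_cmd_data({'--lane': '1', '--tile': None})
# yields '--lane:1;--tile:;' -- None values become empty strings, and because
# dict ordering is not guaranteed the two 'key:value;' fragments may appear in
# either order.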
class DistBcl2QseqDriver(object):
def __init__(self, options):
self.log = logging.getLogger('DistBcl2Qseq')
self.log.setLevel(logging.DEBUG)
executable = options.bclToQseq_path or self.find_exec('bclToQseq')
if not executable:
self.log.warning("Can't find bclToQseq in PATH. Will try to run anyways...")
executable = 'bclToQseq'
self.options = {
'bclToQseq': executable,
'append_ld_library_path': options.append_ld_library_path or '',
'ignore_missing_bcl': options.ignore_missing_bcl,
'ignore_missing_control': options.ignore_missing_control,
'exclude_controls': options.exclude_controls,
'no_eamss': options.no_eamss
}
u = urlparse.urlparse(options.run_dir)
if u.scheme and u.scheme != 'file':
raise RuntimeError("Sorry! Current implementation requires that " +
"the run directory be on a mounted file system (scheme %s not supported)" % u.scheme)
self.run_dir = ill.RunDir(u.path)
# collect necessary info
self.run_params = self.run_dir.get_run_parameters()
if hdfs.path.exists(options.output_dir):
raise RuntimeError("output path %s already exists." % options.output_dir)
self.output_path = options.output_dir
def __write_mr_input(self, fd):
"""
Write parameters for all the file conversions to be done in a format
suitable for our map-reduce helper script.
Returns the number of records written.
"""
# commands are written one per line, in a form suitable for execution via sh. If module loading
# is required, it is inserted at the start of the command line, followed by && and finally the bclToQseq call.
conversion_params = {
'bclToQseq': self.options['bclToQseq'],
'ld_library_path': self.options['append_ld_library_path'],
'--exclude-controls': '',
'--repeat': '1',
'--instrument': self.run_params.setup['ComputerName'],
'--run-id': self.run_params.setup['ScanNumber'],
'--input-directory': self.run_dir.get_base_calls_dir(),
}
# For the following arguments, we don't want them to be in the conversion_params
# dictionary unless they're set
if self.options['ignore_missing_bcl']:
conversion_params['--ignore-missing-bcl'] = None
if self.options['ignore_missing_control']:
conversion_params['--ignore-missing-control'] = None
if self.options['exclude_controls']:
conversion_params['--exclude-controls'] = None
if self.options['no_eamss']:
conversion_params['--no-eamss'] = None
count = 0
for lane in self.run_params.get_lanes():
conversion_params['--lane'] = str(lane)
for read in self.run_params.get_reads():
conversion_params['--read'] = str(read.num)
conversion_params['--first-cycle'] = str(read.first_cycle)
conversion_params['--number-of-cycles'] = str(read.last_cycle - read.first_cycle + 1)
for tile in self.run_params.iget_simple_tile_codes():
conversion_params['--tile'] = str(tile)
                    # set filter, control and positions files
conversion_params['--filter-file'] = self.run_dir.make_filter_path(lane, tile)
conversion_params['--control-file'] = self.run_dir.make_control_path(lane, tile)
conversion_params['--positions-file'] = self.run_dir.make_clocs_path(lane, tile)
# we put the standard qseq name here. The slave implementation may decide not to use it....
conversion_params['--qseq-file'] = os.path.join(self.output_path, self.run_dir.make_qseq_name(lane, tile, read.num))
fd.write(serialize_cmd_data(conversion_params))
fd.write("\n")
count += 1
return count
@staticmethod
def find_exec(exec_name):
"""
Find an executable in the current PATH.
Returns the full path to the executable, if found.
Returns None if not found.
"""
for p in os.environ.get('PATH', '').split(os.pathsep):
exec_path = os.path.join(p, exec_name)
if os.access(exec_path, os.X_OK):
return exec_path
return None
def run(self):
pydoop_exec = self.find_exec('pydoop')
if pydoop_exec is None:
raise RuntimeError("Can't find pydoop executable in PATH")
with tempfile.NamedTemporaryFile() as f:
num_records = self.__write_mr_input(f)
f.flush()
self.log.debug("Wrote temp input file %s", f.name)
input_filename = tempfile.mktemp(dir=os.path.dirname(self.output_path), prefix="dist_bcl2qseq_input")
tmpfile_uri = "file://%s" % f.name
try:
self.log.debug("copying input from %s to %s", tmpfile_uri, input_filename)
hdfs.cp(tmpfile_uri, input_filename)
self.log.info("Run analyzed. Launching distributed job")
# launch mr task
cmd = [ 'pydoop', 'script', '--num-reducers', '0', '--kv-separator', '',
'-Dmapred.map.tasks=%d' % num_records,
'-Dmapred.input.format.class=org.apache.hadoop.mapred.lib.NLineInputFormat',
'-Dmapred.line.input.format.linespermap=1',
bcl2qseq_mr.__file__,
input_filename,
self.output_path]
self.log.debug(str(cmd))
subprocess.check_call(cmd)
self.log.info("Distributed job complete")
except subprocess.CalledProcessError as e:
self.log.exception(e)
self.log.error("Error running pydoop script component")
raise
finally:
try:
hdfs.rmr(input_filename)
except IOError as e:
self.log.debug("Problem cleaning up. Error deleting temporary input file %s", input_filename)
self.log.debug(str(e))
def main(args=None):
from seal import logformat
parser = argparse.ArgumentParser(description="Distributed bcl2qseq.")
parser.add_argument("-l", "--logfile", metavar="FILE", help="Write log output to a file")
parser.add_argument("--bclToQseq-path", metavar="PATH",
help="Full path to the bclToQseq binary. Needed only if it's not in the PATH")
parser.add_argument("--append-ld-library-path", metavar="PATHLIST",
help="If you need to append to the value of LD_LIBRARY_PATH to run the Illumina executable, use this argument")
parser.add_argument("--ignore-missing-bcl", action='store_true',
help="interprets missing *.bcl files as a base calling of '.'")
parser.add_argument("--ignore-missing-control", action='store_true',
help="don't throw an error when *.control files are missing")
parser.add_argument("--exclude-controls", action='store_true',
help="do not include clusters that are used as controls")
parser.add_argument("--no-eamss", action='store_true',
help="do not apply the EAMSS masking on the quality values")
parser.add_argument('run_dir', help="Illumina run directory to process")
parser.add_argument('output_dir', help="Path where the output qseq files should be created")
options = parser.parse_args(args)
if options.logfile:
logging.basicConfig(format=logformat, filename=options.logfile)
else:
logging.basicConfig(format=logformat)
try:
driver = DistBcl2QseqDriver(options)
except StandardError as e:
logging.critical("Error initializing")
if e.message:
logging.exception(e)
return 1
try:
driver.run()
return 0
except RuntimeError as e:
return 2
| gpl-3.0 | -2,729,236,264,081,831,000 | 43.635135 | 136 | 0.610455 | false |
vessemer/concept-to-clinic | interface/config/settings/base.py | 1 | 3697 | """
Adapted from pydanny/django-cookiecutter
Generated by 'django-admin startproject' using Django 1.11.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import environ
BASE_DIR = environ.Path(__file__) - 3
APPS_DIR = BASE_DIR.path('backend')
# Datasource from where the images will be loaded initially
DATASOURCE_DIR = '/images'
IMAGE_EXTENSIONS = [
'.dcm',
]
env = environ.Env()
env.read_env(str(BASE_DIR.path('.env')))
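# A hypothetical .env consumed by the read_env() call above could look like:
#
#   DEBUG=on
#   SECRET_KEY=change-me
#   DATABASE_URL=postgres://user:password@localhost:5432/interface
#
# DEBUG and SECRET_KEY are read just below; DATABASE_URL is required by the
# DATABASES setting further down.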
DEBUG = env.bool('DEBUG', default=False)
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env('SECRET_KEY')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
# Django
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Third party
'rest_framework',
# Project specific
'backend.images',
'backend.cases',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
str(BASE_DIR.path('frontend/templates')),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db("DATABASE_URL"),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# CSRF Token
CSRF_COOKIE_NAME = 'csrftoken'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = str(BASE_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(BASE_DIR.path('frontend/static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Project specific
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.AllowAny'
]
}
try:
with open('/HEAD') as f:
APP_VERSION_NUMBER = f.readlines()[-1].split(' ')[1][:7]
except (IOError, IndexError):
APP_VERSION_NUMBER = '(unknown)'
| mit | 3,250,317,145,646,073,000 | 25.219858 | 98 | 0.675412 | false |
nicoboss/Floatmotion | OpenGLLibrary/glLibOBJLoad.py | 1 | 3861 | import pygame, os
from OpenGL.GL import *
def MTL(filename):
contents = {}
mtl = None
for line in open(filename, "r"):
if line.startswith('#'): continue
values = line.split()
if not values: continue
if values[0] == 'newmtl':
mtl = contents[values[1]] = {}
elif mtl is None:
raise ValueError, "mtl file doesn't start with newmtl stmt"
elif values[0] == 'map_Kd':
# load the texture referred to by this declaration
mtl[values[0]] = values[1]
surf = pygame.image.load(mtl['map_Kd'])
image = pygame.image.tostring(surf, 'RGBA', 1)
ix, iy = surf.get_rect().size
texid = mtl['texture_Kd'] = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, texid)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,
GL_LINEAR)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, ix, iy, 0, GL_RGBA,
GL_UNSIGNED_BYTE, image)
else:
mtl[values[0]] = map(float, values[1:])
return contents
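# Example (illustrative): for a .mtl file containing
#
#   newmtl body
#   Kd 0.8 0.1 0.1
#   map_Kd body.png
#
# MTL('model.mtl') returns roughly
#   {'body': {'Kd': [0.8, 0.1, 0.1], 'map_Kd': 'body.png', 'texture_Kd': <GL id>}}
# i.e. one dict per material keyed by its newmtl name, with an OpenGL texture
# uploaded for every map_Kd entry (an active GL context is required).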
class OBJ:
def __init__(self, filename, swapyz=False):
"""Loads a Wavefront OBJ file. """
self.vertices = []
self.normals = []
self.texcoords = []
self.faces = []
filename = filename.split("/")
self.mtl = MTL(os.path.join(*filename[:-1]+[filename[-1][:-4]+".mtl"]))
material = None
for line in open(os.path.join(*filename), "r"):
if line.startswith('#'): continue
values = line.split()
if not values: continue
if values[0] == 'v':
v = map(float, values[1:4])
if swapyz:
v = v[0], v[2], v[1]
self.vertices.append(v)
elif values[0] == 'vn':
v = map(float, values[1:4])
if swapyz:
v = v[0], v[2], v[1]
self.normals.append(v)
elif values[0] == 'vt':
self.texcoords.append(map(float, values[1:3]))
elif values[0] in ('usemtl', 'usemat'):
material = values[1]
elif values[0] == 'mtllib':
continue
elif values[0] == 'f':
face = []
texcoords = []
norms = []
for v in values[1:]:
w = v.split('/')
face.append(int(w[0]))
if len(w) >= 2 and len(w[1]) > 0:
texcoords.append(int(w[1]))
else:
texcoords.append(0)
if len(w) >= 3 and len(w[2]) > 0:
norms.append(int(w[2]))
else:
norms.append(0)
self.faces.append((face, norms, texcoords, material))
self.gl_list = glGenLists(1)
glNewList(self.gl_list, GL_COMPILE)
for face in self.faces:
vertices, normals, texture_coords, material = face
mtl = self.mtl[material]
if 'texture_Kd' in mtl:
# use diffuse texmap
glBindTexture(GL_TEXTURE_2D, mtl['texture_Kd'])
else:
# just use diffuse colour
glColor(*mtl['Kd'])
glBegin(GL_POLYGON)
for i in range(0, len(vertices)):
if normals[i] > 0:
glNormal3fv(self.normals[normals[i] - 1])
if texture_coords[i] > 0:
glTexCoord2fv(self.texcoords[texture_coords[i] - 1])
glVertex3fv(self.vertices[vertices[i] - 1])
glEnd()
glColor3f(1,1,1)
glEndList()
| agpl-3.0 | -8,005,413,216,815,216,000 | 36.125 | 79 | 0.465682 | false |
akshaybabloo/Python-QT-5-Tutorial | 2_QTDesigner/2_8_SimpleFormDesigner/SimpleFormDesigner.py | 1 | 1647 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'SimpleFormDesigner.ui'
#
# Created by: PyQt5 UI code generator 5.4.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(640, 249)
self.widget = QtWidgets.QWidget(Dialog)
self.widget.setGeometry(QtCore.QRect(150, 80, 401, 101))
self.widget.setObjectName("widget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.widget)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label = QtWidgets.QLabel(self.widget)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
self.edit = QtWidgets.QLineEdit(self.widget)
self.edit.setObjectName("edit")
self.horizontalLayout.addWidget(self.edit)
self.verticalLayout.addLayout(self.horizontalLayout)
self.change = QtWidgets.QLabel(self.widget)
self.change.setObjectName("change")
self.verticalLayout.addWidget(self.change)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.label.setText(_translate("Dialog", "Full Name"))
self.change.setText(_translate("Dialog", "TextLabel"))
| mit | -401,647,907,074,165,570 | 39.170732 | 76 | 0.695811 | false |
michael-christen/repo-monitor | repo_monitor/python/parsers.py | 1 | 3438 | import argparse
from .deserializers import CoverageDeserializer
from .deserializers import NosetestDeserializer
from .deserializers import RadonDeserializer
class CoverageParser(object):
def __init__(self):
self.base_parser = argparse.ArgumentParser(
description='Get Python Coverage',
)
self.base_parser.add_argument(
'--file',
default='coverage.xml',
help='Coverage File')
self.base_parser.add_argument(
'--num_decimals',
default=0,
help='Number of decimals to output')
def run(self, args):
parsed_args = self.base_parser.parse_args(args)
with open(parsed_args.file, 'r') as f:
line_rate = CoverageDeserializer(f.read()).line_rate
format_string = '{:.' + str(parsed_args.num_decimals) + 'f}%'
coverage_string = format_string.format(100 * line_rate)
print coverage_string
return coverage_string
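    # Illustrative usage (assumes a coverage.xml produced by "coverage xml"
    # exists in the working directory):
    #
    #   CoverageParser().run(['--file', 'coverage.xml', '--num_decimals', '1'])
    #
    # prints and returns something like '87.5%'.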
class NosetestParser(object):
def __init__(self):
self.base_parser = argparse.ArgumentParser(
description='Get Python Test Output Metrics',
)
self.base_parser.add_argument(
'metric',
choices=['time', 'num_tests', 'test2time'],
help='Metric to gather')
self.base_parser.add_argument(
'--file',
default='nosetests.xml',
help='Test Output File')
def run(self, args):
parsed_args = self.base_parser.parse_args(args)
with open(parsed_args.file, 'r') as f:
data = f.read()
nosetest_data = NosetestDeserializer(data)
metric = getattr(nosetest_data, parsed_args.metric)
output_str = ''
if isinstance(metric, dict):
test_list = ['{} {}'.format(k, v) for k, v in metric.viewitems()]
output_str = '\n'.join(test_list)
else:
output_str = '{}'.format(metric)
print output_str
return output_str
class RadonParser(object):
def __init__(self):
self.base_parser = argparse.ArgumentParser(
description='Get Code Quality Metrics',
)
self.base_parser.add_argument(
'metric',
choices=['lloc', 'cc', 'mi'],
help='Metric to gather')
self.base_parser.add_argument(
'--package',
help='Package to inspect. (Needed for cc).')
self.base_parser.add_argument(
'--raw_json',
help='JSON file with raw Radon metrics')
self.base_parser.add_argument(
'--mi_json',
help='JSON file with maintanability index Radon metrics')
def _read_file_if_available(self, file_name):
if file_name is None:
return None
with open(file_name, 'r') as f:
return f.read()
def run(self, args):
parsed_args = self.base_parser.parse_args(args)
radon_data = RadonDeserializer(
package=parsed_args.package,
raw_json=self._read_file_if_available(parsed_args.raw_json),
mi_json=self._read_file_if_available(parsed_args.mi_json),
).metric_dict
if parsed_args.metric == 'lloc':
format_str = '{:d}'
else:
format_str = '{:0.2f}'
output_str = format_str.format(radon_data[parsed_args.metric])
print output_str
return output_str
| mit | 3,296,597,069,242,023,400 | 33.38 | 77 | 0.57039 | false |
aleperno/labotpfinal | src/labo.py | 1 | 1051 | #!/usr/bin/python
import RPi.GPIO as GPIO
import time
import sys,os
import serial
import socket
dir = "./log.txt"
port = '/dev/ttyACM0'
ser = serial.Serial(port,9600)
def getTime():
return time.strftime("%Y;%m;%d;%H;%M;%S")
def is_valid_number(number):
"""In this case numbers higher than 100 will be considered
serial communication errors"""
if float(number) > 100:
#print "Error while validating %s ." % number
return False
else:
return True
def is_valid(data):
try:
float(data)
#Passes first test
aux = data.split('.')
        if len(aux[1]) == 2:
#Passes second test
return is_valid_number(data)
except:
#print "Error while validating %s ." % data
return False
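# Examples (illustrative): is_valid("12.34") -> True, is_valid("123.45") ->
# False (values above 100 are treated as serial errors), is_valid("1.2") ->
# None, which is falsy (exactly two decimal places are required), and
# is_valid("abc") -> False.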
while True:
    try:
        hora = getTime()
        volt = ser.readline()
        volt = volt.replace('\r\n', '')
        if not is_valid(volt):
            continue
        print volt
        s = hora + ";" + volt + '\n'
        # open, write and close the log files on every valid reading so that
        # no file handles are leaked between iterations
        with open(dir, "a") as f:
            f.write(s)
        with open('/var/www/log.txt', "w+") as p:
            p.write(volt)
    except KeyboardInterrupt:
        print "Exited cleanly"
        break
| gpl-2.0 | -4,666,988,098,183,961,000 | 17.12069 | 59 | 0.644148 | false |
catsop/CATMAID | django/applications/catmaid/tests/apis/test_treenodes.py | 1 | 48018 | import json
from django.shortcuts import get_object_or_404
from catmaid.control.common import get_relation_to_id_map, get_class_to_id_map
from catmaid.models import ClassInstance, ClassInstanceClassInstance, Log
from catmaid.models import Treenode, TreenodeClassInstance, TreenodeConnector
from catmaid.models import User
from catmaid.state import make_nocheck_state
from .common import CatmaidApiTestCase
class TreenodesApiTests(CatmaidApiTestCase):
def test_list_treenode_table_empty(self):
self.fake_authentication()
response = self.client.post('/%d/treenode/table/%d/content' % \
(self.test_project_id, 0))
self.assertEqual(response.status_code, 200)
expected_result = [[], [], []]
parsed_response = json.loads(response.content)
self.assertEqual(expected_result, parsed_response)
def test_fail_update_confidence(self):
treenode_id = Treenode.objects.order_by("-id")[0].id + 1 # Inexistant
self.fake_authentication()
response = self.client.post(
'/%d/treenodes/%d/confidence' % (self.test_project_id, treenode_id),
{'new_confidence': '4'})
self.assertEqual(response.status_code, 200)
expected_result = 'No skeleton and neuron for treenode %s' % treenode_id
parsed_response = json.loads(response.content)
self.assertEqual(expected_result, parsed_response['error'])
def test_update_confidence_of_treenode(self):
treenode_id = 11
self.fake_authentication()
response = self.client.post(
'/%d/treenodes/%d/confidence' % (self.test_project_id, treenode_id),
{'new_confidence': '4', 'state': make_nocheck_state()})
self.assertEqual(response.status_code, 200)
treenode = Treenode.objects.filter(id=treenode_id).get()
parsed_response = json.loads(response.content)
expected_result = {
'message': 'success',
'updated_partners': {
'7': {
'edition_time': '2016-04-13T05:57:44.444Z',
'old_confidence': 5
}
}
}
self.assertIn('message', parsed_response)
self.assertEqual(expected_result.get('message'), parsed_response.get('message'))
self.assertIn('updated_partners', parsed_response)
self.assertIn('7', parsed_response.get('updated_partners'))
self.assertEqual(expected_result.get('updated_partners').get('7').get('old_confidence'),
parsed_response.get('updated_partners').get('7').get('old_confidence'))
self.assertEqual(4, treenode.confidence)
response = self.client.post(
'/%d/treenodes/%d/confidence' % (self.test_project_id, treenode_id),
{'new_confidence': '5', 'state': make_nocheck_state()})
self.assertEqual(response.status_code, 200)
treenode = Treenode.objects.filter(id=treenode_id).get()
parsed_response = json.loads(response.content)
expected_result = {
'message': 'success',
'updated_partners': {
'7': {
'edition_time': '2016-04-13T05:57:44.444Z',
'old_confidence': 4
}
}
}
self.assertIn('message', parsed_response)
self.assertEqual(expected_result.get('message'), parsed_response.get('message'))
self.assertIn('updated_partners', parsed_response)
self.assertIn('7', parsed_response.get('updated_partners'))
self.assertEqual(expected_result.get('updated_partners').get('7').get('old_confidence'),
parsed_response.get('updated_partners').get('7').get('old_confidence'))
self.assertEqual(5, treenode.confidence)
def test_update_confidence_of_treenode_connector(self):
treenode_id = 285
treenode_connector_id = 360
self.fake_authentication()
response = self.client.post(
'/%d/treenodes/%d/confidence' % (self.test_project_id, treenode_id),
{'new_confidence': '4', 'to_connector': 'true',
'state': make_nocheck_state()})
self.assertEqual(response.status_code, 200)
connector = TreenodeConnector.objects.filter(id=treenode_connector_id).get()
parsed_response = json.loads(response.content)
expected_result = {
'message': 'success',
'updated_partners': {
'356': {
'edition_time': '2016-04-13T05:57:44.444Z',
'old_confidence': 5
}
}
}
self.assertIn('message', parsed_response)
self.assertEqual(expected_result.get('message'), parsed_response.get('message'))
self.assertIn('updated_partners', parsed_response)
self.assertIn('356', parsed_response.get('updated_partners'))
self.assertEqual(expected_result.get('updated_partners').get('356').get('old_confidence'),
parsed_response.get('updated_partners').get('356').get('old_confidence'))
self.assertEqual(4, connector.confidence)
response = self.client.post(
'/%d/treenodes/%d/confidence' % (self.test_project_id, treenode_id),
{'new_confidence': '5', 'to_connector': 'true', 'state': make_nocheck_state()})
self.assertEqual(response.status_code, 200)
connector = TreenodeConnector.objects.filter(id=treenode_connector_id).get()
parsed_response = json.loads(response.content)
expected_result = {
'message': 'success',
'updated_partners': {
'356': {
'edition_time': '2016-04-13T05:57:44.444Z',
'old_confidence': 4
}
}
}
self.assertIn('message', parsed_response)
self.assertEqual(expected_result.get('message'), parsed_response.get('message'))
self.assertIn('updated_partners', parsed_response)
self.assertIn('356', parsed_response.get('updated_partners'))
self.assertEqual(expected_result.get('updated_partners').get('356').get('old_confidence'),
parsed_response.get('updated_partners').get('356').get('old_confidence'))
self.assertEqual(5, connector.confidence)
def test_create_treenode(self):
self.fake_authentication()
relation_map = get_relation_to_id_map(self.test_project_id)
class_map = get_class_to_id_map(self.test_project_id)
count_treenodes = lambda: Treenode.objects.all().count()
count_skeletons = lambda: ClassInstance.objects.filter(
project=self.test_project_id,
class_column=class_map['skeleton']).count()
count_neurons = lambda: ClassInstance.objects.filter(
project=self.test_project_id,
class_column=class_map['neuron']).count()
treenode_count = count_treenodes()
skeleton_count = count_skeletons()
neuron_count = count_neurons()
response = self.client.post('/%d/treenode/create' % self.test_project_id, {
'x': 5,
'y': 10,
'z': 15,
'confidence': 5,
'parent_id': -1,
'radius': 2})
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
self.assertTrue('treenode_id' in parsed_response)
self.assertTrue('skeleton_id' in parsed_response)
self.assertEqual(treenode_count + 1, count_treenodes())
self.assertEqual(skeleton_count + 1, count_skeletons())
self.assertEqual(neuron_count + 1, count_neurons())
treenode_skeleton_relation = TreenodeClassInstance.objects.filter(
project=self.test_project_id,
relation=relation_map['element_of'],
treenode=parsed_response['treenode_id'],
class_instance=parsed_response['skeleton_id'])
neuron_skeleton_relation = ClassInstanceClassInstance.objects.filter(
project=self.test_project_id,
relation=relation_map['model_of'],
class_instance_a=parsed_response['skeleton_id'])
neuron_log = Log.objects.filter(
project=self.test_project_id,
operation_type='create_neuron')
# FIXME: discussed in
# https://github.com/catmaid/CATMAID/issues/754
#self.assertEqual(1, treenode_skeleton_relation.count())
self.assertEqual(1, neuron_skeleton_relation.count())
        # FIXME: This test doesn't work as expected
#self.assertEqual(1, neuron_log.count())
#neuron_log_location = neuron_log[0].location
#self.assertEqual(5, neuron_log_location.x)
#self.assertEqual(10, neuron_log_location.y)
#self.assertEqual(15, neuron_log_location.z)
def test_create_treenode2(self):
self.fake_authentication()
relation_map = get_relation_to_id_map(self.test_project_id)
class_map = get_class_to_id_map(self.test_project_id)
count_treenodes = lambda: Treenode.objects.all().count()
count_skeletons = lambda: ClassInstance.objects.filter(
project=self.test_project_id,
class_column=class_map['skeleton']).count()
count_neurons = lambda: ClassInstance.objects.filter(
project=self.test_project_id,
class_column=class_map['neuron']).count()
treenode_count = count_treenodes()
skeleton_count = count_skeletons()
neuron_count = count_neurons()
response = self.client.post('/%d/treenode/create' % self.test_project_id, {
'x': 5,
'y': 10,
'z': 15,
'confidence': 5,
'parent_id': -1,
'radius': 2})
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
self.assertTrue('treenode_id' in parsed_response)
self.assertTrue('skeleton_id' in parsed_response)
self.assertEqual(treenode_count + 1, count_treenodes())
self.assertEqual(skeleton_count + 1, count_skeletons())
self.assertEqual(neuron_count + 1, count_neurons())
treenode_skeleton_relation = TreenodeClassInstance.objects.filter(
project=self.test_project_id,
relation=relation_map['element_of'],
treenode=parsed_response['treenode_id'],
class_instance=parsed_response['skeleton_id'])
neuron_skeleton_relation = ClassInstanceClassInstance.objects.filter(
project=self.test_project_id,
relation=relation_map['model_of'],
class_instance_a=parsed_response['skeleton_id'])
# FIXME: Log test doesn't work like this, because we don't have the
# neuron ID available
#neuron_log = Log.objects.filter(
# project=self.test_project_id,
# operation_type='create_neuron',
# freetext='Create neuron %s and skeleton %s' % (parsed_response['neuron_id'], parsed_response['skeleton_id']))
root = ClassInstance.objects.filter(
project=self.test_project_id,
class_column=class_map['root'])[0]
self.assertEqual(1, neuron_skeleton_relation.count())
        #FIXME: These tests don't work as expected anymore
#self.assertEqual(1, neuron_log.count())
#self.assertEqual(1, treenode_skeleton_relation.count())
#neuron_log_location = neuron_log[0].location
#self.assertEqual(5, neuron_log_location.x)
#self.assertEqual(10, neuron_log_location.y)
#self.assertEqual(15, neuron_log_location.z)
def test_create_treenode_with_existing_neuron(self):
self.fake_authentication()
relation_map = get_relation_to_id_map(self.test_project_id)
class_map = get_class_to_id_map(self.test_project_id)
neuron_id = 2389
count_skeletons = lambda: ClassInstance.objects.filter(
project=self.test_project_id,
class_column=class_map['skeleton']).count()
count_treenodes = lambda: Treenode.objects.all().count()
treenode_count = count_treenodes()
skeleton_count = count_skeletons()
response = self.client.post('/%d/treenode/create' % self.test_project_id, {
'x': 5,
'y': 10,
'z': 15,
'confidence': 5,
'parent_id': -1,
'useneuron': neuron_id,
'radius': 2})
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
self.assertTrue('treenode_id' in parsed_response)
self.assertTrue('skeleton_id' in parsed_response)
self.assertEqual(treenode_count + 1, count_treenodes())
self.assertEqual(skeleton_count + 1, count_skeletons())
treenode_skeleton_relation = TreenodeClassInstance.objects.filter(
project=self.test_project_id,
relation=relation_map['element_of'],
treenode=parsed_response['treenode_id'],
class_instance=parsed_response['skeleton_id'])
neuron_skeleton_relation = ClassInstanceClassInstance.objects.filter(
project=self.test_project_id,
relation=relation_map['model_of'],
class_instance_a=parsed_response['skeleton_id'],
class_instance_b=neuron_id)
# FIXME: treenode_skeleton_relation.count() should be 1, but we
# currently don't store these relations.
# See: https://github.com/catmaid/CATMAID/issues/754
self.assertEqual(0, treenode_skeleton_relation.count())
self.assertEqual(1, neuron_skeleton_relation.count())
def test_create_treenode_with_nonexisting_parent_failure(self):
self.fake_authentication()
parent_id = 555555
treenode_count = Treenode.objects.all().count()
relation_count = TreenodeClassInstance.objects.all().count()
response = self.client.post('/%d/treenode/create' % self.test_project_id, {
'x': 5,
'y': 10,
'z': 15,
'confidence': 5,
'parent_id': parent_id,
'radius': 2,
'state': make_nocheck_state()})
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
expected_result = {'error': 'Parent treenode %d does not exist' % parent_id}
self.assertIn(expected_result['error'], parsed_response['error'])
self.assertEqual(treenode_count, Treenode.objects.all().count())
self.assertEqual(relation_count, TreenodeClassInstance.objects.all().count())
def test_update_treenode_parent(self):
self.fake_authentication()
skeleton_id = 373
treenode_id = 405
new_parent_id = 403
response = self.client.post(
'/%d/treenodes/%d/parent' % (self.test_project_id, treenode_id),
{'parent_id': new_parent_id, 'state': make_nocheck_state()})
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
response = self.client.post(
'/%d/%d/1/1/compact-skeleton' % (self.test_project_id, skeleton_id))
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
expected_response = [
[[377, None, 3, 7620.0, 2890.0, 0.0, -1.0, 5],
[403, 377, 3, 7840.0, 2380.0, 0.0, -1.0, 5],
[405, 403, 3, 7390.0, 3510.0, 0.0, -1.0, 5],
[407, 405, 3, 7080.0, 3960.0, 0.0, -1.0, 5],
[409, 407, 3, 6630.0, 4330.0, 0.0, -1.0, 5]],
[[377, 356, 1, 6730.0, 2700.0, 0.0],
[409, 421, 1, 6260.0, 3990.0, 0.0]],
{"uncertain end": [403]}]
self.assertItemsEqual(parsed_response[0], expected_response[0])
self.assertItemsEqual(parsed_response[1], expected_response[1])
self.assertEqual(parsed_response[2], expected_response[2])
def test_delete_root_treenode_with_children_failure(self):
self.fake_authentication()
treenode_id = 367
tn_count = Treenode.objects.all().count()
child_count = Treenode.objects.filter(parent=treenode_id).count()
response = self.client.post(
'/%d/treenode/delete' % self.test_project_id,
{'treenode_id': treenode_id, 'state': make_nocheck_state()})
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
expected_result = "Could not delete root node: You can't delete the " \
"root node when it has children."
self.assertEqual(expected_result, parsed_response['error'])
self.assertEqual(1, Treenode.objects.filter(id=treenode_id).count())
self.assertEqual(tn_count, Treenode.objects.all().count())
self.assertEqual(child_count, Treenode.objects.filter(parent=treenode_id).count())
def test_insert_treenoded_on_edge(self):
self.fake_authentication()
class_map = get_class_to_id_map(self.test_project_id)
count_treenodes = lambda: Treenode.objects.all().count()
count_skeletons = lambda: ClassInstance.objects.filter(
project=self.test_project_id,
class_column=class_map['skeleton']).count()
count_neurons = lambda: ClassInstance.objects.filter(
project=self.test_project_id,
class_column=class_map['neuron']).count()
treenode_count = count_treenodes()
skeleton_count = count_skeletons()
neuron_count = count_neurons()
# Get two nodes and calculate point between them
child_id = 2374
parent_id = 2372
child = Treenode.objects.get(pk=child_id)
parent = Treenode.objects.get(pk=parent_id)
new_node_x = 0.5 * (child.location_x + parent.location_x)
new_node_y = 0.5 * (child.location_y + parent.location_y)
new_node_z = 0.5 * (child.location_z + parent.location_z)
response = self.client.post('/%d/treenode/insert' % self.test_project_id, {
'x': new_node_x,
'y': new_node_y,
'z': new_node_z,
'child_id': child_id,
'parent_id': parent_id,
'state': make_nocheck_state()})
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
self.assertTrue('treenode_id' in parsed_response)
self.assertTrue('skeleton_id' in parsed_response)
self.assertEqual(treenode_count + 1, count_treenodes())
self.assertEqual(skeleton_count, count_skeletons())
self.assertEqual(neuron_count, count_neurons())
new_node_id = parsed_response['treenode_id']
new_node = Treenode.objects.get(pk=new_node_id)
child = Treenode.objects.get(pk=child_id)
self.assertEqual(new_node.parent_id, parent_id)
self.assertEqual(child.parent_id, new_node_id)
self.assertEqual(new_node.user, child.user)
self.assertEqual(new_node.creation_time, child.creation_time)
self.assertEqual(new_node.skeleton_id, child.skeleton_id)
self.assertEqual(new_node.location_x, new_node_x)
self.assertEqual(new_node.location_y, new_node_y)
self.assertEqual(new_node.location_z, new_node_z)
def test_insert_treenoded_not_on_edge_with_permission(self):
self.fake_authentication()
class_map = get_class_to_id_map(self.test_project_id)
count_treenodes = lambda: Treenode.objects.all().count()
count_skeletons = lambda: ClassInstance.objects.filter(
project=self.test_project_id,
class_column=class_map['skeleton']).count()
count_neurons = lambda: ClassInstance.objects.filter(
project=self.test_project_id,
class_column=class_map['neuron']).count()
treenode_count = count_treenodes()
skeleton_count = count_skeletons()
neuron_count = count_neurons()
# Get two nodes and calculate point between them
child_id = 2374
parent_id = 2372
child = Treenode.objects.get(pk=child_id)
parent = Treenode.objects.get(pk=parent_id)
new_node_x = 0.5 * (child.location_x + parent.location_x)
new_node_y = 0.5 * (child.location_y + parent.location_y) + 10
new_node_z = 0.5 * (child.location_z + parent.location_z)
        # Try to insert with a slight distortion in Y. This is allowed if the
# user has permission to edit the neuron.
response = self.client.post('/%d/treenode/insert' % self.test_project_id, {
'x': new_node_x,
'y': new_node_y,
'z': new_node_z,
'child_id': child_id,
'parent_id': parent_id,
'state': make_nocheck_state()})
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
self.assertTrue('treenode_id' in parsed_response)
self.assertTrue('skeleton_id' in parsed_response)
self.assertEqual(treenode_count + 1, count_treenodes())
self.assertEqual(skeleton_count, count_skeletons())
self.assertEqual(neuron_count, count_neurons())
new_node_id = parsed_response['treenode_id']
new_node = Treenode.objects.get(pk=new_node_id)
child = Treenode.objects.get(pk=child_id)
self.assertEqual(new_node.parent_id, parent_id)
self.assertEqual(child.parent_id, new_node_id)
self.assertEqual(new_node.user, child.user)
self.assertEqual(new_node.creation_time, child.creation_time)
self.assertEqual(new_node.skeleton_id, child.skeleton_id)
self.assertEqual(new_node.location_x, new_node_x)
self.assertEqual(new_node.location_y, new_node_y)
self.assertEqual(new_node.location_z, new_node_z)
def test_insert_treenoded_not_on_edge_without_permission(self):
self.fake_authentication(username='test0')
class_map = get_class_to_id_map(self.test_project_id)
count_treenodes = lambda: Treenode.objects.all().count()
count_skeletons = lambda: ClassInstance.objects.filter(
project=self.test_project_id,
class_column=class_map['skeleton']).count()
count_neurons = lambda: ClassInstance.objects.filter(
project=self.test_project_id,
class_column=class_map['neuron']).count()
treenode_count = count_treenodes()
skeleton_count = count_skeletons()
neuron_count = count_neurons()
# Get two nodes and calculate point between them
child_id = 2374
parent_id = 2372
child = Treenode.objects.get(pk=child_id)
parent = Treenode.objects.get(pk=parent_id)
        # Set child and parent to different creators and lock it
owner = User.objects.get(username='admin')
for n in (child, parent):
n.creator = owner
n.save()
new_node_x = 0.5 * (child.location_x + parent.location_x)
new_node_y = 0.5 * (child.location_y + parent.location_y) + 10
new_node_z = 0.5 * (child.location_z + parent.location_z)
        # Try to insert with a slight distortion in Y. This should fail since
# the new node would introduce a structural change to the skeleton.
response = self.client.post('/%d/treenode/insert' % self.test_project_id, {
'x': new_node_x,
'y': new_node_y,
'z': new_node_z,
'child_id': child_id,
'parent_id': parent_id})
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
self.assertTrue('error' in parsed_response)
self.assertEqual(treenode_count, count_treenodes())
self.assertEqual(skeleton_count, count_skeletons())
self.assertEqual(neuron_count, count_neurons())
def test_insert_treenoded_no_child_parent(self):
self.fake_authentication()
class_map = get_class_to_id_map(self.test_project_id)
count_treenodes = lambda: Treenode.objects.all().count()
count_skeletons = lambda: ClassInstance.objects.filter(
project=self.test_project_id,
class_column=class_map['skeleton']).count()
count_neurons = lambda: ClassInstance.objects.filter(
project=self.test_project_id,
class_column=class_map['neuron']).count()
treenode_count = count_treenodes()
skeleton_count = count_skeletons()
neuron_count = count_neurons()
# Get two nodes and calculate point between them
child_id = 2376
parent_id = 2372
child = Treenode.objects.get(pk=child_id)
parent = Treenode.objects.get(pk=parent_id)
new_node_x = 0.5 * (child.location_x + parent.location_x)
new_node_y = 0.5 * (child.location_y + parent.location_y)
new_node_z = 0.5 * (child.location_z + parent.location_z)
        # Try to insert with a slight distortion in Y
response = self.client.post('/%d/treenode/insert' % self.test_project_id, {
'x': new_node_x,
'y': new_node_y,
'z': new_node_z,
'child_id': child_id,
'parent_id': parent_id})
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
self.assertTrue('error' in parsed_response)
self.assertEqual(treenode_count, count_treenodes())
self.assertEqual(skeleton_count, count_skeletons())
self.assertEqual(neuron_count, count_neurons())
def test_delete_non_root_non_parent_treenode(self):
self.fake_authentication()
treenode_id = 349
tn_count = Treenode.objects.all().count()
response = self.client.post(
'/%d/treenode/delete' % self.test_project_id,
{'treenode_id': treenode_id, 'state': make_nocheck_state()})
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
expected_result = 'Removed treenode successfully.'
self.assertEqual(expected_result, parsed_response['success'])
self.assertEqual(0, Treenode.objects.filter(id=treenode_id).count())
self.assertEqual(tn_count - 1, Treenode.objects.all().count())
def test_delete_root_treenode(self):
self.fake_authentication()
treenode_id = 2437
treenode = Treenode.objects.filter(id=treenode_id)[0]
children = Treenode.objects.filter(parent=treenode_id)
self.assertEqual(0, children.count())
self.assertEqual(None, treenode.parent)
tn_count = Treenode.objects.all().count()
response = self.client.post(
'/%d/treenode/delete' % self.test_project_id,
{'treenode_id': treenode_id, 'state': make_nocheck_state()})
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
expected_result = {
'success': 'Removed treenode successfully.',
'parent_id': None,
'deleted_neuron': True,
'skeleton_id': 2433,
'children': [],
'confidence': 5,
'radius': -1.0,
'links': [],
'x': 5290.0,
'y': 3930.0,
'z': 279.0
}
self.assertEqual(expected_result, parsed_response)
self.assertEqual(0, Treenode.objects.filter(id=treenode_id).count())
self.assertEqual(tn_count - 1, Treenode.objects.all().count())
def test_delete_non_root_treenode(self):
self.fake_authentication()
treenode_id = 265
relation_map = get_relation_to_id_map(self.test_project_id)
get_skeleton = lambda: TreenodeClassInstance.objects.filter(
project=self.test_project_id,
relation=relation_map['element_of'],
treenode=treenode_id)
self.assertEqual(1, get_skeleton().count())
children = Treenode.objects.filter(parent=treenode_id)
self.assertTrue(children.count() > 0)
tn_count = Treenode.objects.all().count()
parent = get_object_or_404(Treenode, id=treenode_id).parent
response = self.client.post(
'/%d/treenode/delete' % self.test_project_id,
{'treenode_id': treenode_id, 'state': make_nocheck_state()})
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
expected_result = 'Removed treenode successfully.'
self.assertEqual(expected_result, parsed_response['success'])
self.assertEqual(0, Treenode.objects.filter(id=treenode_id).count())
self.assertEqual(0, get_skeleton().count())
self.assertEqual(tn_count - 1, Treenode.objects.all().count())
for child in children:
child_after_change = get_object_or_404(Treenode, id=child.id)
self.assertEqual(parent, child_after_change.parent)
def test_treenode_info_nonexisting_treenode_failure(self):
self.fake_authentication()
treenode_id = 55555
response = self.client.post(
'/%d/treenodes/%s/info' % (self.test_project_id, treenode_id))
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
expected_result = 'No skeleton and neuron for treenode %s' % treenode_id
self.assertIn('error', parsed_response)
self.assertEqual(expected_result, parsed_response['error'])
def test_treenode_info(self):
self.fake_authentication()
treenode_id = 239
response = self.client.post(
'/%d/treenodes/%s/info' % (self.test_project_id, treenode_id))
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
expected_result = {'skeleton_id': 235, 'neuron_id': 233, 'skeleton_name': 'skeleton 235', 'neuron_name': 'branched neuron'}
self.assertEqual(expected_result, parsed_response)
def assertTreenodeHasRadius(self, treenode_id, radius):
"""Helper function for radius update tests."""
treenode = Treenode.objects.get(id=treenode_id)
self.assertEqual(radius, treenode.radius,
'Treenode %d has radius %s not %s' % (treenode_id, treenode.radius, radius))
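    # The radius tests below exercise the 'option' parameter of the treenode
    # radius API; judging from the test names and the option values used,
    # 0 updates only the single node, 1 propagates to the next branch or end
    # node, 2 to the previous branch node, 3 to the previous node with an
    # explicit radius, 4 to the root, and 5 to every node in the skeleton.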
def test_update_treenode_radius_single_node(self):
self.fake_authentication()
treenode_id = 257
new_r = 5
old_r = -1
response = self.client.post(
'/%d/treenode/%d/radius' % (self.test_project_id, treenode_id),
{'radius': new_r, 'option': 0, 'state': make_nocheck_state()})
self.assertEqual(response.status_code, 200)
expected = [(259, old_r), (257, new_r), (255, old_r)]
for x in expected:
self.assertTreenodeHasRadius(*x)
def test_update_treenode_radius_next_branch(self):
self.fake_authentication()
# Test to end node
treenode_id = 257
new_r = 5
old_r = -1
response = self.client.post(
'/%d/treenode/%d/radius' % (self.test_project_id, treenode_id),
{'radius': new_r, 'option': 1, 'state': make_nocheck_state()})
self.assertEqual(response.status_code, 200)
expected = [(261, new_r), (259, new_r), (257, new_r),
(255, old_r), (253, old_r)]
for x in expected:
self.assertTreenodeHasRadius(*x)
# Test to branch node
treenode_id = 263
response = self.client.post(
'/%d/treenode/%d/radius' % (self.test_project_id, treenode_id),
{'radius': new_r, 'option': 1, 'state': make_nocheck_state()})
self.assertEqual(response.status_code, 200)
expected = [(253, old_r), (263, new_r), (265, new_r),
(269, old_r), (267, old_r)]
for x in expected:
self.assertTreenodeHasRadius(*x)
def test_update_treenode_radius_prev_branch(self):
self.fake_authentication()
# Test to branch node
treenode_id = 257
new_r = 5
old_r = -1
response = self.client.post(
'/%d/treenode/%d/radius' % (self.test_project_id, treenode_id),
{'radius': new_r, 'option': 2, 'state': make_nocheck_state()})
self.assertEqual(response.status_code, 200)
expected = [(261, old_r), (259, old_r), (257, new_r),
(255, new_r), (253, old_r)]
for x in expected:
self.assertTreenodeHasRadius(*x)
# Test to root node
treenode_id = 253
response = self.client.post(
'/%d/treenode/%d/radius' % (self.test_project_id, treenode_id),
{'radius': new_r, 'option': 2, 'state': make_nocheck_state()})
self.assertEqual(response.status_code, 200)
expected = [(255, new_r), (263, old_r), (253, new_r),
(251, new_r), (249, new_r), (247, new_r),
(247, new_r), (245, new_r), (243, new_r),
(241, new_r), (239, new_r), (237, old_r)]
for x in expected:
self.assertTreenodeHasRadius(*x)
def test_update_treenode_radius_prev_defined_node(self):
self.fake_authentication()
# Set radius at ancestor node
ancestor = Treenode.objects.get(id=251)
ancestor.radius = 7
ancestor.save()
# Test to previous defined node
treenode_id = 257
new_r = 5
old_r = -1
response = self.client.post(
'/%d/treenode/%d/radius' % (self.test_project_id, treenode_id),
{'radius': new_r, 'option': 3, 'state': make_nocheck_state()})
self.assertEqual(response.status_code, 200)
expected = [(261, old_r), (259, old_r), (257, new_r),
(255, new_r), (253, new_r), (251, 7)]
# Test on node with defined radius (and propagation to root)
treenode_id = ancestor.id
response = self.client.post(
'/%d/treenode/%d/radius' % (self.test_project_id, treenode_id),
{'radius': new_r, 'option': 3, 'state': make_nocheck_state()})
self.assertEqual(response.status_code, 200)
expected = [(253, new_r), (251, new_r), (249, new_r),
(247, new_r), (247, new_r), (245, new_r),
(243, new_r), (241, new_r), (239, new_r),
(237, new_r)]
for x in expected:
self.assertTreenodeHasRadius(*x)
def test_update_treenode_radius_to_root(self):
self.fake_authentication()
treenode_id = 257
new_r = 5
old_r = -1
response = self.client.post(
'/%d/treenode/%d/radius' % (self.test_project_id, treenode_id),
{'radius': new_r, 'option': 4, 'state': make_nocheck_state()})
self.assertEqual(response.status_code, 200)
expected = [(261, old_r), (259, old_r), (257, new_r),
(255, new_r), (253, new_r), (263, old_r),
(251, new_r), (249, new_r), (247, new_r),
(247, new_r), (245, new_r), (243, new_r),
(241, new_r), (239, new_r), (237, new_r)]
for x in expected:
self.assertTreenodeHasRadius(*x)
def test_update_treenode_radius_all_nodes(self):
self.fake_authentication()
treenode_id = 2417
new_r = 5.0
old_r = -1.0
response = self.client.post(
'/%d/treenode/%d/radius' % (self.test_project_id, treenode_id),
{'radius': new_r, 'option': 5, 'state': make_nocheck_state()})
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
expected_response = {
'success': True,
'new_radius': new_r,
'updated_nodes': {
'2415': {'edition_time': u'2016-04-08T15:33:16.133Z',
'new': 5.0,
'old': -1.0,
'skeleton_id': 2411},
'2417': {'edition_time': u'2016-04-08T15:33:16.133Z',
'new': 5.0,
'old': -1.0,
'skeleton_id': 2411},
'2419': {'edition_time': u'2016-04-08T15:33:16.133Z',
'new': 5.0,
'old': -1.0,
'skeleton_id': 2411},
'2423': {'edition_time': u'2016-04-08T15:33:16.133Z',
'new': 5.0,
'old': -1.0,
'skeleton_id': 2411}}
}
        # The response has updated timestamps (since we updated nodes), so we have
# to compare fields manually to ignore them
for k,v in expected_response.iteritems():
self.assertIn(k, parsed_response)
if 'updated_nodes' == k:
continue
self.assertEqual(v, parsed_response.get(k))
for k,v in expected_response['updated_nodes'].iteritems():
self.assertIn(k, parsed_response['updated_nodes'])
result_node = parsed_response['updated_nodes'][k]
for p,pv in v.iteritems():
self.assertIn(p, result_node)
result_value = result_node.get(p)
if 'edition_time' == p:
                    # The edition time changes through the update, so the test
                    # can't know the exact value and only checks that it changed
self.assertNotEqual(pv, result_value)
else:
self.assertEqual(pv, result_value)
# Don't expect any more items than the above:
self.assertEqual(len(expected_response['updated_nodes']),
len(parsed_response['updated_nodes']))
expected = [(2419, new_r), (2417, new_r), (2415, new_r), (2423, new_r)]
for x in expected:
self.assertTreenodeHasRadius(*x)
def test_node_find_previous_branch(self):
self.fake_authentication()
treenode_id = 257
response = self.client.post(
'/%d/treenodes/%d/previous-branch-or-root' % (self.test_project_id, treenode_id),
{'alt': 0})
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
# Response should contain one branch.
expected_result = [253, 3685.0, 2160.0, 0.0]
self.assertEqual(expected_result, parsed_response)
treenode_id = 253
response = self.client.post(
'/%d/treenodes/%d/previous-branch-or-root' % (self.test_project_id, treenode_id),
{'alt': 0})
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
# Response should contain one branch.
expected_result = [237, 1065.0, 3035.0, 0.0]
self.assertEqual(expected_result, parsed_response)
treenode_id = 237
response = self.client.post(
'/%d/treenodes/%d/previous-branch-or-root' % (self.test_project_id, treenode_id),
{'alt': 0})
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
# Response should contain one branch.
expected_result = [237, 1065.0, 3035.0, 0.0]
self.assertEqual(expected_result, parsed_response)
def test_node_find_next_branch(self):
self.fake_authentication()
treenode_id = 391
response = self.client.post(
'/%d/treenodes/%d/next-branch-or-end' % (self.test_project_id, treenode_id))
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
# Response should contain one branch.
expected_result = [[[393, 6910.0, 990.0, 0.0],
[393, 6910.0, 990.0, 0.0],
[399, 5670.0, 640.0, 0.0]]]
self.assertEqual(expected_result, parsed_response)
treenode_id = 253
response = self.client.post(
'/%d/treenodes/%d/next-branch-or-end' % (self.test_project_id, treenode_id))
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
# Response should contain two branches, and the larger branch headed by
# node 263 should be first.
expected_result = [[[263, 3915.0, 2105.0, 0.0],
[263, 3915.0, 2105.0, 0.0],
[265, 4570.0, 2125.0, 0.0]],
[[255, 3850.0, 1790.0, 0.0],
[255, 3850.0, 1790.0, 0.0],
[261, 2820.0, 1345.0, 0.0]]]
self.assertEqual(expected_result, parsed_response)
def test_treenode_find_children(self):
self.fake_authentication()
treenode_id = 387
response = self.client.post(
'/%d/treenodes/%d/children' % (self.test_project_id, treenode_id))
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
expected_result = []
self.assertEqual(expected_result, parsed_response)
treenode_id = 385
response = self.client.post(
'/%d/treenodes/%d/children' % (self.test_project_id, treenode_id))
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
expected_result = [[[387, 9030.0, 1480.0, 0.0]]]
self.assertEqual(expected_result, parsed_response)
treenode_id = 367
response = self.client.post(
'/%d/treenodes/%d/children' % (self.test_project_id, treenode_id))
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
expected_result = [[383, 7850.0, 1970.0, 0.0], [391, 6740.0, 1530.0, 0.0]]
parsed_response = [p[0] for p in parsed_response]
for (expected, parsed) in zip(sorted(expected_result), sorted(parsed_response)):
self.assertEqual(expected, parsed)
def test_suppressed_virtual_nodes(self):
self.fake_authentication()
response = self.client.post(
'/%d/treenode/create' % (self.test_project_id, ),
{'x': 1,
'y': -1,
'z': 0})
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
parent_id = parsed_response['treenode_id']
skeleton_id = parsed_response['skeleton_id']
response = self.client.post(
'/%d/treenode/create' % (self.test_project_id, ),
{'x': 3,
'y': -3,
'z': 2,
'parent_id': parent_id,
'state': make_nocheck_state()})
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
child_id = parsed_response['treenode_id']
        # Initially no nodes should be suppressed
response = self.client.get(
'/%d/treenodes/%d/suppressed-virtual/' % (self.test_project_id, child_id))
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
expected_result = []
self.assertEqual(expected_result, parsed_response)
# Reject attempt to suppress root node
response = self.client.post(
'/%d/treenodes/%d/suppressed-virtual/' % (self.test_project_id, parent_id),
{'location_coordinate': 1,
'orientation': 0})
self.assertEqual(response.status_code, 400)
# Reject coordinate outside edge
response = self.client.post(
'/%d/treenodes/%d/suppressed-virtual/' % (self.test_project_id, child_id),
{'location_coordinate': 4,
'orientation': 0})
self.assertEqual(response.status_code, 400)
# Create virtual node
response = self.client.post(
'/%d/treenodes/%d/suppressed-virtual/' % (self.test_project_id, child_id),
{'location_coordinate': 2,
'orientation': 0})
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
suppressed_id = parsed_response['id']
# Delete virtual node
response = self.client.delete(
'/%d/treenodes/%d/suppressed-virtual/%d' % (self.test_project_id, child_id, suppressed_id))
self.assertEqual(response.status_code, 204)
def test_list_treenode_table_simple(self):
self.fake_authentication()
response = self.client.post(
'/%d/treenode/table/%d/content' % (self.test_project_id, 235))
self.assertEqual(response.status_code, 200)
expected_result = [[
[417, 415, 5, 4990.0, 4200.0, 0.0, -1.0, 3, 1323093096.0],
[415, 289, 5, 5810.0, 3950.0, 0.0, -1.0, 3, 1323093096.0],
[289, 285, 5, 6210.0, 3480.0, 0.0, -1.0, 3, 1320587496.0],
[285, 283, 5, 6100.0, 2980.0, 0.0, -1.0, 3, 1323006696.0],
[283, 281, 5, 5985.0, 2745.0, 0.0, -1.0, 3, 1323957096.0],
[281, 279, 5, 5675.0, 2635.0, 0.0, -1.0, 3, 1323093096.0],
[279, 267, 5, 5530.0, 2465.0, 0.0, -1.0, 3, 1323093096.0],
[277, 275, 5, 6090.0, 1550.0, 0.0, -1.0, 3, 1323093096.0],
[275, 273, 5, 5800.0, 1560.0, 0.0, -1.0, 3, 1323093096.0],
[273, 271, 5, 5265.0, 1610.0, 0.0, -1.0, 3, 1323093096.0],
[271, 269, 5, 5090.0, 1675.0, 0.0, -1.0, 3, 1323093096.0],
[269, 265, 5, 4820.0, 1900.0, 0.0, -1.0, 3, 1323093096.0],
[267, 265, 5, 5400.0, 2200.0, 0.0, -1.0, 3, 1323093096.0],
[265, 263, 5, 4570.0, 2125.0, 0.0, -1.0, 3, 1323093096.0],
[263, 253, 5, 3915.0, 2105.0, 0.0, -1.0, 3, 1323093096.0],
[261, 259, 5, 2820.0, 1345.0, 0.0, -1.0, 3, 1323093096.0],
[259, 257, 5, 3445.0, 1385.0, 0.0, -1.0, 3, 1323093096.0],
[257, 255, 5, 3825.0, 1480.0, 0.0, -1.0, 3, 1323093096.0],
[255, 253, 5, 3850.0, 1790.0, 0.0, -1.0, 3, 1323093096.0],
[253, 251, 5, 3685.0, 2160.0, 0.0, -1.0, 3, 1323093096.0],
[251, 249, 5, 3380.0, 2330.0, 0.0, -1.0, 3, 1323093096.0],
[249, 247, 5, 2815.0, 2590.0, 0.0, -1.0, 3, 1323093096.0],
[247, 245, 5, 2610.0, 2700.0, 0.0, -1.0, 3, 1323093096.0],
[245, 243, 5, 1970.0, 2595.0, 0.0, -1.0, 3, 1323093096.0],
[243, 241, 5, 1780.0, 2570.0, 0.0, -1.0, 3, 1323093096.0],
[241, 239, 5, 1340.0, 2660.0, 0.0, -1.0, 3, 1323093096.0],
[239, 237, 5, 1135.0, 2800.0, 0.0, -1.0, 3, 1323093096.0],
[237, None, 5, 1065.0, 3035.0, 0.0, -1.0, 3, 1323093096.0]],
[], [[261, 'TODO']]]
parsed_response = json.loads(response.content)
# Check each aaData row instead of everything at once for more granular
        # error reporting. Don't expect the same ordering.
for (expected, parsed) in zip(sorted(expected_result[0]), sorted(parsed_response[0])):
self.assertEqual(expected, parsed)
self.assertEqual(expected_result[1], parsed_response[1])
self.assertEqual(expected_result[2], parsed_response[2])
| gpl-3.0 | 4,589,188,642,307,267,000 | 43.33795 | 131 | 0.579887 | false |
mnestis/advent2015 | 18/part1.py | 1 | 1208 | #!/usr/bin/python
import numpy as np
import re
import itertools
def animated_lights(input_string):
other_chars = re.compile("[^#\.]")
lights = []
for row in input_string.split("\n"):
if row == "":
continue
row = other_chars.sub("", row)
row = row.replace("#", "1")
row = row.replace(".", "0")
lights.append(map(lambda x: int(x), row))
lights = np.array(lights, dtype=int)
for i in range(100):
lights = step_lights(lights)
return np.sum(lights)
def step_lights(lights):
next_lights = np.empty(lights.shape, dtype=int)
for i, j in itertools.product(range(lights.shape[0]), range(lights.shape[1])):
x0 = max(i-1, 0)
x1 = min(i+2, lights.shape[0])
y0 = max(j-1, 0)
y1 = min(j+2, lights.shape[1])
neighbourhood = np.sum(lights[x0:x1, y0:y1])
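        # The 3x3 window sum includes the cell itself, so a live cell with 2 or
        # 3 live neighbours shows up here as a total of 3 or 4.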
if lights[i,j] == 1:
next_lights[i,j] = 1 if neighbourhood == 3 or neighbourhood == 4 else 0
else:
next_lights[i,j] = 1 if neighbourhood == 3 else 0
return next_lights
if __name__=="__main__":
input_string = open("input.txt").read()
print animated_lights(input_string)
| mit | -8,158,571,688,017,475,000 | 22.230769 | 83 | 0.56043 | false |
mozilla-metrics/socorro-toolbox | src/main/python/checkimprovedskiplist.py | 1 | 1764 | import sys, os
file, = sys.argv[1:]
oldsignaturemap = {}
newsignaturemap = {}
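# Each map is keyed signature -> {other signature: (crash count, example crash)},
# one per direction, so both reports below can be produced from the same pass.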
for line in open(file):
line = line.rstrip('\n')
try:
oldsignature, newsignature, count, example = line.split('\t')
except ValueError:
print >>sys.stderr, "Questionable line: %r" % (line,)
continue
count = int(count)
t = count, example
oldsignaturemap.setdefault(oldsignature, {})[newsignature] = t
newsignaturemap.setdefault(newsignature, {})[oldsignature] = t
print "Signature generation report: %s" % (file,)
print
print "******"
print
print "Mappings of current signatures to new signatures"
print
items = filter(lambda i: i[0] > 5,
((sum(count for newsignature, (count, example) in newsignatures.iteritems()),
oldsignature,
newsignatures)
for oldsignature, newsignatures in oldsignaturemap.iteritems()))
items.sort(key=lambda i: i[0])
for totalcount, oldsignature, newsignatures in items:
if len(newsignatures) == 1:
newsignature, (count, example) = newsignatures.items()[0]
print "'%s' always maps to '%s' (%i : %s)" % (oldsignature, newsignature, count, example)
else:
print "'%s' maps to multiple new signatures:" % (oldsignature,)
for newsignature, (count, example) in newsignatures.items():
print " '%s' (%i : %s)" % (newsignature, count, example)
print
print "******"
print
print "New signatures which combine several old signatures"
print
for newsignature, oldsignatures in newsignaturemap.iteritems():
if len(oldsignatures) == 1: continue
print "'%s' combines multiple old signatures:" % (newsignature,)
for oldsignature, (count, example) in oldsignatures.items():
print " '%s' (%i : %s)" % (oldsignature, count, example)
| apache-2.0 | -5,036,359,242,503,436,000 | 29.947368 | 97 | 0.663265 | false |
hjoliver/cylc | tests/unit/test_task_id.py | 1 | 2507 | # THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from cylc.flow.task_id import TaskID
class TestTaskId(unittest.TestCase):
def test_get(self):
self.assertEqual("a.1", TaskID.get("a", 1))
self.assertEqual("a._1", TaskID.get("a", "_1"))
self.assertEqual(
"WTASK.20101010T101010", TaskID.get("WTASK", "20101010T101010"))
def test_split(self):
self.assertEqual(["a", '1'], TaskID.split("a.1"))
self.assertEqual(["a", '_1'], TaskID.split("a._1"))
self.assertEqual(
["WTAS", '20101010T101010'], TaskID.split("WTAS.20101010T101010"))
def test_is_valid_name(self):
for name in [
"abc", "123", "____", "_", "a_b", "a_1", "1_b", "ABC"
]:
self.assertTrue(TaskID.is_valid_name(name))
for name in [
"a.1", None, "%abc", "", " "
]:
self.assertFalse(TaskID.is_valid_name(name))
def test_is_valid_id(self):
for id1 in [
"a.1", "_.098098439535$#%#@!#~"
]:
self.assertTrue(TaskID.is_valid_id(id1))
for id2 in [
"abc", "123", "____", "_", "a_b", "a_1", "1_b", "ABC", "a.A A"
]:
self.assertFalse(TaskID.is_valid_id(id2))
def test_is_valid_id_2(self):
# TBD: a.A A is invalid for valid_id, but valid for valid_id_2?
# TBD: a/a.a is OK?
for id1 in [
"a.1", "_.098098439535$#%#@!#~", "a/1", "_/098098439535$#%#@!#~",
"a.A A", "a/a.a"
]:
self.assertTrue(TaskID.is_valid_id_2(id1))
for id2 in [
"abc", "123", "____", "_", "a_b", "a_1", "1_b", "ABC"
]:
self.assertFalse(TaskID.is_valid_id_2(id2))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 4,325,459,664,491,436,500 | 34.309859 | 78 | 0.564819 | false |
arseneyr/essentia | src/examples/python/streaming_extractor/highlevel.py | 1 | 4041 | #!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
import essentia
import essentia.standard as standard
import essentia.streaming as streaming
import numpy
from postprocess import postProcess
def tonalPoolCleaning(pool, namespace=None):
tonalspace = 'tonal.'
if namespace: tonalspace = namespace + '.tonal.'
tuningFreq = pool[tonalspace + 'tuning_frequency'][-1]
pool.remove(tonalspace + 'tuning_frequency')
pool.set(tonalspace + 'tuning_frequency', tuningFreq)
pool.remove(tonalspace + 'hpcp_highres')
def normalize(array):
max = numpy.max(array)
return [float(val)/float(max) for val in array]
def tuningSystemFeatures(pool, namespace=''):
# expects tonal descriptors and tuning features to be in pool
tonalspace = 'tonal.'
if namespace: tonalspace = namespace + '.tonal.'
# 1-diatonic strength
hpcp_highres = normalize(numpy.mean(pool[tonalspace + 'hpcp_highres'], 0))
key,scale,strength,_ = standard.Key(profileType='diatonic')(hpcp_highres)
pool.set(tonalspace + 'tuning_diatonic_strength', strength)
# 2- high resolution features
eqTempDeviation, ntEnergy,_ = standard.HighResolutionFeatures()(hpcp_highres)
pool.set(tonalspace+'tuning_equal_tempered_deviation', eqTempDeviation)
pool.set(tonalspace+'tuning_nontempered_energy_ratio', ntEnergy)
# 3- THPCP
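    # Rotate the averaged HPCP so its maximum bin comes first; this ring shift
    # is what makes the THPCP profile transposition-invariant.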
hpcp = normalize(numpy.mean(pool[tonalspace + 'hpcp'], 0))
hpcp_copy = hpcp[:]
idx = numpy.argmax(hpcp)
offset = len(hpcp)-idx
hpcp[:offset] = hpcp_copy[idx:offset+idx]
hpcp[offset:offset+idx] = hpcp_copy[0:idx]
pool.set(tonalspace+'thpcp', essentia.array(hpcp))
def sfxPitch(pool, namespace=''):
sfxspace = 'sfx.'
llspace = 'lowlevel.'
if namespace:
sfxspace = namespace + '.sfx.'
llspace = namespace + '.lowlevel.'
pitch = pool[llspace+'pitch']
gen = streaming.VectorInput(pitch)
maxtt = streaming.MaxToTotal()
mintt = streaming.MinToTotal()
amt = streaming.AfterMaxToBeforeMaxEnergyRatio()
gen.data >> maxtt.envelope
gen.data >> mintt.envelope
gen.data >> amt.pitch
maxtt.maxToTotal >> (pool, sfxspace+'pitch_max_to_total')
mintt.minToTotal >> (pool, sfxspace+'pitch_min_to_total')
amt.afterMaxToBeforeMaxEnergyRatio >> (pool, sfxspace+'pitch_after_max_to_before_max_energy_ratio')
essentia.run(gen)
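    # Temporal centroid of the pitch contour (frame indices weighted by pitch
    # value), i.e. roughly where along the sound the pitch mass sits.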
pc = standard.Centroid(range=len(pitch)-1)(pitch)
pool.set(sfxspace+'pitch_centroid', pc)
def compute(pool, namespace=''):
# 5th pass: High-level descriptors that depend on others, but we
# don't need to stream the audio anymore
# Average Level
from level import levelAverage
levelAverage(pool, namespace)
# SFX Descriptors
sfxPitch(pool, namespace)
# Tuning System Features
tuningSystemFeatures(pool, namespace)
# Pool Cleaning (removing temporary descriptors):
tonalPoolCleaning(pool, namespace)
# Add missing descriptors which are not computed yet, but will be for the
    # final release or during the 1.x cycle. However, the schema needs to be
# complete before that, so just put default values for these.
postProcess(pool, namespace)
| agpl-3.0 | -1,231,980,796,570,652,200 | 34.080357 | 103 | 0.690423 | false |
sepol/bp-neural-net | python/runner.py | 1 | 1182 | import sys
import numpy as np
from neuralNet import neuralNet
with open('input.txt') as f:
inputs = []
for line in f:
line = line.split()
if line:
line = [float(i) for i in line]
inputs.append(line)
with open('output.txt') as f:
outputs = []
for line in f:
line = line.split()
if line:
line = [int(i) for i in line]
outputs.append(line)
input = np.array(inputs)
output = np.array(outputs)
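# 400 inputs / 30 hidden units / 10 output classes -- presumably 20x20 pixel
# digit images with one class per digit, though that depends on the data files.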
nn = neuralNet(400,30,10)
# Training
# ---
# Batch training
nn.trainBatch(input,output,20)
# Live training
#tests = np.size(input,0)
#acc = 0
#for i in xrange(0, tests):
# if (np.argmax(nn.trainLive(input[[i],:],output[i,0])) == output[i,0]):
# acc = acc + 1
#acc = acc / float(tests) * 100
#print("Live training accuracy: %f" % (acc))
# Save/Load
# ---
# Saving weights
#nn.saveWeights('saved.txt')
# Loading weights
#nn.loadWeights('saved.txt')
print("Value: %d, Result: %d" % (output[20,0],nn.classify(input[[20],:])))
print("Value: %d, Result: %d" % (output[300,0],nn.classify(input[[300],:])))
print("Value: %d, Result: %d" % (output[2500,0],nn.classify(input[[2500],:])))
print("Value: %d, Result: %d" % (output[4800,0],nn.classify(input[[4800],:])))
| mit | 2,482,796,611,280,662,500 | 21.301887 | 78 | 0.63198 | false |
idlesign/django-sitemessage | sitemessage/tests/test_messengers.py | 1 | 12035 | import pytest
from sitemessage.messengers.base import MessengerBase
from sitemessage.models import Subscription, DispatchError
from sitemessage.toolbox import recipients, schedule_messages, send_scheduled_messages
from sitemessage.utils import get_registered_messenger_objects
from .testapp.sitemessages import (
WONDERLAND_DOMAIN, MessagePlainForTest, MessengerForTest, BuggyMessenger,
messenger_fb,
messenger_smtp,
messenger_telegram,
messenger_twitter,
messenger_vk,
messenger_xmpp,
)
def test_init_params():
messengers = get_registered_messenger_objects()
my = messengers['test_messenger']
assert my.login == 'mylogin'
assert my.password == 'mypassword'
def test_alias():
messenger = type('MyMessenger', (MessengerBase,), {'alias': 'myalias'})
assert messenger.get_alias() == 'myalias'
messenger = type('MyMessenger', (MessengerBase,), {})
assert messenger.get_alias() == 'MyMessenger'
def test_get_recipients_data(user_create):
user = user_create(attributes=dict(username='myuser'))
to = ['gogi', 'givi', user]
r1 = MessengerForTest.structure_recipients_data(to)
assert len(r1) == len(to)
assert r1[0].address == f'gogi{WONDERLAND_DOMAIN}'
assert r1[0].messenger == 'test_messenger'
assert r1[1].address == f'givi{WONDERLAND_DOMAIN}'
assert r1[1].messenger == 'test_messenger'
assert r1[2].address == f'user_myuser{WONDERLAND_DOMAIN}'
assert r1[2].messenger == 'test_messenger'
def test_recipients():
r = MessagePlainForTest.recipients('smtp', 'someone')
assert len(r) == 1
assert r[0].address == 'someone'
def test_send():
m = MessengerForTest('l', 'p')
m.send('message_cls', 'message_model', 'dispatch_models')
assert m.last_send['message_cls'] == 'message_cls'
assert m.last_send['message_model'] == 'message_model'
assert m.last_send['dispatch_models'] == 'dispatch_models'
m = BuggyMessenger()
    recipients_ = recipients('test_messenger', ['a', 'b', 'c', 'd'])
    with pytest.raises(Exception):
        m.send('a buggy message', '', recipients_)
def test_subscription(user_create):
user1 = user_create(attributes=dict(username='first'))
user2 = user_create(attributes=dict(username='second'))
user2.is_active = False
user2.save()
Subscription.create(user1.id, MessagePlainForTest, MessengerForTest)
Subscription.create(user2.id, MessagePlainForTest, MessengerForTest)
assert len(MessagePlainForTest.get_subscribers(active_only=False)) == 2
assert len(MessagePlainForTest.get_subscribers(active_only=True)) == 1
def assert_called_n(func, n=1):
assert func.call_count == n
func.call_count = 0
def test_exception_propagation(monkeypatch):
schedule_messages('text', recipients('telegram', ''))
schedule_messages('text', recipients('telegram', ''))
def new_method(*args, **kwargs):
raise Exception('telegram beforesend failed')
monkeypatch.setattr(messenger_telegram, 'before_send', new_method)
send_scheduled_messages()
errors = list(DispatchError.objects.all())
assert len(errors) == 2
assert errors[0].error_log == 'telegram beforesend failed'
assert errors[1].error_log == 'telegram beforesend failed'
class TestSMTPMessenger:
def setup_method(self, method):
messenger_smtp.smtp.sendmail.call_count = 0
def test_get_address(self):
r = object()
assert messenger_smtp.get_address(r) == r
r = type('r', (object,), dict(email='somewhere'))
assert messenger_smtp.get_address(r) == 'somewhere'
def test_send(self):
schedule_messages('text', recipients('smtp', 'someone'))
send_scheduled_messages()
assert_called_n(messenger_smtp.smtp.sendmail)
def test_send_fail(self):
schedule_messages('text', recipients('smtp', 'someone'))
def new_method(*args, **kwargs):
raise Exception('smtp failed')
old_method = messenger_smtp.smtp.sendmail
messenger_smtp.smtp.sendmail = new_method
try:
send_scheduled_messages()
errors = DispatchError.objects.all()
assert len(errors) == 1
assert errors[0].error_log == 'smtp failed'
assert errors[0].dispatch.address == 'someone'
finally:
messenger_smtp.smtp.sendmail = old_method
def test_send_test_message(self):
messenger_smtp.send_test_message('someone', 'sometext')
assert_called_n(messenger_smtp.smtp.sendmail)
class TestTwitterMessenger:
def test_get_address(self):
r = object()
assert messenger_twitter.get_address(r) == r
r = type('r', (object,), dict(twitter='somewhere'))
assert messenger_twitter.get_address(r) == 'somewhere'
def test_send(self):
schedule_messages('text', recipients('twitter', 'someone'))
send_scheduled_messages()
messenger_twitter.api.statuses.update.assert_called_with(status='@someone text')
def test_send_test_message(self):
messenger_twitter.send_test_message('someone', 'sometext')
messenger_twitter.api.statuses.update.assert_called_with(status='@someone sometext')
messenger_twitter.send_test_message('', 'sometext')
messenger_twitter.api.statuses.update.assert_called_with(status='sometext')
def test_send_fail(self):
schedule_messages('text', recipients('twitter', 'someone'))
def new_method(*args, **kwargs):
raise Exception('tweet failed')
old_method = messenger_twitter.api.statuses.update
messenger_twitter.api.statuses.update = new_method
try:
send_scheduled_messages()
errors = DispatchError.objects.all()
assert len(errors) == 1
assert errors[0].error_log == 'tweet failed'
assert errors[0].dispatch.address == 'someone'
finally:
messenger_twitter.api.statuses.update = old_method
class TestXMPPSleekMessenger:
def test_get_address(self):
r = object()
assert messenger_xmpp.get_address(r) == r
r = type('r', (object,), dict(jabber='somewhere'))
assert messenger_xmpp.get_address(r) == 'somewhere'
def test_send(self):
schedule_messages('text', recipients('xmppsleek', 'someone'))
send_scheduled_messages()
messenger_xmpp.xmpp.send_message.assert_called_once_with(
mtype='chat', mbody='text', mfrom='somjid', mto='someone'
)
def test_send_test_message(self):
messenger_xmpp.send_test_message('someone', 'sometext')
messenger_xmpp.xmpp.send_message.assert_called_with(
mtype='chat', mbody='sometext', mfrom='somjid', mto='someone'
)
def test_send_fail(self):
schedule_messages('text', recipients('xmppsleek', 'someone'))
def new_method(*args, **kwargs):
raise Exception('xmppsleek failed')
old_method = messenger_xmpp.xmpp.send_message
messenger_xmpp.xmpp.send_message = new_method
messenger_xmpp._session_started = True
try:
send_scheduled_messages()
errors = DispatchError.objects.all()
assert len(errors) == 1
assert errors[0].error_log == 'xmppsleek failed'
assert errors[0].dispatch.address == 'someone'
finally:
messenger_xmpp.xmpp.send_message = old_method
class TestTelegramMessenger:
def setup_method(self, method):
messenger_telegram._verify_bot()
messenger_telegram.lib.post.call_count = 0
def test_get_address(self):
r = object()
assert messenger_telegram.get_address(r) == r
r = type('r', (object,), dict(telegram='chat_id'))
assert messenger_telegram.get_address(r) == 'chat_id'
def test_send(self):
schedule_messages('text', recipients('telegram', '1234567'))
send_scheduled_messages()
assert_called_n(messenger_telegram.lib.post, 2)
assert messenger_telegram.lib.post.call_args[1]['proxies'] == {'https': 'socks5://user:pass@host:port'}
def test_send_test_message(self):
messenger_telegram.send_test_message('someone', 'sometext')
assert_called_n(messenger_telegram.lib.post)
messenger_telegram.send_test_message('', 'sometext')
assert_called_n(messenger_telegram.lib.post)
def test_get_chat_ids(self):
assert messenger_telegram.get_chat_ids() == []
assert_called_n(messenger_telegram.lib.post)
def test_send_fail(self):
schedule_messages('text', recipients('telegram', 'someone'))
def new_method(*args, **kwargs):
raise Exception('telegram failed')
old_method = messenger_telegram.lib.post
messenger_telegram.lib.post = new_method
try:
send_scheduled_messages()
errors = DispatchError.objects.all()
assert len(errors) == 1
assert errors[0].error_log == 'telegram failed'
assert errors[0].dispatch.address == 'someone'
finally:
messenger_telegram.lib.post = old_method
class TestFacebookMessenger:
def setup_method(self, method):
messenger_fb.lib.post.call_count = 0
messenger_fb.lib.get.call_count = 0
def test_send(self):
schedule_messages('text', recipients('fb', ''))
send_scheduled_messages()
assert_called_n(messenger_fb.lib.post)
assert messenger_fb.lib.post.call_args[1]['proxies'] == {'https': '0.0.0.0'}
def test_send_test_message(self):
messenger_fb.send_test_message('', 'sometext')
assert_called_n(messenger_fb.lib.post)
messenger_fb.send_test_message('', 'sometext')
assert_called_n(messenger_fb.lib.post)
def test_get_page_access_token(self):
assert messenger_fb.get_page_access_token('app_id', 'app_secret', 'user_token') == {}
assert_called_n(messenger_fb.lib.get, 2)
def test_send_fail(self):
schedule_messages('text', recipients('fb', ''))
def new_method(*args, **kwargs):
raise Exception('fb failed')
old_method = messenger_fb.lib.post
messenger_fb.lib.post = new_method
try:
send_scheduled_messages()
errors = DispatchError.objects.all()
assert len(errors) == 1
assert errors[0].error_log == 'fb failed'
assert errors[0].dispatch.address == ''
finally:
messenger_fb.lib.post = old_method
class TestVKontakteMessenger:
def setup_method(self, method):
messenger_vk.lib.post.call_count = 0
messenger_vk.lib.get.call_count = 0
def test_send(self):
schedule_messages('text', recipients('vk', '12345'))
send_scheduled_messages()
assert_called_n(messenger_vk.lib.post)
assert messenger_vk.lib.post.call_args[1]['data']['owner_id'] == '12345'
def test_get_access_token(self, monkeypatch):
monkeypatch.setattr('webbrowser.open', lambda *args: None)
result = messenger_vk.get_access_token(app_id='00000')
assert '00000&scope=wall,' in result
def test_send_test_message(self):
messenger_vk.send_test_message('12345', 'sometext')
assert_called_n(messenger_vk.lib.post)
messenger_vk.send_test_message('12345', 'sometext')
assert_called_n(messenger_vk.lib.post)
def test_send_fail(self):
schedule_messages('text', recipients('vk', '12345'))
def new_method(*args, **kwargs):
raise Exception('vk failed')
old_method = messenger_vk.lib.post
messenger_vk.lib.post = new_method
try:
send_scheduled_messages()
errors = DispatchError.objects.all()
assert len(errors) == 1
assert errors[0].error_log == 'vk failed'
assert errors[0].dispatch.address == '12345'
finally:
messenger_vk.lib.post = old_method
| bsd-3-clause | -6,108,604,954,456,369,000 | 32.901408 | 111 | 0.637059 | false |
plockaby/dart | agent/lib/dart/agent/cli.py | 1 | 1572 | """
This is a supervisord event listener.
"""
import sys
import argparse
import logging
import traceback
def main():
parser = argparse.ArgumentParser(
prog="dart-agent",
formatter_class=argparse.RawTextHelpFormatter,
description=__doc__,
)
parser.add_argument("--write-configuration", action="store_true", dest="write_configuration", help="write configuration files and exit")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", default=False, help="send verbose output to the console")
args = parser.parse_args()
# configure logging
logging.captureWarnings(True)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_handler = logging.StreamHandler(stream=sys.stderr)
log_handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)-8s - %(message)s"))
logger.addHandler(log_handler)
# change the level and output format if we're going to be verbose
if args.verbose:
logger.setLevel(logging.DEBUG)
log_handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] - %(message)s"))
# start the main program
try:
options = vars(args)
configuration = dict(verbose=options.pop("verbose"))
from .app import DartAgent
runnable = DartAgent(**configuration)
return runnable.run(**options)
except Exception as e:
logger.error(str(e))
logger.debug(traceback.format_exc())
return 1
if (__name__ == "__main__"):
sys.exit(main())
| artistic-2.0 | 2,599,248,250,923,130,000 | 31.081633 | 140 | 0.666031 | false |
hipikat/scow | scow/__main__.py | 1 | 1947 |
from os import path
from fabric.api import env, run, sudo, prefix
from fabric.contrib import files as fabric_files
import fabtools
from . import scow_task, pkgs, users, python, web, db
@scow_task
def init_droplet(*args, **kwargs):
"""Set up admin users and a web stack on the droplet"""
pkgs.upgrade_packages()
pkgs.install_packages()
users.create_missing_admins()
installed_admins = env.machine.installed_admins or []
# TODO: Make sure this is done by functions in the users module too.
bash_rc_lines = [line for line in env.scow.SCOW_SHELL_SETUP_STRING.splitlines() if line]
for user in installed_admins + ['root']:
admin_bash_rc = path.join(fabtools.user.home_directory(user), '.bashrc')
run('touch ' + admin_bash_rc)
for line in bash_rc_lines:
fabric_files.append(admin_bash_rc, line)
for line in bash_rc_lines:
fabric_files.append('/etc/profile', line)
python.install_python_env()
if env.project.PYTHON_VERSION not in env.scow.pyenv_versions:
sudo('pyenv install ' + env.project.PYTHON_VERSION)
sudo('pyenv rehash')
sudo('pyenv global ' + env.project.PYTHON_VERSION)
#with prefix('pyenv global ):
python.setup_local_python_tools()
# TODO: Check ALLOW_SYSTEM_PYTHON, and whether the requested project
# Python version matches the installed system version.
#if not getattr(env.project, 'ALLOW_SYSTEM_PYTHON', False):
#python.setup_local_python(env.project.PYTHON_VERSION)
db.setup_postgres()
web.setup_nginx()
#setup_uwsgi_emperor()
@scow_task
def install_project(settings_class, *args, **kwargs):
"""Install the project. Requires settings_class, tag optional"""
#setup_project_virtualenv(*args, **kwargs)
#setup_django_databases(*args, **kwargs)
#install_project_src(settings_class, *args, **kwargs)
#ggset_project_settings_class(str(settings_class), *args, **kwargs)
| bsd-2-clause | -1,971,765,330,369,858,600 | 38.734694 | 92 | 0.687211 | false |
red-hood/calendarserver | contrib/performance/benchmarks/bounded_recurrence.py | 1 | 1816 | ##
# Copyright (c) 2011-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Benchmark a server's handling of events with a bounded recurrence.
"""
from uuid import uuid4
from itertools import count
from datetime import datetime, timedelta
from contrib.performance._event_create import (
makeAttendees, makeVCalendar, formatDate, measure as _measure)
def makeEvent(i, organizerSequence, attendeeCount):
"""
Create a new half-hour long event that starts soon and recurs
daily for the next five days.
"""
now = datetime.now()
start = now.replace(minute=15, second=0, microsecond=0) + timedelta(hours=i)
end = start + timedelta(minutes=30)
until = start + timedelta(days=5)
rrule = "RRULE:FREQ=DAILY;INTERVAL=1;UNTIL=" + formatDate(until)
return makeVCalendar(
uuid4(), start, end, rrule, organizerSequence,
makeAttendees(attendeeCount))
def measure(host, port, dtrace, attendeeCount, samples):
calendar = "bounded-recurrence"
organizerSequence = 1
# An infinite stream of recurring VEVENTS to PUT to the server.
events = ((i, makeEvent(i, organizerSequence, attendeeCount)) for i in count(2))
return _measure(
calendar, organizerSequence, events,
host, port, dtrace, samples)
| apache-2.0 | 3,689,230,688,405,523,000 | 33.264151 | 84 | 0.720815 | false |
vialectrum/vialectrum | electrum_ltc/gui/kivy/uix/dialogs/lightning_tx_dialog.py | 1 | 3445 | import copy
from datetime import datetime
from decimal import Decimal
from typing import NamedTuple, Callable, TYPE_CHECKING
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from kivy.clock import Clock
from kivy.uix.label import Label
from kivy.uix.dropdown import DropDown
from kivy.uix.button import Button
from electrum_ltc.gui.kivy.i18n import _
if TYPE_CHECKING:
from ...main_window import ElectrumWindow
Builder.load_string('''
<LightningTxDialog>
id: popup
title: _('Lightning Payment')
preimage: ''
is_sent: False
amount_str: ''
fee_str: ''
date_str: ''
payment_hash: ''
description: ''
BoxLayout:
orientation: 'vertical'
ScrollView:
scroll_type: ['bars', 'content']
bar_width: '25dp'
GridLayout:
height: self.minimum_height
size_hint_y: None
cols: 1
spacing: '10dp'
padding: '10dp'
GridLayout:
height: self.minimum_height
size_hint_y: None
cols: 1
spacing: '10dp'
BoxLabel:
text: _('Description') if root.description else ''
value: root.description
BoxLabel:
text: _('Date')
value: root.date_str
BoxLabel:
text: _('Amount sent') if root.is_sent else _('Amount received')
value: root.amount_str
BoxLabel:
text: _('Transaction fee') if root.fee_str else ''
value: root.fee_str
TopLabel:
text: _('Payment hash') + ':'
TxHashLabel:
data: root.payment_hash
name: _('Payment hash')
TopLabel:
text: _('Preimage')
TxHashLabel:
data: root.preimage
name: _('Preimage')
Widget:
size_hint: 1, 0.1
BoxLayout:
size_hint: 1, None
height: '48dp'
Widget
Button:
size_hint: 0.5, None
height: '48dp'
text: _('Close')
on_release: root.dismiss()
''')
class ActionButtonOption(NamedTuple):
text: str
func: Callable
enabled: bool
class LightningTxDialog(Factory.Popup):
def __init__(self, app, tx_item):
Factory.Popup.__init__(self)
self.app = app # type: ElectrumWindow
self.wallet = self.app.wallet
self._action_button_fn = lambda btn: None
self.is_sent = bool(tx_item['direction'] == 'sent')
self.description = tx_item['label']
self.timestamp = tx_item['timestamp']
self.date_str = datetime.fromtimestamp(self.timestamp).isoformat(' ')[:-3]
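        # Lightning amounts are stored in millisatoshis; dividing by 1000
        # converts them to satoshis for display formatting.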
self.amount = Decimal(tx_item['amount_msat']) /1000
self.payment_hash = tx_item['payment_hash']
self.preimage = tx_item['preimage']
format_amount = self.app.format_amount_and_units
self.amount_str = format_amount(self.amount)
if self.is_sent:
self.fee_str = format_amount(Decimal(tx_item['fee_msat']) / 1000)
| mit | 6,040,807,483,877,699,000 | 30.036036 | 88 | 0.520464 | false |
mbudiu-vmw/hiero | bin/upload-data.py | 1 | 3589 | #!/usr/bin/env python
# We attempted to make this program work with both python2 and python3
"""This script takes a set of files and a cluster configuration describing a set of machines.
It uploads the files to the given machines in round-robin fashion.
The script can also be given an optional schema file.
This file will be uploaded to all machines.
The list of machines is provided in a Hillview configuration file."""
# pylint: disable=invalid-name
from argparse import ArgumentParser, REMAINDER
import os.path
from hillviewCommon import ClusterConfiguration, get_config, get_logger
logger = get_logger("upload-data")
created_folders = set()
def create_remote_folder(remoteHost, folder):
"""Creates a folder on a remote machine"""
shortcut = "" + remoteHost.host + ":" + folder
if shortcut in created_folders:
return
remoteHost.create_remote_folder(folder)
created_folders.add(shortcut)
def copy_file_to_remote_host(rh, source, folder, copyOption):
"""Copy files in the specified folder to the remote machine"""
create_remote_folder(rh, folder)
rh.copy_file_to_remote(source, folder, copyOption)
def copy_everywhere(config, file, folder, copyOption):
"""Copy specified file to all worker machines"""
assert isinstance(config, ClusterConfiguration)
message = "Copying " + file + " to all hosts"
logger.info(message)
config.run_on_all_workers(lambda rh: copy_file_to_remote_host(rh, file, folder, copyOption))
def copy_files(config, folder, filelist, copyOption):
"""Copy the files to the given machines in round-robin fashion"""
assert isinstance(config, ClusterConfiguration)
message = "Copying " + str(len(filelist)) + " files to all hosts in round-robin"
logger.info(message)
index = 0
workers = config.get_workers()
for f in filelist:
rh = workers[index]
index = (index + 1) % len(workers)
copy_file_to_remote_host(rh, f, folder, copyOption)
def main():
"""Main function"""
    parser = ArgumentParser(epilog="The arguments in the list are uploaded in round-robin " +
"to the worker machines in the cluster")
parser.add_argument("config", help="json cluster configuration file")
parser.add_argument("-d", "--directory",
help="destination folder where output is written" +\
" (if relative it is with respect to config.service_folder)")
parser.add_argument("-L", "--symlinks", help="Follow symlinks instead of ignoring them",
action="store_true")
parser.add_argument("--common", "-s", help="File that is loaded to all machines", action="append")
parser.add_argument("files", help="Files to copy", nargs=REMAINDER)
args = parser.parse_args()
config = get_config(parser, args)
folder = args.directory
if folder is None:
logger.error("Directory argument is mandatory")
parser.print_help()
exit(1)
if args.symlinks:
copyOptions = "-L"
else:
copyOptions = ""
if not os.path.isabs(folder):
folder = os.path.join(config.service_folder, folder)
message = "Folder is relative, using " + folder
logger.info(message)
for c in args.common:
copy_everywhere(config, c, folder, copyOptions)
if args.files:
copy_files(config, folder, args.files, copyOptions)
else:
logger.info("No files to upload to the machines provided in a Hillview configuration")
logger.info("Done.")
if __name__ == "__main__":
main()
| apache-2.0 | -5,015,177,832,050,948,000 | 40.732558 | 102 | 0.670382 | false |
reviewboard/reviewboard | reviewboard/reviews/tests/test_review_request_manager.py | 2 | 38511 | from __future__ import unicode_literals
from django.contrib.auth.models import User
from djblets.testing.decorators import add_fixtures
from reviewboard.diffviewer.models import DiffSetHistory
from reviewboard.reviews.models import (DefaultReviewer, ReviewRequest,
ReviewRequestDraft)
from reviewboard.scmtools.errors import ChangeNumberInUseError
from reviewboard.site.models import LocalSite
from reviewboard.testing import TestCase
class ReviewRequestManagerTests(TestCase):
"""Unit tests for reviewboard.reviews.managers.ReviewRequestManager."""
fixtures = ['test_users']
@add_fixtures(['test_scmtools'])
def test_create_with_site(self):
"""Testing ReviewRequest.objects.create with LocalSite"""
user = User.objects.get(username='doc')
local_site = LocalSite.objects.create(name='test')
repository = self.create_repository()
review_request = ReviewRequest.objects.create(
user, repository, local_site=local_site)
self.assertEqual(review_request.repository, repository)
self.assertEqual(review_request.local_site, local_site)
self.assertEqual(review_request.local_id, 1)
@add_fixtures(['test_scmtools'])
def test_create_with_site_and_commit_id(self):
"""Testing ReviewRequest.objects.create with LocalSite and commit ID"""
user = User.objects.get(username='doc')
local_site = LocalSite.objects.create(name='test')
repository = self.create_repository()
review_request = ReviewRequest.objects.create(
user, repository,
commit_id='123',
local_site=local_site)
self.assertEqual(review_request.repository, repository)
self.assertEqual(review_request.commit_id, '123')
self.assertEqual(review_request.local_site, local_site)
self.assertEqual(review_request.local_id, 1)
@add_fixtures(['test_scmtools'])
def test_create_with_site_and_commit_id_conflicts_review_request(self):
"""Testing ReviewRequest.objects.create with LocalSite and commit ID
that conflicts with a review request
"""
user = User.objects.get(username='doc')
local_site = LocalSite.objects.create(name='test')
repository = self.create_repository()
# This one should be fine.
ReviewRequest.objects.create(user, repository, commit_id='123',
local_site=local_site)
self.assertEqual(local_site.review_requests.count(), 1)
# This one will yell.
with self.assertRaises(ChangeNumberInUseError):
ReviewRequest.objects.create(
user,
repository,
commit_id='123',
local_site=local_site)
# Make sure that entry doesn't exist in the database.
self.assertEqual(local_site.review_requests.count(), 1)
@add_fixtures(['test_scmtools'])
def test_create_with_site_and_commit_id_conflicts_draft(self):
"""Testing ReviewRequest.objects.create with LocalSite and
commit ID that conflicts with a draft
"""
user = User.objects.get(username='doc')
local_site = LocalSite.objects.create(name='test')
repository = self.create_repository()
# This one should be fine.
existing_review_request = ReviewRequest.objects.create(
user, repository, local_site=local_site)
existing_draft = ReviewRequestDraft.create(existing_review_request)
existing_draft.commit_id = '123'
existing_draft.save()
self.assertEqual(local_site.review_requests.count(), 1)
# This one will yell.
with self.assertRaises(ChangeNumberInUseError):
ReviewRequest.objects.create(
user,
repository,
commit_id='123',
local_site=local_site)
# Make sure that entry doesn't exist in the database.
self.assertEqual(local_site.review_requests.count(), 1)
@add_fixtures(['test_scmtools'])
def test_create_with_site_and_commit_id_and_fetch_problem(self):
"""Testing ReviewRequest.objects.create with LocalSite and
commit ID with problem fetching commit details
"""
user = User.objects.get(username='doc')
local_site = LocalSite.objects.create(name='test')
repository = self.create_repository()
self.assertEqual(local_site.review_requests.count(), 0)
self.assertEqual(DiffSetHistory.objects.count(), 0)
self.assertEqual(ReviewRequestDraft.objects.count(), 0)
with self.assertRaises(NotImplementedError):
ReviewRequest.objects.create(
user, repository,
commit_id='123',
local_site=local_site,
create_from_commit_id=True)
# Make sure that entry and related objects don't exist in the database.
self.assertEqual(local_site.review_requests.count(), 0)
self.assertEqual(DiffSetHistory.objects.count(), 0)
self.assertEqual(ReviewRequestDraft.objects.count(), 0)
@add_fixtures(['test_scmtools'])
def test_create_with_create_from_commit_id(self):
"""Testing ReviewRequest.objects.create with commit ID and
create_from_commit_id
"""
user = User.objects.get(username='doc')
repository = self.create_repository(tool_name='Test')
review_request = ReviewRequest.objects.create(
user,
repository,
commit_id='123',
create_from_commit_id=True)
self.assertEqual(review_request.repository, repository)
self.assertEqual(review_request.diffset_history.diffsets.count(), 0)
self.assertEqual(review_request.commit_id, '123')
self.assertEqual(review_request.changenum, 123)
draft = review_request.get_draft()
self.assertIsNotNone(draft)
self.assertIsNotNone(draft.diffset)
self.assertEqual(draft.commit_id, '123')
@add_fixtures(['test_scmtools'])
def test_create_with_create_from_commit_id_and_default_reviewers(self):
"""Testing ReviewRequest.objects.create with commit ID,
create_from_commit_id, and default reviewers
"""
user = User.objects.get(username='doc')
repository = self.create_repository(tool_name='Test')
default_reviewer = DefaultReviewer.objects.create(
name='Default Reviewer',
file_regex='.')
default_reviewer.repository.add(repository)
default_reviewer.people.add(user)
default_reviewer.groups.add(self.create_review_group())
review_request = ReviewRequest.objects.create(
user,
repository,
commit_id='123',
create_from_commit_id=True)
self.assertEqual(review_request.target_people.count(), 0)
self.assertEqual(review_request.target_groups.count(), 0)
draft = review_request.get_draft()
self.assertIsNotNone(draft)
self.assertEqual(draft.target_people.count(), 1)
self.assertEqual(draft.target_groups.count(), 1)
def test_public(self):
"""Testing ReviewRequest.objects.public"""
user1 = User.objects.get(username='doc')
user2 = User.objects.get(username='grumpy')
self.create_review_request(summary='Test 1',
publish=True,
submitter=user1)
self.create_review_request(summary='Test 2',
submitter=user2)
self.create_review_request(summary='Test 3',
status='S',
public=True,
submitter=user1)
self.create_review_request(summary='Test 4',
status='S',
public=True,
submitter=user2)
self.create_review_request(summary='Test 5',
status='D',
public=True,
submitter=user1)
self.create_review_request(summary='Test 6',
status='D',
submitter=user2)
self.assertValidSummaries(
ReviewRequest.objects.public(user=user1),
[
'Test 1',
])
self.assertValidSummaries(
ReviewRequest.objects.public(status=None),
[
'Test 5',
'Test 4',
'Test 3',
'Test 1',
])
self.assertValidSummaries(
ReviewRequest.objects.public(user=user2, status=None),
[
'Test 6',
'Test 5',
'Test 4',
'Test 3',
'Test 2',
'Test 1',
])
self.assertValidSummaries(
ReviewRequest.objects.public(status=None,
show_all_unpublished=True),
[
'Test 6',
'Test 5',
'Test 4',
'Test 3',
'Test 2',
'Test 1',
])
@add_fixtures(['test_scmtools'])
def test_public_with_repository_on_local_site(self):
"""Testing ReviewRequest.objects.public with repository on a
Local Site
"""
local_site = LocalSite.objects.create(name='test')
user = User.objects.get(username='grumpy')
local_site.users.add(user)
repository = self.create_repository(local_site=local_site)
review_request = self.create_review_request(repository=repository,
local_site=local_site,
publish=True)
self.assertTrue(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user,
local_site=local_site)
self.assertEqual(review_requests.count(), 1)
@add_fixtures(['test_scmtools'])
def test_public_without_private_repo_access(self):
"""Testing ReviewRequest.objects.public without access to private
repositories
"""
user = User.objects.get(username='grumpy')
repository = self.create_repository(public=False)
review_request = self.create_review_request(repository=repository,
publish=True)
self.assertFalse(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user)
self.assertEqual(review_requests.count(), 0)
@add_fixtures(['test_scmtools'])
def test_public_without_private_repo_access_on_local_site(self):
"""Testing ReviewRequest.objects.public without access to private
repositories on a Local Site
"""
local_site = LocalSite.objects.create(name='test')
user = User.objects.get(username='grumpy')
local_site.users.add(user)
repository = self.create_repository(public=False,
local_site=local_site)
review_request = self.create_review_request(repository=repository,
local_site=local_site,
publish=True)
self.assertFalse(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user,
local_site=local_site)
self.assertEqual(review_requests.count(), 0)
@add_fixtures(['test_scmtools'])
def test_public_with_private_repo_access(self):
"""Testing ReviewRequest.objects.public with access to private
repositories
"""
user = User.objects.get(username='grumpy')
repository = self.create_repository(public=False)
repository.users.add(user)
review_request = self.create_review_request(repository=repository,
publish=True)
self.assertTrue(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user)
self.assertEqual(review_requests.count(), 1)
@add_fixtures(['test_scmtools'])
def test_public_with_private_repo_access_on_local_site(self):
"""Testing ReviewRequest.objects.public with access to private
repositories on a Local Site
"""
local_site = LocalSite.objects.create(name='test')
user = User.objects.get(username='grumpy')
local_site.users.add(user)
repository = self.create_repository(public=False,
local_site=local_site)
repository.users.add(user)
review_request = self.create_review_request(repository=repository,
publish=True,
local_site=local_site)
self.assertTrue(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user,
local_site=local_site)
self.assertEqual(review_requests.count(), 1)
@add_fixtures(['test_scmtools'])
def test_public_with_private_repo_access_through_group(self):
"""Testing ReviewRequest.objects.public with access to private
repositories
"""
user = User.objects.get(username='grumpy')
group = self.create_review_group(invite_only=True)
group.users.add(user)
repository = self.create_repository(public=False)
repository.review_groups.add(group)
review_request = self.create_review_request(repository=repository,
publish=True)
self.assertTrue(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user)
self.assertEqual(review_requests.count(), 1)
@add_fixtures(['test_scmtools'])
def test_public_with_private_repo_access_through_group_on_local_site(self):
"""Testing ReviewRequest.objects.public with access to private
repositories on a Local Site
"""
local_site = LocalSite.objects.create(name='test')
user = User.objects.get(username='grumpy')
local_site.users.add(user)
group = self.create_review_group(invite_only=True)
group.users.add(user)
repository = self.create_repository(public=False,
local_site=local_site)
repository.review_groups.add(group)
review_request = self.create_review_request(repository=repository,
local_site=local_site,
publish=True)
self.assertTrue(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user,
local_site=local_site)
self.assertEqual(review_requests.count(), 1)
def test_public_without_private_group_access(self):
"""Testing ReviewRequest.objects.public without access to private
group
"""
user = User.objects.get(username='grumpy')
group = self.create_review_group(invite_only=True)
review_request = self.create_review_request(publish=True)
review_request.target_groups.add(group)
self.assertFalse(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user)
self.assertEqual(review_requests.count(), 0)
def test_public_with_private_group_access(self):
"""Testing ReviewRequest.objects.public with access to private
group
"""
user = User.objects.get(username='grumpy')
group = self.create_review_group(invite_only=True)
group.users.add(user)
review_request = self.create_review_request(publish=True)
review_request.target_groups.add(group)
self.assertTrue(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user)
self.assertEqual(review_requests.count(), 1)
def test_public_with_private_group_access_on_local_site(self):
"""Testing ReviewRequest.objects.public with access to private
group on a Local Site
"""
local_site = LocalSite.objects.create(name='test')
user = User.objects.get(username='grumpy')
local_site.users.add(user)
group = self.create_review_group(invite_only=True,
local_site=local_site)
group.users.add(user)
review_request = self.create_review_request(publish=True,
local_site=local_site)
review_request.target_groups.add(group)
self.assertTrue(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user,
local_site=local_site)
self.assertEqual(review_requests.count(), 1)
@add_fixtures(['test_scmtools'])
def test_public_with_private_repo_and_public_group(self):
"""Testing ReviewRequest.objects.public without access to private
        repositories and with access to public group
"""
user = User.objects.get(username='grumpy')
group = self.create_review_group()
repository = self.create_repository(public=False)
review_request = self.create_review_request(repository=repository,
publish=True)
review_request.target_groups.add(group)
self.assertFalse(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user)
self.assertEqual(review_requests.count(), 0)
@add_fixtures(['test_scmtools'])
def test_public_with_private_group_and_public_repo(self):
"""Testing ReviewRequest.objects.public with access to private
        repository and without access to private group
"""
user = User.objects.get(username='grumpy')
group = self.create_review_group(invite_only=True)
repository = self.create_repository(public=False)
repository.users.add(user)
review_request = self.create_review_request(repository=repository,
publish=True)
review_request.target_groups.add(group)
self.assertFalse(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user)
self.assertEqual(review_requests.count(), 0)
@add_fixtures(['test_scmtools'])
def test_public_with_private_repo_and_owner(self):
"""Testing ReviewRequest.objects.public without access to private
repository and as the submitter
"""
user = User.objects.get(username='grumpy')
repository = self.create_repository(public=False)
review_request = self.create_review_request(repository=repository,
submitter=user,
publish=True)
self.assertTrue(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user)
self.assertEqual(review_requests.count(), 1)
@add_fixtures(['test_scmtools'])
def test_public_with_private_repo_and_owner_on_local_site(self):
"""Testing ReviewRequest.objects.public without access to private
repository and as the submitter on a Local Site
"""
local_site = LocalSite.objects.create(name='test')
user = User.objects.get(username='grumpy')
local_site.users.add(user)
repository = self.create_repository(public=False,
local_site=local_site)
review_request = self.create_review_request(repository=repository,
submitter=user,
local_site=local_site,
publish=True)
self.assertTrue(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user,
local_site=local_site)
self.assertEqual(review_requests.count(), 1)
def test_public_with_private_group_and_owner(self):
"""Testing ReviewRequest.objects.public without access to private
group and as the submitter
"""
user = User.objects.get(username='grumpy')
group = self.create_review_group(invite_only=True)
review_request = self.create_review_request(submitter=user,
publish=True)
review_request.target_groups.add(group)
self.assertTrue(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user)
self.assertEqual(review_requests.count(), 1)
def test_public_with_private_group_and_owner_on_local_site(self):
"""Testing ReviewRequest.objects.public without access to private
group and as the submitter on a Local Site
"""
local_site = LocalSite.objects.create(name='test')
user = User.objects.get(username='grumpy')
local_site.users.add(user)
group = self.create_review_group(invite_only=True,
local_site=local_site)
review_request = self.create_review_request(submitter=user,
local_site=local_site,
publish=True)
review_request.target_groups.add(group)
self.assertTrue(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user,
local_site=local_site)
self.assertEqual(review_requests.count(), 1)
@add_fixtures(['test_scmtools'])
def test_public_with_private_repo_and_target_people(self):
"""Testing ReviewRequest.objects.public without access to private
repository and user in target_people
"""
user = User.objects.get(username='grumpy')
repository = self.create_repository(public=False)
review_request = self.create_review_request(repository=repository,
publish=True)
review_request.target_people.add(user)
self.assertFalse(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user)
self.assertEqual(review_requests.count(), 0)
def test_public_with_private_group_and_target_people(self):
"""Testing ReviewRequest.objects.public without access to private
group and user in target_people
"""
user = User.objects.get(username='grumpy')
group = self.create_review_group(invite_only=True)
review_request = self.create_review_request(publish=True)
review_request.target_groups.add(group)
review_request.target_people.add(user)
self.assertTrue(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user)
self.assertEqual(review_requests.count(), 1)
def test_public_with_private_group_and_target_people_on_local_site(self):
"""Testing ReviewRequest.objects.public without access to private
group and user in target_people on a Local Site
"""
local_site = LocalSite.objects.create(name='test')
user = User.objects.get(username='grumpy')
local_site.users.add(user)
group = self.create_review_group(invite_only=True,
local_site=local_site)
review_request = self.create_review_request(publish=True,
local_site=local_site)
review_request.target_groups.add(group)
review_request.target_people.add(user)
self.assertTrue(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user,
local_site=local_site)
self.assertEqual(review_requests.count(), 1)
def test_to_group(self):
"""Testing ReviewRequest.objects.to_group"""
user1 = User.objects.get(username='doc')
group1 = self.create_review_group(name='privgroup')
group1.users.add(user1)
review_request = self.create_review_request(summary='Test 1',
public=True,
submitter=user1)
review_request.target_groups.add(group1)
review_request = self.create_review_request(summary='Test 2',
public=False,
submitter=user1)
review_request.target_groups.add(group1)
review_request = self.create_review_request(summary='Test 3',
public=True,
status='S',
submitter=user1)
review_request.target_groups.add(group1)
self.assertValidSummaries(
ReviewRequest.objects.to_group('privgroup', None),
[
'Test 1',
])
self.assertValidSummaries(
ReviewRequest.objects.to_group('privgroup', None, status=None),
[
'Test 3',
'Test 1',
])
def test_to_user_group(self):
"""Testing ReviewRequest.objects.to_user_groups"""
user1 = User.objects.get(username='doc')
user2 = User.objects.get(username='grumpy')
group1 = self.create_review_group(name='group1')
group1.users.add(user1)
group2 = self.create_review_group(name='group2')
group2.users.add(user2)
review_request = self.create_review_request(summary='Test 1',
public=True,
submitter=user1)
review_request.target_groups.add(group1)
review_request = self.create_review_request(summary='Test 2',
submitter=user2,
public=True,
status='S')
review_request.target_groups.add(group1)
review_request = self.create_review_request(summary='Test 3',
public=True,
submitter=user2)
review_request.target_groups.add(group1)
review_request.target_groups.add(group2)
self.assertValidSummaries(
ReviewRequest.objects.to_user_groups('doc', local_site=None),
[
'Test 3',
'Test 1',
])
self.assertValidSummaries(
ReviewRequest.objects.to_user_groups(
'doc', status=None, local_site=None),
[
'Test 3',
'Test 2',
'Test 1',
])
self.assertValidSummaries(
ReviewRequest.objects.to_user_groups(
'grumpy', user=user2, local_site=None),
[
'Test 3',
])
def test_to_or_from_user(self):
"""Testing ReviewRequest.objects.to_or_from_user"""
user1 = User.objects.get(username='doc')
user2 = User.objects.get(username='grumpy')
group1 = self.create_review_group(name='group1')
group1.users.add(user1)
group2 = self.create_review_group(name='group2')
group2.users.add(user2)
self.create_review_request(summary='Test 1',
public=True,
submitter=user1)
self.create_review_request(summary='Test 2',
public=False,
submitter=user1)
self.create_review_request(summary='Test 3',
public=True,
status='S',
submitter=user1)
review_request = self.create_review_request(summary='Test 4',
public=True,
submitter=user1)
review_request.target_groups.add(group1)
review_request.target_people.add(user2)
review_request = self.create_review_request(summary='Test 5',
submitter=user2,
status='S')
review_request.target_groups.add(group1)
review_request.target_people.add(user2)
review_request.target_people.add(user1)
review_request = self.create_review_request(summary='Test 6',
public=True,
submitter=user2)
review_request.target_groups.add(group1)
review_request.target_groups.add(group2)
review_request.target_people.add(user1)
review_request = self.create_review_request(summary='Test 7',
public=True,
status='S',
submitter=user2)
review_request.target_people.add(user1)
self.assertValidSummaries(
ReviewRequest.objects.to_or_from_user('doc', local_site=None),
[
'Test 6',
'Test 4',
'Test 1',
])
self.assertValidSummaries(
ReviewRequest.objects.to_or_from_user('grumpy', local_site=None),
[
'Test 6',
'Test 4',
])
self.assertValidSummaries(
ReviewRequest.objects.to_or_from_user('doc', status=None,
local_site=None),
[
'Test 7',
'Test 6',
'Test 4',
'Test 3',
'Test 1',
])
self.assertValidSummaries(
ReviewRequest.objects.to_or_from_user('doc', user=user2,
status=None,
local_site=None),
[
'Test 7',
'Test 6',
'Test 5',
'Test 4',
'Test 3',
'Test 1',
])
self.assertValidSummaries(
ReviewRequest.objects.to_or_from_user('doc', user=user1,
status=None,
local_site=None),
[
'Test 7',
'Test 6',
'Test 4',
'Test 3',
'Test 2',
'Test 1',
])
def test_to_user_directly(self):
"""Testing ReviewRequest.objects.to_user_directly"""
user1 = User.objects.get(username='doc')
user2 = User.objects.get(username='grumpy')
group1 = self.create_review_group(name='group1')
group1.users.add(user1)
group2 = self.create_review_group(name='group2')
group2.users.add(user2)
review_request = self.create_review_request(summary='Test 1',
public=True,
submitter=user1)
review_request.target_groups.add(group1)
review_request.target_people.add(user2)
review_request = self.create_review_request(summary='Test 2',
submitter=user2,
status='S')
review_request.target_groups.add(group1)
review_request.target_people.add(user2)
review_request.target_people.add(user1)
review_request = self.create_review_request(summary='Test 3',
public=True,
submitter=user2)
review_request.target_groups.add(group1)
review_request.target_groups.add(group2)
review_request.target_people.add(user1)
review_request = self.create_review_request(summary='Test 4',
public=True,
status='S',
submitter=user2)
review_request.target_people.add(user1)
self.assertValidSummaries(
ReviewRequest.objects.to_user_directly('doc', local_site=None),
[
'Test 3',
])
self.assertValidSummaries(
ReviewRequest.objects.to_user_directly('doc', status=None),
[
'Test 4',
'Test 3',
])
self.assertValidSummaries(
ReviewRequest.objects.to_user_directly(
'doc', user2, status=None, local_site=None),
[
'Test 4',
'Test 3',
'Test 2',
])
def test_from_user(self):
"""Testing ReviewRequest.objects.from_user"""
user1 = User.objects.get(username='doc')
self.create_review_request(summary='Test 1',
public=True,
submitter=user1)
self.create_review_request(summary='Test 2',
public=False,
submitter=user1)
self.create_review_request(summary='Test 3',
public=True,
status='S',
submitter=user1)
self.assertValidSummaries(
ReviewRequest.objects.from_user('doc', local_site=None),
[
'Test 1',
])
self.assertValidSummaries(
ReviewRequest.objects.from_user('doc', status=None,
local_site=None),
[
'Test 3',
'Test 1',
])
self.assertValidSummaries(
ReviewRequest.objects.from_user(
'doc', user=user1, status=None, local_site=None),
[
'Test 3',
'Test 2',
'Test 1',
])
def test_to_user(self):
"""Testing ReviewRequest.objects.to_user"""
user1 = User.objects.get(username='doc')
user2 = User.objects.get(username='grumpy')
group1 = self.create_review_group(name='group1')
group1.users.add(user1)
group2 = self.create_review_group(name='group2')
group2.users.add(user2)
review_request = self.create_review_request(summary='Test 1',
publish=True,
submitter=user1)
review_request.target_groups.add(group1)
review_request = self.create_review_request(summary='Test 2',
submitter=user2,
status='S')
review_request.target_groups.add(group1)
review_request.target_people.add(user2)
review_request.target_people.add(user1)
review_request = self.create_review_request(summary='Test 3',
publish=True,
submitter=user2)
review_request.target_groups.add(group1)
review_request.target_groups.add(group2)
review_request.target_people.add(user1)
review_request = self.create_review_request(summary='Test 4',
publish=True,
status='S',
submitter=user2)
review_request.target_groups.add(group1)
review_request.target_groups.add(group2)
review_request.target_people.add(user1)
self.assertValidSummaries(
ReviewRequest.objects.to_user('doc', local_site=None),
[
'Test 3',
'Test 1',
])
self.assertValidSummaries(
ReviewRequest.objects.to_user('doc', status=None, local_site=None),
[
'Test 4',
'Test 3',
'Test 1',
])
self.assertValidSummaries(
ReviewRequest.objects.to_user(
'doc', user=user2, status=None, local_site=None),
[
'Test 4',
'Test 3',
'Test 2',
'Test 1',
])
def assertValidSummaries(self, review_requests, summaries):
r_summaries = [r.summary for r in review_requests]
for summary in r_summaries:
self.assertIn(summary, summaries,
'summary "%s" not found in summary list'
% summary)
for summary in summaries:
self.assertIn(summary, r_summaries,
'summary "%s" not found in review request list'
% summary)
| mit | -6,242,167,966,362,509,000 | 39.709302 | 79 | 0.540287 | false |
mpi-sws-rse/thingflow-python | tests/test_linq.py | 1 | 2060 | # Copyright 2016 by MPI-SWS and Data-Ken Research.
# Licensed under the Apache 2.0 License.
"""Tests of the linq apis. Pretty much still manually verified, although running
it as a part of the automated test suite makes a decent regression test.
"""
import asyncio
import unittest
from thingflow.base import *
from utils import make_test_output_thing, make_test_output_thing_from_vallist,\
ValidationInputThing
import thingflow.filters.where
import thingflow.filters.output
import thingflow.filters
from thingflow.filters.never import Never
def pp_buf(x):
print("Buffered output: ", x)
print("\n")
class TestLinq(unittest.TestCase):
def test_case(self):
"""Rupak, if you want to test more, just add it here or add additional
methods starting with test_
"""
loop = asyncio.get_event_loop()
s = make_test_output_thing(1, stop_after_events=5)
t = s.skip(2).some(lambda x: x[2]>100)
s.connect(print)
t.connect(print)
scheduler = Scheduler(loop)
scheduler.schedule_periodic(s, 2) # sample once every 2 seconds
u = s.take_last(3).scan(lambda a, x: a+x[2], 0)
u.connect(print)
v = s.take_last(3).reduce(lambda a, x: a+x[2], 0)
v.connect(print)
w = s.buffer_with_time(5, scheduler)
w.connect(pp_buf)
# w = Never()
# w.connect(print)
# scheduler.schedule_periodic(w, 1)
s.print_downstream()
loop.call_later(30, scheduler.stop)
scheduler.run_forever()
print("That's all folks")
def test_first(self):
"""Test the first() operator
"""
p = make_test_output_thing_from_vallist(1, [1, 2, 3, 4, 5, 6])
vs = ValidationInputThing([1], self)
p.first().connect(vs)
scheduler = Scheduler(asyncio.get_event_loop())
scheduler.schedule_recurring(p)
scheduler.run_forever()
self.assertTrue(vs.completed)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 7,569,400,810,597,433,000 | 27.611111 | 80 | 0.615534 | false |
kernevil/samba | python/samba/tests/blackbox/smbcontrol_process.py | 1 | 4652 | # Blackbox tests for the smbcontrol fault injection commands
#
# Copyright (C) Andrew Bartlett <[email protected]> 2018
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# As the test terminates and sleeps samba processes these tests need to run
# in the preforkrestartdc test environment to prevent them impacting other
# tests.
#
from __future__ import print_function
import time
from samba.tests import BlackboxTestCase, BlackboxProcessError
from samba.messaging import Messaging
COMMAND = "bin/smbcontrol"
PING = "ping"
class SmbcontrolProcessBlockboxTests(BlackboxTestCase):
def setUp(self):
super(SmbcontrolProcessBlockboxTests, self).setUp()
lp_ctx = self.get_loadparm()
self.msg_ctx = Messaging(lp_ctx=lp_ctx)
def get_process_data(self):
services = self.msg_ctx.irpc_all_servers()
processes = []
for service in services:
for id in service.ids:
processes.append((service.name, id.pid))
return processes
def get_process(self, name):
processes = self.get_process_data()
for pname, pid in processes:
if name == pname:
return pid
return None
def test_inject_fault(self):
INJECT = "inject"
FAULT = "segv"
#
# Note that this process name needs to be different to the one used
# in the sleep test to avoid a race condition
#
pid = self.get_process("rpc_server")
#
# Ensure we can ping the process before injecting a fault.
#
try:
self.check_run("%s %s %s" % (COMMAND, pid, PING),
msg="trying to ping rpc_server")
except BlackboxProcessError as e:
self.fail("Unable to ping rpc_server process")
#
# Now inject a fault.
#
try:
self.check_run("%s %s %s %s" % (COMMAND, pid, INJECT, FAULT),
msg="injecting fault into rpc_server")
except BlackboxProcessError as e:
print(e)
self.fail("Unable to inject a fault into the rpc_server process")
#
# The process should have died, so we should not be able to ping it
#
try:
self.check_run("%s %s %s" % (COMMAND, pid, PING),
msg="trying to ping rpc_server")
self.fail("Could ping rpc_server process")
except BlackboxProcessError as e:
pass
def test_sleep(self):
SLEEP = "sleep" # smbcontrol sleep command
DURATION = 5 # duration to sleep server for
DELTA = 1 # permitted error for the sleep duration
#
# Note that this process name needs to be different to the one used
# in the inject fault test to avoid a race condition
#
pid = self.get_process("ldap_server")
#
# Ensure we can ping the process before getting it to sleep
#
try:
self.check_run("%s %s %s" % (COMMAND, pid, PING),
msg="trying to ping rpc_server")
except BlackboxProcessError as e:
self.fail("Unable to ping rpc_server process")
#
# Now ask it to sleep
#
start = time.time()
try:
self.check_run("%s %s %s %s" % (COMMAND, pid, SLEEP, DURATION),
msg="putting rpc_server to sleep for %d" % DURATION)
except BlackboxProcessError as e:
print(e)
self.fail("Failed to get rpc_server to sleep for %d" % DURATION)
#
# The process should be sleeping and not respond until it wakes
#
try:
self.check_run("%s %s %s" % (COMMAND, pid, PING),
msg="trying to ping rpc_server")
end = time.time()
duration = end - start
self.assertGreater(duration + DELTA, DURATION)
except BlackboxProcessError as e:
self.fail("Unable to ping rpc_server process")
| gpl-3.0 | 2,958,449,365,037,097,500 | 34.242424 | 79 | 0.595658 | false |
zaironne/SnippetDetector | SD_AddSnippet.py | 1 | 4637 | """
SD_AddSnippet Add a snippet to local/global database. A new snippet is defined by:
- snippet name: directly taken from the name of the function
- snippet description: taken from the comment (if there's one) of the function
- syntactic and semantic bytes sequences
- snippet comments: all the available comments added by the user
by ZaiRoN (zairon.wordpress.com)
Copyright (C) 2015
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from SD_db import sd_db
from SD_Semantic import semantic
from SD_Common import sd_common
db_type = ['global', 'local']
# ask for local/global database
db_answer = AskYN(1, 'Do you want to save the snippet inside the local database?\n[YES = local, NO = global, CANCEL = abort]')
if db_answer == -1:
print('\n[SNIPPET DETECTOR] Add snippet operation aborted')
else:
# start/end address function is automatically taken from the cursor
sd_c = sd_common()
func_start, func_end = sd_c.get_start_end_function(ScreenEA())
    if func_start != BADADDR and func_end != BADADDR:
# create database file path
sddb = sd_db()
n_instr = sd_c.get_total_instructions(func_start, func_end)
db_file = sddb.get_db_folder(db_answer, True) + os.sep + 'sd_db_' + str(n_instr) + '.sd'
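        # Illustrative example (hypothetical values): a function made up of 37
        # instructions would be stored in <db_folder>/sd_db_37.sd, i.e. each
        # database file only holds snippets with the same instruction count.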
if not os.path.isfile(db_file):
# create database file
sddb.open_db_connection(db_file)
sddb.create_snippet_table()
else:
sddb.open_db_connection(db_file)
# is the syntactic bytes sequence already inside the db?
syntactic_bytes = GetManyBytes(func_start, func_end - func_start, False)
_snippet = sddb.get_snippet_by_syntactic_bytes(syntactic_bytes)
fail = False
add_snippet = False
if _snippet:
print('\n[SNIPPET DETECTOR] Snippet is already inside the database (syntactic match):')
fail = True
else:
# get semantic bytes sequence
sd_sem = semantic()
semantic_bytes = sd_sem.from_syntactic_to_semantic(func_start, func_end)
if semantic_bytes is not None:
# is the semantic bytes sequence already inside the db?
_snippet = sddb.get_snippet_by_semantic_bytes(semantic_bytes)
if not _snippet:
# add snippet
add_snippet = True
else:
# semantic bytes sequence could be not unique
save_answer = AskYN(1, 'Snippet is already inside the database (semantic match), do you want'
' to add this snippet too?')
if save_answer == 1:
# add the snippet
add_snippet = True
else:
fail = True
print('[SNIPPET DETECTOR] Snippet is already inside the database (semantic match):')
else:
print('\n[SNIPPET DETECTOR] Unable to convert syntactical snippet into semantic one inside function at 0x%x' % func_start)
if fail:
# print the information about the snippet inside the database
print('Snippet name: %s' % _snippet[0])
print('Snippet description: %s\n' % _snippet[1])
if add_snippet:
# time to save the new snippet inside the database
comments = sd_c.get_comments(func_start, func_end)
snippet_name = GetFunctionName(func_start)
snippet_description = GetFunctionCmt(func_start, False)
sddb.save_snippet(snippet_name, snippet_description, syntactic_bytes, semantic_bytes, comments)
print('\n[SNIPPET DETECTOR] Snippet correctly inserted inside %s database!' % db_type[db_answer])
sddb.close_db_connection()
else:
print('\n[SNIPPET DETECTOR] Unable to get function start/end addresses from cursor at 0x%X...' % ScreenEA())
| gpl-3.0 | 6,602,680,185,730,456,000 | 47.302083 | 138 | 0.614622 | false |
kubeflow/kfp-tekton-backend | components/aws/sagemaker/tests/integration_tests/conftest.py | 1 | 2737 | import pytest
import boto3
import kfp
import os
import utils
from datetime import datetime
def pytest_addoption(parser):
parser.addoption(
"--region",
default="us-west-2",
required=False,
help="AWS region where test will run",
)
parser.addoption(
"--role-arn", required=True, help="SageMaker execution IAM role ARN",
)
parser.addoption(
"--s3-data-bucket",
required=True,
help="Regional S3 bucket name in which test data is hosted",
)
parser.addoption(
"--minio-service-port",
default="9000",
required=False,
help="Localhost port to which minio service is mapped to",
)
parser.addoption(
"--kfp-namespace",
default="kubeflow",
required=False,
help="Cluster namespace where kubeflow pipelines is installed",
)
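# Example invocation (illustrative only -- the role ARN and bucket name below are
# placeholders, not values taken from this repository):
#   pytest --region us-west-2 \
#          --role-arn arn:aws:iam::123456789012:role/sagemaker-execution \
#          --s3-data-bucket my-sagemaker-test-data \
#          --kfp-namespace kubeflow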
@pytest.fixture(scope="session", autouse=True)
def region(request):
os.environ["AWS_REGION"] = request.config.getoption("--region")
return request.config.getoption("--region")
@pytest.fixture(scope="session", autouse=True)
def role_arn(request):
os.environ["ROLE_ARN"] = request.config.getoption("--role-arn")
return request.config.getoption("--role-arn")
@pytest.fixture(scope="session", autouse=True)
def s3_data_bucket(request):
os.environ["S3_DATA_BUCKET"] = request.config.getoption("--s3-data-bucket")
return request.config.getoption("--s3-data-bucket")
@pytest.fixture(scope="session", autouse=True)
def minio_service_port(request):
os.environ["MINIO_SERVICE_PORT"] = request.config.getoption("--minio-service-port")
return request.config.getoption("--minio-service-port")
@pytest.fixture(scope="session", autouse=True)
def kfp_namespace(request):
os.environ["NAMESPACE"] = request.config.getoption("--kfp-namespace")
return request.config.getoption("--kfp-namespace")
@pytest.fixture(scope="session")
def boto3_session(region):
return boto3.Session(region_name=region)
@pytest.fixture(scope="session")
def sagemaker_client(boto3_session):
return boto3_session.client(service_name="sagemaker")
@pytest.fixture(scope="session")
def s3_client(boto3_session):
return boto3_session.client(service_name="s3")
@pytest.fixture(scope="session")
def kfp_client():
kfp_installed_namespace = utils.get_kfp_namespace()
return kfp.Client(namespace=kfp_installed_namespace)
@pytest.fixture(scope="session")
def experiment_id(kfp_client):
exp_name = datetime.now().strftime("%Y-%m-%d")
try:
experiment = kfp_client.get_experiment(experiment_name=exp_name)
except ValueError:
experiment = kfp_client.create_experiment(name=exp_name)
return experiment.id
| apache-2.0 | 3,486,290,735,644,155,000 | 27.216495 | 87 | 0.685787 | false |
johngrantuk/piupdue | piupdue/ArduinoFlashEefc.py | 1 | 7591 | """ Handles main processor operations.
WriteFileToFlash()
LoadBuffer()
WritePage()
EraseFlash()
SetBootFlash()
Reset()
    Not entirely sure what each operation does, but tests have worked.
"""
import ArduinoFlashSerial, ArduinoFlashHardValues
import ctypes, time, os
def WriteFileToFlash(SerialPort, Log, File, IsNativePort):
"""
Writes File to processors flash in blocks pageSize long.
"""
Log.Log("Writing file to flash: " + File)
pageSize = ArduinoFlashHardValues.size # Size of data blocks to be written.
pageNum = 0
offset = 0 # -- Flash.h LN99 => 0
numPages = 0
onBufferA = True # LN52 Flash.cpp
fileSizeBytes = os.path.getsize(File) # Find file size.
numPages = (fileSizeBytes + pageSize - 1) / pageSize # 47 pages for blink.
if numPages > ArduinoFlashHardValues.pages:
raise Exception("WriteFlash()-File Size Error. numPages: " + str(numPages))
Log.Log("Writing " + str(fileSizeBytes) + "bytes to flash in " + str(numPages) + " pages.")
f = open(File, 'rb')
while True:
piece = f.read(pageSize) # Reads a block of data from file.
if not piece:
Log.Log("End of file??")
break
readBytes = len(piece)
Log.Log("Read: " + str(readBytes) + "bytes from file. onBufferA: " + str(onBufferA) + ", PageNum: " + str(pageNum))
dataJ = []
for i in range(0, readBytes):
dataJ.append(ord(piece[i]))
LoadBuffer(SerialPort, Log, onBufferA, dataJ, IsNativePort)
page = offset + pageNum
onBufferA = WritePage(page, onBufferA, SerialPort, Log)
pageNum += 1
if pageNum == numPages or readBytes != pageSize:
Log.Log("End of file...")
break
f.close()
Log.Log("End of WriteFlash()\n")
def EraseFlash(SerialPort, Log):
""" Erases processor flash. """
Log.Log("EraseFlash():")
WaitFSR(SerialPort, Log)
WriteFCR0(ArduinoFlashHardValues.EEFC_FCMD_EA, 0, SerialPort, Log)
WaitFSR(SerialPort, Log)
WriteFCR1(ArduinoFlashHardValues.EEFC_FCMD_EA, 0, SerialPort, Log)
Log.Log("Flash Erased.")
def LoadBuffer(SerialPort, Log, OnBufferA, Data, IsNativePort):
"""
Writes SXXXXXXXX,XXXXXXXX# command then Xmodem.
"""
Log.Log("LoadBuffer():")
ArduinoFlashSerial.Write(SerialPort, Log, ArduinoFlashHardValues.pageBufferA if OnBufferA else ArduinoFlashHardValues.pageBufferB, Data, ArduinoFlashHardValues.size, IsNativePort)
Log.Log("End of LoadBuffer()\n")
def WritePage(Page, OnBufferA, SerialPort, Log):
""" LN256 EefcFlash """
Log.Log("Write Page(), Page: " + str(Page) + ", OnBufferA: " + str(OnBufferA))
SetDstAddr(ArduinoFlashHardValues.addr + Page * ArduinoFlashHardValues.size, SerialPort, Log)
SetSrcAddr(ArduinoFlashHardValues.pageBufferA if OnBufferA else ArduinoFlashHardValues.pageBufferB, SerialPort, Log)
OnBufferA = not OnBufferA
WaitFSR(SerialPort, Log)
ArduinoFlashSerial.WriteWord(SerialPort, ArduinoFlashHardValues.reset, ArduinoFlashHardValues.start + 1, Log) # _wordCopy.runv();
ArduinoFlashSerial.Go(SerialPort, Log, ArduinoFlashHardValues.stack + ArduinoFlashHardValues.user) # _wordCopy.runv();
if ArduinoFlashHardValues.planes == 2 and Page >= ArduinoFlashHardValues.pages / 2:
WriteFCR1(ArduinoFlashHardValues.EEFC_FCMD_EWP if ArduinoFlashHardValues.eraseAuto else ArduinoFlashHardValues.EEFC_FCMD_WP, Page - ArduinoFlashHardValues.pages / 2, SerialPort, Log)
else:
WriteFCR0(ArduinoFlashHardValues.EEFC_FCMD_EWP if ArduinoFlashHardValues.eraseAuto else ArduinoFlashHardValues.EEFC_FCMD_WP, Page, SerialPort, Log)
Log.Log("End of Write Page()\n")
return OnBufferA
def SetBootFlash(SerialPort, Log, Enable):
""" Sets boot flash. """
Log.Log("SetBootFlash():")
WaitFSR(SerialPort, Log)
WriteFCR0(ArduinoFlashHardValues.EEFC_FCMD_SGPB if Enable else ArduinoFlashHardValues.EEFC_FCMD_CGPB, 1, SerialPort, Log)
WaitFSR(SerialPort, Log)
time.sleep(1)
Log.Log("End of SetBootFlash.")
def Reset(SerialPort, Log):
""" Resets processor. """
Log.Log("Reset()...")
ArduinoFlashSerial.WriteWord(SerialPort, 0x400E1A00, 0xA500000D, Log)
Log.Log("Reset done...")
time.sleep(1)
def WaitFSR(SerialPort, Log):
""" Not sure what it does. """
Log.Log("WaitFSR():")
tries = 0
fsr1 = ctypes.c_uint32(0x1).value
while tries <= 500:
addr = "w" + '{0:08X}'.format(ArduinoFlashHardValues.EEFC0_FSR) + ",4#"
Log.Log("Sending EEFC0_FSR: " + addr)
if ArduinoFlashHardValues.LiveWrite:
fsr0 = ArduinoFlashSerial.ReadWord(SerialPort, addr, Log)
if fsr0 & (1 << 2):
Log.Log("WaitFSR() Error. fsr0")
addr = "w" + '{0:08X}'.format(ArduinoFlashHardValues.EEFC1_FSR) + ",4#"
Log.Log("Sending EFC1_FSR: " + addr)
if ArduinoFlashHardValues.LiveWrite:
fsr1 = ArduinoFlashSerial.ReadWord(SerialPort, addr, Log)
if fsr1 & (1 << 2):
Log.Log("WaitFSR() Error. fsr1")
if fsr0 & fsr1 & 0x1:
Log.Log("Breaking.")
break
time.sleep(1) ##########???????
else:
break
tries += 1
if tries > 500:
Log.Log("WaitFSR() Error. Tried and failed!!")
def WriteFCR0(cmd, arg, SerialPort, Log):
"""
writeFCR0(uint8_t cmd, uint32_t arg)
writeFCR0(EEFC_FCMD_EA, 0);
EefcFlash.cpp LN314 _samba.writeWord(EEFC0_FCR, (EEFC_KEY << 24) | (arg << 8) | cmd);
"""
Log.Log("WriteFCR0()")
value = (ArduinoFlashHardValues.EEFC_KEY << 24) | (arg << 8) | cmd
ArduinoFlashSerial.WriteWord(SerialPort, ArduinoFlashHardValues.EEFC0_FCR, value, Log)
def WriteFCR1(cmd, arg, SerialPort, Log):
"""
EefcFlash::writeFCR1(uint8_t cmd, uint32_t arg)
_samba.writeWord(EEFC1_FCR, (EEFC_KEY << 24) | (arg << 8) | cmd);
"""
Log.Log("WriteFCR1()")
value = (ArduinoFlashHardValues.EEFC_KEY << 24) | (arg << 8) | cmd
ArduinoFlashSerial.WriteWord(SerialPort, ArduinoFlashHardValues.EEFC1_FCR, value, Log)
def SetDstAddr(DstAddr, SerialPort, Log):
""" Unsure """
Log.Log("SetDstAddr()")
ArduinoFlashSerial.WriteWord(SerialPort, ArduinoFlashHardValues.user + ArduinoFlashHardValues.dstAddr, DstAddr, Log) # WordCopyApplet (0x20001000 + 0x00000028), DstAddr
def SetSrcAddr(SrcAddr, SerialPort, Log):
""" Unsure """
Log.Log("SetSrcAddr()")
ArduinoFlashSerial.WriteWord(SerialPort, ArduinoFlashHardValues.user + ArduinoFlashHardValues.srcAddr, SrcAddr, Log) # WordCopyApplet (0x20001000 + 0x00000028), DstAddr | mit | -2,672,416,731,614,591,000 | 37.538071 | 247 | 0.577658 | false |
zenefits/sentry | src/sentry/web/frontend/debug/mail.py | 1 | 19243 | from __future__ import absolute_import, print_function
import itertools
import logging
import time
import traceback
import uuid
from datetime import datetime, timedelta
from random import Random
import six
from django.contrib.webdesign.lorem_ipsum import WORDS
from django.core.urlresolvers import reverse
from django.template.defaultfilters import slugify
from django.utils import timezone
from django.utils.safestring import mark_safe
from django.views.generic import View
from sentry.app import tsdb
from sentry.constants import LOG_LEVELS
from sentry.digests import Record
from sentry.digests.notifications import Notification, build_digest
from sentry.digests.utilities import get_digest_metadata
from sentry.http import get_server_hostname
from sentry.models import (
Activity, Event, Group, GroupStatus, GroupSubscriptionReason, Organization,
OrganizationMember, Project, Release, Rule, Team
)
from sentry.plugins.sentry_mail.activity import emails
from sentry.utils.dates import to_datetime, to_timestamp
from sentry.utils.email import inline_css
from sentry.utils.http import absolute_uri
from sentry.utils.samples import load_data
from sentry.web.decorators import login_required
from sentry.web.helpers import render_to_response, render_to_string
logger = logging.getLogger(__name__)
def get_random(request):
seed = request.GET.get('seed', six.text_type(time.time()))
return Random(seed)
def make_message(random, length=None):
if length is None:
length = int(random.weibullvariate(8, 3))
return ' '.join(random.choice(WORDS) for _ in range(length))
def make_culprit(random):
def make_module_path_components(min, max):
for _ in range(random.randint(min, max)):
yield ''.join(random.sample(WORDS, random.randint(1, int(random.paretovariate(2.2)))))
return '{module} in {function}'.format(
module='.'.join(make_module_path_components(1, 4)),
function=random.choice(WORDS)
)
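# make_culprit() yields strings shaped like "<dotted.module.path> in <function>",
# e.g. (illustrative only): "voluptatem.adipisci.quiquia in dolorem"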
def make_group_metadata(random, group):
return {
'type': 'error',
'metadata': {
'type': '{}Error'.format(
''.join(word.title() for word in random.sample(WORDS, random.randint(1, 3))),
),
'value': make_message(random),
}
}
def make_group_generator(random, project):
epoch = to_timestamp(datetime(2016, 6, 1, 0, 0, 0, tzinfo=timezone.utc))
for id in itertools.count(1):
first_seen = epoch + random.randint(0, 60 * 60 * 24 * 30)
last_seen = random.randint(first_seen, first_seen + (60 * 60 * 24 * 30))
group = Group(
id=id,
project=project,
culprit=make_culprit(random),
level=random.choice(LOG_LEVELS.keys()),
message=make_message(random),
first_seen=to_datetime(first_seen),
last_seen=to_datetime(last_seen),
status=random.choice((
GroupStatus.UNRESOLVED,
GroupStatus.RESOLVED,
)),
)
if random.random() < 0.8:
group.data = make_group_metadata(random, group)
yield group
def add_unsubscribe_link(context):
if 'unsubscribe_link' not in context:
context['unsubscribe_link'] = 'javascript:alert("This is a preview page, what did you expect to happen?");'
# TODO(dcramer): use https://github.com/disqus/django-mailviews
class MailPreview(object):
def __init__(self, html_template, text_template, context=None, subject=None):
self.html_template = html_template
self.text_template = text_template
self.subject = subject
self.context = context if context is not None else {}
add_unsubscribe_link(self.context)
def text_body(self):
return render_to_string(self.text_template, self.context)
def html_body(self):
try:
return inline_css(render_to_string(self.html_template, self.context))
except Exception:
traceback.print_exc()
raise
def render(self, request):
return render_to_response('sentry/debug/mail/preview.html', {
'preview': self,
'format': request.GET.get('format'),
})
class ActivityMailPreview(object):
def __init__(self, request, activity):
self.request = request
self.email = emails.get(activity.type)(activity)
def get_context(self):
context = self.email.get_base_context()
context['reason'] = get_random(self.request).choice(
GroupSubscriptionReason.descriptions.values()
)
context.update(self.email.get_context())
add_unsubscribe_link(context)
return context
def text_body(self):
return render_to_string(self.email.get_template(), self.get_context())
def html_body(self):
try:
return inline_css(render_to_string(
self.email.get_html_template(), self.get_context()))
except Exception:
import traceback
traceback.print_exc()
raise
class ActivityMailDebugView(View):
def get(self, request):
org = Organization(
id=1,
slug='organization',
name='My Company',
)
team = Team(
id=1,
slug='team',
name='My Team',
organization=org,
)
project = Project(
id=1,
organization=org,
team=team,
slug='project',
name='My Project',
)
group = next(
make_group_generator(
get_random(request),
project,
),
)
event = Event(
id=1,
project=project,
group=group,
message=group.message,
data=load_data('python'),
datetime=datetime(2016, 6, 13, 3, 8, 24, tzinfo=timezone.utc),
)
activity = Activity(
group=event.group, project=event.project,
**self.get_activity(request, event)
)
return render_to_response('sentry/debug/mail/preview.html', {
'preview': ActivityMailPreview(request, activity),
'format': request.GET.get('format'),
})
@login_required
def alert(request):
platform = request.GET.get('platform', 'python')
org = Organization(
id=1,
slug='example',
name='Example',
)
team = Team(
id=1,
slug='example',
name='Example',
organization=org,
)
project = Project(
id=1,
slug='example',
name='Example',
team=team,
organization=org,
)
random = get_random(request)
group = next(
make_group_generator(random, project),
)
event = Event(
id=1,
project=project,
group=group,
message=group.message,
data=load_data(platform),
datetime=to_datetime(
random.randint(
to_timestamp(group.first_seen),
to_timestamp(group.last_seen),
),
),
)
rule = Rule(label="An example rule")
interface_list = []
for interface in six.itervalues(event.interfaces):
body = interface.to_email_html(event)
if not body:
continue
interface_list.append((interface.get_title(), mark_safe(body)))
return MailPreview(
html_template='sentry/emails/error.html',
text_template='sentry/emails/error.txt',
context={
'rule': rule,
'group': group,
'event': event,
'link': 'http://example.com/link',
'interfaces': interface_list,
'project_label': project.name,
'tags': [
('logger', 'javascript'),
('environment', 'prod'),
('level', 'error'),
('device', 'Other')
]
},
).render(request)
@login_required
def digest(request):
random = get_random(request)
# TODO: Refactor all of these into something more manageable.
org = Organization(
id=1,
slug='example',
name='Example Organization',
)
team = Team(
id=1,
slug='example',
name='Example Team',
organization=org,
)
project = Project(
id=1,
slug='example',
name='Example Project',
team=team,
organization=org,
)
rules = {i: Rule(
id=i,
project=project,
label="Rule #%s" % (i,),
) for i in range(1, random.randint(2, 4))}
state = {
'project': project,
'groups': {},
'rules': rules,
'event_counts': {},
'user_counts': {},
}
records = []
event_sequence = itertools.count(1)
group_generator = make_group_generator(random, project)
for i in range(random.randint(1, 30)):
group = next(group_generator)
state['groups'][group.id] = group
offset = timedelta(seconds=0)
for i in range(random.randint(1, 10)):
offset += timedelta(seconds=random.random() * 120)
event = Event(
id=next(event_sequence),
event_id=uuid.uuid4().hex,
project=project,
group=group,
message=group.message,
data=load_data('python'),
datetime=to_datetime(
random.randint(
to_timestamp(group.first_seen),
to_timestamp(group.last_seen),
),
)
)
records.append(
Record(
event.event_id,
Notification(
event,
random.sample(state['rules'], random.randint(1, len(state['rules']))),
),
to_timestamp(event.datetime),
)
)
state['event_counts'][group.id] = random.randint(10, 1e4)
state['user_counts'][group.id] = random.randint(10, 1e4)
digest = build_digest(project, records, state)
start, end, counts = get_digest_metadata(digest)
context = {
'project': project,
'counts': counts,
'digest': digest,
'start': start,
'end': end,
}
add_unsubscribe_link(context)
return MailPreview(
html_template='sentry/emails/digests/body.html',
text_template='sentry/emails/digests/body.txt',
context=context,
).render(request)
@login_required
def report(request):
from sentry.tasks import reports
random = get_random(request)
duration = 60 * 60 * 24 * 7
timestamp = to_timestamp(
reports.floor_to_utc_day(
to_datetime(
random.randint(
to_timestamp(datetime(2015, 6, 1, 0, 0, 0, tzinfo=timezone.utc)),
to_timestamp(datetime(2016, 7, 1, 0, 0, 0, tzinfo=timezone.utc)),
)
)
)
)
start, stop = interval = reports._to_interval(timestamp, duration)
organization = Organization(
id=1,
slug='example',
name='Example',
)
team = Team(
id=1,
slug='example',
name='Example',
organization=organization,
)
projects = []
for i in xrange(0, random.randint(1, 8)):
name = ' '.join(
random.sample(
WORDS,
random.randint(1, 4)
)
)
projects.append(
Project(
id=i,
organization=organization,
team=team,
slug=slugify(name),
name=name,
date_added=start - timedelta(days=random.randint(0, 120)),
)
)
def make_release_generator():
id_sequence = itertools.count(1)
while True:
dt = to_datetime(
random.randint(
timestamp - (30 * 24 * 60 * 60),
timestamp,
),
)
p = random.choice(projects)
yield Release(
id=next(id_sequence),
project=p,
organization_id=p.organization_id,
version=''.join([
random.choice('0123456789abcdef') for _ in range(40)
]),
date_added=dt,
date_started=dt,
)
def build_issue_summaries():
summaries = []
for i in range(3):
summaries.append(
int(random.weibullvariate(10, 1) * random.paretovariate(0.5))
)
return summaries
def build_usage_summary():
return (
int(random.weibullvariate(3, 1) * random.paretovariate(0.2)),
int(random.weibullvariate(5, 1) * random.paretovariate(0.2)),
)
def build_calendar_data(project):
start, stop = reports.get_calendar_query_range(interval, 3)
rollup = 60 * 60 * 24
series = []
weekend = frozenset((5, 6))
value = int(random.weibullvariate(5000, 3))
for timestamp in tsdb.get_optimal_rollup_series(start, stop, rollup)[1]:
damping = random.uniform(0.2, 0.6) if to_datetime(timestamp).weekday in weekend else 1
jitter = random.paretovariate(1.2)
series.append((timestamp, int(value * damping * jitter)))
value = value * random.uniform(0.25, 2)
return reports.clean_calendar_data(
project,
series,
start,
stop,
rollup,
stop
)
def build_report(project):
daily_maximum = random.randint(1000, 10000)
rollup = 60 * 60 * 24
series = [(
timestamp + (i * rollup),
(random.randint(0, daily_maximum), random.randint(0, daily_maximum))
) for i in xrange(0, 7)]
aggregates = [
random.randint(0, daily_maximum * 7) if random.random() < 0.9 else None for _ in xrange(0, 4)
]
return reports.Report(
series,
aggregates,
build_issue_summaries(),
build_usage_summary(),
build_calendar_data(project),
)
if random.random() < 0.85:
personal = {
'resolved': random.randint(0, 100),
'users': int(random.paretovariate(0.2)),
}
else:
personal = {
'resolved': 0,
'users': 0,
}
return MailPreview(
html_template='sentry/emails/reports/body.html',
text_template='sentry/emails/reports/body.txt',
context={
'duration': reports.durations[duration],
'interval': {
'start': reports.date_format(start),
'stop': reports.date_format(stop),
},
'report': reports.to_context(
organization,
interval,
{project: build_report(project) for project in projects}
),
'organization': organization,
'personal': personal,
'user': request.user,
},
).render(request)
@login_required
def request_access(request):
org = Organization(
id=1,
slug='example',
name='Example',
)
team = Team(
id=1,
slug='example',
name='Example',
organization=org,
)
return MailPreview(
html_template='sentry/emails/request-team-access.html',
text_template='sentry/emails/request-team-access.txt',
context={
'email': '[email protected]',
'name': 'George Bush',
'organization': org,
'team': team,
'url': absolute_uri(reverse('sentry-organization-members', kwargs={
'organization_slug': org.slug,
}) + '?ref=access-requests'),
},
).render(request)
@login_required
def invitation(request):
org = Organization(
id=1,
slug='example',
name='Example',
)
om = OrganizationMember(
id=1,
email='[email protected]',
organization=org,
)
return MailPreview(
html_template='sentry/emails/member-invite.html',
text_template='sentry/emails/member-invite.txt',
context={
'email': '[email protected]',
'organization': org,
'url': absolute_uri(reverse('sentry-accept-invite', kwargs={
'member_id': om.id,
'token': om.token,
})),
},
).render(request)
@login_required
def access_approved(request):
org = Organization(
id=1,
slug='example',
name='Example',
)
team = Team(
id=1,
slug='example',
name='Example',
organization=org,
)
return MailPreview(
html_template='sentry/emails/access-approved.html',
text_template='sentry/emails/access-approved.txt',
context={
'email': '[email protected]',
'name': 'George Bush',
'organization': org,
'team': team,
},
).render(request)
@login_required
def confirm_email(request):
email = request.user.emails.first()
email.set_hash()
email.save()
return MailPreview(
html_template='sentry/emails/confirm_email.html',
text_template='sentry/emails/confirm_email.txt',
context={
'confirm_email': '[email protected]',
'user': request.user,
'url': absolute_uri(reverse(
'sentry-account-confirm-email',
args=[request.user.id, email.validation_hash]
)),
'is_new_user': True,
},
).render(request)
@login_required
def recover_account(request):
return MailPreview(
html_template='sentry/emails/recover_account.html',
text_template='sentry/emails/recover_account.txt',
context={
'user': request.user,
'url': absolute_uri(reverse(
'sentry-account-confirm-email',
args=[request.user.id, 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX']
)),
'domain': get_server_hostname(),
},
).render(request)
@login_required
def org_delete_confirm(request):
from sentry.models import AuditLogEntry
org = Organization.get_default()
entry = AuditLogEntry(
organization=org,
actor=request.user,
ip_address=request.META['REMOTE_ADDR'],
)
return MailPreview(
html_template='sentry/emails/org_delete_confirm.html',
text_template='sentry/emails/org_delete_confirm.txt',
context={
'organization': org,
'audit_log_entry': entry,
'eta': timezone.now() + timedelta(days=1),
'url': absolute_uri(reverse(
'sentry-restore-organization',
args=[org.slug],
)),
},
).render(request)
| bsd-3-clause | -3,899,513,468,241,302,000 | 27.298529 | 115 | 0.543938 | false |
sabajt/Dinos-In-Space | endMessage.py | 1 | 25058 | """
EndMessage.py
message / menu that appears upon completion or failure of puzzle
EndMessage.win is where puzzle profile data is modified after completion
"""
import pygame
import dinosInSpace
import static56
import infoGraphic56
import tween
import soundFx56
import dino56
import dataStorage56
import snack
import random
import dinostein
import sparkleTrail
import spriteBasic
OFFSCREEN = (-1000,-1000)
ALPHA = 200
BLACK = (0,0,0)
YELLOW = (255,255, 0)
BLUE = (0,0,255)
GREY = (150,150,150)
WHITE = (255,255,255)
COL_BTEXT = BLACK
COL_MTEXT = WHITE
SIZE_BTEXT = 15
SIZE_MTEXT = 15
BTTN_HEIGHT = 50
BTTN_XBUF = 12
BTTN_YBUF = 8
BTTN_MIDJUST = 3
MSG_TEXT_YREL1 = -65
MSG_TEXT_YREL2 = -30
MSG_TEXT_YREL3 = 5
MSG_TWEENSTART = (1100,300)
MSG_TWEENEND = (400,300)
MSG_TWEENSPD = 60 #45
MSG_TWEENMODE = "EXP"
MSG_TWEENDCLVAL = 0.80 #0.55
MSG_TWEENDCLLEM = 4 #3
SPIRAL_DIRECTION = -1
SPIRAL_ROTATESTEP = 6
SPIRAL_SCALE_STEP = -10
SPIRAL_TERMINATEAFTER = 20
SPARK_SIZE = (6,6)
SPARK_COLOR = BLUE
SPARK_BOUNDS = (20,20)
SPARK_FREQUENCY = 1
SPARK_FADESPEED = 10
LAST_TUTORIAL = "tut7" # used in win to check if last %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% LAST_TUTORIAL %%%%%%%%%%%%%%%%%%%%%%%%%%
def getExlaim():
wordList = [
"Yowza!",
"Check it yo!",
"Wowzies!",
"Yowzies!",
"Look!",
"Jeepers!",
"OoOoOoOo!",
]
return wordList[random.randint(0, len(wordList) - 1)]
class ImgLib(object):
""" image library to load and access local images """
imgDict = None
def __init__(self):
if not ImgLib.imgDict:
_talkbox = dinosInSpace.loadImage("talkBoxBlack.png", "2X", (0,0), ALPHA)
_buttonSize = (_talkbox.get_width()/2 - BTTN_XBUF, BTTN_HEIGHT)
ImgLib.imgDict = {
"CURSORSTD" : dinosInSpace.loadImage("controlCursor.png", "2X", (21,21)),
# "BTTN_0" : dinosInSpace.loadImage("button0.png", "2X", (0,0)),
# "BTTN_1" : dinosInSpace.loadImage("button1.png", "2X", (0,0)),
##"TALKBOX" : dinosInSpace.loadImage("talkBoxBlack.png", "2X", None, ALPHA),
"TALKBOX" : dinosInSpace.loadImage("talkBoxBlack.png", "2X", (0,0), ALPHA),
"BTTN_0" : pygame.Surface(_buttonSize),
"BTTN_1" : pygame.Surface(_buttonSize)
}
ImgLib.imgDict["BTTN_0"].fill(GREY)
ImgLib.imgDict["BTTN_1"].fill(WHITE)
@staticmethod
def getImage(name):
if name in ImgLib.imgDict:
return ImgLib.imgDict[name].copy()
else:
print "image, " + name + " not found"
def initImgLib():
ImgLib()
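# Typical use (illustrative): call initImgLib() once pygame's display is ready,
# then request cached copies of surfaces, e.g. frame = ImgLib.getImage("TALKBOX").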
class BonusDelegate(object):
"""
a simple object to hold snax collected or alt exits taken during puzzle
- data stored here and retrieved by EndMessage if player wins
"""
snax = []
@staticmethod
def wipe():
BonusDelegate.snax = []
@staticmethod
def quickReset():
BonusDelegate.wipe()
class EndMessage(tween.TweenMenu):
"""
message displayed upon failing or completing puzzle
- message is subclass of tween sprite and message frame is image with rect
    - creates and controls features determined by puzzle outcome
"""
me = None
def __init__(self, stateObj, mustSave, profileName, curPuzzle, _fps):
tween.TweenLeader.__init__(self)
EndMessage.me = self
self.IMG = ImgLib.getImage # shortcut to img lib
self.minRatio = [3,1] # quick fix image ratio must be hardcoded
self.game = stateObj
self.screen = self.game.getScreen()
self.image = self.IMG("TALKBOX")
self.original = self.image.copy()
self.spiralSnap = None #image to be fed into sprialOut
self.rect = self.image.get_rect()
self.rect.center = OFFSCREEN
self.centerScreen = (self.screen.get_width()/2, self.screen.get_height()/2)
self.mustSave = mustSave
self.end = False
self.endMessageGroup = pygame.sprite.OrderedUpdates()
self.profileName = profileName
self.curPuzzle = curPuzzle
self.currentDinostein = None
self.currentSteinFrame = None
self._fps = _fps
self.isGoingOut = False
self.isGoingOutFrameCount = 0
self.terminate = False
self.isActive = False # activate after screenshot for state change
self.firstCycle = True # state change bug
self.speed = MSG_TWEENSPD # 45
self.dclval = MSG_TWEENDCLVAL # 0.55
self.dcllim = MSG_TWEENDCLLEM # 3
if self._fps == 30:
self.speed *= 2
self.dclval = .60
self.dcllim = 2
self.endMessageGroup.add(self)
def update(self):
self.checkEndCondition()
if self.isActive:
if not self.firstCycle:
tween.TweenMenu.update(self)
if self.isGoingOut:
self.isGoingOutFrameCount += 1
rotateStep = SPIRAL_ROTATESTEP
scaleStep = SPIRAL_SCALE_STEP
termAfter = SPIRAL_TERMINATEAFTER
if self._fps == 30:
rotateStep *= 2
scaleStep *= 2
termAfter /= 2
#spr, directionAsInt, rotateStep, scaleStep, terminateAfter, frameCount, minRatio, ORIGINAL
self.terminate = spiralOut(
self,
SPIRAL_DIRECTION,
rotateStep,
scaleStep,
termAfter,
self.isGoingOutFrameCount,
self.minRatio,
self.spiralSnap
)
else:
self.firstCycle = False
def updateEnding(self):
""" for state change identification purpose """
pass
@staticmethod
def wipe():
EndMessage.me = None
def checkEndCondition(self):
if not self.end:
if static56.Goal.getSaved() >= self.mustSave:
self.win()
if dino56.Dino.getLost() > 0: ## instead return coord and pic?
self.lose()
def generateMessage(self, didWin, data=None, newSnax=None):
line1 = ""
line2 = ""
line3 = ""
if not didWin:
line1 = ""
line2 = "Dino Down, try again!"
line3 = ""
elif data == "_user":
line1 = ""
line2 = "Good work, you got all dinos to a station"
elif data == "TUTORIAL":
line1 = "Good job"
else:
assert(data)
if not newSnax:
line1 = "Good work, you got all dinos to a Station."
else:
line1 = "you got all dinos to a station and " + str(len(newSnax)) + " new snax!"
snax = data[4]
snaxLeft = 0
itLooks_theyLook = "it looks"
theres_thereAre = "there's"
if snax:
for s in snax:
if s == 0:
snaxComp = False
snaxLeft += 1
if snaxLeft > 1:
theres_thereAre = "there are"
itLooks_theyLook = "they look"
if snaxLeft:
line2 = "It appears " + theres_thereAre + " " + str(snaxLeft) + " snax still in the area..."
line3 = "and " + itLooks_theyLook + " REALLY tasty!"
else:
line2 = ""
return line1, line2, line3
# # puzzle name : [file name, locked, complete, difficulty, snacks collected, secret exit found]
# #
# # -0 (string) _file name_ : passed as 'dest' to map selector (level)
    # # -1 (bool) _locked_ : controls player access / preview
# # -2 (bool) _complete_ : displays if complete, adds to global profile completed count
# # -3 (int) _difficulty_ : displays difficulty level
# # -4 (list) _snacks_ : displays how many snacks collected as fraction, pass 'None' if n/a
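    # #
    # # Hypothetical entry for illustration only (values are made up):
    # #   "gateway" : ["gateway", False, True, 3, [1, 0, 1]]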
@staticmethod
def checkButtonPressed():
dest = None
for s in EndMessage.me.endMessageGroup:
if s.__class__ == EndMessageButton:
dest = s.requestDest()
if dest:
break
return dest
def bind(self, followers):
for f in followers:
self.endMessageGroup.add(f)
self.addFollower(f)
def lose(self):
endDinoImage, endDinoCenter = dino56.Dino.getLastDinoImageAndCenter()
## soundFx56.SoundPlayer.requestSound("lose")
line1, line2, line3 = self.generateMessage(False)
# init features from data and register for access
if self.curPuzzle[:3] != "tut" or self.profileName == "_user":
self.retryButton = EndMessageButton(
(-self.rect.width/4 + BTTN_MIDJUST, self.rect.height/2 - self.IMG("BTTN_0").get_height()/2 - BTTN_YBUF),
self.IMG("BTTN_0"),
self.IMG("BTTN_1"),
"Retry (any key)",
SIZE_BTEXT,
COL_BTEXT,
"QR"
)
self.exitButton = EndMessageButton(
(self.rect.width/4 - BTTN_MIDJUST, self.rect.height/2 - self.IMG("BTTN_0").get_height()/2 - BTTN_YBUF),
self.IMG("BTTN_0"),
self.IMG("BTTN_1"),
"choose another puzzle",
SIZE_BTEXT,
COL_BTEXT,
"EXIT"
)
else:
self.retryButton = EndMessageButton(
(0, self.rect.height/2 - self.IMG("BTTN_0").get_height()/2 - BTTN_YBUF),
self.IMG("BTTN_0"),
self.IMG("BTTN_1"),
"Retry",
SIZE_BTEXT,
COL_BTEXT,
"QR"
)
##(170,30), self.IMG("BTTN_0"), self.IMG("BTTN_1"), "Leave Area", SIZE_BTEXT, COL_BTEXT, "EXIT")
self.text1 = EndMessageText((0, MSG_TEXT_YREL1), line1, SIZE_MTEXT, COL_MTEXT)
self.text2 = EndMessageText((0, MSG_TEXT_YREL2), line2, SIZE_MTEXT, COL_MTEXT)
self.text3 = EndMessageText((0, MSG_TEXT_YREL3), line3, SIZE_MTEXT, COL_MTEXT)
# dinostein ***
self.currentDinostein = dinostein.Dinostein(self._fps)
self.currentSteinFrame = dinostein.Frame(self._fps)
self.currentDinostein.addFollower(self.currentSteinFrame)
# *************
cursor = EndMessageCursor([self.IMG("CURSORSTD")]) # true for withTrail - winning condition only
if self.curPuzzle[:3] != "tut" or self.profileName == "_user":
self.bind([self.retryButton, self.exitButton, self.text1, self.text2, self.text3])
else:
self.bind([self.retryButton, self.text1, self.text2, self.text3])
self.endMessageGroup.add(self.currentSteinFrame)
self.endMessageGroup.add(self.currentDinostein)
self.endMessageGroup.add(cursor)
#self.setTween((1000,300), (400,300), 35, "EXP", 0.5, 3) # sp, ep, speed, dclMode, dclVal, dclLim
self.setTween(MSG_TWEENSTART, MSG_TWEENEND, self.speed, MSG_TWEENMODE, self.dclval, self.dcllim)
self.currentDinostein.setAndStartTween()
self.startTween()
self.end = True
self.game.setLastDinoDown(endDinoImage, endDinoCenter)
self.game.setIsEnding()
def win(self):
soundFx56.SoundPlayer.requestSound("win")
snax = BonusDelegate.snax
if self.profileName != "_user":
### case for tuts
if self.curPuzzle[:3] == "tut":
self.game.wonTutorialStage = True # tell game instance so level56 can access for returning next stage
if self.curPuzzle == LAST_TUTORIAL:
dataStorage56.modProfile(self.profileName, "tutorial", True)
puzzleData = "TUTORIAL"
else:
dataStorage56.modProfile(self.profileName, self.curPuzzle, True, snax) # modify file & and add snack to archive
puzzleData = dataStorage56.getPuzzleData(self.profileName, self.curPuzzle)
if self.curPuzzle == "gateway":
self.game.wonLastStage = True # tell game instance so level56 can access for returning flag for ending scene
else:
puzzleData = "_user"
dataStorage56.logUserMapsComplete(self.curPuzzle)
if snax:
for s in snax:
s.unregister()
line1, line2, line3 = self.generateMessage(True, puzzleData, snax)
if self.curPuzzle[:3] == "tut" and self.profileName != "_user":
# self.retryButton = EndMessageButton((-170,30), self.IMG("BTTN_0"), self.IMG("BTTN_1"), "next", SIZE_BTEXT, COL_BTEXT, "NEXT")
self.retryButton = EndMessageButton(
(0, self.rect.height/2 - self.IMG("BTTN_0").get_height()/2 - BTTN_YBUF),
self.IMG("BTTN_0"),
self.IMG("BTTN_1"),
">",
SIZE_BTEXT,
COL_BTEXT,
"NEXT"
)
line1 = ""; line2 = "Good work, lets move on"; line3 = ""
if self.curPuzzle == LAST_TUTORIAL:
line1 = "Alright, that's it for the training,"
line2 = "you're ready for the real puzzles!"
elif self.curPuzzle == "gateway" and self.profileName != "_user":
self.exitButton = EndMessageButton(
(0, self.rect.height/2 - self.IMG("BTTN_0").get_height()/2 - BTTN_YBUF),
self.IMG("BTTN_0"),
self.IMG("BTTN_1"),
"!",
SIZE_BTEXT,
COL_BTEXT,
"EXIT"
)
line1 = "i'm so happy i could cry..."
line2 = "excellent work, you got "
line3 = "all dinos to a station!"
else:
self.retryButton = EndMessageButton(
(-self.rect.width/4 + BTTN_MIDJUST, self.rect.height/2 - self.IMG("BTTN_0").get_height()/2 - BTTN_YBUF),
self.IMG("BTTN_0"),
self.IMG("BTTN_1"),
"Retry (any key)",
SIZE_BTEXT,
COL_BTEXT,
"QR"
)
self.exitButton = EndMessageButton(
(self.rect.width/4 - BTTN_MIDJUST, self.rect.height/2 - self.IMG("BTTN_0").get_height()/2 - BTTN_YBUF),
self.IMG("BTTN_0"),
self.IMG("BTTN_1"),
"choose another puzzle",
SIZE_BTEXT,
COL_BTEXT,
"EXIT"
)
# self.retryButton = EndMessageButton((-170,30), self.IMG("BTTN_0"), self.IMG("BTTN_1"), "Quick Reset", SIZE_BTEXT, COL_BTEXT, "QR")
# self.exitButton = EndMessageButton((170,30), self.IMG("BTTN_0"), self.IMG("BTTN_1"), "Leave Area", SIZE_BTEXT, COL_BTEXT, "EXIT")
self.text1 = EndMessageText((0, MSG_TEXT_YREL1), line1, SIZE_MTEXT, COL_MTEXT)
self.text2 = EndMessageText((0, MSG_TEXT_YREL2), line2, SIZE_MTEXT, COL_MTEXT)
self.text3 = EndMessageText((0, MSG_TEXT_YREL3), line3, SIZE_MTEXT, COL_MTEXT)
cursor = EndMessageCursor([self.IMG("CURSORSTD")], True)
# dinostein ***
self.currentDinostein = dinostein.Dinostein(self._fps)
self.currentSteinFrame = dinostein.Frame(self._fps)
self.currentDinostein.addFollower(self.currentSteinFrame)
# *************
if self.curPuzzle[:3] == "tut" and self.profileName != "_user":
self.bind([self.retryButton, self.text1, self.text2, self.text3])
elif self.curPuzzle == "gateway" and self.profileName != "_user":
self.bind([self.exitButton, self.text1, self.text2, self.text3])
else:
self.bind([self.retryButton, self.exitButton, self.text1, self.text2, self.text3])
self.endMessageGroup.add(self.currentSteinFrame)
self.endMessageGroup.add(self.currentDinostein)
self.endMessageGroup.add(cursor)
self.setTween(MSG_TWEENSTART, MSG_TWEENEND, self.speed, MSG_TWEENMODE, self.dclval, self.dcllim)
self.currentDinostein.setAndStartTween()
self.startTween()
self.end = True
self.game.setIsEnding()
def blitMinions(self):
topleft = self.rect.topleft
bbtopleft = self.retryButton.rect.topleft
if self.curPuzzle[:3] != "tut" or self.profileName == "_user":
fbtopleft = self.exitButton.rect.topleft
m1topleft = self.text1.rect.topleft
m2topleft = self.text2.rect.topleft
m3topleft = self.text3.rect.topleft
bbBlitX = bbtopleft[0] - topleft[0]
bbBlitY = bbtopleft[1] - topleft[1]
if self.curPuzzle[:3] != "tut" or self.profileName == "_user":
fbBlitX = fbtopleft[0] - topleft[0]
fbBlitY = fbtopleft[1] - topleft[1]
m1BlitX = m1topleft[0] - topleft[0]
m1BlitY = m1topleft[1] - topleft[1]
m2BlitX = m2topleft[0] - topleft[0]
m2BlitY = m2topleft[1] - topleft[1]
m3BlitX = m3topleft[0] - topleft[0]
m3BlitY = m3topleft[1] - topleft[1]
self.spiralSnap = self.original.copy()
self.spiralSnap.blit(self.retryButton.image, (bbBlitX, bbBlitY))
if self.curPuzzle[:3] != "tut" or self.profileName == "_user":
self.spiralSnap.blit(self.exitButton.image, (fbBlitX, fbBlitY))
self.spiralSnap.blit(self.text1.image, (m1BlitX, m1BlitY))
self.spiralSnap.blit(self.text2.image, (m2BlitX, m2BlitY))
self.spiralSnap.blit(self.text3.image, (m3BlitX, m3BlitY))
self.hideRealMinions()
def hideRealMinions(self):
self.retryButton.rect.center = (2000,2000)
if self.curPuzzle[:3] != "tut" or self.profileName == "_user":
self.exitButton.rect.center = (2000,2000)
self.text1.rect.center = (2000,2000)
self.text2.rect.center = (2000,2000)
self.text3.rect.center = (2000,2000)
@staticmethod
def setIsGoingOut(isGoingOut):
if isGoingOut and not EndMessage.me.isGoingOut:
soundFx56.SoundPlayer.requestSound("woosh_a")
EndMessage.me.isGoingOutFrameCount = 0
EndMessage.me.isGoingOut = isGoingOut
EndMessage.me.blitMinions()
# dinostein
if EndMessage.me.currentDinostein:
EndMessage.me.currentDinostein.kill()
EndMessage.me.currentSteinFrame.closing = isGoingOut
@staticmethod
def quickReset():
if EndMessage.me.currentDinostein:
EndMessage.me.currentDinostein.kill()
if EndMessage.me.currentSteinFrame:
EndMessage.me.currentSteinFrame.kill()
if EndMessageCursor.me:
EndMessageCursor.me.kill()
EndMessageCursor.wipe()
EndMessage.me.reset()
EndMessage.me.image = EndMessage.me.original
EndMessage.me.rect = EndMessage.me.image.get_rect()
EndMessage.me.rect.center = OFFSCREEN
EndMessage.me.end = False
EndMessage.me.isGoingOut = False
EndMessage.me.isGoingOutFrameCount = 0
EndMessage.me.terminate = False
class EndMessageText(tween.TweenFollower):
def __init__(self, relPos, text, fontSize, fontColor):
tween.TweenFollower.__init__(self, relPos)
self.image = infoGraphic56.TextObject(text, fontSize, fontColor).image
self.rect = self.image.get_rect()
def updateEnding(self):
pass
class EndMessageButton(tween.TweenFollower):
"""
Button that belongs to EndMessage
- is a TweenFollower *** don't override moveFollower()
- returns dest when clicked
- dest can be restart, quick restart, continue (study solution), back (select screen)
"""
def __init__(self, relPos, imageOff, imageOver, text, textSize, textColor, dest):
tween.TweenFollower.__init__(self, relPos)
self.imageOff = self.makeButton(imageOff, text, textSize, textColor)
self.imageOver = self.makeButton(imageOver, text, textSize, textColor)
self.image = self.imageOff
self.rect = self.image.get_rect()
self.dest = dest
self.mouseOver = False
def update(self):
tween.TweenFollower.update(self)
self.checkCursorOver()
def updateEnding(self):
pass
def makeButton(self, image, text, textSize, textColor):
textSurf = infoGraphic56.TextObject(text, textSize, textColor).image
xBlit = (image.get_width() - textSurf.get_width())/2
yBlit = (image.get_height() - textSurf.get_height())/2
image.blit(textSurf, (xBlit, yBlit))
return image
def checkCursorOver(self):
""" if cursor over button set respective image and mouseOver """
if pygame.sprite.collide_rect(self, EndMessageCursor.me):
self.image = self.imageOver
self.mouseOver = True
else:
self.image = self.imageOff
self.mouseOver = False
def requestDest(self):
""" returns dest if mouseOver """
dest = None
if self.mouseOver:
dest = self.dest
return dest
class EndMessageCursor(pygame.sprite.Sprite):
""" cursor used during end message """
me = None
def __init__(self, frames, withTrail=False):
pygame.sprite.Sprite.__init__(self)
if len(frames) > 1:
self.hasFrames = True
self.setFrames(frames)
self.currentFrame = 0
else:
self.hasFrames = False
self.image = frames[0]
self.currentFrame = None
self.rect = pygame.rect.Rect((0,0,1,1))
self.rect.center = (-2000,2000)
self.isActive = False
self.firstCycle = True
# create sparkle trail #
########################
self.withTrail = withTrail
self.trail = None
if self.withTrail:
self.image.set_alpha(0, pygame.RLEACCEL)
self.trail = sparkleTrail.SparkleTrail(SPARK_SIZE, SPARK_COLOR, SPARK_BOUNDS, SPARK_FREQUENCY, SPARK_FADESPEED, self)
########################
########################
EndMessageCursor.me = self
def update(self):
if self.isActive:
if not self.firstCycle:
self.rect.center = pygame.mouse.get_pos()
if self.hasFrames:
self.stepFrames()
else:
self.firstCycle = False
def updateEnding(self):
pass
def getTrailGroup(self):
return self.trail.myGroup
def stepFrames(self):
self.image = self.frames[self.currentFrame]
self.currentFrame += 1
if self.currentFrame >= len(self.frames):
self.currentFrame = 0
def setFrames(self, frames):
self.frames = frames
@staticmethod
def wipe():
EndMessageCursor.me = None
# quick fix: copied from gfx56 because of circular import
def spiralOut(spr, directionAsInt, rotateStep, scaleStep, terminateAfter, frameCount, minRatio, ORIGINAL):
""" update callback for a sprite to 'spiral out' of view in place using a set image
returns False while the spiral hasn't terminated, True once it has
- directionAsInt -> rotate direction: -1 for right, 1 for left
- rotateStep -> degrees to rotate every frame
- scaleStep -> pixels to scale every frame (takes positive or negative)
- terminateAfter -> the spiral terminates (returns True) after this many frames
- frameCount -> an integer frame counter kept by the calling environment: should increase by 1 every call
- minRatio -> per-axis (width, height) multipliers applied to scaleStep
- ORIGINAL -> should be a constant of the pre-rotated image
"""
terminate = True
if frameCount <= terminateAfter:
center = spr.rect.center
newImg = pygame.transform.scale(
ORIGINAL,
(ORIGINAL.get_width() + scaleStep*minRatio[0]*frameCount, ORIGINAL.get_height() + scaleStep*minRatio[1]*frameCount)
)
spr.image = pygame.transform.rotate(newImg, directionAsInt*rotateStep*frameCount)
spr.rect = spr.image.get_rect()
spr.rect.center = center
terminate = False
return terminate
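# --- Illustrative usage sketch (added for clarity; the names below are assumptions, not part of the game code) ---
# A sprite's per-frame update could drive spiralOut() roughly like this, keeping its own
# frame counter (spr.spiralFrameCount) and a copy of the pre-rotated image (spr.ORIGINAL):
#
# def exampleSpiralUpdate(spr):
#     done = spiralOut(spr, directionAsInt=1, rotateStep=12, scaleStep=-4,
#                      terminateAfter=30, frameCount=spr.spiralFrameCount,
#                      minRatio=(1, 1), ORIGINAL=spr.ORIGINAL)
#     spr.spiralFrameCount += 1  # the caller is responsible for incrementing the counter
#     if done:
#         spr.kill()  # drop the sprite once the spiral has finished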
def wipe():
EndMessage.wipe()
EndMessageCursor.wipe()
BonusDelegate.wipe()
| mit | -114,837,720,802,108,850 | 33.997207 | 143 | 0.559901 | false |
DMSalesman/Nemris | nemris.py | 1 | 6514 | """Core of the Nemris tool, APK extractor."""
import argparse
import os
# Custom modules necessary for Nemris to work
from modules import apkutils
from modules import configutils
from modules import dirutils
from modules import pkgutils
from modules import utils
##########
# Path of the configuration file and default configuration dict
config_path = os.path.dirname(os.path.abspath(__file__)) + "/nemris_config.pkl"
config = {
"aapt": "",
"nougat": None,
"dir": "",
"substratum": None,
"md5sums": []
}
##########
# Commandline args handling
ap = argparse.ArgumentParser(description = "APK file extractor.")
apps = ap.add_mutually_exclusive_group(required = True)
apps.add_argument("-u", "--user", action = "store_true", help = "user apps")
apps.add_argument("-s", "--system", action = "store_true", help = "system apps")
apps.add_argument("-d", "--disabled", action = "store_true", help = "disabled apps")
apps.add_argument("-a", "--all", action = "store_true", help = "any app")
ap.add_argument("-r", "--reset", action = "store_true", required = False, help = "reset Nemris' configuration")
ap.add_argument("--keep-overlays", action = "store_true", required = False, help = "extract Substratum overlays")
ap.add_argument("--keep-arcus", action = "store_true", required = False, help = "extract theme variants compiled with Arcus")
args = ap.parse_args()
##########
if not args.user:
if not args.all:
if args.keep_overlays or args.keep_arcus:
ap.error("one of the arguments -u/--user -a/--all is required when using --keep-overlays or --keep-arcus")
print("************************")
print(" NEMRIS - APK extractor ")
print(" 2017-09-25 ")
print(" by Death Mask Salesman ")
print("************************")
start_time = utils.get_current_time() # store current time for computing elapsed time
if args.reset:
print("[ I ] Resetting configuration...", end = " ", flush = True)
if configutils.reset_config(config_path):
print("done.\n")
else:
print("done.\n[ W ] The configuration was not present.\n")
else:
if configutils.check_config(config_path):
print("[ I ] Loading configuration...", end = " ", flush = True)
config = configutils.load_config(config_path)
print("done.\n")
# Checks for aapt and aopt (as fallback on Nougat)
if not config.get("aapt"):
print("[ I ] Checking if either aapt or aopt is present...", end = " ", flush = True)
aapt_aopt_exist = utils.check_aapt_aopt()
print("done.\n")
if aapt_aopt_exist[0]:
config["aapt"] = "/system/bin/aapt"
elif aapt_aopt_exist[1]:
config["aapt"] = "/system/bin/aopt"
elif aapt_aopt_exist[2]:
config["aapt"] = "/data/data/com.termux/files/usr/bin/aapt"
else:
print("[ F ] Neither aapt nor aopt is installed. Aborting.")
utils.save_exit(config, config_path, 1)
# Checks if the Android version is Nougat
if config.get("nougat") == None:
print("[ I ] Checking the Android version...", end = " ")
config["nougat"] = utils.check_nougat()
print("done.\n")
# Prompts user to set target dir
if not config.get("dir"):
config["dir"] = dirutils.ask_dir()
print()
(dir_exists, dir_has_apks) = dirutils.check_dir(config.get("dir"))
if not dir_exists:
print("[ I ] Creating \"{0}\"...".format(config.get("dir")), end = " ", flush = True)
dir_exists = dirutils.create_dir(config.get("dir"))
print("done.\n")
if not dir_exists:
print("[ F ] Unable to create \"{0}\". Aborting.".format(config.get("dir")))
utils.save_exit(config, config_path, 1)
# Creates an MD5 list to speed up subsequent executions
if not config.get("md5sums"):
if dir_has_apks:
print("[ I ] Generating MD5 checksums...", end = " ", flush = True)
config["md5sums"] = dirutils.calculate_md5(config.get("dir"))
print("done.\n")
# Creates an optimized APK/path dictionary to avoid the sluggish "pm path"
print("[ I ] Creating paths dictionary...", end = " ", flush = True)
pkgdict = pkgutils.create_pkgdict()
print("done.\n")
if not pkgdict:
print("[ F ] Unable to create paths dictionary. Aborting.")
utils.save_exit(config, config_path, 1)
if config.get("nougat") == True:
pkgs = pkgutils.list_installed_pkgs_nougat(args)
if not pkgs: config["nougat"] = False
if config.get("nougat") == False:
pkgs = pkgutils.list_installed_pkgs(args)
if not args.keep_overlays:
if config.get("substratum") == None:
config["substratum"] = pkgutils.check_substratum(config.get("nougat"))
if config.get("substratum"):
print("[ I ] Excluding Substratum overlays...", end = " ", flush = True)
pkgutils.exclude_overlays(config.get("aapt"), pkgdict, pkgs)
print("done.\n")
if not args.keep_arcus and not config.get("substratum"):
print("[ I ] Excluding Arcus theme variants...", end = " ", flush = True)
pkgutils.exclude_arcus_variants(pkgs)
print("done.\n")
# Extract APKs to the target directory and append MD5 checksums to MD5 list
print("[ I ] Extracting previously unextracted packages...", end = " ", flush = True)
n_extracted = 0
n_ignored = 0
extracted = []
for i in pkgs:
pkgpath = pkgdict.get(i)
(already_extracted, pkgsum) = pkgutils.check_already_extracted(pkgpath, config.get("md5sums"))
if already_extracted:
n_ignored += 1
else:
(out, err) = apkutils.get_pkginfo(config.get("aapt"), pkgpath)
pkginfo = out.decode("utf-8")
pkgname = apkutils.get_pkgname(pkginfo)
pkgver = apkutils.get_pkgver(pkginfo)
dest = "{0}/{1}_{2}.apk".format(config.get("dir"), pkgname, pkgver)
dirutils.extract(pkgpath, dest)
config["md5sums"].append(pkgsum)
extracted.append(pkgname)
n_extracted += 1
print("done.")
elapsed_time = utils.get_current_time() - start_time
extracted.sort()
print("\n[ I ] Operations completed in {0:.0f} hours, {1:.0f} minutes and {2:.0f} seconds.".format(elapsed_time / 60 / 60, elapsed_time / 60 % 60, elapsed_time % 60))
if extracted:
print("\n[ I ] Extracted packages:")
for i in extracted:
print(" - {0}".format(i))
print("\n[ I ] Extracted: {0} | Ignored: {1}".format(n_extracted, n_ignored))
print("[ I ] Goodbye!")
utils.save_exit(config, config_path, 0)
| unlicense | -1,698,508,673,163,878,700 | 30.317308 | 166 | 0.618514 | false |
Chiheb-Nexus/Calculatrice-PyGtk | pango_fonts.py | 1 | 3186 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Calculatrice PyGtk
#
# Copyright 2014 Chiheb Nexus
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
################################################################################
from gi.repository import Gtk,Pango
class PyApp(Gtk.Window):
def __init__(self,widget):
Gtk.Window.__init__(self,title="Choisir un font")
self.set_resizable(False) # Fixed-size window
#self.set_size_request(350, 250)
self.set_border_width(8)
self.set_icon_from_file("images/icon.png")
context = self.create_pango_context()
self.fam = context.list_families()
self.combo = Gtk.ComboBoxText() # A ComboBoxText that holds the fonts
self.size = Gtk.ComboBoxText() # A ComboBoxText that holds the sizes
label_font = Gtk.Label("Veuillez choisir un font")
label_size = Gtk.Label("Veuillez choisir la taille")
label_default = Gtk.Label("Font par défaut : Ubuntu | Taille par défaut : 17")
for ff in self.fam:
self.combo.append_text(ff.get_name()) # Generate the fonts and add them to the ComboBoxText
for ss in range(31) :
self.size.append_text(str(ss)) # Generate the sizes 0 to 30 and add them to the ComboBoxText
button = Gtk.Button("Valider")
button2 = Gtk.Button("Annuler")
button2.connect("clicked",self.annuler)
button.connect("clicked",self.get_status)
vbox = Gtk.VBox()
hbox = Gtk.HBox()
vbox.pack_start(label_font,False,False,0)
vbox.pack_start(self.combo,False,False,0)
vbox.pack_start(label_size,False,False,0)
vbox.pack_start(self.size,False,False,0)
vbox.pack_start(label_default,False,False,0)
hbox.pack_start(button2,True,True,0)
hbox.pack_start(button,True,True,0)
vbox.pack_end(hbox,True,False,0)
self.add(vbox)
self.set_position(Gtk.WindowPosition.CENTER)
self.show_all()
def get_status(self,widget) :
"Chosen font and size"
# PyApp.font and PyApp.taille are two variables
# that can be used by the other classes
PyApp.font = self.combo.get_active_text()
PyApp.taille = self.size.get_active_text()
self.destroy() # Destroy the window
def annuler(self,widget) :
"Cancel the font and size selection"
self.destroy()
| gpl-3.0 | 2,591,903,167,614,648,300 | 35.895349 | 106 | 0.624645 | false |
pavel-paulau/perfrunner | perfrunner/tests/dcp.py | 1 | 1670 | from perfrunner.helpers import local
from perfrunner.helpers.cbmonitor import timeit, with_stats
from perfrunner.tests import PerfTest
class DCPThroughputTest(PerfTest):
def _report_kpi(self, time_elapsed: float):
self.reporter.post(
*self.metrics.dcp_throughput(time_elapsed)
)
@with_stats
@timeit
def access(self, *args):
username, password = self.cluster_spec.rest_credentials
for target in self.target_iterator:
local.run_dcptest(
host=target.node,
username=username,
password=password,
bucket=target.bucket,
num_items=self.test_config.load_settings.items,
num_connections=self.test_config.dcp_settings.num_connections
)
def run(self):
self.load()
self.wait_for_persistence()
time_elapsed = self.access()
self.report_kpi(time_elapsed)
class JavaDCPThroughputTest(DCPThroughputTest):
def init_java_dcp_client(self):
local.clone_git_repo(repo=self.test_config.java_dcp_settings.repo,
branch=self.test_config.java_dcp_settings.branch)
local.build_java_dcp_client()
@with_stats
@timeit
def access(self, *args):
for target in self.target_iterator:
local.run_java_dcp_client(
connection_string=target.connection_string,
messages=self.test_config.load_settings.items,
config_file=self.test_config.java_dcp_settings.config,
)
def run(self):
self.init_java_dcp_client()
super().run()
| apache-2.0 | -4,941,375,111,631,714,000 | 28.298246 | 78 | 0.610778 | false |
ToonTownInfiniteRepo/ToontownInfinite | toontown/suit/DistributedSuitBaseAI.py | 1 | 4836 | from otp.ai.AIBaseGlobal import *
from otp.avatar import DistributedAvatarAI
import SuitPlannerBase
import SuitBase
import SuitDNA
from direct.directnotify import DirectNotifyGlobal
from toontown.battle import SuitBattleGlobals
class DistributedSuitBaseAI(DistributedAvatarAI.DistributedAvatarAI, SuitBase.SuitBase):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedSuitBaseAI')
def __init__(self, air, suitPlanner):
DistributedAvatarAI.DistributedAvatarAI.__init__(self, air)
SuitBase.SuitBase.__init__(self)
self.sp = suitPlanner
self.maxHP = 10
self.currHP = 10
self.zoneId = 0
self.dna = SuitDNA.SuitDNA()
self.virtual = 0
self.skeleRevives = 0
self.maxSkeleRevives = 0
self.reviveFlag = 0
self.buildingHeight = None
return
def generate(self):
DistributedAvatarAI.DistributedAvatarAI.generate(self)
def delete(self):
self.sp = None
del self.dna
DistributedAvatarAI.DistributedAvatarAI.delete(self)
return
def requestRemoval(self):
if self.sp != None:
self.sp.removeSuit(self)
else:
self.requestDelete()
return
def setLevel(self, lvl = None):
attributes = SuitBattleGlobals.SuitAttributes[self.dna.name]
if lvl:
self.level = lvl - attributes['level'] - 1
else:
self.level = SuitBattleGlobals.pickFromFreqList(attributes['freq'])
self.notify.debug('Assigning level ' + str(lvl))
if hasattr(self, 'doId'):
self.d_setLevelDist(self.level)
hp = attributes['hp'][self.level]
self.maxHP = hp
self.currHP = hp
def getLevelDist(self):
return self.getLevel()
def d_setLevelDist(self, level):
self.sendUpdate('setLevelDist', [level])
def setupSuitDNA(self, level, type, track):
dna = SuitDNA.SuitDNA()
dna.newSuitRandom(type, track)
self.dna = dna
self.track = track
self.setLevel(level)
return None
def getDNAString(self):
if self.dna:
return self.dna.makeNetString()
else:
self.notify.debug('No dna has been created for suit %d!' % self.getDoId())
return ''
def b_setBrushOff(self, index):
self.setBrushOff(index)
self.d_setBrushOff(index)
return None
def d_setBrushOff(self, index):
self.sendUpdate('setBrushOff', [index])
def setBrushOff(self, index):
pass
def d_denyBattle(self, toonId):
self.sendUpdateToAvatarId(toonId, 'denyBattle', [])
def b_setSkeleRevives(self, num):
if num == None:
num = 0
self.setSkeleRevives(num)
self.d_setSkeleRevives(self.getSkeleRevives())
return
def d_setSkeleRevives(self, num):
self.sendUpdate('setSkeleRevives', [num])
def getSkeleRevives(self):
return self.skeleRevives
def setSkeleRevives(self, num):
if num == None:
num = 0
self.skeleRevives = num
if num > self.maxSkeleRevives:
self.maxSkeleRevives = num
return
def getMaxSkeleRevives(self):
return self.maxSkeleRevives
def useSkeleRevive(self):
self.skeleRevives -= 1
self.currHP = self.maxHP
self.reviveFlag = 1
def reviveCheckAndClear(self):
returnValue = 0
if self.reviveFlag == 1:
returnValue = 1
self.reviveFlag = 0
return returnValue
def getHP(self):
return self.currHP
def setHP(self, hp):
if hp > self.maxHP:
self.currHP = self.maxHP
else:
self.currHP = hp
return None
def b_setHP(self, hp):
self.setHP(hp)
self.d_setHP(hp)
def d_setHP(self, hp):
self.sendUpdate('setHP', [hp])
def releaseControl(self):
return None
def getDeathEvent(self):
return 'cogDead-%s' % self.doId
def resume(self):
self.notify.debug('resume, hp=%s' % self.currHP)
if self.currHP <= 0:
messenger.send(self.getDeathEvent())
self.requestRemoval()
return None
def prepareToJoinBattle(self):
pass
def b_setSkelecog(self, flag):
self.setSkelecog(flag)
self.d_setSkelecog(flag)
def setSkelecog(self, flag):
SuitBase.SuitBase.setSkelecog(self, flag)
def d_setSkelecog(self, flag):
self.sendUpdate('setSkelecog', [flag])
def isForeman(self):
return 0
def isSupervisor(self):
return 0
def setVirtual(self, virtual):
pass
def getVirtual(self):
return 0
def isVirtual(self):
return self.getVirtual() | mit | -5,873,388,987,407,674,000 | 25.431694 | 88 | 0.605873 | false |
JMoravec/unkRadnet | fitToCurve/pyeq2/Models_3D/Trigonometric.py | 1 | 38712 | # pyeq2 is a collection of equations expressed as Python classes
#
# Copyright (C) 2012 James R. Phillips
# 2548 Vera Cruz Drive
# Birmingham, AL 35235 USA
#
# email: [email protected]
# web: http://zunzun.com
#
# License: BSD-style (see LICENSE.txt in main source directory)
# Version info: $Id: Trigonometric.py 1 2012-01-07 22:20:43Z [email protected] $
import sys, os
if os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '..') not in sys.path:
sys.path.append(os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '..'))
import pyeq2
import numpy
numpy.seterr(over = 'raise', divide = 'raise', invalid = 'raise', under = 'ignore') # numpy raises warnings, convert to exceptions to trap them
import pyeq2.Model_3D_BaseClass
class CoshA(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Cosh A [radians]"
_HTML = 'z = a * cosh(x) + b * cosh(y)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = True
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.CoshX(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.CoshY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
CoshX = inDataCacheDictionary['CoshX'] # only need to perform this dictionary look-up once
CoshY = inDataCacheDictionary['CoshY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = a * CoshX + b * CoshY
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * cosh(x_in) + b * cosh(y_in);\n"
return s
class CoshA_Transform(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Cosh A [radians] Transform"
_HTML = 'z = a * cosh(bx+c) + d * cosh(fy+g)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f', 'g']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
g = inCoeffs[5]
try:
temp = a * numpy.cosh(b * x_in + c) + d * numpy.cosh(f * y_in + g)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * cosh(b * x_in + c) + d * cosh(f * y_in + g);\n"
return s
class CoshB(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Cosh B [radians]"
_HTML = 'z = a * cosh(x) * cosh(y)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a']
_canLinearSolverBeUsedForSSQABS = True
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.CoshXCoshY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
CoshXCoshY = inDataCacheDictionary['CoshXCoshY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
try:
temp = a * CoshXCoshY
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * cosh(x_in) * cosh(y_in);\n"
return s
class CoshB_Transform(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Cosh B [radians] Transform"
_HTML = 'z = a * cosh(bx+c) * cosh(dy+f)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
try:
temp = a * numpy.cosh(b * x_in + c) * numpy.cosh(d * y_in + f)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * cosh(b * x_in + c) * cosh(d * y_in + f);\n"
return s
class CoshXY(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Cosh XY [radians]"
_HTML = 'z = a * cosh(xy)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a']
_canLinearSolverBeUsedForSSQABS = True
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.CoshXY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
CoshXY = inDataCacheDictionary['CoshXY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
try:
temp = a * CoshXY
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * cosh(x_in * y_in);\n"
return s
class CoshXYTransform(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Cosh XY [radians] Transform"
_HTML = 'z = a * cosh(b * xy + c)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.XY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
XY = inDataCacheDictionary['XY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a * numpy.cosh(b * XY + c)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * cosh(b * x_in * y_in + c);\n"
return s
class RezaCustomOne(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Reza's Custom Equation One [radians]"
_HTML = 'z = (cos(a*x - b*y) + sin(c*x - d*y))<sup>n</sup> - (cos(f*x - g*y) + sin(h*x- i*y))<sup>n</sup>'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f', 'g', 'h', 'i', 'n']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
g = inCoeffs[5]
h = inCoeffs[6]
i = inCoeffs[7]
n = inCoeffs[8]
try:
temp = numpy.power(numpy.cos(a*x_in - b*y_in) + numpy.sin(c*x_in - d*y_in), n)
temp -= numpy.power(numpy.cos(f*x_in - g*y_in) + numpy.sin(h*x_in - i*y_in), n)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = pow(cos(a*x_in - b*y_in) + sin(c*x_in - d*y_in), n);\n"
s += "\ttemp -= pow(cos(f*x_in - g*y_in) + sin(h*x_in - i*y_in), n);\n"
return s
class RezaCustomTwo(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Reza's Custom Equation Two [radians]"
_HTML = 'z = abs(cos((A*(x+B)) + C*(y+D))) + abs(cos((A*(x+B)) - C*(y+D))) - (sin(E*x+F))<sup>2</sup> - (sin(E*y+G))<sup>2</sup>'
_leftSideHTML = 'z'
_coefficientDesignators = ['A', 'B', 'C', 'D', 'E', 'F', 'G']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
A = inCoeffs[0]
B = inCoeffs[1]
C = inCoeffs[2]
D = inCoeffs[3]
E = inCoeffs[4]
F = inCoeffs[5]
G = inCoeffs[6]
try:
temp = abs(numpy.cos((A*(x_in+B)) + C*(y_in+D))) + abs(numpy.cos((A*(x_in+B)) - C*(y_in+D))) - numpy.power(numpy.sin(E*x_in+F), 2.0) - numpy.power(numpy.sin(E*y_in+G), 2.0)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = abs(cos((A*(x_in+B)) + C*(y_in+D))) + abs(cos((A*(x_in+B)) - C*(y_in+D))) - pow(sin(E*x_in+F), 2.0) - pow(sin(E*y_in+G), 2.0);\n"
return s
class SineA(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Sine A [radians]"
_HTML = 'z = a * sin(x) + b * sin(y)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = True
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.SinX(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.SinY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
SinX = inDataCacheDictionary['SinX'] # only need to perform this dictionary look-up once
SinY = inDataCacheDictionary['SinY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = a * SinX + b * SinY
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * sin(x_in) + b * sin(y_in);\n"
return s
class SineA_Transform(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Sine A [radians] Transform"
_HTML = 'z = a * sin(bx+c) + d * sin(fy+g)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f', 'g']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
g = inCoeffs[5]
try:
temp = a * numpy.sin(b * x_in + c) + d * numpy.sin(f * y_in + g)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * sin(b * x_in + c) + d * sin(f * y_in + g);\n"
return s
class SineB(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Sine B [radians]"
_HTML = 'z = a * sin(x) * sin(y)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a']
_canLinearSolverBeUsedForSSQABS = True
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.SinXSinY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
SinXSinY = inDataCacheDictionary['SinXSinY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
try:
temp = a * SinXSinY
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * sin(x_in) * sin(y_in);\n"
return s
class SineB_Transform(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Sine B [radians] Transform"
_HTML = 'z = a * sin(bx+c) * sin(dy+f)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
try:
temp = a * numpy.sin(b * x_in + c) * numpy.sin(d * y_in + f)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * sin(b * x_in + c) * sin(d * y_in + f);\n"
return s
class SineXY(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Sine XY [radians]"
_HTML = 'z = a * sin(xy)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a']
_canLinearSolverBeUsedForSSQABS = True
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.SinXY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
SinXY = inDataCacheDictionary['SinXY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
try:
temp = a * SinXY
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * sin(x_in * y_in);\n"
return s
class SineXYTransform(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Sine XY [radians] Transform"
_HTML = 'z = a * sin(b * xy + c)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.XY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
XY = inDataCacheDictionary['XY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a * numpy.sin(b * XY + c)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * sin(b * x_in * y_in + c);\n"
return s
class TanA(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Tan A [radians]"
_HTML = 'z = a * tan(x) + b * tan(y)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = True
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.TanX(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.TanY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
TanX = inDataCacheDictionary['TanX'] # only need to perform this dictionary look-up once
TanY = inDataCacheDictionary['TanY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = a * TanX + b * TanY
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * tan(x_in) + b * tan(y_in);\n"
return s
class TanATransform(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Tan A [radians] Transform"
_HTML = 'z = a * tan(bx + c) + d * tan(fy + g)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f', 'g']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
g = inCoeffs[5]
try:
temp = a * numpy.tan(b * x_in + c) + d * numpy.tan(f * y_in + g)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * tan(b * x_in + c) + d * tan(f * y_in + g);\n"
return s
class TanB(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Tan B [radians]"
_HTML = 'z = a * tan(x) * tan(y)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a']
_canLinearSolverBeUsedForSSQABS = True
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.TanXTanY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
TanXTanY = inDataCacheDictionary['TanXTanY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
try:
temp = a * TanXTanY
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * tan(x_in) * tan(y_in);\n"
return s
class TanBTransform(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Tan B [radians] Transform"
_HTML = 'z = a * tan(bx + c) * tan(dy + f)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
try:
temp = a * numpy.tan(b * x_in + c) * numpy.tan(d * y_in + f)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * tan(b * x_in + c) * tan(d * y_in + f);\n"
return s
class TanXY(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Tan XY [radians]"
_HTML = 'z = a * tan(xy)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a']
_canLinearSolverBeUsedForSSQABS = True
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.TanXY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
TanXY = inDataCacheDictionary['TanXY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
try:
temp = a * TanXY
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * tan(x_in * y_in);\n"
return s
class TanXYTransform(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Tan XY [radians] Transform"
_HTML = 'z = a * tan(b * xy + c)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = False
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.XY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
XY = inDataCacheDictionary['XY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a * numpy.tan(b * XY + c)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * tan(b * x_in * y_in + c);\n"
return s
| bsd-3-clause | -6,684,742,983,431,546,000 | 35.624409 | 184 | 0.692524 | false |
lukaszb/django-guardian | example_project/articles/models.py | 2 | 1851 | from django.db import models
from django.urls import reverse
from guardian.models import GroupObjectPermissionBase, UserObjectPermissionBase
class Article(models.Model):
title = models.CharField('title', max_length=64)
slug = models.SlugField(max_length=64)
content = models.TextField('content')
created_at = models.DateTimeField(auto_now_add=True, db_index=True)
class Meta:
default_permissions = ('add', 'change', 'delete')
permissions = (
('view_article', 'Can view article'),
)
get_latest_by = 'created_at'
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('articles:details', kwargs={'slug': self.slug})
class ArticleUserObjectPermission(UserObjectPermissionBase):
content_object = models.ForeignKey(Article, on_delete=models.CASCADE)
class ArticleGroupObjectPermission(GroupObjectPermissionBase):
content_object = models.ForeignKey(Article, on_delete=models.CASCADE)
from guardian.models import UserObjectPermissionAbstract, GroupObjectPermissionAbstract
class BigUserObjectPermission(UserObjectPermissionAbstract):
id = models.BigAutoField(editable=False, unique=True, primary_key=True)
class Meta(UserObjectPermissionAbstract.Meta):
abstract = False
indexes = [
*UserObjectPermissionAbstract.Meta.indexes,
models.Index(fields=['content_type', 'object_pk', 'user']),
]
class BigGroupObjectPermission(GroupObjectPermissionAbstract):
id = models.BigAutoField(editable=False, unique=True, primary_key=True)
class Meta(GroupObjectPermissionAbstract.Meta):
abstract = False
indexes = [
*GroupObjectPermissionAbstract.Meta.indexes,
models.Index(fields=['content_type', 'object_pk', 'group']),
]
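# --- Illustrative usage sketch (not part of the example app; the object names are assumptions) ---
# With the direct foreign-key models above, django-guardian stores Article object
# permissions in ArticleUserObjectPermission / ArticleGroupObjectPermission. Typical
# usage through guardian's shortcuts would look roughly like:
#
# from guardian.shortcuts import assign_perm
# assign_perm('view_article', some_user, some_article)  # grant an object-level permission
# some_user.has_perm('view_article', some_article)  # check it -> True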
| bsd-2-clause | -4,053,829,553,794,031,000 | 32.053571 | 87 | 0.705024 | false |
joshfriend/sqlalchemy-utils | tests/types/test_weekdays.py | 1 | 1526 | from babel import Locale
import sqlalchemy as sa
from sqlalchemy_utils.types import WeekDaysType
from sqlalchemy_utils.primitives import WeekDays
from sqlalchemy_utils import i18n
from tests import TestCase
class WeekDaysTypeTestCase(TestCase):
def setup_method(self, method):
TestCase.setup_method(self, method)
i18n.get_locale = lambda: Locale('en')
def create_models(self):
class Schedule(self.Base):
__tablename__ = 'schedule'
id = sa.Column(sa.Integer, primary_key=True)
working_days = sa.Column(WeekDaysType)
def __repr__(self):
return 'Schedule(%r)' % self.id
self.Schedule = Schedule
def test_weekdays_parameter_processing(self):
schedule = self.Schedule(
working_days='0001111'
)
self.session.add(schedule)
self.session.commit()
schedule = self.session.query(self.Schedule).first()
assert isinstance(schedule.working_days, WeekDays)
def test_scalar_attributes_get_coerced_to_objects(self):
schedule = self.Schedule(working_days=u'1010101')
assert isinstance(schedule.working_days, WeekDays)
class TestWeekDaysTypeOnSQLite(WeekDaysTypeTestCase):
dns = 'sqlite:///:memory:'
class TestWeekDaysTypeOnPostgres(WeekDaysTypeTestCase):
dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
class TestWeekDaysTypeOnMySQL(WeekDaysTypeTestCase):
dns = 'mysql+pymysql://travis@localhost/sqlalchemy_utils_test'
| bsd-3-clause | -6,210,199,919,171,764,000 | 28.921569 | 66 | 0.686107 | false |
nitely/Spirit | spirit/category/migrations/0001_initial.py | 1 | 1333 | # -*- coding: utf-8 -*-
from django.db import models, migrations
import spirit.core.utils.models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),
('title', models.CharField(verbose_name='title', max_length=75)),
('slug', spirit.core.utils.models.AutoSlugField(db_index=False, populate_from='title', blank=True)),
('description', models.CharField(verbose_name='description', max_length=255, blank=True)),
('is_closed', models.BooleanField(verbose_name='closed', default=False)),
('is_removed', models.BooleanField(verbose_name='removed', default=False)),
('is_private', models.BooleanField(verbose_name='private', default=False)),
('parent', models.ForeignKey(null=True, verbose_name='category parent', to='spirit_category.Category', blank=True, on_delete=models.CASCADE)),
],
options={
'ordering': ['title', 'pk'],
'verbose_name': 'category',
'verbose_name_plural': 'categories',
},
),
]
| mit | -5,217,428,606,251,673,000 | 43.433333 | 158 | 0.582146 | false |
ganeti-github-testing/ganeti-test-1 | lib/rpc_defs.py | 1 | 31147 | #
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""RPC definitions for communication between master and node daemons.
RPC definition fields:
- Name as string
- L{SINGLE} for single-node calls, L{MULTI} for multi-node
- Name resolver option(s), can be callable receiving all arguments in a tuple
- Timeout (e.g. L{constants.RPC_TMO_NORMAL}), or callback receiving all
arguments in a tuple to calculate timeout
- List of arguments as tuples
- Name as string
- Argument kind used for encoding/decoding
- Description for docstring (can be C{None})
- Custom body encoder (e.g. for preparing per-node bodies)
- Return value wrapper (e.g. for deserializing into L{objects}-based objects)
- Short call description for docstring
"""
from ganeti import constants
from ganeti import utils
from ganeti import objects
# Guidelines for choosing timeouts:
# - call used during watcher: timeout of 1min, constants.RPC_TMO_URGENT
# - trivial (but be sure it is trivial)
# (e.g. reading a file): 5min, constants.RPC_TMO_FAST
# - other calls: 15 min, constants.RPC_TMO_NORMAL
# - special calls (instance add, etc.):
# either constants.RPC_TMO_SLOW (1h) or huge timeouts
SINGLE = "single-node"
MULTI = "multi-node"
ACCEPT_OFFLINE_NODE = object()
# Constants for encoding/decoding
(ED_OBJECT_DICT,
ED_OBJECT_DICT_LIST,
ED_INST_DICT,
ED_INST_DICT_HVP_BEP_DP,
ED_NODE_TO_DISK_DICT_DP,
ED_INST_DICT_OSP_DP,
ED_IMPEXP_IO,
ED_FILE_DETAILS,
ED_FINALIZE_EXPORT_DISKS,
ED_COMPRESS,
ED_BLOCKDEV_RENAME,
ED_DISKS_DICT_DP,
ED_MULTI_DISKS_DICT_DP,
ED_SINGLE_DISK_DICT_DP,
ED_NIC_DICT,
ED_DEVICE_DICT) = range(1, 17)
def _Prepare(calls):
"""Converts list of calls to dictionary.
"""
return utils.SequenceToDict(calls)
def _MigrationStatusPostProc(result):
"""Post-processor for L{rpc.node.RpcRunner.call_instance_get_migration_status}
"""
if not result.fail_msg and result.payload is not None:
result.payload = objects.MigrationStatus.FromDict(result.payload)
return result
def _BlockdevFindPostProc(result):
"""Post-processor for L{rpc.node.RpcRunner.call_blockdev_find}.
"""
if not result.fail_msg and result.payload is not None:
result.payload = objects.BlockDevStatus.FromDict(result.payload)
return result
def _BlockdevGetMirrorStatusPostProc(result):
"""Post-processor for call_blockdev_getmirrorstatus.
"""
if not result.fail_msg:
result.payload = map(objects.BlockDevStatus.FromDict, result.payload)
return result
def _BlockdevGetMirrorStatusMultiPreProc(node, args):
"""Prepares the appropriate node values for blockdev_getmirrorstatus_multi.
"""
# there should be only one argument to this RPC, already holding a
# node->disks dictionary, we just need to extract the value for the
# current node
assert len(args) == 1
return [args[0][node]]
def _BlockdevGetMirrorStatusMultiPostProc(result):
"""Post-processor for call_blockdev_getmirrorstatus_multi.
"""
if not result.fail_msg:
for idx, (success, status) in enumerate(result.payload):
if success:
result.payload[idx] = (success, objects.BlockDevStatus.FromDict(status))
return result
def _NodeInfoPreProc(node, args):
"""Prepare the storage_units argument for node_info calls."""
assert len(args) == 2
# The storage_units argument is either a dictionary with one value for each
# node, or a fixed value to be used for all the nodes
if type(args[0]) is dict:
return [args[0][node], args[1]]
else:
return args
def _ImpExpStatusPostProc(result):
"""Post-processor for import/export status.
@rtype: Payload containing list of L{objects.ImportExportStatus} instances
@return: Returns a list of the state of each named import/export or None if
a status couldn't be retrieved
"""
if not result.fail_msg:
decoded = []
for i in result.payload:
if i is None:
decoded.append(None)
continue
decoded.append(objects.ImportExportStatus.FromDict(i))
result.payload = decoded
return result
def _TestDelayTimeout((duration, )):
"""Calculate timeout for "test_delay" RPC.
"""
return int(duration + 5)
_FILE_STORAGE_CALLS = [
("file_storage_dir_create", SINGLE, None, constants.RPC_TMO_FAST, [
("file_storage_dir", None, "File storage directory"),
], None, None, "Create the given file storage directory"),
("file_storage_dir_remove", SINGLE, None, constants.RPC_TMO_FAST, [
("file_storage_dir", None, "File storage directory"),
], None, None, "Remove the given file storage directory"),
("file_storage_dir_rename", SINGLE, None, constants.RPC_TMO_FAST, [
("old_file_storage_dir", None, "Old name"),
("new_file_storage_dir", None, "New name"),
], None, None, "Rename file storage directory"),
]
_STORAGE_CALLS = [
("storage_list", MULTI, None, constants.RPC_TMO_NORMAL, [
("su_name", None, None),
("su_args", None, None),
("name", None, None),
("fields", None, None),
], None, None, "Get list of storage units"),
("storage_modify", SINGLE, None, constants.RPC_TMO_NORMAL, [
("su_name", None, None),
("su_args", None, None),
("name", None, None),
("changes", None, None),
], None, None, "Modify a storage unit"),
("storage_execute", SINGLE, None, constants.RPC_TMO_NORMAL, [
("su_name", None, None),
("su_args", None, None),
("name", None, None),
("op", None, None),
], None, None, "Executes an operation on a storage unit"),
]
_INSTANCE_CALLS = [
("instance_info", SINGLE, None, constants.RPC_TMO_URGENT, [
("instance", None, "Instance name"),
("hname", None, "Hypervisor type"),
("hvparams", None, "Hypervisor parameters"),
], None, None, "Returns information about a single instance"),
("all_instances_info", MULTI, None, constants.RPC_TMO_URGENT, [
("hypervisor_list", None, "Hypervisors to query for instances"),
("all_hvparams", None, "Dictionary mapping hypervisor names to hvparams"),
], None, None,
"Returns information about all instances on the given nodes"),
("instance_list", MULTI, None, constants.RPC_TMO_URGENT, [
("hypervisor_list", None, "Hypervisors to query for instances"),
("hvparams", None, "Hvparams of all hypervisors"),
], None, None, "Returns the list of running instances on the given nodes"),
("instance_reboot", SINGLE, None, constants.RPC_TMO_NORMAL, [
("inst", ED_INST_DICT, "Instance object"),
("reboot_type", None, None),
("shutdown_timeout", None, None),
("reason", None, "The reason for the reboot"),
], None, None, "Returns the list of running instances on the given nodes"),
("instance_shutdown", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, "Instance object"),
("timeout", None, None),
("reason", None, "The reason for the shutdown"),
], None, None, "Stops an instance"),
("instance_balloon_memory", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, "Instance object"),
("memory", None, None),
], None, None, "Modify the amount of an instance's runtime memory"),
("instance_run_rename", SINGLE, None, constants.RPC_TMO_SLOW, [
("instance", ED_INST_DICT, "Instance object"),
("old_name", None, None),
("debug", None, None),
], None, None, "Run the OS rename script for an instance"),
("instance_migratable", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, "Instance object"),
], None, None, "Checks whether the given instance can be migrated"),
("migration_info", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, "Instance object"),
], None, None,
"Gather the information necessary to prepare an instance migration"),
("accept_instance", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, "Instance object"),
("info", None, "Result for the call_migration_info call"),
("target", None, "Target hostname (usually an IP address)"),
], None, None, "Prepare a node to accept an instance"),
("instance_finalize_migration_dst", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, "Instance object"),
("info", None, "Result for the call_migration_info call"),
("success", None, "Whether the migration was a success or failure"),
], None, None, "Finalize any target-node migration specific operation"),
("instance_migrate", SINGLE, None, constants.RPC_TMO_SLOW, [
("cluster_name", None, "Cluster name"),
("instance", ED_INST_DICT, "Instance object"),
("target", None, "Target node name"),
("live", None, "Whether the migration should be done live or not"),
], None, None, "Migrate an instance"),
("instance_finalize_migration_src", SINGLE, None, constants.RPC_TMO_SLOW, [
("instance", ED_INST_DICT, "Instance object"),
("success", None, "Whether the migration succeeded or not"),
("live", None, "Whether the user requested a live migration or not"),
], None, None, "Finalize the instance migration on the source node"),
("instance_get_migration_status", SINGLE, None, constants.RPC_TMO_SLOW, [
("instance", ED_INST_DICT, "Instance object"),
], None, _MigrationStatusPostProc, "Report migration status"),
("instance_start", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance_hvp_bep", ED_INST_DICT_HVP_BEP_DP, None),
("startup_paused", None, None),
("reason", None, "The reason for the startup"),
], None, None, "Starts an instance"),
("instance_os_add", SINGLE, None, constants.RPC_TMO_1DAY, [
("instance_osp", ED_INST_DICT_OSP_DP, "Tuple: (target instance,"
" temporary OS parameters"
" overriding configuration)"),
("reinstall", None, "Whether the instance is being reinstalled"),
("debug", None, "Debug level for the OS install script to use"),
], None, None, "Installs an operative system onto an instance"),
("hotplug_device", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, "Instance object"),
("action", None, "Hotplug Action"),
("dev_type", None, "Device type"),
("device", ED_DEVICE_DICT, "Device dict"),
("extra", None, "Extra info for device (dev_path for disk)"),
("seq", None, "Device seq"),
], None, None, "Hoplug a device to a running instance"),
("hotplug_supported", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, "Instance object"),
], None, None, "Check if hotplug is supported"),
("instance_metadata_modify", SINGLE, None, constants.RPC_TMO_URGENT, [
("instance", None, "Instance object"),
], None, None, "Modify instance metadata"),
]
_IMPEXP_CALLS = [
("import_start", SINGLE, None, constants.RPC_TMO_NORMAL, [
("opts", ED_OBJECT_DICT, None),
("instance", ED_INST_DICT, None),
("component", None, None),
("dest", ED_IMPEXP_IO, "Import destination"),
], None, None, "Starts an import daemon"),
("export_start", SINGLE, None, constants.RPC_TMO_NORMAL, [
("opts", ED_OBJECT_DICT, None),
("host", None, None),
("port", None, None),
("instance", ED_INST_DICT, None),
("component", None, None),
("source", ED_IMPEXP_IO, "Export source"),
], None, None, "Starts an export daemon"),
("impexp_status", SINGLE, None, constants.RPC_TMO_FAST, [
("names", None, "Import/export names"),
], None, _ImpExpStatusPostProc, "Gets the status of an import or export"),
("impexp_abort", SINGLE, None, constants.RPC_TMO_NORMAL, [
("name", None, "Import/export name"),
], None, None, "Aborts an import or export"),
("impexp_cleanup", SINGLE, None, constants.RPC_TMO_NORMAL, [
("name", None, "Import/export name"),
], None, None, "Cleans up after an import or export"),
("export_info", SINGLE, None, constants.RPC_TMO_FAST, [
("path", None, None),
], None, None, "Queries the export information in a given path"),
("finalize_export", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, None),
("snap_disks", ED_FINALIZE_EXPORT_DISKS, None),
], None, None, "Request the completion of an export operation"),
("export_list", MULTI, None, constants.RPC_TMO_FAST, [], None, None,
"Gets the stored exports list"),
("export_remove", SINGLE, None, constants.RPC_TMO_FAST, [
("export", None, None),
], None, None, "Requests removal of a given export"),
]
_X509_CALLS = [
("x509_cert_create", SINGLE, None, constants.RPC_TMO_NORMAL, [
("validity", None, "Validity in seconds"),
], None, None, "Creates a new X509 certificate for SSL/TLS"),
("x509_cert_remove", SINGLE, None, constants.RPC_TMO_NORMAL, [
("name", None, "Certificate name"),
], None, None, "Removes a X509 certificate"),
]
_BLOCKDEV_CALLS = [
("bdev_sizes", MULTI, None, constants.RPC_TMO_URGENT, [
("devices", None, None),
], None, None,
"Gets the sizes of requested block devices present on a node"),
("blockdev_create", SINGLE, None, constants.RPC_TMO_NORMAL, [
("bdev", ED_SINGLE_DISK_DICT_DP, None),
("size", None, None),
("owner", None, None),
("on_primary", None, None),
("info", None, None),
("exclusive_storage", None, None),
], None, None, "Request creation of a given block device"),
("blockdev_convert", SINGLE, None, constants.RPC_TMO_SLOW, [
("bdev_src", ED_SINGLE_DISK_DICT_DP, None),
("bdev_dest", ED_SINGLE_DISK_DICT_DP, None),
], None, None,
"Request the copy of the source block device to the destination one"),
("blockdev_image", SINGLE, None, constants.RPC_TMO_SLOW, [
("bdev", ED_SINGLE_DISK_DICT_DP, None),
("image", None, None),
("size", None, None),
], None, None,
"Request to dump an image with given size onto a block device"),
("blockdev_wipe", SINGLE, None, constants.RPC_TMO_SLOW, [
("bdev", ED_SINGLE_DISK_DICT_DP, None),
("offset", None, None),
("size", None, None),
], None, None,
"Request wipe at given offset with given size of a block device"),
("blockdev_remove", SINGLE, None, constants.RPC_TMO_NORMAL, [
("bdev", ED_SINGLE_DISK_DICT_DP, None),
], None, None, "Request removal of a given block device"),
("blockdev_pause_resume_sync", SINGLE, None, constants.RPC_TMO_NORMAL, [
("disks", ED_DISKS_DICT_DP, None),
("pause", None, None),
], None, None, "Request a pause/resume of given block device"),
("blockdev_assemble", SINGLE, None, constants.RPC_TMO_NORMAL, [
("disk", ED_SINGLE_DISK_DICT_DP, None),
("instance", ED_INST_DICT, None),
("on_primary", None, None),
("idx", None, None),
], None, None, "Request assembling of a given block device"),
("blockdev_shutdown", SINGLE, None, constants.RPC_TMO_NORMAL, [
("disk", ED_SINGLE_DISK_DICT_DP, None),
], None, None, "Request shutdown of a given block device"),
("blockdev_addchildren", SINGLE, None, constants.RPC_TMO_NORMAL, [
("bdev", ED_SINGLE_DISK_DICT_DP, None),
("ndevs", ED_DISKS_DICT_DP, None),
], None, None,
"Request adding a list of children to a (mirroring) device"),
("blockdev_removechildren", SINGLE, None, constants.RPC_TMO_NORMAL, [
("bdev", ED_SINGLE_DISK_DICT_DP, None),
("ndevs", ED_DISKS_DICT_DP, None),
], None, None,
"Request removing a list of children from a (mirroring) device"),
("blockdev_close", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance_name", None, None),
("disks", ED_DISKS_DICT_DP, None),
], None, None, "Closes the given block devices"),
("blockdev_getdimensions", SINGLE, None, constants.RPC_TMO_NORMAL, [
("disks", ED_MULTI_DISKS_DICT_DP, None),
], None, None, "Returns size and spindles of the given disks"),
("drbd_disconnect_net", MULTI, None, constants.RPC_TMO_NORMAL, [
("disks", ED_DISKS_DICT_DP, None),
], None, None,
"Disconnects the network of the given drbd devices"),
("drbd_attach_net", MULTI, None, constants.RPC_TMO_NORMAL, [
("disks", ED_DISKS_DICT_DP, None),
("instance_name", None, None),
("multimaster", None, None),
], None, None, "Connects the given DRBD devices"),
("drbd_wait_sync", MULTI, None, constants.RPC_TMO_SLOW, [
("disks", ED_DISKS_DICT_DP, None),
], None, None,
"Waits for the synchronization of drbd devices is complete"),
("drbd_needs_activation", SINGLE, None, constants.RPC_TMO_NORMAL, [
("disks", ED_MULTI_DISKS_DICT_DP, None),
], None, None,
"Returns the drbd disks which need activation"),
("blockdev_grow", SINGLE, None, constants.RPC_TMO_NORMAL, [
("cf_bdev", ED_SINGLE_DISK_DICT_DP, None),
("amount", None, None),
("dryrun", None, None),
("backingstore", None, None),
("es_flag", None, None),
], None, None, "Request growing of the given block device by a"
" given amount"),
("blockdev_snapshot", SINGLE, None, constants.RPC_TMO_NORMAL, [
("cf_bdev", ED_SINGLE_DISK_DICT_DP, None),
("snap_name", None, None),
("snap_size", None, None),
], None, None, "Export a given disk to another node"),
("blockdev_rename", SINGLE, None, constants.RPC_TMO_NORMAL, [
("devlist", ED_BLOCKDEV_RENAME, None),
], None, None, "Request rename of the given block devices"),
("blockdev_find", SINGLE, None, constants.RPC_TMO_NORMAL, [
("disk", ED_SINGLE_DISK_DICT_DP, None),
], None, _BlockdevFindPostProc,
"Request identification of a given block device"),
("blockdev_getmirrorstatus", SINGLE, None, constants.RPC_TMO_NORMAL, [
("disks", ED_DISKS_DICT_DP, None),
], None, _BlockdevGetMirrorStatusPostProc,
"Request status of a (mirroring) device"),
("blockdev_getmirrorstatus_multi", MULTI, None, constants.RPC_TMO_NORMAL, [
("node_disks", ED_NODE_TO_DISK_DICT_DP, None),
], _BlockdevGetMirrorStatusMultiPreProc,
_BlockdevGetMirrorStatusMultiPostProc,
"Request status of (mirroring) devices from multiple nodes"),
("blockdev_setinfo", SINGLE, None, constants.RPC_TMO_NORMAL, [
("disk", ED_SINGLE_DISK_DICT_DP, None),
("info", None, None),
], None, None, "Sets metadata information on a given block device"),
]
_OS_CALLS = [
("os_diagnose", MULTI, None, constants.RPC_TMO_FAST, [], None, None,
"Request a diagnose of OS definitions"),
("os_validate", MULTI, None, constants.RPC_TMO_FAST, [
("required", None, None),
("name", None, None),
("checks", None, None),
("params", None, None),
("force_variant", None, None),
], None, None, "Run a validation routine for a given OS"),
("os_export", SINGLE, None, constants.RPC_TMO_FAST, [
("instance", ED_INST_DICT, None),
("override_env", None, None),
], None, None, "Export an OS for a given instance"),
]
_EXTSTORAGE_CALLS = [
("extstorage_diagnose", MULTI, None, constants.RPC_TMO_FAST, [], None, None,
"Request a diagnose of ExtStorage Providers"),
]
_NODE_CALLS = [
("node_has_ip_address", SINGLE, None, constants.RPC_TMO_FAST, [
("address", None, "IP address"),
], None, None, "Checks if a node has the given IP address"),
("node_info", MULTI, None, constants.RPC_TMO_URGENT, [
("storage_units", None,
"List of tuples '<storage_type>,<key>,[<param>]' to ask for disk space"
" information; the parameter list varies depending on the storage_type"),
("hv_specs", None,
"List of hypervisor specification (name, hvparams) to ask for node "
"information"),
], _NodeInfoPreProc, None, "Return node information"),
("node_verify", MULTI, None, constants.RPC_TMO_NORMAL, [
("checkdict", None, "What to verify"),
("cluster_name", None, "Cluster name"),
("all_hvparams", None, "Dictionary mapping hypervisor names to hvparams"),
("node_groups", None, "node names mapped to their group uuids"),
("groups_cfg", None,
"a dictionary mapping group uuids to their configuration"),
], None, None, "Request verification of given parameters"),
("node_volumes", MULTI, None, constants.RPC_TMO_FAST, [], None, None,
"Gets all volumes on node(s)"),
("node_demote_from_mc", SINGLE, None, constants.RPC_TMO_FAST, [], None, None,
"Demote a node from the master candidate role"),
("node_powercycle", SINGLE, ACCEPT_OFFLINE_NODE, constants.RPC_TMO_NORMAL, [
("hypervisor", None, "Hypervisor type"),
("hvparams", None, "Hypervisor parameters"),
], None, None, "Tries to powercycle a node"),
("node_configure_ovs", SINGLE, None, constants.RPC_TMO_NORMAL, [
("ovs_name", None, "Name of the OpenvSwitch to create"),
("ovs_link", None, "Link of the OpenvSwitch to the outside"),
], None, None, "This will create and setup the OpenvSwitch"),
("node_crypto_tokens", SINGLE, None, constants.RPC_TMO_NORMAL, [
("token_request", None,
"List of tuples of requested crypto token types, actions"),
], None, None, "Handle crypto tokens of the node."),
("node_ensure_daemon", MULTI, None, constants.RPC_TMO_URGENT, [
("daemon", None, "Daemon name"),
("run", None, "Whether the daemon should be running or stopped"),
], None, None, "Ensure daemon is running on the node."),
("node_ssh_key_add", MULTI, None, constants.RPC_TMO_URGENT, [
("node_uuid", None, "UUID of the node whose key is distributed"),
("node_name", None, "Name of the node whose key is distributed"),
("potential_master_candidates", None, "Potential master candidates"),
("ssh_port_map", None, "Map of nodes' SSH ports to be used for transfers"),
("to_authorized_keys", None, "Whether the node's key should be added"
" to all nodes' 'authorized_keys' file"),
("to_public_keys", None, "Whether the node's key should be added"
" to all nodes' public key file"),
("get_public_keys", None, "Whether the node should get the other nodes'"
" public keys")],
None, None, "Distribute a new node's public SSH key on the cluster."),
("node_ssh_key_remove", MULTI, None, constants.RPC_TMO_URGENT, [
("node_uuid", None, "UUID of the node whose key is removed"),
("node_name", None, "Name of the node whose key is removed"),
("master_candidate_uuids", None, "List of UUIDs of master candidates."),
("potential_master_candidates", None, "Potential master candidates"),
("ssh_port_map", None, "Map of nodes' SSH ports to be used for transfers"),
("from_authorized_keys", None,
"If the key should be removed from the 'authorized_keys' file."),
("from_public_keys", None,
"If the key should be removed from the public key file."),
("clear_authorized_keys", None,
"If the 'authorized_keys' file of the node should be cleared."),
("clear_public_keys", None,
"If the 'ganeti_pub_keys' file of the node should be cleared.")],
None, None, "Remove a node's SSH key from the other nodes' key files."),
("node_ssh_keys_renew", MULTI, None, constants.RPC_TMO_URGENT, [
("node_uuids", None, "UUIDs of the nodes whose key is renewed"),
("node_names", None, "Names of the nodes whose key is renewed"),
("ssh_port_map", None, "Map of nodes' SSH ports to be used for transfers"),
("master_candidate_uuids", None, "List of UUIDs of master candidates."),
("potential_master_candidates", None, "Potential master candidates")],
None, None, "Renew all SSH key pairs of all nodes nodes."),
]
_MISC_CALLS = [
("lv_list", MULTI, None, constants.RPC_TMO_URGENT, [
("vg_name", None, None),
], None, None, "Gets the logical volumes present in a given volume group"),
("vg_list", MULTI, None, constants.RPC_TMO_URGENT, [], None, None,
"Gets the volume group list"),
("bridges_exist", SINGLE, None, constants.RPC_TMO_URGENT, [
("bridges_list", None, "Bridges which must be present on remote node"),
], None, None, "Checks if a node has all the bridges given"),
("etc_hosts_modify", SINGLE, None, constants.RPC_TMO_NORMAL, [
("mode", None,
"Mode to operate; currently L{constants.ETC_HOSTS_ADD} or"
" L{constants.ETC_HOSTS_REMOVE}"),
("name", None, "Hostname to be modified"),
("ip", None, "IP address (L{constants.ETC_HOSTS_ADD} only)"),
], None, None, "Modify hosts file with name"),
("drbd_helper", MULTI, None, constants.RPC_TMO_URGENT, [],
None, None, "Gets DRBD helper"),
("restricted_command", MULTI, None, constants.RPC_TMO_SLOW, [
("cmd", None, "Command name"),
], None, None, "Runs restricted command"),
("run_oob", SINGLE, None, constants.RPC_TMO_NORMAL, [
("oob_program", None, None),
("command", None, None),
("remote_node", None, None),
("timeout", None, None),
], None, None, "Runs out-of-band command"),
("hooks_runner", MULTI, None, constants.RPC_TMO_NORMAL, [
("hpath", None, None),
("phase", None, None),
("env", None, None),
], None, None, "Call the hooks runner"),
("iallocator_runner", SINGLE, None, constants.RPC_TMO_NORMAL, [
("name", None, "Iallocator name"),
("idata", None, "JSON-encoded input string"),
("default_iallocator_params", None, "Additional iallocator parameters"),
], None, None, "Call an iallocator on a remote node"),
("test_delay", MULTI, None, _TestDelayTimeout, [
("duration", None, None),
], None, None, "Sleep for a fixed time on given node(s)"),
("hypervisor_validate_params", MULTI, None, constants.RPC_TMO_NORMAL, [
("hvname", None, "Hypervisor name"),
("hvfull", None, "Parameters to be validated"),
], None, None, "Validate hypervisor params"),
("get_watcher_pause", SINGLE, None, constants.RPC_TMO_URGENT, [],
None, None, "Get watcher pause end"),
("set_watcher_pause", MULTI, None, constants.RPC_TMO_URGENT, [
("until", None, None),
], None, None, "Set watcher pause end"),
("get_file_info", SINGLE, None, constants.RPC_TMO_FAST, [
("file_path", None, None),
], None, None, "Checks if a file exists and reports on it"),
]
CALLS = {
"RpcClientDefault":
_Prepare(_IMPEXP_CALLS + _X509_CALLS + _OS_CALLS + _NODE_CALLS +
_FILE_STORAGE_CALLS + _MISC_CALLS + _INSTANCE_CALLS +
_BLOCKDEV_CALLS + _STORAGE_CALLS + _EXTSTORAGE_CALLS),
"RpcClientJobQueue": _Prepare([
("jobqueue_update", MULTI, None, constants.RPC_TMO_URGENT, [
("file_name", None, None),
("content", ED_COMPRESS, None),
], None, None, "Update job queue file"),
("jobqueue_purge", SINGLE, None, constants.RPC_TMO_NORMAL, [], None, None,
"Purge job queue"),
("jobqueue_rename", MULTI, None, constants.RPC_TMO_URGENT, [
("rename", None, None),
], None, None, "Rename job queue file"),
("jobqueue_set_drain_flag", MULTI, None, constants.RPC_TMO_URGENT, [
("flag", None, None),
], None, None, "Set job queue drain flag"),
]),
"RpcClientBootstrap": _Prepare([
("node_start_master_daemons", SINGLE, None, constants.RPC_TMO_FAST, [
("no_voting", None, None),
], None, None, "Starts master daemons on a node"),
("node_activate_master_ip", SINGLE, None, constants.RPC_TMO_FAST, [
("master_params", ED_OBJECT_DICT, "Network parameters of the master"),
("use_external_mip_script", None,
"Whether to use the user-provided master IP address setup script"),
], None, None,
"Activates master IP on a node"),
("node_stop_master", SINGLE, None, constants.RPC_TMO_FAST, [], None, None,
"Deactivates master IP and stops master daemons on a node"),
("node_deactivate_master_ip", SINGLE, None, constants.RPC_TMO_FAST, [
("master_params", ED_OBJECT_DICT, "Network parameters of the master"),
("use_external_mip_script", None,
"Whether to use the user-provided master IP address setup script"),
], None, None,
"Deactivates master IP on a node"),
("node_change_master_netmask", SINGLE, None, constants.RPC_TMO_FAST, [
("old_netmask", None, "The old value of the netmask"),
("netmask", None, "The new value of the netmask"),
("master_ip", None, "The master IP"),
("master_netdev", None, "The master network device"),
], None, None, "Change master IP netmask"),
("node_leave_cluster", SINGLE, None, constants.RPC_TMO_NORMAL, [
("modify_ssh_setup", None, None),
], None, None,
"Requests a node to clean the cluster information it has"),
("master_node_name", MULTI, None, constants.RPC_TMO_URGENT, [], None, None,
"Returns the master node name"),
]),
"RpcClientDnsOnly": _Prepare([
("version", MULTI, ACCEPT_OFFLINE_NODE, constants.RPC_TMO_URGENT, [], None,
None, "Query node version"),
("node_verify_light", MULTI, None, constants.RPC_TMO_NORMAL, [
("checkdict", None, "What to verify"),
("cluster_name", None, "Cluster name"),
("hvparams", None, "Dictionary mapping hypervisor names to hvparams"),
("node_groups", None, "node names mapped to their group uuids"),
("groups_cfg", None,
"a dictionary mapping group uuids to their configuration"),
], None, None, "Request verification of given parameters"),
]),
"RpcClientConfig": _Prepare([
("upload_file", MULTI, None, constants.RPC_TMO_NORMAL, [
("file_name", ED_FILE_DETAILS, None),
], None, None, "Upload files"),
("upload_file_single", MULTI, None, constants.RPC_TMO_NORMAL, [
("file_name", None, "The name of the file"),
("content", ED_COMPRESS, "The data to be uploaded"),
("mode", None, "The mode of the file or None"),
("uid", None, "The owner of the file"),
("gid", None, "The group of the file"),
("atime", None, "The file's last access time"),
("mtime", None, "The file's last modification time"),
], None, None, "Upload files"),
("write_ssconf_files", MULTI, None, constants.RPC_TMO_NORMAL, [
("values", None, None),
], None, None, "Write ssconf files"),
]),
}
| bsd-2-clause | -4,034,353,141,925,082,600 | 43.180142 | 80 | 0.654477 | false |
iproduct/course-social-robotics | 11-dnn-keras/venv/Lib/site-packages/pandas/core/dtypes/base.py | 1 | 13190 | """
Extend pandas with custom array types.
"""
from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type, Union
import numpy as np
from pandas._typing import DtypeObj
from pandas.errors import AbstractMethodError
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
if TYPE_CHECKING:
from pandas.core.arrays import ExtensionArray
class ExtensionDtype:
"""
A custom data type, to be paired with an ExtensionArray.
See Also
--------
extensions.register_extension_dtype: Register an ExtensionType
with pandas as class decorator.
extensions.ExtensionArray: Abstract base class for custom 1-D array types.
Notes
-----
The interface includes the following abstract methods that must
be implemented by subclasses:
* type
* name
The following attributes and methods influence the behavior of the dtype in
pandas operations
* _is_numeric
* _is_boolean
* _get_common_dtype
Optionally one can override construct_array_type for construction
with the name of this dtype via the Registry. See
:meth:`extensions.register_extension_dtype`.
* construct_array_type
The `na_value` class attribute can be used to set the default NA value
for this type. :attr:`numpy.nan` is used by default.
ExtensionDtypes are required to be hashable. The base class provides
a default implementation, which relies on the ``_metadata`` class
attribute. ``_metadata`` should be a tuple containing the strings
that define your data type. For example, with ``PeriodDtype`` that's
the ``freq`` attribute.
**If you have a parametrized dtype you should set the ``_metadata``
class property**.
Ideally, the attributes in ``_metadata`` will match the
parameters to your ``ExtensionDtype.__init__`` (if any). If any of
the attributes in ``_metadata`` don't implement the standard
``__eq__`` or ``__hash__``, the default implementations here will not
work.
.. versionchanged:: 0.24.0
Added ``_metadata``, ``__hash__``, and changed the default definition
of ``__eq__``.
For interaction with Apache Arrow (pyarrow), a ``__from_arrow__`` method
can be implemented: this method receives a pyarrow Array or ChunkedArray
    as its only argument and is expected to return the appropriate pandas
ExtensionArray for this dtype and the passed values::
class ExtensionDtype:
def __from_arrow__(
self, array: Union[pyarrow.Array, pyarrow.ChunkedArray]
) -> ExtensionArray:
...
This class does not inherit from 'abc.ABCMeta' for performance reasons.
Methods and properties required by the interface raise
``pandas.errors.AbstractMethodError`` and no ``register`` method is
provided for registering virtual subclasses.
"""
_metadata: Tuple[str, ...] = ()
def __str__(self) -> str:
return self.name
def __eq__(self, other: Any) -> bool:
"""
Check whether 'other' is equal to self.
By default, 'other' is considered equal if either
* it's a string matching 'self.name'.
* it's an instance of this type and all of the attributes
in ``self._metadata`` are equal between `self` and `other`.
Parameters
----------
other : Any
Returns
-------
bool
"""
if isinstance(other, str):
try:
other = self.construct_from_string(other)
except TypeError:
return False
if isinstance(other, type(self)):
return all(
getattr(self, attr) == getattr(other, attr) for attr in self._metadata
)
return False
def __hash__(self) -> int:
return hash(tuple(getattr(self, attr) for attr in self._metadata))
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
@property
def na_value(self) -> object:
"""
Default NA value to use for this type.
This is used in e.g. ExtensionArray.take. This should be the
user-facing "boxed" version of the NA value, not the physical NA value
for storage. e.g. for JSONArray, this is an empty dictionary.
"""
return np.nan
@property
def type(self) -> Type:
"""
The scalar type for the array, e.g. ``int``
It's expected ``ExtensionArray[item]`` returns an instance
of ``ExtensionDtype.type`` for scalar ``item``, assuming
that value is valid (not NA). NA values do not need to be
instances of `type`.
"""
raise AbstractMethodError(self)
@property
def kind(self) -> str:
"""
A character code (one of 'biufcmMOSUV'), default 'O'
This should match the NumPy dtype used when the array is
converted to an ndarray, which is probably 'O' for object if
the extension type cannot be represented as a built-in NumPy
type.
See Also
--------
numpy.dtype.kind
"""
return "O"
@property
def name(self) -> str:
"""
A string identifying the data type.
Will be used for display in, e.g. ``Series.dtype``
"""
raise AbstractMethodError(self)
@property
def names(self) -> Optional[List[str]]:
"""
Ordered list of field names, or None if there are no fields.
This is for compatibility with NumPy arrays, and may be removed in the
future.
"""
return None
@classmethod
def construct_array_type(cls) -> Type["ExtensionArray"]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
raise NotImplementedError
@classmethod
def construct_from_string(cls, string: str):
r"""
Construct this type from a string.
This is useful mainly for data types that accept parameters.
For example, a period dtype accepts a frequency parameter that
can be set as ``period[H]`` (where H means hourly frequency).
By default, in the abstract class, just the name of the type is
expected. But subclasses can overwrite this method to accept
parameters.
Parameters
----------
string : str
The name of the type, for example ``category``.
Returns
-------
ExtensionDtype
Instance of the dtype.
Raises
------
TypeError
If a class cannot be constructed from this 'string'.
Examples
--------
For extension dtypes with arguments the following may be an
adequate implementation.
>>> @classmethod
... def construct_from_string(cls, string):
... pattern = re.compile(r"^my_type\[(?P<arg_name>.+)\]$")
... match = pattern.match(string)
... if match:
... return cls(**match.groupdict())
... else:
... raise TypeError(
... f"Cannot construct a '{cls.__name__}' from '{string}'"
... )
"""
if not isinstance(string, str):
raise TypeError(
f"'construct_from_string' expects a string, got {type(string)}"
)
# error: Non-overlapping equality check (left operand type: "str", right
# operand type: "Callable[[ExtensionDtype], str]") [comparison-overlap]
assert isinstance(cls.name, str), (cls, type(cls.name))
if string != cls.name:
raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")
return cls()
@classmethod
def is_dtype(cls, dtype: object) -> bool:
"""
Check if we match 'dtype'.
Parameters
----------
dtype : object
The object to check.
Returns
-------
bool
Notes
-----
The default implementation is True if
1. ``cls.construct_from_string(dtype)`` is an instance
of ``cls``.
2. ``dtype`` is an object and is an instance of ``cls``
3. ``dtype`` has a ``dtype`` attribute, and any of the above
conditions is true for ``dtype.dtype``.
"""
dtype = getattr(dtype, "dtype", dtype)
if isinstance(dtype, (ABCSeries, ABCIndexClass, ABCDataFrame, np.dtype)):
# https://github.com/pandas-dev/pandas/issues/22960
# avoid passing data to `construct_from_string`. This could
# cause a FutureWarning from numpy about failing elementwise
# comparison from, e.g., comparing DataFrame == 'category'.
return False
elif dtype is None:
return False
elif isinstance(dtype, cls):
return True
if isinstance(dtype, str):
try:
return cls.construct_from_string(dtype) is not None
except TypeError:
return False
return False
@property
def _is_numeric(self) -> bool:
"""
Whether columns with this dtype should be considered numeric.
By default ExtensionDtypes are assumed to be non-numeric.
They'll be excluded from operations that exclude non-numeric
columns, like (groupby) reductions, plotting, etc.
"""
return False
@property
def _is_boolean(self) -> bool:
"""
Whether this dtype should be considered boolean.
        By default, ExtensionDtypes are assumed to be non-boolean.
Setting this to True will affect the behavior of several places,
e.g.
* is_bool
* boolean indexing
Returns
-------
bool
"""
return False
def _get_common_dtype(self, dtypes: List[DtypeObj]) -> Optional[DtypeObj]:
"""
Return the common dtype, if one exists.
Used in `find_common_type` implementation. This is for example used
to determine the resulting dtype in a concat operation.
If no common dtype exists, return None (which gives the other dtypes
the chance to determine a common dtype). If all dtypes in the list
return None, then the common dtype will be "object" dtype (this means
it is never needed to return "object" dtype from this method itself).
Parameters
----------
dtypes : list of dtypes
The dtypes for which to determine a common dtype. This is a list
of np.dtype or ExtensionDtype instances.
Returns
-------
Common dtype (np.dtype or ExtensionDtype) or None
"""
if len(set(dtypes)) == 1:
# only itself
return self
else:
return None
def register_extension_dtype(cls: Type[ExtensionDtype]) -> Type[ExtensionDtype]:
"""
Register an ExtensionType with pandas as class decorator.
.. versionadded:: 0.24.0
This enables operations like ``.astype(name)`` for the name
of the ExtensionDtype.
Returns
-------
callable
A class decorator.
Examples
--------
>>> from pandas.api.extensions import register_extension_dtype
>>> from pandas.api.extensions import ExtensionDtype
>>> @register_extension_dtype
... class MyExtensionDtype(ExtensionDtype):
... name = "myextension"
"""
registry.register(cls)
return cls
class Registry:
"""
Registry for dtype inference.
The registry allows one to map a string repr of a extension
dtype to an extension dtype. The string alias can be used in several
places, including
* Series and Index constructors
* :meth:`pandas.array`
* :meth:`pandas.Series.astype`
Multiple extension types can be registered.
These are tried in order.
"""
def __init__(self):
self.dtypes: List[Type[ExtensionDtype]] = []
def register(self, dtype: Type[ExtensionDtype]) -> None:
"""
Parameters
----------
dtype : ExtensionDtype class
"""
if not issubclass(dtype, ExtensionDtype):
raise ValueError("can only register pandas extension dtypes")
self.dtypes.append(dtype)
def find(
self, dtype: Union[Type[ExtensionDtype], str]
) -> Optional[Type[ExtensionDtype]]:
"""
Parameters
----------
dtype : Type[ExtensionDtype] or str
Returns
-------
return the first matching dtype, otherwise return None
"""
if not isinstance(dtype, str):
dtype_type = dtype
if not isinstance(dtype, type):
dtype_type = type(dtype)
if issubclass(dtype_type, ExtensionDtype):
return dtype
return None
for dtype_type in self.dtypes:
try:
return dtype_type.construct_from_string(dtype)
except TypeError:
pass
return None
registry = Registry()
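# Illustrative usage of the module-level registry (editor's sketch; assumes a
# dtype named "myextension" has been registered, as in the
# ``register_extension_dtype`` example above):
#
# >>> registry.find("myextension")     # returns a MyExtensionDtype instance
# >>> registry.find("not-registered")  # no match -> returns None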
| gpl-2.0 | -4,566,161,597,726,716,000 | 28.909297 | 86 | 0.59022 | false |
miguelinux/vbox | src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/UPT/Object/Parser/InfLibraryClassesObject.py | 1 | 10522 | ## @file
# This file is used to define class objects of INF file [LibraryClasses] section.
# It will be consumed by InfParser.
#
# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
'''
InfLibraryClassesObject
'''
from Logger import StringTable as ST
from Logger import ToolError
import Logger.Log as Logger
from Library import GlobalData
from Library.Misc import Sdict
from Object.Parser.InfCommonObject import CurrentLine
from Library.ExpressionValidate import IsValidFeatureFlagExp
from Library.ParserValidate import IsValidLibName
## GetArchModuleType
#
# Get Arch List and ModuleType List
#
def GetArchModuleType(KeyList):
__SupArchList = []
__SupModuleList = []
for (ArchItem, ModuleItem) in KeyList:
#
# Validate Arch
#
if (ArchItem == '' or ArchItem == None):
ArchItem = 'COMMON'
if (ModuleItem == '' or ModuleItem == None):
ModuleItem = 'COMMON'
if ArchItem not in __SupArchList:
__SupArchList.append(ArchItem)
List = ModuleItem.split('|')
for Entry in List:
if Entry not in __SupModuleList:
__SupModuleList.append(Entry)
return (__SupArchList, __SupModuleList)
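# Illustrative example (editor's addition): with blank or missing fields
# defaulting to 'COMMON', a call such as
#   GetArchModuleType([('IA32', 'PEIM'), ('', 'BASE|SEC')])
# returns (['IA32', 'COMMON'], ['PEIM', 'BASE', 'SEC']).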
class InfLibraryClassItem():
def __init__(self, LibName='', FeatureFlagExp='', HelpString=None):
self.LibName = LibName
self.FeatureFlagExp = FeatureFlagExp
self.HelpString = HelpString
self.CurrentLine = CurrentLine()
self.SupArchList = []
self.SupModuleList = []
self.FileGuid = ''
self.Version = ''
def SetLibName(self, LibName):
self.LibName = LibName
def GetLibName(self):
return self.LibName
def SetHelpString(self, HelpString):
self.HelpString = HelpString
def GetHelpString(self):
return self.HelpString
def SetFeatureFlagExp(self, FeatureFlagExp):
self.FeatureFlagExp = FeatureFlagExp
def GetFeatureFlagExp(self):
return self.FeatureFlagExp
def SetSupArchList(self, SupArchList):
self.SupArchList = SupArchList
def GetSupArchList(self):
return self.SupArchList
def SetSupModuleList(self, SupModuleList):
self.SupModuleList = SupModuleList
def GetSupModuleList(self):
return self.SupModuleList
#
# As Build related information
#
def SetFileGuid(self, FileGuid):
self.FileGuid = FileGuid
def GetFileGuid(self):
return self.FileGuid
def SetVersion(self, Version):
self.Version = Version
def GetVersion(self):
return self.Version
## INF LibraryClass Section
#
#
#
class InfLibraryClassObject():
def __init__(self):
self.LibraryClasses = Sdict()
#
# Macro defined in this section should be only used in this section.
#
self.Macros = {}
##SetLibraryClasses
#
#
# @param HelpString: It can be a common comment or contain a recommend
# instance.
#
def SetLibraryClasses(self, LibContent, KeyList=None):
#
# Validate Arch
#
(__SupArchList, __SupModuleList) = GetArchModuleType(KeyList)
for LibItem in LibContent:
LibItemObj = InfLibraryClassItem()
if not GlobalData.gIS_BINARY_INF:
HelpStringObj = LibItem[1]
LibItemObj.CurrentLine.SetFileName(LibItem[2][2])
LibItemObj.CurrentLine.SetLineNo(LibItem[2][1])
LibItemObj.CurrentLine.SetLineString(LibItem[2][0])
LibItem = LibItem[0]
if HelpStringObj != None:
LibItemObj.SetHelpString(HelpStringObj)
if len(LibItem) >= 1:
if LibItem[0].strip() != '':
if IsValidLibName(LibItem[0].strip()):
if LibItem[0].strip() != 'NULL':
LibItemObj.SetLibName(LibItem[0])
else:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_DEFINE_LIB_NAME_INVALID,
File=GlobalData.gINF_MODULE_NAME,
Line=LibItemObj.CurrentLine.GetLineNo(),
ExtraData=LibItemObj.CurrentLine.GetLineString())
else:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_DEFINE_FROMAT_INVALID % (LibItem[0]),
File=GlobalData.gINF_MODULE_NAME,
Line=LibItemObj.CurrentLine.GetLineNo(),
ExtraData=LibItemObj.CurrentLine.GetLineString())
else:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_LIBRARY_SECTION_LIBNAME_MISSING,
File=GlobalData.gINF_MODULE_NAME,
Line=LibItemObj.CurrentLine.GetLineNo(),
ExtraData=LibItemObj.CurrentLine.GetLineString())
if len(LibItem) == 2:
if LibItem[1].strip() == '':
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_FEATURE_FLAG_EXP_MISSING,
File=GlobalData.gINF_MODULE_NAME,
Line=LibItemObj.CurrentLine.GetLineNo(),
ExtraData=LibItemObj.CurrentLine.GetLineString())
#
# Validate FFE
#
FeatureFlagRtv = IsValidFeatureFlagExp(LibItem[1].strip())
if not FeatureFlagRtv[0]:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_FEATURE_FLAG_EXP_SYNTAX_INVLID % (FeatureFlagRtv[1]),
File=GlobalData.gINF_MODULE_NAME,
Line=LibItemObj.CurrentLine.GetLineNo(),
ExtraData=LibItemObj.CurrentLine.GetLineString())
LibItemObj.SetFeatureFlagExp(LibItem[1].strip())
#
# Invalid strings
#
if len(LibItem) < 1 or len(LibItem) > 2:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_LIBRARY_SECTION_CONTENT_ERROR,
File=GlobalData.gINF_MODULE_NAME,
Line=LibItemObj.CurrentLine.GetLineNo(),
ExtraData=LibItemObj.CurrentLine.GetLineString())
LibItemObj.SetSupArchList(__SupArchList)
LibItemObj.SetSupModuleList(__SupModuleList)
#
                # Determine library class duplicates, following the rule below:
#
# A library class keyword must not be duplicated within a
# [LibraryClasses] section. Library class keywords may appear in
# multiple architectural and module type [LibraryClasses] sections.
# A library class keyword listed in an architectural or module type
# [LibraryClasses] section must not be listed in the common
# architectural or module type [LibraryClasses] section.
#
# NOTE: This check will not report error now. But keep code for future enhancement.
#
# for Item in self.LibraryClasses:
# if Item.GetLibName() == LibItemObj.GetLibName():
# ItemSupArchList = Item.GetSupArchList()
# ItemSupModuleList = Item.GetSupModuleList()
# for ItemArch in ItemSupArchList:
# for ItemModule in ItemSupModuleList:
# for LibItemObjArch in __SupArchList:
# for LibItemObjModule in __SupModuleList:
# if ItemArch == LibItemObjArch and LibItemObjModule == ItemModule:
# #
# # ERR_INF_PARSER_ITEM_DUPLICATE
# #
# pass
# if (ItemArch.upper() == 'COMMON' or LibItemObjArch.upper() == 'COMMON') \
# and LibItemObjModule == ItemModule:
# #
# # ERR_INF_PARSER_ITEM_DUPLICATE_COMMON
# #
# pass
else:
#
# Assume the file GUID is well formatted.
#
LibItemObj.SetFileGuid(LibItem[0])
LibItemObj.SetVersion(LibItem[1])
LibItemObj.SetSupArchList(__SupArchList)
if self.LibraryClasses.has_key((LibItemObj)):
LibraryList = self.LibraryClasses[LibItemObj]
LibraryList.append(LibItemObj)
self.LibraryClasses[LibItemObj] = LibraryList
else:
LibraryList = []
LibraryList.append(LibItemObj)
self.LibraryClasses[LibItemObj] = LibraryList
return True
def GetLibraryClasses(self):
return self.LibraryClasses
| gpl-2.0 | -620,824,427,983,713,900 | 40.588933 | 114 | 0.519958 | false |
atztogo/phonopy | example/Si-gruneisen/Si-gruneisen.py | 1 | 2004 | import numpy as np
from phonopy import Phonopy, PhonopyGruneisen
from phonopy.interface.vasp import read_vasp
from phonopy.file_IO import parse_FORCE_SETS
def append_band(bands, q_start, q_end):
band = []
for i in range(51):
band.append(np.array(q_start) +
(np.array(q_end) - np.array(q_start)) / 50 * i)
bands.append(band)
phonons = {}
for vol in ("orig", "plus", "minus"):
unitcell = read_vasp("%s/POSCAR-unitcell" % vol)
phonon = Phonopy(unitcell,
[[2, 0, 0],
[0, 2, 0],
[0, 0, 2]],
primitive_matrix=[[0, 0.5, 0.5],
[0.5, 0, 0.5],
[0.5, 0.5, 0]])
force_sets = parse_FORCE_SETS(filename="%s/FORCE_SETS" % vol)
phonon.set_displacement_dataset(force_sets)
phonon.produce_force_constants()
phonons[vol] = phonon
gruneisen = PhonopyGruneisen(phonons["orig"],
phonons["plus"],
phonons["minus"])
gruneisen.set_mesh([2, 2, 2])
q_points, _, frequencies, _, gammas = gruneisen.get_mesh()
for q, freq, g in zip(q_points, frequencies, gammas):
print(("%5.2f %5.2f %5.2f " + (" %7.3f" * len(freq)))
% ((q[0], q[1], q[2]) + tuple(freq)))
print(((" " * 18) + (" %7.3f" * len(g))) % tuple(g))
bands = []
append_band(bands, [0.5, 0.5, 0.0], [0.0, 0.0, 0.0])
append_band(bands, [0.0, 0.0, 0.0], [0.5, 0.5, 0.5])
gruneisen.set_band_structure(bands)
q_points, distances, frequencies, _, gammas = gruneisen.get_band_structure()
for q_path, d_path, freq_path, g_path in zip(q_points, distances,
frequencies, gammas):
for q, d, freq, g in zip(q_path, d_path, freq_path, g_path):
print(("%10.5f %5.2f %5.2f %5.2f " + (" %7.3f" * len(freq)))
% ((d, q[0], q[1], q[2]) + tuple(freq)))
print(((" " * 30) + (" %7.3f" * len(g))) % tuple(g))
| bsd-3-clause | -4,193,420,619,038,875,000 | 39.08 | 76 | 0.505988 | false |
danpozmanter/monitor_requests | monitor_requests/__init__.py | 1 | 4730 | """Monitor Requests."""
import datetime
import re
import sys
import traceback
import mock
from requests.utils import urlparse
from .data import DataHandler
from .output import OutputHandler
__version__ = '2.1.1'
class Monitor(object):
"""Monitor class to handle patching."""
# Libraries which mock requests by patching it:
# unittest.mock / mock and responses will not show up in tracebacks.
MOCKING_LIBRARIES = ('requests_mock',)
def __init__(self, domains=[], server_port=None, mocking=True):
"""Initialize Monitor, hot patch requests.
:param domains: List. Regex patterns to match against.
        :param server_port: Int. Server mode: with monitor_requests_server
running on the specified port.
:param mocking: Boolean. Mock requests. Default True, set to False
when running in server mode from the test suite/session level.
"""
self.domain_patterns = [
re.compile(domain_pattern) for domain_pattern in domains
]
self.data = DataHandler(server_port=server_port)
# Mocking
self.mocking = mocking
if mocking:
from requests.adapters import HTTPAdapter
self.stock_send = HTTPAdapter.send
self.send_patcher = mock.patch.object(
HTTPAdapter,
'send',
side_effect=self._generate_mocked_send(),
autospec=True
)
self.send_patcher.start()
def _generate_mocked_send(self):
"""Generate mock function for http request.
:return: Mocked send method for HTTPAdapter.
"""
def mock_send(instance, request, *args, **kwargs):
start = datetime.datetime.now()
response = self.stock_send(instance, request, *args, **kwargs)
duration = (datetime.datetime.now() - start).total_seconds()
self._log_request(request.url, request.method, response, duration)
return response
return mock_send
def _check_domain(self, domain):
if not self.domain_patterns:
return True
matched = False
for pattern in self.domain_patterns:
if pattern.search(domain):
matched = True
return matched
def _check_mocked(self, tb_list):
traceback = str(tb_list)
for library in self.MOCKING_LIBRARIES:
if '/{}/'.format(library) in traceback:
return True
return False
def _log_request(self, url, method, response, duration):
"""Log request, store traceback/response data and update counts."""
domain = urlparse(url).netloc
if not self._check_domain(domain):
return
m_init = 'monitor_requests/__init__.py'
tb_list = [f for f in traceback.format_stack() if m_init not in f]
if self._check_mocked(tb_list):
return
self.data.log(url, domain, method, response, tb_list, duration)
def refresh(self):
"""Refresh data from store (server or instance)."""
self.logged_requests, self.analysis = self.data.retrieve()
def report(
self,
urls=False,
tracebacks=False,
responses=False,
debug=False,
inspect_limit=None,
output=sys.stdout,
tear_down=True
):
"""Print out the requests, general analysis, and optionally unique tracebacks.
If debug is True, show urls, tracebacks, and responses.
If tracebacks or responses are set to True, urls will be output.
:param urls: Boolean. Display unique urls requested.
:param tracebacks: Boolean. Display unique tracebacks per url.
:param responses: Boolean. Display response/request info per url.
:param debug: Boolean. Convenience to display tracebacks and responses.
:param inspect_limit: Integer. How deep the stack trace should be.
:param output: Stream. Output destination.
:param tear_down: Undo the hotpatching (True by default), delete data.
"""
tracebacks = tracebacks or debug
responses = responses or debug
self.refresh()
output_handler = OutputHandler(
output, urls, tracebacks, responses, debug, inspect_limit,
self.logged_requests, self.analysis
)
output_handler.write()
if tear_down:
self.stop(delete=True)
def stop(self, delete=False):
"""Undo the hotpatching.
:param delete: Boolean. Delete data (only with server mode).
"""
if delete:
self.data.delete()
if not self.mocking:
return
self.send_patcher.stop()
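# Typical usage (editor's sketch based on the public methods above; the
# domain pattern is only an example):
#
#   monitor = Monitor(domains=[r'example\.com'])
#   ...  # exercise code that issues requests via the requests library
#   monitor.report(urls=True, tracebacks=True)  # prints and tears down the patch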
| bsd-3-clause | 4,755,888,305,894,752,000 | 34.56391 | 86 | 0.611628 | false |
gameduell/pysupplies | tests/test_descr.py | 1 | 1053 | from supplies.annotate import attr, delay, refer
__author__ = 'dwae'
class Test:
def __init__(self, cnt=0):
self.cnt = cnt
@attr
def foo(self):
cnt = self.cnt
self.cnt += 1
return cnt
@delay
def bar(self):
cnt = self.cnt
self.cnt += 1
return cnt
@refer
def baz(self):
cnt = self.cnt
self.cnt += 1
return cnt
def test_attr():
assert isinstance(Test.foo, attr)
t = Test()
assert t.foo == 0
assert t.foo == 1
t.foo = 42
assert t.foo == 42
assert t.foo == 42
assert t.bar == 2
assert t.bar == 2
del t.foo
assert t.foo == 3
assert t.foo == 4
assert t.bar == 2
del t.bar
assert t.bar == 5
assert t.bar == 5
assert t.foo == 6
assert t.baz == 7
assert t.baz == 7
import pickle
t_ = pickle.loads(pickle.dumps(t))
assert t.foo == 8
assert t_.foo == 8
assert t.bar == 5
assert t_.bar == 5
assert t.baz == 7
assert t_.baz == 9
| mit | -216,468,087,574,851,420 | 14.042857 | 48 | 0.503324 | false |
moskytw/mosql | setup.py | 2 | 1363 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import mosql
with open('README.rst', 'rb') as f:
README = f.read()
# We want README to be a str, no matter it is byte or text. 'rb' reads bytes,
# so we need extra conversion on Python 3. On Python 2 bytes is synonym to str,
# and we're good.
if not isinstance(README, str):
README = README.decode('utf-8')
setup(
name='mosql',
version=mosql.__version__,
description='Build SQL with native Python data structure smoothly.',
long_description=README,
author='Mosky',
author_email='[email protected]',
url='http://mosql.mosky.tw/',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Topic :: Database :: Front-Ends',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages=find_packages(exclude=['oldtests']),
zip_safe=True,
)
| mit | -8,485,311,383,154,085,000 | 29.977273 | 79 | 0.623624 | false |
gbrammer/grizli | grizli/fake_image.py | 1 | 17610 | """
Tools for generating *very* basic fake images for HST/JWST/Roman simulations
"""
import os
import numpy as np
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
from . import GRIZLI_PATH
def rotate_CD_matrix(cd, pa_aper):
"""Rotate CD matrix
Parameters
----------
cd : (2,2) array
CD matrix
pa_aper : float
Position angle, in degrees E from N, of y axis of the detector
Returns
-------
cd_rot : (2,2) array
Rotated CD matrix
Comments
--------
`astropy.wcs.WCS.rotateCD` doesn't work for non-square pixels in that it
doesn't preserve the pixel scale! The bug seems to come from the fact
that `rotateCD` assumes a transposed version of its own CD matrix.
For example:
>>> import astropy.wcs as pywcs
>>>
>>> ## Nominal rectangular WFC3/IR pixel
>>> cd_wfc3 = np.array([[ 2.35945978e-05, 2.62448998e-05],
>>> [ 2.93050803e-05, -2.09858771e-05]])
>>>
>>> ## Square pixel
>>> cd_square = np.array([[0.1/3600., 0], [0, 0.1/3600.]])
>>>
>>> for cd, label in zip([cd_wfc3, cd_square], ['WFC3/IR', 'Square']):
>>> wcs = pywcs.WCS()
>>> wcs.wcs.cd = cd
>>> wcs.rotateCD(45.)
>>> print '%s pixel: pre=%s, rot=%s' %(label,
>>> np.sqrt((cd**2).sum(axis=0))*3600,
>>> np.sqrt((wcs.wcs.cd**2).sum(axis=0))*3600)
WFC3/IR pixel: pre=[ 0.1354 0.121 ], rot=[ 0.1282 0.1286]
Square pixel: pre=[ 0.1 0.1], rot=[ 0.1 0.1]
"""
rad = np.deg2rad(-pa_aper)
mat = np.zeros((2, 2))
mat[0, :] = np.array([np.cos(rad), -np.sin(rad)])
mat[1, :] = np.array([np.sin(rad), np.cos(rad)])
cd_rot = np.dot(mat, cd)
return cd_rot
def niriss_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589,
filter='F150W', grism='GR150R'):
"""Make JWST/NIRISS image header
Parameters
----------
ra, dec : float, float
Coordinates of the center of the image
pa_aper : float
Position angle of the y-axis of the detector
filter : str
Blocking filter to use.
grism : str
Grism to use
Returns
--------
h : `astropy.io.fits.Header`
FITS header with appropriate keywords
wcs : `astropy.wcs.WCS`
WCS specification (computed from keywords in ``h``).
Comments
--------
NIRISS: 0.065"/pix, requires filter & grism specification
"""
naxis = 2048, 2048
crpix = 1024, 1024
cd = np.array([[-0.0658, 0], [0, 0.0654]])/3600.
cd_rot = rotate_CD_matrix(cd, pa_aper)
h = pyfits.Header()
h['CRVAL1'] = ra
h['CRVAL2'] = dec
h['WCSAXES'] = 2
h['CTYPE1'] = 'RA---TAN'
h['CTYPE2'] = 'DEC--TAN'
for i in range(2):
h['NAXIS%d' % (i+1)] = naxis[i]
h['CRPIX%d' % (i+1)] = crpix[i]
h['CDELT%d' % (i+1)] = 1.0
for j in range(2):
h['CD%d_%d' % (i+1, j+1)] = cd_rot[i, j]
# Backgrounds
# http://www.stsci.edu/jwst/instruments/niriss/software-tools/wfss-simulations/niriss-wfss-cookbook.pdf
bg = {'F090W': 0.50, 'F115W': 0.47, 'F140M': 0.23, 'F150W': 0.48, 'F158M': 0.25, 'F200W': 0.44}
h['INSTRUME'] = 'NIRISS'
h['TELESCOP'] = 'JWST'
h['DETECTOR'] = 'NIS'
if grism == 'GR150R':
h['FILTER'] = 'GR150R', 'Spectral trace along Y'
else:
h['FILTER'] = 'GR150C', 'Spectral trace along X'
h['PUPIL'] = filter
h['BACKGR'] = bg[filter], 'Total, e/s'
h['READN'] = 6, 'Rough, per pixel per 1 ks exposure' # e/pix/per
h['PHOTFLAM'] = 1.
h['PHOTPLAM'] = 1.
wcs = pywcs.WCS(h)
h['EXTVER'] = 1
return h, wcs
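# Illustrative usage (a sketch added for clarity, not part of the original
# module): pair the NIRISS header with the noise-only image generator defined
# further down in this file.  The pointing values below are arbitrary examples.
#
# >>> h, wcs = niriss_header(ra=53.16, dec=-27.78, pa_aper=90.,
# ...                        filter='F150W', grism='GR150R')
# >>> hdu = make_fake_image(h, output=None, exptime=1.e4, nexp=10)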
def nircam_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589,
filter='F444W', grism='GRISMR', module='A'):
"""Make JWST/NIRCAM image header
Parameters
----------
ra, dec : float, float
Coordinates of the center of the image
pa_aper : float
Position angle of the y-axis of the detector
filter : str
Blocking filter to use.
grism : str
Grism to use ('GRISMR', 'GRISMC')
module : str
Instrument module ('A','B')
Returns
--------
h : `astropy.io.fits.Header`
FITS header with appropriate keywords
wcs : `astropy.wcs.WCS`
WCS specification (computed from keywords in ``h``).
Comments
--------
NIRCAM, 0.0648"/pix, requires filter specification
"""
naxis = 2048, 2048
crpix = 1024, 1024
cd = np.array([[-0.0648, 0], [0, 0.0648]])/3600.
cd_rot = rotate_CD_matrix(cd, pa_aper)
h = pyfits.Header()
h['CRVAL1'] = ra
h['CRVAL2'] = dec
h['WCSAXES'] = 2
h['CTYPE1'] = 'RA---TAN'
h['CTYPE2'] = 'DEC--TAN'
for i in range(2):
h['NAXIS%d' % (i+1)] = naxis[i]
h['CRPIX%d' % (i+1)] = crpix[i]
h['CDELT%d' % (i+1)] = 1.0
for j in range(2):
h['CD%d_%d' % (i+1, j+1)] = cd_rot[i, j]
# Backgrounds
# http://www.stsci.edu/jwst/instruments/niriss/software-tools/wfss-simulations/niriss-wfss-cookbook.pdf
bg = {'F277W': 0.30, 'F356W': 0.90, 'F444W': 3.00, 'F322W2': 1.25,
'F430M': 0.65, 'F460M': 0.86, 'F410M': 0.5} # F410M is a hack, no number
h['BACKGR'] = bg[filter], 'Total, e/s'
h['INSTRUME'] = 'NIRCAM'
h['TELESCOP'] = 'JWST'
h['DETECTOR'] = f'NRC{module}LONG'
h['MODULE'] = module
    h['CHANNEL'] = 'LONG'
if grism == 'GRISMR':
h['PUPIL'] = 'GRISMR', 'Spectral trace along X'
else:
h['PUPIL'] = 'GRISMC', 'Spectral trace along Y'
h['FILTER'] = filter
h['READN'] = 9, 'Rough, per pixel per 1 ks exposure' # e/pix/per
h['PHOTFLAM'] = 1.
h['PHOTPLAM'] = 1.
wcs = pywcs.WCS(h)
h['EXTVER'] = 1
return h, wcs
def wfc3ir_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589,
flt='ibhj34h6q_flt.fits', filter='G141'):
"""Make HST/WFC3-IR image header
Parameters
----------
ra, dec : float, float
Coordinates of the center of the image
pa_aper : float
Position angle of the y-axis of the detector
flt : str
Filename of a WFC3/IR FLT file that will be used to provide the
SIP geometric distortion keywords.
filter : str
Grism/filter to use.
Returns
--------
h : `astropy.io.fits.Header`
FITS header with appropriate keywords
wcs : `astropy.wcs.WCS`
WCS specification (computed from keywords in ``h``).
Comments
--------
WFC3 IR, requires reference FLT file for the SIP header
"""
import numpy as np
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
im = pyfits.open(flt)
wcs = pywcs.WCS(im[1].header, relax=True)
thet0 = np.arctan2(im[1].header['CD2_2'], im[1].header['CD2_1'])/np.pi*180
wcs.wcs.crval = np.array([ra, dec])
# Rotate the CD matrix
theta = im[1].header['PA_APER'] - pa_aper
cd_rot = rotate_CD_matrix(wcs.wcs.cd, theta)
wcs.wcs.cd = cd_rot
h = wcs.to_header(relax=True)
for i in [1, 2]:
for j in [1, 2]:
h['CD%d_%d' % (i, j)] = h['PC%d_%d' % (i, j)]
h.remove('PC%d_%d' % (i, j))
h['BACKGR'] = 1.
h['FILTER'] = filter
h['INSTRUME'] = 'WFC3'
h['READN'] = im[0].header['READNSEA']
h['NAXIS1'] = h['NAXIS2'] = 1014
h['DETECTOR'] = 'IR'
h['PHOTFLAM'] = 1.
h['PHOTPLAM'] = 1.
return h, wcs
def wfirst_header(**kwargs):
"""
Alias to `~grizli.fake_image.roman_header`
"""
res = roman_header(**kwargs)
return res
def roman_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, naxis=(4096, 4096), **kwargs):
"""
Make WFIRST/Roman WFI header
Parameters
----------
ra, dec : float, float
Coordinates of the center of the image
pa_aper : float
Position angle of the y-axis of the detector
filter : str
Blocking filter to use.
naxis : (int,int)
Image dimensions
Returns
--------
h : `astropy.io.fits.Header`
FITS header with appropriate keywords
wcs : `astropy.wcs.WCS`
WCS specification (computed from keywords in `h`).
Comments
--------
WFIRST/Roman G150 Grism
Current config file has no field dependence, so field size can be
anything you want in ``naxis``.
"""
#naxis = 2048, 2048
crpix = naxis[0]/2., naxis[0]/2.
cd = np.array([[-0.11, 0], [0, 0.11]])/3600.
cd_rot = rotate_CD_matrix(cd, pa_aper)
h = pyfits.Header()
h['CRVAL1'] = ra
h['CRVAL2'] = dec
h['WCSAXES'] = 2
h['CTYPE1'] = 'RA---TAN'
h['CTYPE2'] = 'DEC--TAN'
for i in range(2):
h['NAXIS%d' % (i+1)] = naxis[i]
h['CRPIX%d' % (i+1)] = crpix[i]
h['CDELT%d' % (i+1)] = 1.0
for j in range(2):
h['CD%d_%d' % (i+1, j+1)] = cd_rot[i, j]
#h['BACKGR'] = 0.17+0.49, 'Total, e/s SDT Report A-1'
h['BACKGR'] = 1.12, 'Pandeia minzodi/benchmark 20210528'
h['FILTER'] = 'G150', 'WFIRST/Roman grism'
h['INSTRUME'] = 'WFI'
#h['READN'] = 17, 'SDT report Table 3-3' # e/pix/per
# https://roman.gsfc.nasa.gov/science/RRI/Roman_WFI_Reference_Information_20210125.pdf
h['READN'] = 16., 'WFI Reference 20210125' # e/pix/per
h['PHOTFLAM'] = 1.
h['PHOTPLAM'] = 1.
wcs = pywcs.WCS(h)
h['EXTVER'] = 1
return h, wcs
def roman_hls_image(exptime=661.932, nexp=13, background=1.12, output='roman.fits', **kwargs):
"""
Make a simple FITS file for a Roman High Latitude Survey Image
Parameters
----------
exptime, nexp, background : float, int, float
Defaults specified to roughly match the variance in the `pandeia`
2D simulation result (ignoring Poisson from the source)
kwargs : dict
Positional keywords passed to `~grizli.fake_image.roman_header`
Returns
-------
hdu : `astropy.io.fits.HDUList`
HDU with SCI, ERR, DQ extensions
wcs : `astropy.wcs.WCS`
WCS
"""
header, wcs = roman_header(**kwargs)
hdu = make_fake_image(header, output=output, background=background,
exptime=exptime, nexp=nexp)
return hdu, wcs
def make_fake_image(header, output='direct.fits', background=None, exptime=1.e4, nexp=10, obsdate=None, seed=None):
"""
Use the header from NIRISS, WFC3/IR or WFIRST/Roman and make an ``FLT``-like image that `grizli` can read as a reference.
Parameters
----------
header : `astropy.io.fits.Header`
Header created by one of the generating functions, such as
`~grizli.fake_image.niriss_header`.
output : str
Filename of the output FITS file. Will have extensions 'SCI', 'ERR',
and 'DQ'. The 'ERR' extension is populated with a read-noise +
background error model using
>>> var = nexp*header['READN'] + background*exptime
The 'SCI' extension is filled with gaussian deviates with standard
deviation `sqrt(var)`.
The 'DQ' extension is filled with (int) zeros.
background : None or float
Background value to use for sky noise. If None, then read from
`header['BACKGR']`.
exptime : float
Exposure time to use for background sky noise.
obsdate : `~astropy.time.Time`
Date of observation. If None, then use `astropy.time.Time.now`
nexp : int
Number of exposures to use for read noise.
seed : int
If specified, use as `numpy.random.seed`
Returns
-------
hdu : `astropy.io.fits.HDUList`
Image HDU (also saved to ``output`` FITS file)
"""
import astropy.time
import astropy.units as u
hdu = pyfits.HDUList()
header['EXPTIME'] = exptime
header['NEXP'] = nexp
header['BUNIT'] = 'ELECTRONS/S'
hdu.append(pyfits.PrimaryHDU(header=header))
naxis = (header['NAXIS1'], header['NAXIS2'])
if background is None:
background = header['BACKGR']
header['BACKGR'] = background
if obsdate is None:
obsdate = astropy.time.Time.now()
header['DATE-OBS'] = obsdate.iso.split()[0]
header['TIME-OBS'] = obsdate.iso.split()[1]
header['EXPSTART'] = obsdate.mjd
header['EXPEND'] = (obsdate + exptime*u.second).mjd
# Simple error model of read noise and sky background
var = nexp*header['READN'] + background*exptime
# electrons / s
rms = np.sqrt(var)/exptime
header['CALCRMS'] = rms, 'Variance used for random noise'
for name, dtype in zip(['SCI', 'ERR', 'DQ'],
[np.float32, np.float32, np.int32]):
hdu.append(pyfits.ImageHDU(header=header,
data=np.zeros(np.array(naxis).T,
dtype=dtype), name=name))
hdu['ERR'].data += rms
if seed is not None:
np.random.seed(seed)
hdu['ERR'].header['SEED'] = seed, 'Random seed'
hdu['SCI'].data = np.random.normal(size=np.array(naxis).T)*rms
if output is not None:
hdu.writeto(output, overwrite=True, output_verify='fix')
return hdu
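# Minimal end-to-end sketch (illustrative only, not from the original source):
# following the error model in the docstring above, the ERR extension holds
# sqrt(nexp*READN + background*exptime)/exptime and SCI holds gaussian noise
# drawn at that RMS.
#
# >>> h, wcs = roman_header(ra=53.16, dec=-27.78, pa_aper=128.6)
# >>> hdu = make_fake_image(h, output=None, exptime=1.e4, nexp=10, seed=1)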
def make_roman_config(save_to_conf=True):
"""
Use `pandeia` to calculate a Roman/G150 configuration file and sensitivity curve for `grizli`
https://github.com/spacetelescope/roman_tools/blob/develop/notebooks/Pandeia-Roman.ipynb
Parameters
----------
save_to_conf : bool
Write sensitivity and configuration files to ``[GRIZLI_PATH]/CONF``
Returns
-------
sens : `~astropy.table.Table`
Sensitivity table
conf : str
Grism configuration
"""
from astropy.table import Table
import astropy.time
import pandeia.engine
from pandeia.engine.perform_calculation import perform_calculation
from pandeia.engine.calc_utils import (get_telescope_config,
get_instrument_config,
build_default_calc,
build_default_source)
from pandeia.engine.io_utils import read_json, write_json
calc = build_default_calc('roman','wfi','spectroscopy')
# HLS simulation
calc['configuration']['instrument']['filter'] = None
calc['configuration']['instrument']['aperture'] = "any"
calc['configuration']['instrument']['disperser'] = "g150"
calc['configuration']['detector']['ngroup'] = 13 # groups per integration
calc['configuration']['detector']['nint'] = 1 # integrations per exposure
calc['configuration']['detector']['nexp'] = 1 # exposures
calc['configuration']['detector']['readmode'] = "medium8"
calc['configuration']['detector']['subarray'] = "1024x1024"
calc['scene'][0]['spectrum']['normalization']['norm_fluxunit'] = 'flam'
input_flux = 1.e-19
calc['scene'][0]['spectrum']['normalization']['norm_flux'] = input_flux
calc['scene'][0]['spectrum']['sed']['unit'] = 'flam'
# x,y location to extract, in arcsec
calc['strategy']['target_xy'] = [0.0,0.0]
# radius of extraction aperture, in arcsec
calc['strategy']['aperture_size'] = 0.6
# inner and outer radii of background subtraction annulus, in arcsec
calc['strategy']['sky_annulus'] = [0.8,1.2]
results = perform_calculation(calc)
sens = Table()
wave = np.arange(9000, 1.95e4, 2.)
sens_value = results['1d']['extracted_flux'][1]/input_flux
sens_value /= np.gradient(results['1d']['extracted_flux'][0]*1.e4)
sens['WAVELENGTH'] = wave
sens['SENSITIVITY'] = np.interp(wave,
results['1d']['extracted_flux'][0]*1.e4,
sens_value,
left=0, right=0)
sens['ERROR'] = 0.01*sens_value.max()
sens.meta['pandeia'] = pandeia.engine.__version__
sens.meta['created'] = astropy.time.Time.now().iso
sens_file = f'Roman.G150.v{pandeia.engine.__version__}.sens.fits'
if save_to_conf:
if isinstance(save_to_conf, str):
path = save_to_conf
else:
path = os.path.join(GRIZLI_PATH, 'CONF')
print('Sensitivity file: ', os.path.join(path, sens_file))
sens.write(os.path.join(path, sens_file), overwrite=True)
npix = len(results['1d']['extracted_flux'][0])
pad = 20
i0 = npix//2
w0 = results['1d']['extracted_flux'][0][i0]*1.e4
dlam = np.diff(results['1d']['extracted_flux'][0])[i0]*1.e4
config = f"""INSTRUMENT WFI
GFILTER G150
# First order (BEAM A)
# BEAMA and DLDP assume spectrum is centered on the imaging position
BEAMA {-npix//2-pad} {npix//2+pad+1}
MMAG_EXTRACT_A 30
MMAG_MARK_A 30
#
# Trace description
# (flat along rows)
DYDX_ORDER_A 0
DYDX_A_0 0
#
# X and Y Offsets
#
XOFF_A 0.0
YOFF_A 0.0
#
# Dispersion solution
#
DISP_ORDER_A 1
DLDP_A_0 {w0}
DLDP_A_1 {dlam}
SENSITIVITY_A {sens_file}
"""
if save_to_conf:
print('Config file: ', os.path.join(path, 'Roman.G150.conf'))
with open(os.path.join(path, 'Roman.G150.conf'), 'w') as fp:
fp.write(config)
return sens, config | mit | -2,853,333,402,354,187,300 | 26.99841 | 125 | 0.559966 | false |
spcui/virt-test | virttest/libvirt_xml/nodedev_xml.py | 1 | 10435 | """
Module simplifying manipulation of XML described at
http://libvirt.org/formatnode.html
"""
import os
from virttest.libvirt_xml import base, xcepts, accessors
class CAPXML(base.LibvirtXMLBase):
"""
The base class for capability.
"""
def get_sysfs_sub_path(self):
"""
        Return the sub path that stores the info of the capability.
"""
raise NotImplementedError('get_sysfs_sub_path is not implemented.')
@staticmethod
def get_key2filename_dict():
"""
Return a dict which contain the key and the name
of info file.
"""
        raise NotImplementedError('get_key2filename_dict is not implemented.')
def get_key2value_dict(self):
"""
        Return a dict which contains the key and the value
in capability xml.
"""
raise NotImplementedError('get_key2value_dict is not implemented.')
class SystemXML(CAPXML):
"""
    class for capability whose type is system.
"""
__slots__ = CAPXML.__slots__ + ('product', 'hdware_vendor',
'hdware_serial', 'hdware_uuid',
                                    'firmware_vendor', 'firmversion',
'firm_release_date')
__sysfs_sub_path__ = 'dmi/id/'
__key2filename_dict__ = {'product': 'product_name',
'hdware_vendor': 'sys_vendor',
'hdware_serial': 'product_serial',
'hdware_uuid': 'product_uuid',
'firmware_vendor': 'bios_vendor',
'firmversion': 'bios_version',
'firm_release_date': 'bios_date'}
@staticmethod
def get_key2filename_dict():
"""
Return a dict which contain the key and the name
of info file for System node device.
"""
return SystemXML.__key2filename_dict__
def get_key2value_dict(self):
"""
return the dict key2value
key: the key in xml need to check.
value: value in xml for this key.
"""
key2value_dict = {}
for key in SystemXML.__key2filename_dict__:
key2value_dict[key] = self[key]
return key2value_dict
@staticmethod
def make_sysfs_sub_path():
"""
return __sysfs_sub_path__ immediately.
"""
return SystemXML.__sysfs_sub_path__
def get_sysfs_sub_path(self):
"""
Return the sysfs_subdir.
"""
return self.make_sysfs_sub_path()
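    # Illustrative note (not in the original file): for the 'system' capability
    # the files live under /sys/class/dmi/id/, so e.g. the 'product' key above
    # is read from /sys/class/dmi/id/product_name.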
class PCIXML(CAPXML):
"""
class for capability whose type is pci.
"""
__slots__ = CAPXML.__slots__ + ('domain', 'bus', 'slot',
'function', 'product_id',
'vendor_id')
def __init__(self, virsh_instance=base.virsh):
accessors.XMLElementInt('domain', self, parent_xpath='/',
tag_name='domain')
accessors.XMLElementInt('bus', self, parent_xpath='/',
tag_name='bus')
accessors.XMLElementInt('slot', self, parent_xpath='/',
tag_name='slot')
accessors.XMLElementInt('function', self, parent_xpath='/',
tag_name='function')
accessors.XMLAttribute('product_id', self, parent_xpath='/',
tag_name='product', attribute='id')
accessors.XMLAttribute('vendor_id', self, parent_xpath='/',
tag_name='vendor', attribute='id')
super(PCIXML, self).__init__(virsh_instance=virsh_instance)
self.xml = (' <capability type=\'pci\'></capability>')
@staticmethod
def make_sysfs_sub_path(domain, bus, slot, function):
"""
Make sysfs_sub_path for pci by domain,bus,slot and function.
"""
pci_bus_path = ("%04x:%02x" % (domain, bus))
pci_device_path = ("%04x:%02x:%02x.%01x" % (domain, bus,
slot, function))
pci_sysfs_sub_path = ("pci_bus/%s/device/%s"
% (pci_bus_path, pci_device_path))
return pci_sysfs_sub_path
def get_sysfs_sub_path(self):
"""
        Return the sysfs subdir for this PCI device.
Example:
pci_bus/0000\:00/device/0000\:00\:00.0/
"""
domain = self.domain
bus = self.bus
slot = self.slot
function = self.function
return PCIXML.make_sysfs_sub_path(domain, bus, slot, function)
__key2filename_dict__ = {'product_id': 'device',
'vendor_id': 'vendor'}
@staticmethod
def get_key2filename_dict():
"""
return the dict key2filename.
key: the keys in pcixml need to check.
filename: the name of file stored info for this key.
"""
return PCIXML.__key2filename_dict__
def get_key2value_dict(self):
"""
return the dict key2value
key: the key in xml need to check.
value: value in xml for this key.
"""
key2value_dict = {}
for key in PCIXML.__key2filename_dict__:
key2value_dict[key] = self[key]
return key2value_dict
class NodedevXMLBase(base.LibvirtXMLBase):
"""
Accessor methods for NodedevXML class.
"""
__slots__ = base.LibvirtXMLBase.__slots__ + ('name', 'parent',
'cap_type', 'cap',
'sysfs_main_path')
__schema_name__ = "nodedev"
__sysfs_dir__ = "/sys/class"
__type2class_dict__ = {'system': 'SystemXML',
'pci': 'PCIXML',
'usb_device': 'USBDeviceXML',
'usb': 'USBXML',
'net': 'NetXML',
'scsi_host': 'SCSIHostXML',
'scsi': 'SCSIXML',
'storage': 'StorageXML'}
def __init__(self, virsh_instance=base.virsh):
accessors.XMLElementText('name', self, parent_xpath='/',
tag_name='name')
accessors.XMLElementText('parent', self, parent_xpath='/',
tag_name='parent')
accessors.XMLAttribute('cap_type', self, parent_xpath='/',
tag_name='capability', attribute='type')
super(NodedevXMLBase, self).__init__(virsh_instance=virsh_instance)
self.xml = '<device></device>'
@staticmethod
def get_cap_by_type(cap_type):
"""
Init a cap class for a specific type.
:param cap_type: the type of capability.
        :return: instance of the capability.
"""
cap_class_name = NodedevXMLBase.__type2class_dict__[cap_type]
cap_class = globals()[cap_class_name]
capxml = cap_class()
return capxml
def get_cap(self):
"""
Return the capability of nodedev_xml.
"""
try:
cap_root = self.xmltreefile.reroot('/capability')
except KeyError, detail:
raise xcepts.LibvirtXMLError(detail)
capxml = NodedevXMLBase.get_cap_by_type(self.cap_type)
capxml.xmltreefile = cap_root
return capxml
def set_cap(self, value):
"""
Set the capability by value.
"""
if not issubclass(type(value), CAPXML):
raise xcepts.LibvirtXMLError("value must be a CAPXML or subclass")
# remove any existing capability block
self.del_cap()
root = self.xmltreefile.getroot()
root.append(value.getroot())
self.xmltreefile.write()
def del_cap(self):
"""
Delete the capability from nodedev xml.
"""
element = self.xmltreefile.find('/capability')
if element is not None:
            self.xmltreefile.remove(element)
self.xmltreefile.write()
def get_sysfs_sub_path(self):
"""
Get the sub sysfs path of the capability.
"""
capxml = self.cap
sysfs_sub_path = capxml.get_sysfs_sub_path()
return sysfs_sub_path
def get_sysfs_path(self):
"""
Get the abs path of the capability info.
"""
sysfs_main_path = self.__sysfs_dir__
sysfs_sub_path = self.get_sysfs_sub_path()
sysfs_path = os.path.join(sysfs_main_path, sysfs_sub_path)
return sysfs_path
class NodedevXML(NodedevXMLBase):
"""
class for Node device XML.
"""
__slots__ = NodedevXMLBase.__slots__
def __init__(self, virsh_instance=base.virsh):
"""
Initialize new instance.
"""
super(NodedevXML, self).__init__(virsh_instance=virsh_instance)
self.xml = ('<device></device>')
@staticmethod
def new_from_dumpxml(dev_name, virsh_instance=base.virsh):
"""
Get a instance of NodedevXML by dumpxml dev_name.
"""
nodedevxml = NodedevXML(virsh_instance=virsh_instance)
dumpxml_result = virsh_instance.nodedev_dumpxml(dev_name)
if dumpxml_result.exit_status:
raise xcepts.LibvirtXMLError("Nodedev_dumpxml %s failed.\n"
"Error: %s."
% (dev_name, dumpxml_result.stderr))
nodedevxml.xml = dumpxml_result.stdout
return nodedevxml
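    # Illustrative usage (a sketch, not part of the original file); the device
    # name is hypothetical and must be one reported by `virsh nodedev-list`:
    #
    # >>> devxml = NodedevXML.new_from_dumpxml('pci_0000_00_19_0')
    # >>> devxml.get_key2value_dict()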
def get_key2value_dict(self):
"""
Get the dict which contain key and value in xml.
key: keys in nodedev xml need to check.
value: value in xml for the key.
"""
capxml = self.cap
key2value_dict = capxml.get_key2value_dict()
return key2value_dict
def get_key2syspath_dict(self):
"""
Get the dict which contains key and path.
key: keys in nodedev xml need to check.
syspath: the abs path for the file stores info for the key.
"""
sysfs_path = self.get_sysfs_path()
capxml = self.cap
key2filename_dict = capxml.__class__.get_key2filename_dict()
key2syspath_dict = {}
for key in key2filename_dict:
filename = key2filename_dict[key]
abs_syspath = os.path.join(sysfs_path, filename)
key2syspath_dict[key] = abs_syspath
return key2syspath_dict
| gpl-2.0 | -8,236,994,242,723,432,000 | 30.717325 | 79 | 0.530331 | false |
galactose/wviews | program/program.py | 1 | 13353 | """
program.py: Program structures for worldview solving
Copyright (C) 2014 Michael Kelly
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from itertools import combinations
from collections import defaultdict
from atom import EpistemicModality, Atom, EpistemicAtom, NegationAsFailureAtom
from rule import IndexedRule
class LogicProgram(object):
def __init__(self, file_handle):
self._label_id = 1
self._label_set = set()
self._label_cache = {}
self._label_id_lookup = {}
self.label_to_epistemic_atom_id = defaultdict(list)
self._atom_id = 1
self._atom_set = set()
self._atom_cache = {}
self._atom_id_lookup = {}
self.epistemic_atom_cache = {}
self.program = []
self.epistemic_atom_id_to_valuation_index_map = None
def get_program(self):
"""
Returns the program as a list of strings that can be output to
file.
"""
return [str(rule) for rule in self.program]
def index_atoms(self, program_handle):
"""
        index_atoms: indexes atoms in program rules so that
we can simplify rules and build atom and epistemic atom lookup tables,
to speed up the process of applying epistemic valuations and determine
if a coherent world view is possible from a disjunctive logic program.
Returns:
- atom_index_cache (dict) -
- epistemic_atom_index_cache (dict) -
- indexed_program (set) -
"""
for rule in program_handle: # loop over new rules
new_rule = IndexedRule(head=set(), tail=set(),
atom_dict=self._atom_cache)
if rule.head:
for atom_token in rule.head: # check rule head
atom = self.get_atom_information(atom_token)
new_rule.head.add(atom.atom_id)
if rule.tail:
for atom_token in rule.tail: # check rule body
atom = self.get_atom_information(atom_token)
new_rule.tail.add(atom.atom_id)
self.program.append(new_rule)
# here we map each epistemic id to a number in an order so that when we
# apply a valuation it's consistent and unique so we're not testing the
# same set of valuations twice
self.epistemic_atom_id_to_valuation_index_map = {
epistemic_id: valuation_index
for valuation_index, epistemic_id in
enumerate(self.epistemic_atom_cache.keys())
}
def get_or_create_atom(self, atom):
"""
Given a newly created logical atom, check to see if one exists of the
given type. If it doesn't assign it a unique ID and add it to the atoms
that exist for the program. If it is an epistemic atom add it to the
epistemic atom cache. This allows fast access to atom information.
Also identify unique labels and index them here.
Arguments:
* atom (Atom/EpistemicAtom/NegationAsFailureAtom)
an object representing an atom in an epistemic logic program
"""
if str(atom) in self._atom_set:
return False
if atom.label not in self._label_id_lookup:
atom.label_id = self._label_id
self._label_cache[self._label_id] = atom.label
self._label_id_lookup[atom.label] = self._label_id
self._label_id += 1
else:
atom.label_id = self._label_id_lookup[atom.label]
atom.atom_id = self._atom_id
self._atom_set.add(str(atom))
self._atom_id_lookup[str(atom)] = atom.atom_id
self._atom_cache[atom.atom_id] = atom
self._atom_id += 1
if isinstance(atom, EpistemicAtom):
self.epistemic_atom_cache[atom.atom_id] = atom
self.label_to_epistemic_atom_id[atom.label].append(atom.atom_id)
return True
def get_atom_information(self, atom_token):
"""
Given a logical atom represented as a string of characters, determine
if it is an epistemic atom, if the atom has strong negation, what
kind of epistemic modality is used and if it is negated, and whether
or not negation as failure is used. Finally return an Atom instance
which holds all this information and assign it an atom ID and if
applicable an epistemic ID.
Arguments:
* atom_token (str) - a logical atom represented as a string.
"""
atom_negation = False
epistemic_negation = False
negation_as_failure = False
# it's an epistemic atom
if atom_token.find('K') != -1 or atom_token.find('M') != -1:
modality = EpistemicModality.BELIEVE
epistemic_modality_index = atom_token.find('M')
label = atom_token[1:]
if epistemic_modality_index == -1:
epistemic_modality_index = atom_token.find('K')
modality = EpistemicModality.KNOW
if epistemic_modality_index != 0 and \
atom_token[epistemic_modality_index - 1] in ('-', '~'):
epistemic_negation = True
label = atom_token[epistemic_modality_index + 1:]
if atom_token[epistemic_modality_index + 1] in ('-', '~'):
atom_negation = True
label = atom_token[epistemic_modality_index + 2:]
atom = EpistemicAtom(label, modality, atom_negation=atom_negation,
epistemic_negation=epistemic_negation)
else:
label = atom_token
if atom_token[0] in ('-', '~'):
atom_negation = True
label = atom_token[1:]
if atom_token.startswith('not '):
if '-' in atom_token or '~' in atom_token:
raise ValueError
negation_as_failure = True
label = atom_token[4:]
if negation_as_failure:
atom = NegationAsFailureAtom(label, atom_negation)
else:
atom = Atom(label, atom_negation)
created = self.get_or_create_atom(atom)
if not created:
atom.atom_id = self._atom_id_lookup[str(atom)]
return atom
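    # Illustrative parsing examples (a sketch, not from the original source),
    # following the token handling above:
    #   'Ka'    -> EpistemicAtom, modality KNOW, no negation
    #   '-M-b'  -> EpistemicAtom, modality BELIEVE, epistemic and atom negation
    #   'not c' -> NegationAsFailureAtom for label 'c'
    #   '~d'    -> Atom with strong (atom) negation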
def get_evaluated_program_and_apply_valuation(self, valuation_tuple):
"""
Given a tuple of valuations to apply to the epistemic atoms, run
through each rule, apply the valuations and determine the consequences
of the valuations to each rule. Here, if a valuation is true for an
        epistemic atom, it is removed from the rule, meaning that we're
        considering it true for the purposes of determining if it leads to a
valid worldview. If a valuation is false for an epistemic atom the
entire rule is removed, indicating that since one atom is false in the
body of a rule its whole rule is unsatisfiable.
If a rules entire body is true we take the head and say that the head
is therefore true for the evaluated program.
Arguments:
* valuation_tuple (tuple(bool))
- a tuple of boolean values representing valuations to apply to the
epistemic atoms in the program.
"""
evaluated_program = []
for rule in self.program:
evaluated_rule = self.get_evaluated_rule_and_apply_valuation(rule, valuation_tuple)
if evaluated_rule:
evaluated_program.append(evaluated_rule)
return evaluated_program
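    # Illustrative note (not part of the original source): for a program with
    # two epistemic atoms a caller would typically pass the four tuples
    # (True, True), (True, False), (False, True), (False, False); position i
    # of the tuple is matched to an atom via
    # epistemic_atom_id_to_valuation_index_map.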
def get_evaluated_rule_and_apply_valuation(self, rule, valuation_tuple):
"""
        At a rule level, go through the rule and check for epistemic atoms; if
        you find one, find its index number in the valuation string. Apply its
        valuation in the atom and work out what that means for the rule in the
        evaluated program. If True, remove the atom from the rule body,
        otherwise remove the rule from the program.
        Return the rule string unless any epistemic atom in the rule was
        valuated False, in which case return an empty string.
Arguments:
* valuation_tuple (tuple(bool))
- a tuple of boolean values representing valuations to apply to the
epistemic atoms in the program.
"""
false_valuation = False
modal_atom_in_rule = False
for atom_id in rule.tail:
atom = self._atom_cache[atom_id]
if not isinstance(atom, EpistemicAtom):
continue
# apply the valuation
modal_atom_in_rule = True
valuation_index = self.epistemic_atom_id_to_valuation_index_map[atom_id]
atom.valuation = valuation_tuple[valuation_index]
if not atom.valuation:
false_valuation = True
if not false_valuation or not modal_atom_in_rule:
return rule.get_rule_string(apply_valuation=True)
return ''
def check_optimisations(self):
"""
Search the label to epistemic atom dictionary and identify any labels
which appear in an epistemic atom more than once. If they have
        negations or modalities which conflict, valuations can be simplified
        so that these cases are not processed.
"""
optimisation_atom_pairs = []
        for label, e_atom_id_list in self.label_to_epistemic_atom_id.items():
if not e_atom_id_list or len(e_atom_id_list) == 1:
continue
e_combinations = combinations(e_atom_id_list, 2)
for e_atom_id_a, e_atom_id_b in e_combinations:
e_atom_a = self._atom_cache[e_atom_id_a]
e_atom_b = self._atom_cache[e_atom_id_b]
if self.check_optimisation(e_atom_a, e_atom_b):
                    optimisation_atom_pairs.append((e_atom_a, e_atom_b))
return optimisation_atom_pairs
    def check_optimisation(self, e_atom_a, e_atom_b):
        """
        Return True if the two epistemic atoms conflict in a way that allows
        the set of valuations to be simplified.
        """
        return (
self.check_conflicts(e_atom_a, e_atom_b) and
self.check_conflicts(e_atom_b, e_atom_a)
) or \
self.check_conflicting_negation(e_atom_a, e_atom_b) or \
self.check_different_modality(e_atom_a, e_atom_b) or \
self.check_different_modality(e_atom_b, e_atom_a)
@staticmethod
def check_conflicts(atom_a, atom_b):
"""
        Given two epistemic atoms, if one is K and doesn't have epistemic
        negation and the other is M and doesn't have epistemic negation,
        and their atom negations do not agree, we can safely say that any
valuation where they are both true or both false can't be satisfied.
Argument:
* atom_a (EpistemicAtom) - an epistemic atom
* atom_b (EpistemicAtom) - another epistemic atom
"""
return (atom_a.modality == EpistemicModality.KNOW and
atom_b.modality == EpistemicModality.BELIEVE and
not atom_a.epistemic_negation and
not atom_b.epistemic_negation and
atom_a.atom_negation != atom_b.atom_negation)
@staticmethod
def check_different_modality(atom_a, atom_b):
"""
Given two epistemic atoms, if one is K and has epistemic negation and
        the other is M and hasn't, and their atom negations are equal, we can say
that any valuation that agrees for both of them cannot be true.
Argument:
* atom_a (EpistemicAtom) - an epistemic atom
* atom_b (EpistemicAtom) - another epistemic atom
"""
return (atom_a.modality == EpistemicModality.KNOW and
not atom_a.epistemic_negation and
atom_b.modality == EpistemicModality.BELIEVE and
atom_b.epistemic_negation and
atom_a.atom_negation == atom_b.atom_negation)
@staticmethod
def check_conflicting_negation(atom_a, atom_b):
"""
Given two epistemic atoms, if they have the same modality
        (either K or M) but they have a conflicting negation status for their
modality or for their atom (but not both) then we can safely say that
any valuation which say both of these things are true will be false
valuations.
Argument:
* atom_a (EpistemicAtom) - an epistemic atom
* atom_b (EpistemicAtom) - another epistemic atom
"""
return (atom_a.modality == atom_b.modality and
((atom_a.atom_negation != atom_b.atom_negation and
atom_a.epistemic_negation == atom_b.epistemic_negation) or
(atom_a.atom_negation == atom_b.atom_negation and
atom_a.epistemic_negation != atom_b.epistemic_negation)))
| gpl-3.0 | -5,458,440,010,236,815,000 | 42.353896 | 95 | 0.609002 | false |
reubano/hdxscraper-undp-climate | config.py | 1 | 2244 | # -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
config
~~~~~~
Provides app configuration settings
"""
from __future__ import (
absolute_import, division, print_function, with_statement,
unicode_literals)
from os import path as p
BASEDIR = p.dirname(__file__)
PARENTDIR = p.dirname(BASEDIR)
DB_NAME = 'scraperwiki.sqlite'
RECIPIENT = '[email protected]'
class Config(object):
base = 'http://www.geog.ox.ac.uk'
BASE_URL = '%s/research/climate/projects/undp-cp/UNDP_data' % base
FILE_EXT = 'ts.obs.precip.ts.ensemblemean.abs.txt'
DIR = 'Observed/Mean/Timeseries/Absolute'
loc = [
'Afghanistan', 'Angola', 'Antigua and Barbuda', 'Argentina', 'Armenia',
'Bangladesh', 'Barbados', 'Belize', 'Benin', 'Cambodia', 'Cameroon',
'Cape Verde', 'Chad', 'Chile', 'China', 'Colombia', 'Comoros', 'Cuba',
'Dominica', 'Dominican Republic', 'Equatorial Guinea', 'Eritrea',
'Ethiopia', 'Gabon', 'Gambia', 'Ghana', 'Grenada', 'Guinea', 'Guyana',
'Indonesia', 'Jamaica', 'Kenya', 'Liberia', 'Malawi', 'Mali',
'Mauritania', 'Mauritius', 'Mexico', 'Morocco', 'Mozambique', 'Nepal',
'Nicaragua', 'Pakistan', 'Sao Tome and Principe', 'Senegal',
'Sierra Leone', 'St Kitts and Nevis', 'St Lucia',
'St Vincent and the Grenadines', 'Suriname', 'Tanzania', 'The Bahamas',
'Togo', 'Trinidad and Tobago', 'Uganda', 'Vietnam', 'Yemen', 'Zambia']
TABLES = [{'name': 'climate', 'location': l, 'rid': 'rid'} for l in loc]
SQLALCHEMY_DATABASE_URI = 'sqlite:///%s' % p.join(BASEDIR, DB_NAME)
API_LIMIT = 1000
SW = False
DEBUG = False
TESTING = False
PROD = False
CHUNK_SIZE = 2 ** 14
ROW_LIMIT = None
LOGFILE = p.join(BASEDIR, 'http', 'log.txt')
class Scraper(Config):
PROD = True
SW = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///%s' % p.join(PARENTDIR, DB_NAME)
LOGFILE = p.join(PARENTDIR, 'http', 'log.txt')
class Production(Config):
PROD = True
class Development(Config):
DEBUG = True
CHUNK_SIZE = 2 ** 4
ROW_LIMIT = 16
class Test(Config):
SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:'
DEBUG = True
CHUNK_SIZE = 2 ** 4
ROW_LIMIT = 10
TESTING = True
| mit | -7,815,678,056,294,117,000 | 28.92 | 79 | 0.610517 | false |
wtpayne/hiai | a3_src/h70_internal/da/lwc/discover.py | 1 | 8741 | # -*- coding: utf-8 -*-
"""
Local working copy path aliasing.
---
type:
python_module
validation_level:
v00_minimum
protection:
k00_public
copyright:
"Copyright 2016 High Integrity Artificial Intelligence Systems"
license:
"Licensed under the Apache License, Version 2.0 (the License);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an AS IS BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."
...
"""
import os
import da.lwc.search
import da.memo
LWC_DIR_EXCLUDE_EXPR_LIST = [r'^\..*$',
r'^\.git$',
r'^\.cache$',
r'^\.vagrant',
r'^__pycache__']
LWC_EXT_INCLUDE_EXPR_LIST = [r'^.*\.bash$',
r'^.*\.css$',
r'^.*\.template.html$',
r'^.*\.template.docx$',
r'^.*\.py$',
r'^.*\.md$',
r'^.*\.json$',
r'^.*\.yaml$']
LWC_PROJECT_DIR_EXPR = r'^.*p[0-9]{4}_[a-z0-9_]{2,64}$'
LWC_COUNTERPARTY_DIR_EXPR = r'^.*c[0-9]{3}_[a-z0-9_]{2,64}$'
LWC_RESEARCHER_DIR_EXPR = r'^.*t[0-9]{3}_[a-z0-9_]{2,64}$'
_LWC_TAB = {
'env': ('a0_env', ),
'cfg': ('a1_cfg', ),
'dat': ('a2_dat', ),
'src': ('a3_src', ),
'tmp': ('a4_tmp', ),
'cms': ('a5_cms', ),
'resource': ('a3_src', 'h10_resource' ),
'daybook': ('a3_src', 'h10_resource', 'daybook' ),
'registry': ('a3_src', 'h10_resource', 'registry' ),
'capability': ('a3_src', 'h20_capability' ),
'product': ('a3_src', 'h30_product' ),
'project': ('a3_src', 'h40_project' ),
'research': ('a3_src', 'h50_research' ),
'demo': ('a3_src', 'h60_demo' ),
'internal': ('a3_src', 'h70_internal' ),
'bldcfg': ('a3_src', 'h70_internal', 'da', 'bldcfg' ),
'doc': ('a3_src', 'h80_doc' )
}
# -----------------------------------------------------------------------------
def gen_product_dirs(dirpath_lwc_root = None):
"""
Generate all product dirs in the local working copy.
"""
return da.lwc.search.filtered_dirpath_generator(
root = path(key = 'product',
dirpath_lwc_root = dirpath_lwc_root),
direxcl = da.lwc.discover.LWC_DIR_EXCLUDE_EXPR_LIST,
pathincl = None,
pathexcl = None)
# -----------------------------------------------------------------------------
def gen_counterparty_dirs(dirpath_lwc_root = None):
"""
Generate all project counterparty dirs in the local working copy.
"""
return da.lwc.search.filtered_dirpath_generator(
root = path(key = 'project',
dirpath_lwc_root = dirpath_lwc_root),
direxcl = da.lwc.discover.LWC_DIR_EXCLUDE_EXPR_LIST,
pathincl = [da.lwc.discover.LWC_COUNTERPARTY_DIR_EXPR],
pathexcl = None)
# -----------------------------------------------------------------------------
def gen_project_dirs(dirpath_lwc_root = None):
"""
Generate all project dirs in the local working copy.
"""
return da.lwc.search.filtered_dirpath_generator(
root = path(key = 'project',
dirpath_lwc_root = dirpath_lwc_root),
direxcl = da.lwc.discover.LWC_DIR_EXCLUDE_EXPR_LIST,
pathincl = [da.lwc.discover.LWC_PROJECT_DIR_EXPR],
pathexcl = None)
# -----------------------------------------------------------------------------
def gen_research_dirs(dirpath_lwc_root = None):
"""
Generate all research (per team member) dirs in the local working copy.
"""
return da.lwc.search.filtered_dirpath_generator(
root = path(key = 'research',
dirpath_lwc_root = dirpath_lwc_root),
direxcl = da.lwc.discover.LWC_DIR_EXCLUDE_EXPR_LIST,
pathincl = [da.lwc.discover.LWC_RESEARCHER_DIR_EXPR],
pathexcl = None)
# -----------------------------------------------------------------------------
def gen_demo_dirs(dirpath_lwc_root = None):
"""
Generate all demo dirs in the local working copy.
"""
return da.lwc.search.filtered_dirpath_generator(
root = path(key = 'demo',
dirpath_lwc_root = dirpath_lwc_root),
direxcl = da.lwc.discover.LWC_DIR_EXCLUDE_EXPR_LIST,
pathincl = None,
pathexcl = None)
# -----------------------------------------------------------------------------
def gen_src_files(dirpath_lwc_root = None):
"""
Generate all source files in the local working copy.
"""
if dirpath_lwc_root is None:
dirpath_lwc_root = _lwc_root(__file__)
return da.lwc.search.filtered_filepath_generator(
root = path(key = 'src',
dirpath_lwc_root = dirpath_lwc_root),
direxcl = da.lwc.discover.LWC_DIR_EXCLUDE_EXPR_LIST,
pathincl = da.lwc.discover.LWC_EXT_INCLUDE_EXPR_LIST)
# -----------------------------------------------------------------------------
@da.memo.var
def path(key, dirpath_lwc_root = None):
"""
Return the directory path corresponding to the specified key.
"""
# Get lwc_root if it is not defined
if dirpath_lwc_root is None:
dirpath_lwc_root = _lwc_root(__file__)
# LWC root
if key == 'root':
return dirpath_lwc_root
# Handle 'heavyweight' folders that can't get copied to tmp
if (key == 'env') or (key == 'cfg') or (key == 'dat'):
dirname_tmp = _LWC_TAB['tmp'][0]
is_tmp_lwc = dirname_tmp in dirpath_lwc_root
if is_tmp_lwc:
dirpath_outer_lwc_root = _lwc_root(dirpath_lwc_root)
else:
dirpath_outer_lwc_root = dirpath_lwc_root
return os.path.join(dirpath_outer_lwc_root, *_LWC_TAB[key])
# Env dir for the current runtime environment?
if key == 'current_env':
import da.machine as _machine
dirpath_env = path(key = 'env', dirpath_lwc_root = dirpath_lwc_root)
env_id = _machine.env_id()
return os.path.join(dirpath_env, env_id)
# Config directory for the current user & machine?
if key == 'current_cfg':
import da.team as _team
import da.machine as _machine
dirpath_cfg = path(key = 'cfg', dirpath_lwc_root = dirpath_lwc_root)
member_id = _team.member_id(dirpath_lwc_root = dirpath_lwc_root)
machine_id = _machine.machine_id(dirpath_lwc_root = dirpath_lwc_root)
return os.path.join(dirpath_cfg, member_id, machine_id)
# Key is an entry in the static table above?
if key in _LWC_TAB:
return os.path.join(dirpath_lwc_root, *_LWC_TAB[key])
raise RuntimeError(
'Could not identify path for key: {key}'.format(
key = key))
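# Example mappings implied by _LWC_TAB above (illustrative, with a purely
# hypothetical local-working-copy root):
#
# >>> path('src', dirpath_lwc_root='/home/dev/lwc')
# '/home/dev/lwc/a3_src'
# >>> path('daybook', dirpath_lwc_root='/home/dev/lwc')
# '/home/dev/lwc/a3_src/h10_resource/daybook'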
# -----------------------------------------------------------------------------
@da.memo.var
def _lwc_root(filepath_self):
"""
Return the directory path to the root of the local working copy.
"""
marker_file_name = 'da'
dirpath_self = os.path.dirname(filepath_self)
dirpath_lwc_root = da.lwc.search.find_ancestor_dir_containing(
dirpath_self, marker_file_name, allow_dir = False)
dirpath_normalised = os.path.normpath(dirpath_lwc_root)
dirpath_real = os.path.realpath(dirpath_normalised)
return dirpath_real
| apache-2.0 | -1,385,293,988,689,755,400 | 37.170306 | 79 | 0.467795 | false |
bolkedebruin/airflow | tests/providers/google/cloud/hooks/test_video_intelligence.py | 1 | 4133 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
import mock
from google.cloud.videointelligence_v1 import enums
from airflow.providers.google.cloud.hooks.video_intelligence import CloudVideoIntelligenceHook
from tests.providers.google.cloud.utils.base_gcp_mock import mock_base_gcp_hook_default_project_id
INPUT_URI = "gs://bucket-name/input-file"
OUTPUT_URI = "gs://bucket-name/output-file"
FEATURES = [enums.Feature.LABEL_DETECTION]
ANNOTATE_VIDEO_RESPONSE = {'test': 'test'}
class TestCloudVideoIntelligenceHook(unittest.TestCase):
def setUp(self):
with mock.patch(
"airflow.providers.google.cloud.hooks.video_intelligence.CloudVideoIntelligenceHook.__init__",
new=mock_base_gcp_hook_default_project_id,
):
self.hook = CloudVideoIntelligenceHook(gcp_conn_id="test")
@mock.patch(
"airflow.providers.google.cloud.hooks.video_intelligence.CloudVideoIntelligenceHook.client_info",
new_callable=mock.PropertyMock
)
@mock.patch(
"airflow.providers.google.cloud.hooks.video_intelligence.CloudVideoIntelligenceHook._get_credentials"
)
@mock.patch("airflow.providers.google.cloud.hooks.video_intelligence.VideoIntelligenceServiceClient")
def test_video_intelligence_service_client_creation(self, mock_client, mock_get_creds, mock_client_info):
result = self.hook.get_conn()
mock_client.assert_called_once_with(
credentials=mock_get_creds.return_value,
client_info=mock_client_info.return_value
)
self.assertEqual(mock_client.return_value, result)
self.assertEqual(self.hook._conn, result)
@mock.patch("airflow.providers.google.cloud.hooks.video_intelligence.CloudVideoIntelligenceHook.get_conn")
def test_annotate_video(self, get_conn):
# Given
annotate_video_method = get_conn.return_value.annotate_video
get_conn.return_value.annotate_video.return_value = ANNOTATE_VIDEO_RESPONSE
# When
result = self.hook.annotate_video(input_uri=INPUT_URI, features=FEATURES)
# Then
self.assertIs(result, ANNOTATE_VIDEO_RESPONSE)
annotate_video_method.assert_called_once_with(
input_uri=INPUT_URI,
input_content=None,
features=FEATURES,
video_context=None,
output_uri=None,
location_id=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.video_intelligence.CloudVideoIntelligenceHook.get_conn")
def test_annotate_video_with_output_uri(self, get_conn):
# Given
annotate_video_method = get_conn.return_value.annotate_video
get_conn.return_value.annotate_video.return_value = ANNOTATE_VIDEO_RESPONSE
# When
result = self.hook.annotate_video(input_uri=INPUT_URI, output_uri=OUTPUT_URI, features=FEATURES)
# Then
self.assertIs(result, ANNOTATE_VIDEO_RESPONSE)
annotate_video_method.assert_called_once_with(
input_uri=INPUT_URI,
output_uri=OUTPUT_URI,
input_content=None,
features=FEATURES,
video_context=None,
location_id=None,
retry=None,
timeout=None,
metadata=None,
)
| apache-2.0 | 474,879,580,294,383,170 | 38.361905 | 110 | 0.689814 | false |
tholum/PiBunny | system.d/library/tools_installer/tools_to_install/responder/Report.py | 1 | 3980 | #!/usr/bin/env python
# This file is part of Responder, a network take-over set of tools
# created and maintained by Laurent Gaffie.
# email: [email protected]
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sqlite3
import os
def color(txt, code = 1, modifier = 0):
if txt.startswith('[*]'):
settings.Config.PoisonersLogger.warning(txt)
elif 'Analyze' in txt:
settings.Config.AnalyzeLogger.warning(txt)
if os.name == 'nt': # No colors for windows...
return txt
return "\033[%d;3%dm%s\033[0m" % (modifier, code, txt)
def DbConnect():
cursor = sqlite3.connect("./Responder.db")
return cursor
def GetResponderData(cursor):
res = cursor.execute("SELECT * FROM Responder")
for row in res.fetchall():
print('{0} : {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}'.format(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8]))
def GetResponderUsernamesStatistic(cursor):
res = cursor.execute("SELECT COUNT(DISTINCT UPPER(user)) FROM Responder")
for row in res.fetchall():
print color('[+] In total {0} unique user accounts were captured.'.format(row[0]), code = 2, modifier = 1)
def GetResponderUsernames(cursor):
res = cursor.execute("SELECT DISTINCT user FROM Responder")
for row in res.fetchall():
print('User account: {0}'.format(row[0]))
def GetResponderUsernamesWithDetails(cursor):
res = cursor.execute("SELECT client, user, module, type, cleartext FROM Responder WHERE UPPER(user) in (SELECT DISTINCT UPPER(user) FROM Responder) ORDER BY client")
for row in res.fetchall():
print('IP: {0} module: {1}:{3}\nuser account: {2}'.format(row[0], row[2], row[1], row[3]))
def GetResponderCompleteHash(cursor):
res = cursor.execute("SELECT fullhash FROM Responder WHERE UPPER(user) in (SELECT DISTINCT UPPER(user) FROM Responder)")
for row in res.fetchall():
print('{0}'.format(row[0]))
def GetUniqueLookups(cursor):
res = cursor.execute("SELECT * FROM Poisoned WHERE ForName in (SELECT DISTINCT UPPER(ForName) FROM Poisoned) ORDER BY SentToIp, Poisoner")
for row in res.fetchall():
print('IP: {0}, Protocol: {1}, Looking for name: {2}'.format(row[2], row[1], row[3]))
def GetStatisticUniqueLookups(cursor):
res = cursor.execute("SELECT COUNT(*) FROM Poisoned WHERE ForName in (SELECT DISTINCT UPPER(ForName) FROM Poisoned)")
for row in res.fetchall():
print color('[+] In total {0} unique queries were poisoned.'.format(row[0]), code = 2, modifier = 1)
def SavePoisonersToDb(result):
for k in [ 'Poisoner', 'SentToIp', 'ForName', 'AnalyzeMode']:
if not k in result:
result[k] = ''
def SaveToDb(result):
for k in [ 'module', 'type', 'client', 'hostname', 'user', 'cleartext', 'hash', 'fullhash' ]:
if not k in result:
result[k] = ''
cursor = DbConnect()
print color("[+] Generating report...", code = 3, modifier = 1)
print color("[+] Unique lookups ordered by IP:", code = 2, modifier = 1)
GetUniqueLookups(cursor)
GetStatisticUniqueLookups(cursor)
print color("\n[+] Extracting captured usernames:", code = 2, modifier = 1)
GetResponderUsernames(cursor)
print color("\n[+] Username details:", code = 2, modifier = 1)
GetResponderUsernamesWithDetails(cursor)
GetResponderUsernamesStatistic(cursor)
#print color("\n[+] Captured hashes:", code = 2, modifier = 1)
#GetResponderCompleteHash(cursor)
| mit | -430,646,472,000,770,940 | 40.894737 | 170 | 0.691457 | false |
thriuin/ckan_client_demo | create_open_data.py | 1 | 1540 | __author__ = 'Statistics Canada'
__copyright__ = 'Crown Copyright, Canada 2014'
import urllib2
import simplejson as json
# Add a new data set. For this example, we will use the NAICS 2012 dataset from Statistics Canada
# Ensure the data set does not already exist. Exit if it does
query_data = urllib2.quote(json.dumps({'id': '9b25e61a-89c3-4719-afd8-fc61c7aeba0c'}))
found = False
try:
# Use a valid URL
response = urllib2.urlopen('http://data.gc.ca/test/api/3/action/package_show', query_data)
if response.code == 200:
print "Data set already exists."
exit()
except urllib2.HTTPError, hx:
# If the data set is not found, a 404 exception is thrown
if hx.code == 404:
"Data set not found. Proceeding..."
else:
print "Unexpected error: " + hx.__str__()
exit()
# Load the JSON and call the CKAN API function package_create()
try:
new_ds = json.load(open("new_data_set.json"))
except json.JSONDecodeError, jx:
print('Invalid JSON: ' + jx.__str__())
# Encode the JSON for the HTTP header
new_ds_string = urllib2.quote(json.dumps(new_ds))
# Call the CKAN API function package_create(). Use a valid URL
request = urllib2.Request('http://data.gc.ca/test/api/action/package_create')
# Replace xxxx... with an appropriate API Key
request.add_header('Authorization', 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx')
try:
response = urllib2.urlopen(request, new_ds_string)
print "CKAN Return Code: " + response.code
except urllib2.HTTPError, hx:
print hx.__str__()
| mit | -5,844,206,824,841,157,000 | 31.083333 | 97 | 0.687013 | false |
android-art-intel/marshmallow | art-extension/tools/perf_analysis/find_hottest.py | 1 | 3556 | #!/usr/bin/python
import os, sys, csv, operator, heapq
def GetMethodName(filename):
with open(filename, 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=';', quotechar='"')
for row in spamreader:
for cell in row:
return cell
def AppendData(filename, data):
data[filename] = {}
with open(filename, 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=';', quotechar='"')
for i,row in enumerate(spamreader):
data[filename][i] = {}
for j,cell in enumerate(row):
data[filename][i][j] = cell
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def ComputeData(data, max_data, sum_data):
for filename in data:
for i in data[filename]:
if len(max_data) < len(data[filename]):
max_data.append([])
if len(sum_data) < len(data[filename]):
sum_data.append([])
for j in data[filename][i]:
# append new 0s if any
if len(max_data[i]) < len(data[filename][i]):
max_data[i].append(0)
if len(sum_data[i]) < len(data[filename][i]):
sum_data[i].append(0)
# if cell is a number, then we can update our numbers.
if is_number(data[filename][i][j]):
if len(max_data[i]) < len(data[filename][i]):
max_data[i].append(0)
if len(sum_data[i]) < len(data[filename][i]):
sum_data[i].append(0)
f_data = float(data[filename][i][j])
f_max = float(max_data[i][j])
# compute max data
if f_max < f_data:
max_data[i][j] = f_data
# compute sum data
sum_data[i][j] += f_data
else:
max_data[i][j] = data[filename][i][j]
sum_data[i][j] = data[filename][i][j]
def ComputeSumPerMethod(data, sum_per_method, metric_id):
for filename in data:
sum_per_method[filename] = 0
for i in data[filename]:
if is_number(data[filename][i][metric_id]):
sum_per_method[filename] += float(data[filename][i][metric_id])
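# Illustrative sketch (not part of the original script): with metric_id = 1 the
# function sums the second CSV column of every row, per file.
#
# >>> data = {'m.csv': {0: {0: 'foo', 1: '2.0'}, 1: {0: 'bar', 1: '3.5'}}}
# >>> totals = {}
# >>> ComputeSumPerMethod(data, totals, 1)
# >>> totals
# {'m.csv': 5.5}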
def DumpData(output_name, data, max_data, sum_data):
if len(data) == 0:
return
with open(output_name, 'wb') as myfile:
wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
wr.writerow(['Max'])
for row in max_data:
wr.writerow(row)
wr.writerow(['Sum'])
for row in sum_data:
wr.writerow(row)
def DumpHottestMethods(output_name, sorted_methods, sum_per_method):
if len(data) == 0:
return
with open(output_name, 'wb') as myfile:
wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
for filename in sorted_methods:
wr.writerow([GetMethodName(filename), sum_per_method[filename]])
if len(sys.argv) != 2:
print "Usage: ./find_hottest.py [folder-name]"
sys.exit(0)
data = {}
folder_name = sys.argv[1]
found_file = False
print "Collecting data..."
for filename in os.listdir(folder_name):
if filename.endswith(".csv"):
filename = folder_name + "/" + filename
AppendData(filename, data)
found_file = True
if found_file == False:
print "There is no CSV file in folder " + folder_name
else:
sum_per_method = {}
print "Computing sum per method..."
ComputeSumPerMethod(data, sum_per_method, 1)
print "Sorting data..."
# sorted_methods = heapq.nlargest(10, list(sum_per_method))
sorted_methods = list(sorted(sum_per_method, key=sum_per_method.__getitem__, reverse=True))
print "Dumping data..."
DumpHottestMethods("hottest_methods.csv", sorted_methods, sum_per_method)
sys.stdout.write("\nDone.\n")
| apache-2.0 | -1,728,301,882,559,878,000 | 28.38843 | 93 | 0.61108 | false |
gazhay/kodikast | lukecast.py | 1 | 12945 | #!/usr/bin/env python3
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GLib, GdkPixbuf
try:
gi.require_version('AppIndicator3', '0.1')
from gi.repository import AppIndicator3 as AppIndicator
except:
from gi.repository import AppIndicator
import re,subprocess,socket
import urllib.parse,time,os,signal,sys
import base64
from random import randint
from zeroconf import ServiceBrowser, Zeroconf
from gi.repository import GObject
tempsock = "/tmp/lukecast"
# TODO
# Playlist - if we can vlc:quit after a file, we can do multiple files
# Broadcast - stream to all clients found
# Authorisation at Kodi End - uname pwd
#
VERSION = "0.5a"
ICONDIR = "./kodikasticons"
DEVMODE = True
def shellquote(s):
return "'" + s.replace("'", "'\\''") + "'"
def alert(msg):
parent = None
md = Gtk.MessageDialog(parent, 0, Gtk.MessageType.INFO, Gtk.ButtonsType.CLOSE, msg)
md.run()
md.destroy()
Hosts = []
MandatoryFudgePeriod = 3;
# Check for VLC
isVLC = subprocess.run(["which vlc"], stdout=subprocess.PIPE, shell=True)
# print(isVL/C.stdout)
if (isVLC.stdout==b''):
alert("VLC is not installed, cannot continue")
quit()
# Check for webcam
videoDevs = subprocess.run(["ls /dev/video* | wc -l"], stdout=subprocess.PIPE, shell=True)
if (videoDevs.stdout!=b''):
# print("Number of devices {%d}" % int(videoDevs.stdout));
videoOn=True
else:
videoOn=False
def get_resource_path(rel_path):
dir_of_py_file = os.path.dirname(__file__)
rel_path_to_resource = os.path.join(dir_of_py_file, rel_path)
abs_path_to_resource = os.path.abspath(rel_path_to_resource)
return abs_path_to_resource
# ############################################################################## Indicator
class IndicatorKodicast:
SubMenuRef = ""
SubMenuGroup = ""
KodiTarget = ""
VLCPid = ""
mode = 0
# lastConnect = None
# statusIcons = [ "KodiKast-Red", "KodiKast-Grn", "KodiKast-Ylw", "KodiKast-Ppl" ]
statusIcons = [ "LukeInit", "LukeGrey", "LukeGreen", "LukeBlue" ]
def addSeperator(self, menu):
item = Gtk.SeparatorMenuItem()
item.show()
menu.append(item)
def addMenuItem(self, menu, label, handler):
item = Gtk.MenuItem()
item.set_label(label)
item.connect("activate", handler)
item.show()
menu.append(item)
def addRadioMenu(self, menu, label):
item = Gtk.CheckMenuItem(label=label)
item.set_active(is_active=False)
# item.connect("activate", self.toggleMe)
item.show()
menu.append(item)
def addSubMenu(self, menu, label):
pass
def aboutDialog(self, evt):
dlg = Gtk.AboutDialog();
dlg.set_name("About...")
dlg.set_program_name("Luke Cast")
dlg.set_version(VERSION)
dlg.set_comments("""
A GTK Indicator to stream media to Avahi discovered Kodi instances.
Media, Screen, Webcam, to any Kodi with jsonrpc enabled.
""")
dlg.set_authors(['Gareth Hay'])
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(get_resource_path(ICONDIR)+"/"+self.statusIcons[ randint(0, len(self.statusIcons)-1) ]+".png" , 100, 100)
dlg.set_logo(pixbuf)
# dlg.set_logo_icon_name("kodi")
dlg.show()
def reboot(self, evt):
self.handler_cast_stop()
Gtk.main_quit()
os.execv(__file__, sys.argv)
def __init__(self):
self.ind = AppIndicator.Indicator.new("indicator-lukecast", self.statusIcons[0], AppIndicator.IndicatorCategory.SYSTEM_SERVICES)
self.ind.set_icon_theme_path( get_resource_path(ICONDIR))
self.ind.set_icon( self.statusIcons[0] )
self.ind.set_status (AppIndicator.IndicatorStatus.ACTIVE)
self.mode = 0
# have to give indicator a menu
self.menu = Gtk.Menu()
self.addMenuItem( self.menu, "About...", self.aboutDialog)
if DEVMODE:
self.addMenuItem( self.menu, "Restart", self.reboot)
self.addMenuItem(self.menu, "Reconnect Receiver", self.handler_reconnect )
self.addSeperator(self.menu)
item = Gtk.MenuItem()
item.set_label("Available Receivers")
submenu = Gtk.Menu()
subitem = Gtk.RadioMenuItem(group=None, label="Nowhere")
subitem.set_active(is_active=True)
subitem.connect("activate", self.handlesubChecks)
subitem.show()
submenu.append(subitem)
submenu.show()
item.set_submenu( submenu )
self.SubMenuGroup = subitem
self.SubMenuRef = submenu
item.show()
self.menu.append(item)
self.addSeperator( self.menu )
self.addMenuItem(self.menu, "Start Screen Cast" , self.handler_cast_start)
self.addMenuItem(self.menu, "Start File Cast...", self.handler_cast_file )
if videoOn:
self.addMenuItem(self.menu, "Start Webcam Stream0" , self.handler_cast_cam )
self.addRadioMenu(self.menu, " With Sound")
self.addMenuItem(self.menu, "Stop Cast" , self.handler_cast_stop )
self.addSeperator( self.menu )
self.addMenuItem(self.menu, "Exit" , self.handler_menu_exit )
self.menu.show()
self.ind.set_menu(self.menu)
GLib.timeout_add_seconds(1, self.handler_timeout)
def handler_reconnect(self,evt=None, hosts=None):
        if hosts is None:
hosts = self.KodiTarget
if socket.gethostname().find('.')>=0:
thisisme=socket.gethostname()
else:
thisisme=socket.gethostbyaddr(socket.gethostname())[0]
jsonpart = {'request' : '{"jsonrpc":"2.0", "id":1, "method": "Player.Open","params":{"item":{"file":"http://%s:8554/stream.mp4"}}}' % thisisme }
jsonstr = urllib.parse.urlencode(jsonpart) # added parse. as its moved in python3
# This will have to be for multiple hosts
streamUrl = 'http://%s:8080/jsonrpc?' % (hosts)
streamUrl+= jsonstr
credentials = b'kodi:test'
encoded_credentials = base64.b64encode(credentials)
authorization = b'Basic ' + encoded_credentials
command = "/usr/bin/curl -g -H 'Content-Type: application/json' -H 'Authorization: %s' -H 'Accept: application/json' '%s'" % (authorization.decode("utf-8") , streamUrl)
print("Executing %s" % command)
curlProc = subprocess.run(command, stdout=subprocess.PIPE, shell=True)
print(curlProc.stdout)
connect_hosts=handler_reconnect
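    # For reference, a rough sketch of the JSON-RPC request built above
    # (illustrative only; the host names are placeholders, the kodi:test
    # credentials come from the hard-coded value a few lines up):
    #
    #   GET http://<kodi-host>:8080/jsonrpc?request=<url-encoded payload>
    #   Authorization: Basic a29kaTp0ZXN0   (base64 of "kodi:test")
    #
    #   payload = {"jsonrpc": "2.0", "id": 1, "method": "Player.Open",
    #              "params": {"item": {"file": "http://<this-host>:8554/stream.mp4"}}}
    #
    # Kodi responds by opening the HTTP stream that VLC publishes on port 8554.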
def handlesubChecks(self, evt):
if evt.get_active()==True:
self.KodiTarget = evt.get_label()
self.mode = 1
if self.KodiTarget == "Nowhere":
self.mode = 0
def handler_menu_exit(self, evt):
Gtk.main_quit()
def handler_cast_file(self, evt):
dialog = Gtk.FileChooserDialog("Please choose a file", None,
Gtk.FileChooserAction.OPEN,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OPEN, Gtk.ResponseType.OK))
filter = Gtk.FileFilter()
filter.set_name("Videos")
filter.add_mime_type("video/mpeg")
filter.add_pattern("*.mp4")
filter.add_pattern("*.ogg")
filter.add_pattern("*.mkv")
filter.add_pattern("*.mpeg")
filter.add_pattern("*.avi")
dialog.add_filter(filter)
        response = dialog.run()
        chosen = dialog.get_filename()
        dialog.destroy()
        time.sleep(0.1)
        if response == Gtk.ResponseType.OK and chosen:
            # fudge the URI only once we know a file was actually chosen;
            # get_filename() can return None and would crash fudgeUri
            self.streamUrlTo(self.fudgeUri(chosen), self.KodiTarget)
            return
        elif response == Gtk.ResponseType.CANCEL:
            print("Cancel clicked")
def fudgeUri(self, inuri):
return "file://"+(inuri.replace("\n","").replace(" ","\ ")+" vlc://quit")
# /* Handle a dropped file on a desktop file with code below */
def handler_drop_cast_start(self):
content = open(tempsock, 'r').read()
if (len(content)>0):
# trim this and cvlc stream it.
# refactor stream launch code to function(url, hosts)
open(tempsock,"w").close()
content=self.fudgeUri(content)
# print(content)
if not self.targetCheck():
alert("No target selected")
return
self.streamUrlTo( content, self.KodiTarget )
self.lastConnect = None
time.sleep(0.1) # stops a cpu 100% problem
return True
def targetCheck(self):
if (self.KodiTarget == "") or (self.KodiTarget=="Nowhere"):
return False
return True
def streamUrlTo(self, uri, hostlist):
self.mode = 2 # :input-slave=alsa://hw:0,0
sout = "#transcode{vcodec=h264,acodec=mpga,ab=128,channels=2,samplerate=44100}:standard{access=http,mux=ts,ttl=15,dst=:8554/stream.mp4"
# sout = "#transcode{vcodec=h264,scale=1,vb=0}:standard{access=http,mux=ts,ttl=15,dst=:8554/}"
command = 'vlc -Idummy '+uri+' --sout "%s"' % sout
# print("## Command to exec")
# print(command)
# print("##")
self.VLCPid = subprocess.Popen(command, shell=True, preexec_fn=os.setsid)
self.handler_reconnect(hosts=hostlist)
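    # Illustrative only: with the sout string above, the spawned process is
    # roughly
    #   vlc -Idummy <uri> --sout "#transcode{vcodec=h264,acodec=mpga,...}
    #                             :standard{access=http,mux=ts,ttl=15,dst=:8554/stream.mp4}"
    # i.e. VLC transcodes the input and serves it over HTTP on port 8554,
    # after which handler_reconnect() asks each Kodi host to open that stream.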
def handler_cast_start(self, evt=None):
if not self.targetCheck():
alert("No target selected")
return
self.streamUrlTo("screen:// :screen-fps=10 :screen-caching=10 vlc://quit", self.KodiTarget)
def handler_cast_cam(self, evt):
if not self.targetCheck():
alert("No target selected")
return
# With audio here
self.streamUrlTo("v4l2:///dev/video0 vlc://quit", self.KodiTarget)
def handler_cast_stop(self, evt=None):
self.stopCasting()
def handler_timeout(self):
"""This will be called every few seconds by the GLib.timeout.
"""
if self.KodiTarget=="Nowhere":
self.KodiTarget=""
self.mode = 0
if self.KodiTarget=="" and self.VLCPid != "":
self.killVLC()
if self.VLCPid != "":
try:
                if self.VLCPid.poll() is None:
pass
else:
self.mode = 1
except OSError:
self.mode = 1
if (self.ind.get_icon() != self.statusIcons[self.mode]):
self.ind.set_icon(self.statusIcons[self.mode])
return True
def killVLC( self ):
try:
os.killpg(os.getpgid(self.VLCPid.pid), signal.SIGTERM)
except:
command = 'killall vlc'
process = subprocess.run(command, shell=True)
def stopCasting( self ):
self.mode = 1
self.killVLC()
def quitApp( self ):
self.stopCasting()
def main(self):
# attempt multiprocess shenanigans
GObject.idle_add(self.handler_drop_cast_start)
Gtk.main()
# ############################################################################## Avahi
class AvahiListener(object):
    # Having problems removing - could be python2 -> 3 conversion problems
    target = ""
    DEBUGME = False
def remove_service(self, zeroconf, type, name):
for host in Hosts:
if host.get("name")== name:
info = host
for itemA in self.target.SubMenuRef.get_children():
if itemA.get_label()==info['info'].server:
if itemA.get_active():
self.target.KodiTarget = ""
self.target.mode=0
self.target.SubMenuRef.remove(itemA) #itemA.remove()
if self.DEBUGME: print("Service %s removed" % (info['info'].server,))
Hosts.remove(info)
def add_service(self, zeroconf, type, name):
info = zeroconf.get_service_info(type, name)
# subitem = Gtk.CheckMenuItem()
subitem = Gtk.RadioMenuItem(group=self.target.SubMenuGroup, label=info.server)
subitem.connect("activate", self.target.handlesubChecks)
subitem.set_label(info.server)
subitem.show()
self.target.SubMenuRef.append(subitem)
self.target.SubMenuRef.show()
Hosts.append({"name": name, "info": info})
if self.DEBUGME: print("Service %s removed" % (info['info'].server,))
def setTarget(self, targetobj):
self.target = targetobj
# ############################################################################## Main
if __name__ == "__main__":
try:
zeroconf = Zeroconf()
listener = AvahiListener()
ind = IndicatorKodicast()
        listener.setTarget(ind)
browser = ServiceBrowser(zeroconf, "_xbmc-jsonrpc._tcp.local.", listener)
try:
open(tempsock,"w").close();
except:
print( "socket file not available")
pass
ind.main()
finally:
ind.handler_cast_stop()
| gpl-3.0 | -4,870,596,901,379,037,000 | 34.368852 | 178 | 0.588335 | false |
angryrancor/kivy | kivy/uix/behaviors.py | 1 | 56965 | '''
Behaviors
=========
.. versionadded:: 1.8.0
This module implements behaviors that can be mixed with existing base widgets.
For example, if you want to add a "button" capability to an
:class:`~kivy.uix.image.Image`, you could do::
class IconButton(ButtonBehavior, Image):
pass
This would give you an :class:`~kivy.uix.image.Image` with the events and
properties inherited from :class:`ButtonBehavior`. For example, the *on_press*
and *on_release* events would be fired when appropriate::
class IconButton(ButtonBehavior, Image):
def on_press(self):
print("on_press")
Or in kv::
IconButton:
on_press: print('on_press')
Naturally, you could also bind to any property changes the behavior class
offers::
def state_changed(*args):
print('state changed')
button = IconButton()
button.bind(state=state_changed)
.. note::
The behavior class must always be _before_ the widget class. If you don't
specify the inheritance in this order, the behavior will not work because
the behavior methods are overwritten by the class method listed first.
Similarly, if you combine a behavior class with a class which
requires the use of the methods also defined by the behavior class, the
resulting class may not function properly. E.g. combining a ButtonBehavior
    with a Slider, both of which require the on_touch_up method, the resulting
class will not work.
'''
__all__ = ('ButtonBehavior', 'ToggleButtonBehavior', 'DragBehavior',
'FocusBehavior', 'CompoundSelectionBehavior')
from kivy.clock import Clock
from kivy.properties import OptionProperty, ObjectProperty, NumericProperty,\
ReferenceListProperty, BooleanProperty, ListProperty, AliasProperty
from kivy.config import Config
from kivy.metrics import sp
from kivy.base import EventLoop
from kivy.logger import Logger
from functools import partial
from weakref import ref
from time import time
import string
# When we are generating documentation, Config doesn't exist
_scroll_timeout = _scroll_distance = 0
_is_desktop = False
_keyboard_mode = 'system'
if Config:
_scroll_timeout = Config.getint('widgets', 'scroll_timeout')
_scroll_distance = Config.getint('widgets', 'scroll_distance')
_is_desktop = Config.getboolean('kivy', 'desktop')
_keyboard_mode = Config.get('kivy', 'keyboard_mode')
class ButtonBehavior(object):
'''Button behavior.
:Events:
`on_press`
Fired when the button is pressed.
`on_release`
Fired when the button is released (i.e. the touch/click that
pressed the button goes away).
'''
state = OptionProperty('normal', options=('normal', 'down'))
'''State of the button, must be one of 'normal' or 'down'.
The state is 'down' only when the button is currently touched/clicked,
otherwise 'normal'.
:attr:`state` is an :class:`~kivy.properties.OptionProperty`.
'''
last_touch = ObjectProperty(None)
'''Contains the last relevant touch received by the Button. This can
be used in `on_press` or `on_release` in order to know which touch
dispatched the event.
.. versionadded:: 1.8.0
:attr:`last_touch` is a :class:`~kivy.properties.ObjectProperty`,
defaults to None.
'''
MIN_STATE_TIME = 0.035
'''The minimum period of time which the widget must remain in the
`'down'` state.
:attr:`MIN_STATE_TIME` is a float.
'''
always_release = BooleanProperty(True)
'''This determines if the widget fires a `on_release` event if
the touch_up is outside the widget.
.. versionadded:: 1.9.0
:attr:`always_release` is a :class:`~kivy.properties.BooleanProperty`,
defaults to `True`.
'''
def __init__(self, **kwargs):
self.register_event_type('on_press')
self.register_event_type('on_release')
super(ButtonBehavior, self).__init__(**kwargs)
self.__state_event = None
self.__touch_time = None
self.fbind('state', self.cancel_event)
def _do_press(self):
self.state = 'down'
def _do_release(self, *args):
self.state = 'normal'
def cancel_event(self, *args):
if self.__state_event:
self.__state_event.cancel()
self.__state_event = None
def on_touch_down(self, touch):
if super(ButtonBehavior, self).on_touch_down(touch):
return True
if touch.is_mouse_scrolling:
return False
if not self.collide_point(touch.x, touch.y):
return False
if self in touch.ud:
return False
touch.grab(self)
touch.ud[self] = True
self.last_touch = touch
self.__touch_time = time()
self._do_press()
self.dispatch('on_press')
return True
def on_touch_move(self, touch):
if touch.grab_current is self:
return True
if super(ButtonBehavior, self).on_touch_move(touch):
return True
return self in touch.ud
def on_touch_up(self, touch):
if touch.grab_current is not self:
return super(ButtonBehavior, self).on_touch_up(touch)
assert(self in touch.ud)
touch.ungrab(self)
self.last_touch = touch
if (not self.always_release
and not self.collide_point(*touch.pos)):
self.state = 'normal'
return
touchtime = time() - self.__touch_time
if touchtime < self.MIN_STATE_TIME:
self.__state_event = Clock.schedule_once(
self._do_release, self.MIN_STATE_TIME - touchtime)
else:
self._do_release()
self.dispatch('on_release')
return True
def on_press(self):
pass
def on_release(self):
pass
def trigger_action(self, duration=0.1):
'''Trigger whatever action(s) have been bound to the button by calling
both the on_press and on_release callbacks.
This simulates a quick button press without using any touch events.
Duration is the length of the press in seconds. Pass 0 if you want
the action to happen instantly.
.. versionadded:: 1.8.0
'''
self._do_press()
self.dispatch('on_press')
def trigger_release(dt):
self._do_release()
self.dispatch('on_release')
if not duration:
trigger_release(0)
else:
Clock.schedule_once(trigger_release, duration)
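# A minimal usage sketch for ButtonBehavior (illustrative only; IconButton is
# an assumed name, mirroring the module docstring above):
#
#   from kivy.uix.image import Image
#
#   class IconButton(ButtonBehavior, Image):
#       def on_press(self):
#           print('pressed')
#
#   # trigger_action() simulates a quick press/release without a touch event:
#   IconButton().trigger_action(duration=0)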
class ToggleButtonBehavior(ButtonBehavior):
'''ToggleButton behavior, see ToggleButton module documentation for more
information.
.. versionadded:: 1.8.0
'''
__groups = {}
group = ObjectProperty(None, allownone=True)
'''Group of the button. If None, no group will be used (button is
independent). If specified, :attr:`group` must be a hashable object, like
a string. Only one button in a group can be in 'down' state.
:attr:`group` is a :class:`~kivy.properties.ObjectProperty`
'''
allow_no_selection = BooleanProperty(True)
'''This specifies whether the checkbox in group allows everything to
be deselected.
.. versionadded:: 1.9.0
:attr:`allow_no_selection` is a :class:`BooleanProperty` defaults to
`True`
'''
def __init__(self, **kwargs):
self._previous_group = None
super(ToggleButtonBehavior, self).__init__(**kwargs)
def on_group(self, *largs):
groups = ToggleButtonBehavior.__groups
if self._previous_group:
group = groups[self._previous_group]
for item in group[:]:
if item() is self:
group.remove(item)
break
group = self._previous_group = self.group
if group not in groups:
groups[group] = []
r = ref(self, ToggleButtonBehavior._clear_groups)
groups[group].append(r)
def _release_group(self, current):
if self.group is None:
return
group = self.__groups[self.group]
for item in group[:]:
widget = item()
if widget is None:
group.remove(item)
if widget is current:
continue
widget.state = 'normal'
def _do_press(self):
if (not self.allow_no_selection and
self.group and self.state == 'down'):
return
self._release_group(self)
self.state = 'normal' if self.state == 'down' else 'down'
def _do_release(self, *args):
pass
@staticmethod
def _clear_groups(wk):
        # auto flush the element when the weak reference has been deleted
groups = ToggleButtonBehavior.__groups
for group in list(groups.values()):
if wk in group:
group.remove(wk)
break
@staticmethod
def get_widgets(groupname):
'''Return the widgets contained in a specific group. If the group
doesn't exist, an empty list will be returned.
.. important::
Always release the result of this method! In doubt, do::
l = ToggleButtonBehavior.get_widgets('mygroup')
# do your job
del l
.. warning::
It's possible that some widgets that you have previously
deleted are still in the list. Garbage collector might need
more elements before flushing it. The return of this method
is informative, you've been warned!
'''
groups = ToggleButtonBehavior.__groups
if groupname not in groups:
return []
return [x() for x in groups[groupname] if x()][:]
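# A minimal usage sketch for grouped toggles (illustrative only; ToggleImage
# is an assumed name):
#
#   from kivy.uix.image import Image
#
#   class ToggleImage(ToggleButtonBehavior, Image):
#       pass
#
#   a = ToggleImage(group='filters')
#   b = ToggleImage(group='filters')
#   a.trigger_action()            # a -> 'down'
#   b.trigger_action()            # b -> 'down', a returns to 'normal'
#   widgets = ToggleButtonBehavior.get_widgets('filters')
#   del widgets                   # release the list, as the docstring advises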
class DragBehavior(object):
'''Drag behavior. When combined with a widget, dragging in the rectangle
defined by :attr:`drag_rectangle` will drag the widget.
For example, to make a popup which is draggable by its title do::
from kivy.uix.behaviors import DragBehavior
from kivy.uix.popup import Popup
class DragPopup(DragBehavior, Popup):
pass
And in .kv do::
<DragPopup>:
drag_rectangle: self.x, self.y+self._container.height, self.width,\
self.height - self._container.height
drag_timeout: 10000000
drag_distance: 0
.. versionadded:: 1.8.0
'''
drag_distance = NumericProperty(_scroll_distance)
'''Distance to move before dragging the :class:`DragBehavior`, in pixels.
As soon as the distance has been traveled, the :class:`DragBehavior` will
start to drag, and no touch event will go to children.
It is advisable that you base this value on the dpi of your target device's
screen.
:attr:`drag_distance` is a :class:`~kivy.properties.NumericProperty`,
defaults to 20 (pixels), according to the default value of scroll_distance
in user configuration.
'''
drag_timeout = NumericProperty(_scroll_timeout)
'''Timeout allowed to trigger the :attr:`drag_distance`, in milliseconds.
If the user has not moved :attr:`drag_distance` within the timeout,
dragging will be disabled, and the touch event will go to the children.
:attr:`drag_timeout` is a :class:`~kivy.properties.NumericProperty`,
defaults to 55 (milliseconds), according to the default value of
scroll_timeout in user configuration.
'''
drag_rect_x = NumericProperty(0)
'''X position of the axis aligned bounding rectangle where dragging
is allowed. In window coordinates.
:attr:`drag_rect_x` is a :class:`~kivy.properties.NumericProperty`,
defaults to 0.
'''
drag_rect_y = NumericProperty(0)
'''Y position of the axis aligned bounding rectangle where dragging
is allowed. In window coordinates.
:attr:`drag_rect_Y` is a :class:`~kivy.properties.NumericProperty`,
defaults to 0.
'''
drag_rect_width = NumericProperty(100)
'''Width of the axis aligned bounding rectangle where dragging is allowed.
:attr:`drag_rect_width` is a :class:`~kivy.properties.NumericProperty`,
defaults to 100.
'''
drag_rect_height = NumericProperty(100)
'''Height of the axis aligned bounding rectangle where dragging is allowed.
:attr:`drag_rect_height` is a :class:`~kivy.properties.NumericProperty`,
defaults to 100.
'''
drag_rectangle = ReferenceListProperty(drag_rect_x, drag_rect_y,
drag_rect_width, drag_rect_height)
'''Position and size of the axis aligned bounding rectangle where dragging
is allowed.
:attr:`drag_rectangle` is a :class:`~kivy.properties.ReferenceListProperty`
of (:attr:`drag_rect_x`, :attr:`drag_rect_y`, :attr:`drag_rect_width`,
:attr:`drag_rect_height`) properties.
'''
def __init__(self, **kwargs):
self._drag_touch = None
super(DragBehavior, self).__init__(**kwargs)
def _get_uid(self, prefix='sv'):
return '{0}.{1}'.format(prefix, self.uid)
def on_touch_down(self, touch):
xx, yy, w, h = self.drag_rectangle
x, y = touch.pos
if not self.collide_point(x, y):
touch.ud[self._get_uid('svavoid')] = True
return super(DragBehavior, self).on_touch_down(touch)
if self._drag_touch or ('button' in touch.profile and
touch.button.startswith('scroll')) or\
not ((xx < x <= xx + w) and (yy < y <= yy + h)):
return super(DragBehavior, self).on_touch_down(touch)
# no mouse scrolling, so the user is going to drag with this touch.
self._drag_touch = touch
uid = self._get_uid()
touch.grab(self)
touch.ud[uid] = {
'mode': 'unknown',
'dx': 0,
'dy': 0}
Clock.schedule_once(self._change_touch_mode,
self.drag_timeout / 1000.)
return True
def on_touch_move(self, touch):
if self._get_uid('svavoid') in touch.ud or\
self._drag_touch is not touch:
return super(DragBehavior, self).on_touch_move(touch) or\
self._get_uid() in touch.ud
if touch.grab_current is not self:
return True
uid = self._get_uid()
ud = touch.ud[uid]
mode = ud['mode']
if mode == 'unknown':
ud['dx'] += abs(touch.dx)
ud['dy'] += abs(touch.dy)
if ud['dx'] > sp(self.drag_distance):
mode = 'drag'
if ud['dy'] > sp(self.drag_distance):
mode = 'drag'
ud['mode'] = mode
if mode == 'drag':
self.x += touch.dx
self.y += touch.dy
return True
def on_touch_up(self, touch):
if self._get_uid('svavoid') in touch.ud:
return super(DragBehavior, self).on_touch_up(touch)
if self._drag_touch and self in [x() for x in touch.grab_list]:
touch.ungrab(self)
self._drag_touch = None
ud = touch.ud[self._get_uid()]
if ud['mode'] == 'unknown':
super(DragBehavior, self).on_touch_down(touch)
Clock.schedule_once(partial(self._do_touch_up, touch), .1)
else:
if self._drag_touch is not touch:
super(DragBehavior, self).on_touch_up(touch)
return self._get_uid() in touch.ud
def _do_touch_up(self, touch, *largs):
super(DragBehavior, self).on_touch_up(touch)
# don't forget about grab event!
for x in touch.grab_list[:]:
touch.grab_list.remove(x)
x = x()
if not x:
continue
touch.grab_current = x
super(DragBehavior, self).on_touch_up(touch)
touch.grab_current = None
def _change_touch_mode(self, *largs):
if not self._drag_touch:
return
uid = self._get_uid()
touch = self._drag_touch
ud = touch.ud[uid]
if ud['mode'] != 'unknown':
return
touch.ungrab(self)
self._drag_touch = None
super(DragBehavior, self).on_touch_down(touch)
return
class FocusBehavior(object):
'''Implements keyboard focus behavior. When combined with other
FocusBehavior widgets it allows one to cycle focus among them by pressing
tab. In addition, upon gaining focus the instance will automatically
receive keyboard input.
    Focus, very different from selection, is intimately tied to the keyboard;
each keyboard can focus on zero or one widgets, and each widget can only
have the focus of one keyboard. However, multiple keyboards can focus
simultaneously on different widgets. When escape is hit, the widget having
the focus of that keyboard will de-focus.
In essence, focus is implemented as a doubly linked list, where each
node holds a (weak) reference to the instance before it and after it,
as visualized when cycling through the nodes using tab (forward) or
shift+tab (backward). If previous or next widget is not specified,
:attr:`focus_next` and :attr:`focus_previous` defaults to `None`,
which means that the children list and parents are walked to find
the next focusable widget, unless :attr:`focus_next` or
:attr:`focus_previous` is set to the `StopIteration` class, in which case
focus stops there.
For example, to cycle focus between :class:`~kivy.uix.button.Button`
elements of a :class:`~kivy.uix.gridlayout.GridLayout`::
class FocusButton(FocusBehavior, Button):
pass
grid = GridLayout(cols=4)
for i in range(40):
grid.add_widget(FocusButton(text=str(i)))
# clicking on a widget will activate focus, and tab can now be used
# to cycle through
.. versionadded:: 1.9.0
.. warning::
This code is still experimental, and its API is subject to change in a
future version.
'''
_requested_keyboard = False
_keyboard = ObjectProperty(None, allownone=True)
_keyboards = {}
ignored_touch = []
'''A list of touches that should not be used to defocus. After on_touch_up,
every touch that is not in :attr:`ignored_touch` will defocus all the
focused widgets, if, the config keyboard mode is not multi. Touches on
focusable widgets that were used to focus are automatically added here.
Example usage::
class Unfocusable(Widget):
def on_touch_down(self, touch):
if self.collide_point(*touch.pos):
FocusBehavior.ignored_touch.append(touch)
Notice that you need to access this as class, not instance variable.
'''
def _set_keyboard(self, value):
focus = self.focus
keyboard = self._keyboard
keyboards = FocusBehavior._keyboards
if keyboard:
self.focus = False # this'll unbind
if self._keyboard: # remove assigned keyboard from dict
del keyboards[keyboard]
        if value and value not in keyboards:
keyboards[value] = None
self._keyboard = value
self.focus = focus
def _get_keyboard(self):
return self._keyboard
keyboard = AliasProperty(_get_keyboard, _set_keyboard,
bind=('_keyboard', ))
'''The keyboard to bind, or bound to the widget when focused.
When None, a keyboard is requested and released whenever the widget comes
into and out of focus. If not None, it must be a keyboard, which gets
bound and unbound from the widget whenever it's in or out of focus. It is
useful only when more than one keyboard is available, so it is recommended
to be set to None when only one keyboard is available
    If more than one keyboard is available, whenever an instance gets focused
    a new keyboard will be requested if None. Unless the other instances lose
    focus (e.g. if tab was used), a new keyboard will appear. When this is
undesired, the keyboard property can be used. For example, if there are
two users with two keyboards, then each keyboard can be assigned to
different groups of instances of FocusBehavior, ensuring that within
each group, only one FocusBehavior will have focus, and will receive input
from the correct keyboard. see `keyboard_mode` in :mod:`~kivy.config` for
information on the keyboard modes.
:attr:`keyboard` is a :class:`~kivy.properties.AliasProperty`, defaults to
None.
.. note::
When Config's `keyboard_mode` is multi, each new touch is considered
a touch by a different user and will focus (if clicked on a
focusable) with a new keyboard. Already focused elements will not lose
their focus (even if clicked on a unfocusable).
.. note:
If the keyboard property is set, that keyboard will be used when the
instance gets focused. If widgets with different keyboards are linked
through :attr:`focus_next` and :attr:`focus_previous`, then as they are
tabbed through, different keyboards will become active. Therefore,
typically it's undesirable to link instances which are assigned
different keyboards.
.. note:
When an instance has focus, setting keyboard to None will remove the
current keyboard, but will then try to get a keyboard back. It is
better to set :attr:`focus` to False.
.. warning:
When assigning a keyboard, the keyboard must not be released while
it is still assigned to an instance. Similarly, the keyboard created
by the instance on focus and assigned to :attr:`keyboard` if None,
will be released by the instance when the instance loses focus.
Therefore, it is not safe to assign this keyboard to another instance's
:attr:`keyboard`.
'''
is_focusable = BooleanProperty(_is_desktop)
'''Whether the instance can become focused. If focused, it'll lose focus
when set to False.
:attr:`is_focusable` is a :class:`~kivy.properties.BooleanProperty`,
defaults to True on a desktop (i.e. desktop is True in
:mod:`~kivy.config`), False otherwise.
'''
focus = BooleanProperty(False)
'''Whether the instance currently has focus.
Setting it to True, will bind to and/or request the keyboard, and input
will be forwarded to the instance. Setting it to False, will unbind
and/or release the keyboard. For a given keyboard, only one widget can
have its focus, so focusing one will automatically unfocus the other
instance holding its focus.
:attr:`focus` is a :class:`~kivy.properties.BooleanProperty`, defaults to
False.
'''
focused = focus
'''An alias of :attr:`focus`.
:attr:`focused` is a :class:`~kivy.properties.BooleanProperty`, defaults to
False.
.. warning::
:attr:`focused` is an alias of :attr:`focus` and will be removed in
2.0.0.
'''
def _set_on_focus_next(self, instance, value):
''' If changing code, ensure following code is not infinite loop:
widget.focus_next = widget
widget.focus_previous = widget
widget.focus_previous = widget2
'''
next = self._old_focus_next
if next is value: # prevent infinite loop
return
if isinstance(next, FocusBehavior):
next.focus_previous = None
self._old_focus_next = value
if value is None or value is StopIteration:
return
if not isinstance(value, FocusBehavior):
raise ValueError('focus_next accepts only objects based'
' on FocusBehavior, or the StopIteration class.')
value.focus_previous = self
focus_next = ObjectProperty(None, allownone=True)
'''The :class:`FocusBehavior` instance to acquire focus when
tab is pressed when this instance has focus, if not `None` or
`'StopIteration'`.
When tab is pressed, focus cycles through all the :class:`FocusBehavior`
widgets that are linked through :attr:`focus_next` and are focusable. If
:attr:`focus_next` is `None`, it instead walks the children lists to find
the next focusable widget. Finally, if :attr:`focus_next` is
the `StopIteration` class, focus won't move forward, but end here.
.. note:
Setting :attr:`focus_next` automatically sets :attr:`focus_previous`
of the other instance to point to this instance, if not None or
`StopIteration`. Similarly, if it wasn't None or `StopIteration`, it
also sets the :attr:`focus_previous` property of the instance
previously in :attr:`focus_next` to `None`. Therefore, it is only
required to set one side of the :attr:`focus_previous`,
:attr:`focus_next`, links since the other side will be set
automatically.
:attr:`focus_next` is a :class:`~kivy.properties.ObjectProperty`, defaults
to `None`.
'''
def _set_on_focus_previous(self, instance, value):
prev = self._old_focus_previous
if prev is value:
return
if isinstance(prev, FocusBehavior):
prev.focus_next = None
self._old_focus_previous = value
if value is None or value is StopIteration:
return
if not isinstance(value, FocusBehavior):
raise ValueError('focus_previous accepts only objects based'
' on FocusBehavior, or the StopIteration class.')
value.focus_next = self
focus_previous = ObjectProperty(None, allownone=True)
'''The :class:`FocusBehavior` instance to acquire focus when
shift+tab is pressed on this instance, if not None or `StopIteration`.
When shift+tab is pressed, focus cycles through all the
:class:`FocusBehavior` widgets that are linked through
:attr:`focus_previous` and are focusable. If :attr:`focus_previous` is
    `None`, it instead walks the children tree to find the
previous focusable widget. Finally, if :attr:`focus_previous` is the
`StopIteration` class, focus won't move backward, but end here.
.. note:
Setting :attr:`focus_previous` automatically sets :attr:`focus_next`
of the other instance to point to this instance, if not None or
`StopIteration`. Similarly, if it wasn't None or `StopIteration`, it
also sets the :attr:`focus_next` property of the instance previously in
:attr:`focus_previous` to `None`. Therefore, it is only required
to set one side of the :attr:`focus_previous`, :attr:`focus_next`,
links since the other side will be set automatically.
:attr:`focus_previous` is a :class:`~kivy.properties.ObjectProperty`,
defaults to `None`.
'''
keyboard_mode = OptionProperty('auto', options=('auto', 'managed'))
'''How the keyboard visibility should be managed (auto will have standard
behaviour to show/hide on focus, managed requires setting keyboard_visible
manually, or calling the helper functions ``show_keyboard()``
and ``hide_keyboard()``.
:attr:`keyboard_mode` is an :class:`~kivy.properties.OptionsProperty` and
defaults to 'auto'. Can be one of 'auto' or 'managed'.
'''
input_type = OptionProperty('text', options=('text', 'number', 'url',
'mail', 'datetime', 'tel',
'address'))
'''The kind of input keyboard to request.
.. versionadded:: 1.8.0
:attr:`input_type` is an :class:`~kivy.properties.OptionsProperty` and
defaults to 'text'. Can be one of 'text', 'number', 'url', 'mail',
'datetime', 'tel', 'address'.
'''
unfocus_on_touch = BooleanProperty(_keyboard_mode not in
('multi', 'systemandmulti'))
    '''Whether an instance should lose focus when clicked outside the instance.
    When a user clicks on a widget that is focus aware and shares the same
    keyboard as this widget (which, in the case of only one keyboard, is
    all focus aware widgets), then as that other widget gains focus, this
    widget loses focus. In addition, if this property is `True`,
    clicking on any widget other than this widget will remove focus from this
    widget.
:attr:`unfocus_on_touch` is a :class:`~kivy.properties.BooleanProperty`,
defaults to `False` if the `keyboard_mode` in :attr:`~kivy.config.Config`
is `'multi'` or `'systemandmulti'`, otherwise it defaults to `True`.
'''
def __init__(self, **kwargs):
self._old_focus_next = None
self._old_focus_previous = None
super(FocusBehavior, self).__init__(**kwargs)
self._keyboard_mode = _keyboard_mode
fbind = self.fbind
fbind('focus', self._on_focus)
fbind('disabled', self._on_focusable)
fbind('is_focusable', self._on_focusable)
fbind('focus_next', self._set_on_focus_next)
fbind('focus_previous', self._set_on_focus_previous)
def _on_focusable(self, instance, value):
if self.disabled or not self.is_focusable:
self.focus = False
def _on_focus(self, instance, value, *largs):
if self.keyboard_mode == 'auto':
if value:
self._bind_keyboard()
else:
self._unbind_keyboard()
def _ensure_keyboard(self):
if self._keyboard is None:
self._requested_keyboard = True
keyboard = self._keyboard =\
EventLoop.window.request_keyboard(
self._keyboard_released, self, input_type=self.input_type)
keyboards = FocusBehavior._keyboards
if keyboard not in keyboards:
keyboards[keyboard] = None
def _bind_keyboard(self):
self._ensure_keyboard()
keyboard = self._keyboard
if not keyboard or self.disabled or not self.is_focusable:
self.focus = False
return
keyboards = FocusBehavior._keyboards
old_focus = keyboards[keyboard] # keyboard should be in dict
if old_focus:
old_focus.focus = False
# keyboard shouldn't have been released here, see keyboard warning
keyboards[keyboard] = self
keyboard.bind(on_key_down=self.keyboard_on_key_down,
on_key_up=self.keyboard_on_key_up,
on_textinput=self.keyboard_on_textinput)
def _unbind_keyboard(self):
keyboard = self._keyboard
if keyboard:
keyboard.unbind(on_key_down=self.keyboard_on_key_down,
on_key_up=self.keyboard_on_key_up,
on_textinput=self.keyboard_on_textinput)
if self._requested_keyboard:
keyboard.release()
self._keyboard = None
self._requested_keyboard = False
del FocusBehavior._keyboards[keyboard]
else:
FocusBehavior._keyboards[keyboard] = None
def keyboard_on_textinput(self, window, text):
pass
def _keyboard_released(self):
self.focus = False
def on_touch_down(self, touch):
if not self.collide_point(*touch.pos):
return
if (not self.disabled and self.is_focusable and
('button' not in touch.profile or
not touch.button.startswith('scroll'))):
self.focus = True
FocusBehavior.ignored_touch.append(touch)
return super(FocusBehavior, self).on_touch_down(touch)
@staticmethod
def _handle_post_on_touch_up(touch):
''' Called by window after each touch has finished.
'''
touches = FocusBehavior.ignored_touch
if touch in touches:
touches.remove(touch)
return
for focusable in list(FocusBehavior._keyboards.values()):
if focusable is None or not focusable.unfocus_on_touch:
continue
focusable.focus = False
def _get_focus_next(self, focus_dir):
current = self
        walk_tree = 'walk' if focus_dir == 'focus_next' else 'walk_reverse'
while 1:
# if we hit a focusable, walk through focus_xxx
while getattr(current, focus_dir) is not None:
current = getattr(current, focus_dir)
if current is self or current is StopIteration:
return None # make sure we don't loop forever
if current.is_focusable and not current.disabled:
return current
# hit unfocusable, walk widget tree
itr = getattr(current, walk_tree)(loopback=True)
            if focus_dir == 'focus_next':
next(itr) # current is returned first when walking forward
for current in itr:
if isinstance(current, FocusBehavior):
break
# why did we stop
if isinstance(current, FocusBehavior):
if current is self:
return None
if current.is_focusable and not current.disabled:
return current
else:
return None
def keyboard_on_key_down(self, window, keycode, text, modifiers):
'''The method bound to the keyboard when the instance has focus.
When the instance becomes focused, this method is bound to the
keyboard and will be called for every input press. The parameters are
the same as :meth:`kivy.core.window.WindowBase.on_key_down`.
When overwriting the method in the derived widget, super should be
called to enable tab cycling. If the derived widget wishes to use tab
for its own purposes, it can call super at the end after it is done if
it didn't consume tab.
Similar to other keyboard functions, it should return True if the
key was consumed.
'''
if keycode[1] == 'tab': # deal with cycle
if ['shift'] == modifiers:
next = self._get_focus_next('focus_previous')
else:
next = self._get_focus_next('focus_next')
if next:
self.focus = False
next.focus = True
return True
return False
def keyboard_on_key_up(self, window, keycode):
'''The method bound to the keyboard when the instance has focus.
When the instance becomes focused, this method is bound to the
keyboard and will be called for every input release. The parameters are
the same as :meth:`kivy.core.window.WindowBase.on_key_up`.
When overwriting the method in the derived widget, super should be
called to enable de-focusing on escape. If the derived widget wishes
to use escape for its own purposes, it can call super at the end after
it is done if it didn't consume escape.
See :meth:`on_key_down`
'''
if keycode[1] == 'escape':
self.focus = False
return True
return False
def show_keyboard(self):
'''
Convenience function to show the keyboard in managed mode.
'''
if self.keyboard_mode == 'managed':
self._bind_keyboard()
def hide_keyboard(self):
'''
Convenience function to hide the keyboard in managed mode.
'''
if self.keyboard_mode == 'managed':
self._unbind_keyboard()
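# A minimal sketch of linking focus explicitly (illustrative only; FocusButton
# mirrors the class docstring example above):
#
#   class FocusButton(FocusBehavior, Button):
#       pass
#
#   first, second = FocusButton(), FocusButton()
#   first.focus_next = second          # also sets second.focus_previous
#   second.focus_next = StopIteration  # tab will not move focus past 'second'
#   first.focus = True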
class CompoundSelectionBehavior(object):
'''Selection behavior implements the logic behind keyboard and touch
selection of selectable widgets managed by the derived widget.
For example, it could be combined with a
:class:`~kivy.uix.gridlayout.GridLayout` to add selection to the layout.
At its core, it keeps a dynamic list of widgets that can be selected.
Then, as the touches and keyboard input are passed in, it selects one or
more of the widgets based on these inputs. For example, it uses the mouse
scroll and keyboard up/down buttons to scroll through the list of widgets.
Multiselection can also be achieved using the keyboard shift and ctrl keys.
Finally, in addition to the up/down type keyboard inputs, it can also
    accept letters from the keyboard to be used to select nodes with
associated strings that start with those letters, similar to how files
are selected by a file browser.
When the controller needs to select a node it calls :meth:`select_node` and
    :meth:`deselect_node`. Therefore, they must be overwritten in order to affect
the selected nodes. By default, the class doesn't listen to keyboard and
touch events, therefore, the derived widget must call
:meth:`select_with_touch`, :meth:`select_with_key_down`, and
:meth:`select_with_key_up` on events that it wants to pass on for selection
purposes.
For example, to add selection to a grid layout which will contain
:class:`~kivy.uix.Button` widgets::
class SelectableGrid(CompoundSelectionBehavior, GridLayout):
def __init__(self, **kwargs):
super(CompoundSelectionBehavior, self).__init__(**kwargs)
keyboard = Window.request_keyboard(None, self)
keyboard.bind(on_key_down=self.select_with_key_down,
on_key_up=self.select_with_key_up)
def select_node(self, node):
node.background_color = (1, 0, 0, 1)
return super(CompoundSelectionBehavior, self).select_node(node)
def deselect_node(self, node):
node.background_color = (1, 1, 1, 1)
super(CompoundSelectionBehavior, self).deselect_node(node)
Then, for each button added to the layout, bind on_touch_down of the button
to :meth:`select_with_touch` to pass on the touch events.
.. versionadded:: 1.9.0
.. warning::
This code is still experimental, and its API is subject to change in a
future version.
'''
selected_nodes = ListProperty([])
'''The list of selected nodes.
.. note:
Multiple nodes can be selected right after another using e.g. the
keyboard, so when listening to :attr:`selected_nodes` one should be
aware of this.
:attr:`selected_nodes` is a :class:`~kivy.properties.ListProperty` and
defaults to the empty list, []. It is read-only and should not be modified.
'''
touch_multiselect = BooleanProperty(False)
'''A special touch mode which determines whether touch events, as
processed with :meth:`select_with_touch`, will add to the selection the
currently touched node, or if it will clear the selection before adding the
node. This allows the selection of multiple nodes by simply touching them.
This is different than :attr:`multiselect`, because when this is True
simply touching an unselected node will select it, even if e.g. ctrl is not
pressed. If this is False, however, ctrl is required to be held in order to
add to selection when :attr:`multiselect` is True.
.. note::
:attr:`multiselect`, when False, will disable
:attr:`touch_multiselect`.
:attr:`touch_multiselect` is a :class:`~kivy.properties.BooleanProperty`,
defaults to False.
'''
multiselect = BooleanProperty(False)
'''Determines whether multiple nodes can be selected. If enabled, keyboard
shift and ctrl selection, optionally combined with touch, for example, will
be able to select multiple widgets in the normally expected manner.
This dominates :attr:`touch_multiselect` when False.
:attr:`multiselect` is a :class:`~kivy.properties.BooleanProperty`
, defaults to False.
'''
keyboard_select = BooleanProperty(True)
    ''' Whether the keyboard can be used for selection. If False, keyboard
inputs will be ignored.
:attr:`keyboard_select` is a :class:`~kivy.properties.BooleanProperty`
, defaults to True.
'''
page_count = NumericProperty(10)
'''Determines by how much the selected node is moved up or down, relative
to position of the last selected node, when pageup (or pagedown) is
pressed.
:attr:`page_count` is a :class:`~kivy.properties.NumericProperty`,
defaults to 10.
'''
up_count = NumericProperty(1)
'''Determines by how much the selected node is moved up or down, relative
to position of the last selected node, when the up (or down) arrow on the
keyboard is pressed.
:attr:`up_count` is a :class:`~kivy.properties.NumericProperty`,
defaults to 1.
'''
right_count = NumericProperty(1)
'''Determines by how much the selected node is moved up or down, relative
to position of the last selected node, when the right (or left) arrow on
the keyboard is pressed.
:attr:`right_count` is a :class:`~kivy.properties.NumericProperty`,
defaults to 1.
'''
scroll_count = NumericProperty(0)
'''Determines by how much the selected node is moved up or down, relative
to position of the last selected node, when the mouse scroll wheel is
scrolled.
    :attr:`scroll_count` is a :class:`~kivy.properties.NumericProperty`,
defaults to 0.
'''
_anchor = None # the last anchor node selected (e.g. shift relative node)
# the idx may be out of sync
    _anchor_idx = 0  # cache index in case the list hasn't changed
_last_selected_node = None # the absolute last node selected
_last_node_idx = 0
_ctrl_down = False # if it's pressed - for e.g. shift selection
_shift_down = False
# holds str used to find node, e.g. if word is typed. passed to goto_node
_word_filter = ''
_last_key_time = 0 # time since last press, for finding whole strs in node
_printable = set(string.printable)
_key_list = [] # keys that are already pressed, to not press continuously
_offset_counts = {} # cache of counts for faster access
def __init__(self, **kwargs):
super(CompoundSelectionBehavior, self).__init__(**kwargs)
def ensure_single_select(*l):
if (not self.multiselect) and len(self.selected_nodes) > 1:
self.clear_selection()
update_counts = self._update_counts
update_counts()
fbind = self.fbind
fbind('multiselect', ensure_single_select)
fbind('page_count', update_counts)
fbind('up_count', update_counts)
fbind('right_count', update_counts)
fbind('scroll_count', update_counts)
def select_with_touch(self, node, touch=None):
'''(internal) Processes a touch on the node. This should be called by
the derived widget when a node is touched and is to be used for
selection. Depending on the keyboard keys pressed and the
        configuration, it could select or deselect this and other nodes in the
selectable nodes list, :meth:`get_selectable_nodes`.
:Parameters:
`node`
                The node that received the touch. Can be None for a scroll
type touch.
`touch`
Optionally, the touch. Defaults to None.
:Returns:
bool, True if the touch was used, False otherwise.
'''
multi = self.multiselect
multiselect = multi and (self._ctrl_down or self.touch_multiselect)
range_select = multi and self._shift_down
if touch and 'button' in touch.profile and touch.button in\
('scrollup', 'scrolldown', 'scrollleft', 'scrollright'):
node_src, idx_src = self._reslove_last_node()
node, idx = self.goto_node(touch.button, node_src, idx_src)
if node == node_src:
return False
if range_select:
self._select_range(multiselect, True, node, idx)
else:
if not multiselect:
self.clear_selection()
self.select_node(node)
return True
if node is None:
return False
if (node in self.selected_nodes and (not range_select)): # selected
if multiselect:
self.deselect_node(node)
else:
self.clear_selection()
self.select_node(node)
elif range_select:
            # keep anchor only if not multiselect (ctrl-type selection)
self._select_range(multiselect, not multiselect, node, 0)
else: # it's not selected at this point
if not multiselect:
self.clear_selection()
self.select_node(node)
return True
def select_with_key_down(self, keyboard, scancode, codepoint, modifiers,
**kwargs):
'''Processes a key press. This is called when a key press is to be used
for selection. Depending on the keyboard keys pressed and the
        configuration, it could select or deselect nodes or node ranges
from the selectable nodes list, :meth:`get_selectable_nodes`.
The parameters are such that it could be bound directly to the
on_key_down event of a keyboard. Therefore, it is safe to be called
repeatedly when the key is held down as is done by the keyboard.
:Returns:
bool, True if the keypress was used, False otherwise.
'''
if not self.keyboard_select:
return False
keys = self._key_list
multi = self.multiselect
node_src, idx_src = self._reslove_last_node()
if scancode[1] == 'shift':
self._shift_down = True
elif scancode[1] in ('ctrl', 'lctrl', 'rctrl'):
self._ctrl_down = True
elif (multi and 'ctrl' in modifiers and scancode[1] in ('a', 'A')
and scancode[1] not in keys):
sister_nodes = self.get_selectable_nodes()
select = self.select_node
for node in sister_nodes:
select(node)
keys.append(scancode[1])
else:
if scancode[1] in self._printable:
if time() - self._last_key_time <= 1.:
self._word_filter += scancode[1]
else:
self._word_filter = scancode[1]
self._last_key_time = time()
node, idx = self.goto_node(self._word_filter, node_src,
idx_src)
else:
node, idx = self.goto_node(scancode[1], node_src, idx_src)
if node == node_src:
return False
multiselect = multi and 'ctrl' in modifiers
if multi and 'shift' in modifiers:
self._select_range(multiselect, True, node, idx)
else:
if not multiselect:
self.clear_selection()
self.select_node(node)
return True
return False
def select_with_key_up(self, keyboard, scancode, **kwargs):
'''(internal) Processes a key release. This must be called by the
derived widget when a key that :meth:`select_with_key_down` returned
True is released.
The parameters are such that it could be bound directly to the
on_key_up event of a keyboard.
:Returns:
bool, True if the key release was used, False otherwise.
'''
if scancode[1] == 'shift':
self._shift_down = False
elif scancode[1] in ('ctrl', 'lctrl', 'rctrl'):
self._ctrl_down = False
else:
try:
self._key_list.remove(scancode[1])
return True
except ValueError:
return False
return True
def _update_counts(self, *largs):
# doesn't invert indices here
pc = self.page_count
uc = self.up_count
rc = self.right_count
sc = self.scroll_count
self._offset_counts = {'pageup': -pc, 'pagedown': pc, 'up': -uc,
'down': uc, 'right': rc, 'left': -rc, 'scrollup': sc,
'scrolldown': -sc, 'scrollright': -sc, 'scrollleft': sc}
def _reslove_last_node(self):
# for offset selection, we have a anchor, and we select everything
# between anchor and added offset relative to last node
sister_nodes = self.get_selectable_nodes()
if not len(sister_nodes):
return None, 0
last_node = self._last_selected_node
last_idx = self._last_node_idx
end = len(sister_nodes) - 1
if last_node is None:
last_node = self._anchor
last_idx = self._anchor_idx
if last_node is None:
return sister_nodes[end], end
if last_idx > end or sister_nodes[last_idx] != last_node:
try:
return last_node, sister_nodes.index(last_node)
except ValueError:
return sister_nodes[end], end
return last_node, last_idx
def _select_range(self, multiselect, keep_anchor, node, idx):
'''Selects a range between self._anchor and node or idx.
If multiselect, it'll add to selection, otherwise it will unselect
everything before selecting the range. This is only called if
self.multiselect is True.
If keep anchor is False, the anchor is moved to node. This should
        always be True for keyboard selection.
'''
select = self.select_node
sister_nodes = self.get_selectable_nodes()
end = len(sister_nodes) - 1
last_node = self._anchor
last_idx = self._anchor_idx
if last_node is None:
last_idx = end
last_node = sister_nodes[end]
else:
if last_idx > end or sister_nodes[last_idx] != last_node:
try:
last_idx = sister_nodes.index(last_node)
except ValueError:
# list changed - cannot do select across them
return
if idx > end or sister_nodes[idx] != node:
try: # just in case
idx = sister_nodes.index(node)
except ValueError:
return
if last_idx > idx:
last_idx, idx = idx, last_idx
if not multiselect:
self.clear_selection()
for item in sister_nodes[last_idx:idx + 1]:
select(item)
if keep_anchor:
self._anchor = last_node
self._anchor_idx = last_idx
else:
self._anchor = node # in case idx was reversed, reset
self._anchor_idx = idx
self._last_selected_node = node
self._last_node_idx = idx
def clear_selection(self):
''' Deselects all the currently selected nodes.
'''
# keep the anchor and last selected node
deselect = self.deselect_node
nodes = self.selected_nodes
# empty beforehand so lookup in deselect will be fast
self.selected_nodes = []
for node in nodes:
deselect(node)
def get_selectable_nodes(self):
'''(internal) Returns a list of the nodes that can be selected. It can
be overwritten by the derived widget to return the correct list.
This list is used to determine which nodes to select with group
selection. E.g. the last element in the list will be selected when
home is pressed, pagedown will move (or add to, if shift is held) the
selection from the current position by negative :attr:`page_count`
nodes starting from the position of the currently selected node in
this list and so on. Still, nodes can be selected even if they are not
in this list.
.. note::
It is safe to dynamically change this list including removing,
adding, or re-arranging its elements. Nodes can be selected even
if they are not on this list. And selected nodes removed from the
list will remain selected until :meth:`deselect_node` is called.
.. warning::
Layouts display their children in the reverse order. That is, the
contents of :attr:`~kivy.uix.widget.Widget.children` is displayed
form right to left, bottom to top. Therefore, internally, the
indices of the elements returned by this function is reversed to
make it work by default for most layouts so that the final result
is that e.g. home, although it will select the last element on this
list, visually it'll select the first element when counting from
top to bottom and left to right. If this behavior is not desired,
a reversed list should be returned instead.
Defaults to returning :attr:`~kivy.uix.widget.Widget.children`.
'''
return self.children
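    # A minimal override sketch (illustrative only): a layout that exposes
    # only its enabled children, in top-to-bottom visual order, for group
    # selection.
    #
    #   class SelectableGrid(CompoundSelectionBehavior, GridLayout):
    #       def get_selectable_nodes(self):
    #           return [w for w in reversed(self.children) if not w.disabled]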
def goto_node(self, key, last_node, last_node_idx):
'''(internal) Used by the controller to get the node at the position
indicated by key. The key can be keyboard inputs, e.g. pageup,
or scroll inputs from the mouse scroll wheel, e.g. scrollup.
Last node is the last node selected and is used to find the resulting
node. For example, if the key is up, the returned node is one node
up from the last node.
It can be overwritten by the derived widget.
:Parameters:
`key`
str, the string used to find the desired node. It can be any
of the keyboard keys, as well as the mouse scrollup,
scrolldown, scrollright, and scrollleft strings. If letters
are typed in quick succession, the letters will be combined
before it's passed in as key and can be used to find nodes that
have an associated string that starts with those letters.
`last_node`
The last node that was selected.
`last_node_idx`
The cached index of the last node selected in the
:meth:`get_selectable_nodes` list. If the list hasn't changed
it saves having to look up the index of `last_node` in that
list.
:Returns:
tuple, the node targeted by key and its index in the
:meth:`get_selectable_nodes` list. Returning
`(last_node, last_node_idx)` indicates a node wasn't found.
'''
sister_nodes = self.get_selectable_nodes()
end = len(sister_nodes) - 1
counts = self._offset_counts
if end == -1:
return last_node, last_node_idx
if last_node_idx > end or sister_nodes[last_node_idx] != last_node:
try: # just in case
last_node_idx = sister_nodes.index(last_node)
except ValueError:
return last_node, last_node_idx
try:
idx = max(min(-counts[key] + last_node_idx, end), 0)
return sister_nodes[idx], idx
except KeyError:
pass
if key == 'home':
return sister_nodes[end], end
elif key == 'end':
return sister_nodes[0], 0
else:
return last_node, last_node_idx
def select_node(self, node):
''' Selects a node.
It is called by the controller when it selects a node and can be
called from the outside to select a node directly. The derived widget
should overwrite this method and change the node to its selected state
when this is called
:Parameters:
`node`
The node to be selected.
:Returns:
bool, True if the node was selected, False otherwise.
.. warning::
This method must be called by the derived widget using super if it
is overwritten.
'''
nodes = self.selected_nodes
if (not self.multiselect) and len(nodes):
self.clear_selection()
if node not in nodes:
nodes.append(node)
self._anchor = node
self._last_selected_node = node
return True
def deselect_node(self, node):
''' Deselects a possibly selected node.
It is called by the controller when it deselects a node and can also
be called from the outside to deselect a node directly. The derived
widget should overwrite this method and change the node to its
unselected state when this is called
:Parameters:
`node`
The node to be deselected.
.. warning::
This method must be called by the derived widget using super if it
is overwritten.
'''
try:
self.selected_nodes.remove(node)
except ValueError:
pass
| mit | 289,419,205,945,449,800 | 36.926099 | 79 | 0.616238 | false |
hpcc-systems/nagios-monitoring | hpcc-nagios-tools/hpcc_centralized_nagios.py | 1 | 24447 | #!/usr/bin/env python3
##############################################################################
# HPCC SYSTEMS software Copyright (C) 2015 HPCC Systems.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import lxml.etree as ET
import subprocess
import paramiko
import getpass
import sys
import os
Environment = input('Enter Environment Name > ')
Address = input('Enter IP > ')
ProdOrNot = input(
'Please specify if this environment is production by typing prod or if it is a non-production by typing non-prod > ')
xxx = getpass.getpass()
user = input('Enter username > ')
xml_local = '/tmp/environment.xml'
xml_remote = '/etc/HPCCSystems/environment.xml'
if ProdOrNot == 'prod':
HostDefinition = 'generic-prod-hpcc'
ServiceDefinition = 'hpcc-prod-service'
elif ProdOrNot == 'non-prod':
HostDefinition = 'generic-nonprod-hpcc'
ServiceDefinition = 'hpcc-nonprod-service'
else:
print('Invalid Option')
sys.exit(1)
class XML():
def __init__(self):
self.xml = self
def getXML(self):
host = paramiko.Transport((Address, 22))
host.connect(username=user, password=xxx)
sftp = paramiko.SFTPClient.from_transport(host)
sftp.get(xml_remote, xml_local)
class XMLParser():
def __init__(self):
self.parsing = self
def hpcc_components(self, component_name):
ParseXML = ET.parse(xml_local)
doc_root = ParseXML.getroot()
cluster = []
for i in doc_root.findall('.//{}Process/Instance'.format(component_name)):
ip = i.attrib['netAddress']
cluster.append(ip)
return cluster
def hpcc_master_components(self, component_name):
ParseXML = ET.parse(xml_local)
doc_root = ParseXML.getroot()
ip_port = []
for i in doc_root.findall('.//{}ServerProcess/Instance'.format(component_name)):
master_component = i.attrib['netAddress']
ip_port.append(master_component)
for i in doc_root.findall('.//{}ServerProcess/Instance'.format(component_name)):
port = i.attrib['port']
ip_port.append(port)
return ip_port
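    # Illustrative only: for a single Dali/Sasha the list returned above looks
    # like ['192.168.1.10', '7070'] -- the component's IP followed by its port,
    # both read from environment.xml (actual values depend on the environment).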
def Esp(self):
ParseXML = ET.parse(xml_local)
doc_root = ParseXML.getroot()
ip_port = {}
ports = []
for i in doc_root.findall('.//EspProcess/Instance'):
ip = i.attrib['netAddress']
for i in doc_root.findall('.//EspProcess/EspBinding'):
port = i.attrib['port']
ports.append(port)
ip_port[ip] = ports
return ip_port
def Roxie(self):
ParseXML = ET.parse(xml_local)
doc_root = ParseXML.getroot()
Roxies = []
for roxie in doc_root.findall('.//RoxieServerProcess'):
Roxie = roxie.attrib['computer']
Roxies.append(Roxie)
ip_addr = {}
for node in doc_root.findall('.//Hardware/Computer'):
Machine = node.attrib['name']
IP = node.attrib['netAddress']
ip_addr[Machine] = IP
RoxieSlaves = []
for key in ip_addr:
if key not in Roxies:
continue
elif key in Roxies:
RoxieSlaves.append(ip_addr[key])
return RoxieSlaves
def ThorMaster(self):
ParseXML = ET.parse(xml_local)
doc_root = ParseXML.getroot()
Thors = []
for i in doc_root.findall('.//ThorMasterProcess'):
Thor = i.attrib['computer']
Thors.append(Thor)
d = {}
for i in doc_root.findall('.//Hardware/Computer'):
Machine = i.attrib['name']
IP = i.attrib['netAddress']
d[Machine] = IP
masterport = {}
machine = []
for i in doc_root.findall('.//ThorCluster'):
try:
Computer = i.attrib['computer']
Port = i.attrib['masterport']
if Computer in d:
machine.append(Port)
except KeyError:
continue
masterport.setdefault(d[Computer], [])
masterport[d[Computer]].append(machine)
machine = []
ThorMasters = []
for i in d:
if i not in Thors:
continue
elif i in Thors:
ThorMasters.append(d[i])
return ThorMasters, masterport
def ThorSlave(self):
ParseXML = ET.parse(xml_local)
doc_root = ParseXML.getroot()
Thors = []
for i in doc_root.findall('.//ThorSlaveProcess'):
Thor = i.attrib['computer']
Thors.append(Thor)
d = {}
for i in doc_root.findall('.//Hardware/Computer'):
Machine = i.attrib['name']
IP = i.attrib['netAddress']
d[Machine] = IP
ThorSlaves = []
for i in d:
if i not in Thors:
continue
elif i in Thors:
ThorSlaves.append(d[i])
return ThorSlaves
XML().getXML()
hpcc_component = XMLParser()
# List of HPCC Components from xml file.
dafilesrv = hpcc_component.hpcc_components('Dafilesrv')
dali = hpcc_component.hpcc_master_components('Dali')
sasha = hpcc_component.hpcc_master_components('Sasha')
eclagent = hpcc_component.hpcc_components('EclAgent')
eclccserver = hpcc_component.hpcc_components('EclccServer')
eclserver = hpcc_component.hpcc_components('EclServer')
dfuserver = hpcc_component.hpcc_components('DfuServer')
eclscheduler = hpcc_component.hpcc_components('EclScheduler')
esp = hpcc_component.Esp()
roxie = hpcc_component.Roxie()
thormaster = hpcc_component.ThorMaster()
thorslave = hpcc_component.ThorSlave()
print('=== list of IP Addresses ===', '\n', dafilesrv, '\n')
print('Number of servers = ', len(dafilesrv), '\n')
print('=========================== Main Components =======================', '\n')
print('dali = ', dali, '\n')
print('sasha = ', sasha, '\n')
print('eclagent = ', eclagent, '\n')
print('Number of ECLAgents = ', len(eclagent), '\n')
print('eclccserver = ', eclccserver, '\n')
print('Number of eclccservers = ', len(eclccserver), '\n')
print('eclserver = ', eclserver, '\n')
print('Number of eclservers = ', len(eclserver), '\n')
print('dfuserver = ', dfuserver, '\n')
print('Number of DFUServers = ', len(dfuserver), '\n')
print('eclscheduler = ', eclscheduler, '\n')
print('Number of schedulers = ', len(eclscheduler), '\n')
print('esp = ', esp, '\n')
print('Number of ESP Servers = ', len(esp), '\n')
print('Roxie = ', roxie, '\n')
print('Number of Roxie Servers = ', len(roxie), '\n')
print('ThorMaster = ', thormaster[0], '\n')
print('Number of ThorMaster Nodes = ', len(thormaster[0]), '\n')
if len(thormaster[1]) > 0:
    print('Port Layout = ', thormaster[1], '\n')
print('ThorSlaves = ', thorslave, '\n')
print('Number of ThorSlaves = ', len(thorslave), '\n')
print('======================================================================', '\n')
### Creating directory structure
EnvironmentDirectory = '/etc/HPCCSystems/nagios/{}'.format(Environment)
if not os.path.isdir(EnvironmentDirectory):
    subprocess.call('mkdir -p {}'.format(EnvironmentDirectory), shell=True)
else:
print('Environment already exists')
sys.exit(1)
### Generating host configuration files with basic checks
for ip in dafilesrv:
if ip:
filename = '/etc/HPCCSystems/nagios/{}/{}.cfg'.format(Environment, ip)
config_file = open(filename, 'w')
host_conf = """
define host {{
use {0}
host_name {1}
alias {1}
address {1}
}}
define service {{
use {2}
host_name {1}
service_description Dafilesrv Status
check_command check_dafilesrv
}}
define service {{
use {2}
host_name {1}
service_description SSH Status
check_command check_ssh
}}
""".format(HostDefinition, ip, ServiceDefinition)
config_file.write(host_conf)
config_file.close()
### Appending Dali entry to host configuration file
def InsertDaliEntry():
filename = '/etc/HPCCSystems/nagios/{}/{}.cfg'.format(Environment, dali[0])
config_file = open(filename, 'a')
host_conf = """
define service {{
use {}
host_name {}
service_description Dali Service Status
check_command check_dali!{}!5000
}}
""".format(ServiceDefinition, dali[0], dali[1])
config_file.write(host_conf)
config_file.close()
### Appending Sasha entry to host configuration file
def InsertSashaEntry():
filename = '/etc/HPCCSystems/nagios/{}/{}.cfg'.format(Environment, sasha[0])
config_file = open(filename, 'a')
host_conf = """
define service {{
use {}
host_name {}
service_description Sasha Service Status
check_command check_sasha!{}!5000
}}
""".format(ServiceDefinition, sasha[0], sasha[1])
config_file.write(host_conf)
config_file.close()
### Appending ESP entry to host configuration file
def InsertESPServerEntry():
for ip in esp:
filename = '/etc/HPCCSystems/nagios/{}/{}.cfg'.format(Environment, ip)
config_file = open(filename, 'a')
for port in esp[ip]:
host_conf = """
define service {{
use {0}
host_name {1}
service_description ESP Status port {2}
check_command check_esp!{2}
}}
""".format(ServiceDefinition, ip, port)
config_file.write(host_conf)
config_file.close()
### Appending Roxie entry to host configuration file
def InsertRoxieEntry():
if roxie:
for ip in roxie:
filename = '/etc/HPCCSystems/nagios/{}/{}.cfg'.format(Environment, ip)
config_file = open(filename, 'a')
host_conf = """
define service {{
use {}
host_name {}
service_description Roxie Status
check_command check_roxie
}}
""".format(ServiceDefinition, ip)
config_file.write(host_conf)
config_file.close()
### Appending ThorMaster entry to host configuration file
def InsertThorMasterEntry():
if thormaster:
for ip in thormaster[0]:
if len(thormaster[1]) > 0:
for port in thormaster[1][ip]:
filename = '/etc/HPCCSystems/nagios/{}/{}.cfg'.format(Environment, ip)
config_file = open(filename, 'a')
host_conf = """
define service {{
use {0}
host_name {1}
service_description ThorMaster Status port {2}
check_command check_thormaster!{2}
}}
""".format(ServiceDefinition, ip, int(port[0]))
config_file.write(host_conf)
config_file.close()
else:
filename = '/etc/HPCCSystems/nagios/{}/{}.cfg'.format(Environment, ip)
config_file = open(filename, 'a')
host_conf = """
define service {{
use {}
host_name {}
service_description ThorMaster Status port 20000
check_command check_thormaster!20000
}}
""".format(ServiceDefinition, ip)
config_file.write(host_conf)
config_file.close()
### Appending new entries to hostgroup file
def EditHostGroups():
hostgroup_filename = open('/etc/HPCCSystems/nagios/{0}/{0}_hostgroup.cfg'.format(Environment), 'a')
cleanup = ','.join(dafilesrv)
bulk = """
define hostgroup {{
hostgroup_name {0}
alias {0}
members {1}
}}
""".format(Environment, cleanup)
hostgroup_filename.write(bulk)
hostgroup_filename.close()
def hpccCommands():
filename = '/etc/HPCCSystems/nagios/hpcc_objects/hpcc_commands.cfg'
    if not os.path.isfile(filename):
config_file = open(filename, 'a')
command_conf = """
### HPCC Platform Checks ###
define command{
command_name check_dafilesrv
command_line /usr/lib/nagios/plugins/check_dafilesrv $HOSTADDRESS$
}
define command{
command_name check_dali
command_line /usr/lib/nagios/plugins/check_dali $HOSTADDRESS$ $ARG1$ $ARG2$
}
define command{
command_name check_roxie
command_line /usr/lib/nagios/plugins/check_roxie $HOSTADDRESS$
}
define command{
command_name check_sasha
command_line /usr/lib/nagios/plugins/check_sasha $HOSTADDRESS$ $ARG1$ $ARG2$
}
### HPCC Custom Checks ###
define command{
command_name check_esp
command_line /usr/local/nagios/libexec/check_tcp -H $HOSTADDRESS$ -p $ARG1$
}
define command{
command_name check_thormaster
command_line /usr/local/nagios/libexec/check_tcp -H $HOSTADDRESS$ -p $ARG1$
}
"""
config_file.write(command_conf)
config_file.close()
def hpccTimePeriods():
filename = '/etc/HPCCSystems/nagios/hpcc_objects/hpcc_timeperiods.cfg'
    if not os.path.isfile(filename):
config_file = open(filename, 'a')
definitions_conf = """
# This defines a timeperiod where all times are valid for checks,
# notifications, etc. The classic "24x7" support nightmare. :-)
define timeperiod{
timeperiod_name hpcc-24x7
alias 24 Hours A Day, 7 Days A Week
sunday 00:00-24:00
monday 00:00-24:00
tuesday 00:00-24:00
wednesday 00:00-24:00
thursday 00:00-24:00
friday 00:00-24:00
saturday 00:00-24:00
}
# 'workhours/banker hours' timeperiod definition for non-production systems
define timeperiod{
timeperiod_name hpcc-workhours
alias Normal Work Hours
monday 06:00-17:00
tuesday 06:00-17:00
wednesday 06:00-17:00
thursday 06:00-17:00
friday 06:00-17:00
}
"""
config_file.write(definitions_conf)
config_file.close()
def hpccContacts():
filename = '/etc/HPCCSystems/nagios/hpcc_objects/hpcc_contacts.cfg'
    if not os.path.isfile(filename):
config_file = open(filename, 'a')
definitions_conf = """
define contact{
contact_name hpcc_support_team ; Short name of user
use generic-contact ; Inherit default values from generic-contact template
alias ; Full name of user
email [email protected] ; <<***** CHANGE THIS TO YOUR EMAIL ADDRESS ******
}
define contactgroup{
contactgroup_name hpcc-admins
alias hpcc-administrators
members hpcc_support_team
}
"""
config_file.write(definitions_conf)
config_file.close()
def hpccProdDefinitions():
filename = '/etc/HPCCSystems/nagios/hpcc_objects/hpcc_prod_definitions.cfg'
    if not os.path.isfile(filename):
config_file = open(filename, 'a')
definitions_conf = """
# HPCC Host Definition
define host{
name generic-prod-hpcc ; The name of this host template
notifications_enabled 1 ; Host notifications are enabled
event_handler_enabled 1 ; Host event handler is enabled
flap_detection_enabled 1 ; Flap detection is enabled
process_perf_data 1 ; Process performance data
retain_status_information 1 ; Retain status information across program restarts
retain_nonstatus_information 1 ; Retain non-status information across program restarts
notification_period hpcc-24x7 ; Send host notifications at any time
notification_interval 30 ; Resend notifications every 30 minutes
notification_options d,r ; Only send notifications for specific host states
contact_groups hpcc-admins ; Notifications get sent to the admins by default
check_period hpcc-24x7 ; By default, switches are monitored round the clock
check_interval m5 ; Switches are checked every 5 minutes
retry_interval 1 ; Schedule host check retries at 1 minute intervals
max_check_attempts 10 ; Check each switch 10 times (max)
check_command check-host-alive ; Default command to check if routers are "alive"
register 0 ; DONT REGISTER THIS DEFINITION - ITS NOT A REAL HOST, JUST A TEMPLATE!
}
# HPCC Service Definition
define service{
name hpcc-prod-service ; The 'name' of this service template
active_checks_enabled 1 ; Active service checks are enabled
passive_checks_enabled 1 ; Passive service checks are enabled/accepted
parallelize_check 1 ; Active service checks should be parallelized (disabling this can lead to major performance problems)
obsess_over_service 1 ; We should obsess over this service (if necessary)
check_freshness 0 ; Default is to NOT check service 'freshness'
notifications_enabled 1 ; Service notifications are enabled
event_handler_enabled 1 ; Service event handler is enabled
flap_detection_enabled 1 ; Flap detection is enabled
process_perf_data 1 ; Process performance data
retain_status_information 1 ; Retain status information across program restarts
retain_nonstatus_information 1 ; Retain non-status information across program restarts
is_volatile 0 ; The service is not volatile
check_period hpcc-24x7 ; The service can be checked at any time of the day
max_check_attempts 3 ; Re-check the service up to 3 times in order to determine its final (hard) state
normal_check_interval 10 ; Check the service every 10 minutes under normal conditions
retry_check_interval 2 ; Re-check the service every two minutes until a hard state can be determined
contact_groups hpcc-admins ; Notifications get sent out to everyone in the 'admins' group
notification_options w,u,c,r ; Send notifications about warning, unknown, critical, and recovery events
notification_interval 60 ; Re-notify about service problems every hour
notification_period hpcc-24x7 ; Notifications can be sent out at any time
register 0 ; DONT REGISTER THIS DEFINITION - ITS NOT A REAL SERVICE, JUST A TEMPLATE!
}
"""
config_file.write(definitions_conf)
config_file.close()
def hpccNonProdDefinitions():
filename = '/etc/HPCCSystems/nagios/hpcc_objects/hpcc_nonprod_definitions.cfg'
    if not os.path.isfile(filename):
config_file = open(filename, 'a')
definitions_conf = """
# HPCC Host Definition
define host{
name generic-nonprod-hpcc ; The name of this host template
notifications_enabled 1 ; Host notifications are enabled
event_handler_enabled 1 ; Host event handler is enabled
flap_detection_enabled 1 ; Flap detection is enabled
process_perf_data 1 ; Process performance data
retain_status_information 1 ; Retain status information across program restarts
retain_nonstatus_information 1 ; Retain non-status information across program restarts
notification_period hpcc-workhours ; Send host notifications at any time
notification_interval 30 ; Resend notifications every 30 minutes
notification_options d,r ; Only send notifications for specific host states
contact_groups hpcc-admins ; Notifications get sent to the admins by default
check_period hpcc-24x7 ; By default, switches are monitored round the clock
check_interval m5 ; Switches are checked every 5 minutes
retry_interval 1 ; Schedule host check retries at 1 minute intervals
max_check_attempts 10 ; Check each switch 10 times (max)
check_command check-host-alive ; Default command to check if routers are "alive"
register 0 ; DONT REGISTER THIS DEFINITION - ITS NOT A REAL HOST, JUST A TEMPLATE!
}
# HPCC Service Definition
define service{
name hpcc-nonprod-service
active_checks_enabled 1
passive_checks_enabled 1
parallelize_check 1
obsess_over_service 1
check_freshness 0
notifications_enabled 1
event_handler_enabled 1
flap_detection_enabled 1
process_perf_data 1
retain_status_information 1
retain_nonstatus_information 1
is_volatile 0
check_period hpcc-24x7
max_check_attempts 3
normal_check_interval 10
retry_check_interval 2
contact_groups hpcc-admins
notification_options w,u,c,r
notification_interval 60
notification_period hpcc-workhours
register 0
}
"""
config_file.write(definitions_conf)
config_file.close()
### Creating configuration files
InsertDaliEntry()
InsertSashaEntry()
InsertESPServerEntry()
InsertRoxieEntry()
InsertThorMasterEntry()
EditHostGroups()
hpccTimePeriods()
hpccContacts()
hpccCommands()
hpccProdDefinitions()
hpccNonProdDefinitions()
### Changing file ownership
subprocess.call('chown nagios. /etc/HPCCSystems/nagios/ -R', shell=True)
### Restarting Nagios application
subprocess.call('systemctl restart nagios.service', shell=True)
| apache-2.0 | 1,601,087,806,931,783,200 | 37.804762 | 166 | 0.553565 | false |
dellysunnymtech/bitbake | lib/bb/server/xmlrpc.py | 1 | 13251 | #
# BitBake XMLRPC Server
#
# Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer
# Copyright (C) 2006 - 2008 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
This module implements an xmlrpc server for BitBake.
Use this by deriving a class from BitBakeXMLRPCServer and then adding
methods which you want to "export" via XMLRPC. If the methods have the
prefix xmlrpc_, then registering those functions will happen automatically,
if not, you need to call register_function.
Use register_idle_function() to add a function which the xmlrpc server
calls from within serve_forever when no requests are pending. Make sure
that those functions are non-blocking or else you will introduce latency
in the server's main loop.
"""
import bb
import xmlrpclib, sys
from bb import daemonize
from bb.ui import uievent
import hashlib, time
import socket
import os, signal
import threading
try:
import cPickle as pickle
except ImportError:
import pickle
DEBUG = False
from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
import inspect, select, httplib
from . import BitBakeBaseServer, BitBakeBaseServerConnection, BaseImplServer
class BBTransport(xmlrpclib.Transport):
def __init__(self, timeout):
self.timeout = timeout
self.connection_token = None
xmlrpclib.Transport.__init__(self)
# Modified from default to pass timeout to HTTPConnection
def make_connection(self, host):
#return an existing connection if possible. This allows
#HTTP/1.1 keep-alive.
if self._connection and host == self._connection[0]:
return self._connection[1]
# create a HTTP connection object from a host descriptor
chost, self._extra_headers, x509 = self.get_host_info(host)
#store the host argument along with the connection object
self._connection = host, httplib.HTTPConnection(chost, timeout=self.timeout)
return self._connection[1]
def set_connection_token(self, token):
self.connection_token = token
def send_content(self, h, body):
if self.connection_token:
h.putheader("Bitbake-token", self.connection_token)
xmlrpclib.Transport.send_content(self, h, body)
def _create_server(host, port, timeout = 60):
t = BBTransport(timeout)
s = xmlrpclib.ServerProxy("http://%s:%d/" % (host, port), transport=t, allow_none=True)
return s, t
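# Illustrative sketch (not part of the original module): the proxy/transport pair returned
# above is typically used by feeding the token handed out by addClient() back into the
# transport before issuing commands; the command name below is only an example.
#
#   proxy, transport = _create_server("localhost", 8000)
#   token = proxy.addClient()
#   transport.set_connection_token(token)
#   result = proxy.runCommand(["getVariable", "BBPATH"])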
class BitBakeServerCommands():
def __init__(self, server):
self.server = server
self.has_client = False
def registerEventHandler(self, host, port):
"""
Register a remote UI Event Handler
"""
s, t = _create_server(host, port)
# we don't allow connections if the cooker is running
if (self.cooker.state in [bb.cooker.state.parsing, bb.cooker.state.running]):
return None
self.event_handle = bb.event.register_UIHhandler(s)
return self.event_handle
def unregisterEventHandler(self, handlerNum):
"""
Unregister a remote UI Event Handler
"""
return bb.event.unregister_UIHhandler(handlerNum)
def runCommand(self, command):
"""
Run a cooker command on the server
"""
return self.cooker.command.runCommand(command, self.server.readonly)
def getEventHandle(self):
return self.event_handle
def terminateServer(self):
"""
Trigger the server to quit
"""
self.server.quit = True
print("Server (cooker) exiting")
return
def addClient(self):
if self.has_client:
return None
token = hashlib.md5(str(time.time())).hexdigest()
self.server.set_connection_token(token)
self.has_client = True
return token
def removeClient(self):
if self.has_client:
self.server.set_connection_token(None)
self.has_client = False
if self.server.single_use:
self.server.quit = True
# This request handler checks if the request has a "Bitbake-token" header
# field (this comes from the client side) and compares it with its internal
# "Bitbake-token" field (this comes from the server). If the two are not
# equal, it is assumed that a client is trying to connect to the server
# while another client is connected to the server. In this case, a 503 error
# ("service unavailable") is returned to the client.
class BitBakeXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
def __init__(self, request, client_address, server):
self.server = server
SimpleXMLRPCRequestHandler.__init__(self, request, client_address, server)
def do_POST(self):
try:
remote_token = self.headers["Bitbake-token"]
except:
remote_token = None
if remote_token != self.server.connection_token and remote_token != "observer":
self.report_503()
else:
if remote_token == "observer":
self.server.readonly = True
else:
self.server.readonly = False
SimpleXMLRPCRequestHandler.do_POST(self)
def report_503(self):
self.send_response(503)
        response = 'No more clients allowed'
self.send_header("Content-type", "text/plain")
self.send_header("Content-length", str(len(response)))
self.end_headers()
self.wfile.write(response)
class XMLRPCProxyServer(BaseImplServer):
""" not a real working server, but a stub for a proxy server connection
"""
def __init__(self, host, port):
self.host = host
self.port = port
class XMLRPCServer(SimpleXMLRPCServer, BaseImplServer):
# remove this when you're done with debugging
# allow_reuse_address = True
def __init__(self, interface):
"""
Constructor
"""
BaseImplServer.__init__(self)
if (interface[1] == 0): # anonymous port, not getting reused
self.single_use = True
# Use auto port configuration
if (interface[1] == -1):
interface = (interface[0], 0)
SimpleXMLRPCServer.__init__(self, interface,
requestHandler=BitBakeXMLRPCRequestHandler,
logRequests=False, allow_none=True)
self.host, self.port = self.socket.getsockname()
self.connection_token = None
#self.register_introspection_functions()
self.commands = BitBakeServerCommands(self)
self.autoregister_all_functions(self.commands, "")
self.interface = interface
self.single_use = False
def addcooker(self, cooker):
BaseImplServer.addcooker(self, cooker)
self.commands.cooker = cooker
def autoregister_all_functions(self, context, prefix):
"""
Convenience method for registering all functions in the scope
of this class that start with a common prefix
"""
methodlist = inspect.getmembers(context, inspect.ismethod)
for name, method in methodlist:
if name.startswith(prefix):
self.register_function(method, name[len(prefix):])
def serve_forever(self):
# Start the actual XMLRPC server
bb.cooker.server_main(self.cooker, self._serve_forever)
def _serve_forever(self):
"""
Serve Requests. Overloaded to honor a quit command
"""
self.quit = False
while not self.quit:
fds = [self]
nextsleep = 0.1
for function, data in self._idlefuns.items():
try:
retval = function(self, data, False)
if retval is False:
del self._idlefuns[function]
elif retval is True:
nextsleep = 0
else:
fds = fds + retval
except SystemExit:
raise
except:
import traceback
traceback.print_exc()
pass
socktimeout = self.socket.gettimeout() or nextsleep
socktimeout = min(socktimeout, nextsleep)
# Mirror what BaseServer handle_request would do
try:
fd_sets = select.select(fds, [], [], socktimeout)
if fd_sets[0] and self in fd_sets[0]:
self._handle_request_noblock()
except IOError:
# we ignore interrupted calls
pass
# Tell idle functions we're exiting
for function, data in self._idlefuns.items():
try:
retval = function(self, data, True)
except:
pass
self.server_close()
return
def set_connection_token(self, token):
self.connection_token = token
class BitBakeXMLRPCServerConnection(BitBakeBaseServerConnection):
def __init__(self, serverImpl, clientinfo=("localhost", 0), observer_only = False, featureset = []):
self.connection, self.transport = _create_server(serverImpl.host, serverImpl.port)
self.clientinfo = clientinfo
self.serverImpl = serverImpl
self.observer_only = observer_only
self.featureset = featureset
def connect(self, token = None):
if token is None:
if self.observer_only:
token = "observer"
else:
token = self.connection.addClient()
if token is None:
return None
self.transport.set_connection_token(token)
self.events = uievent.BBUIEventQueue(self.connection, self.clientinfo)
for event in bb.event.ui_queue:
self.events.queue_event(event)
_, error = self.connection.runCommand(["setFeatures", self.featureset])
if error:
# no need to log it here, the error shall be sent to the client
raise BaseException(error)
return self
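    # Illustrative note (not part of the original module): a read-only UI typically reaches
    # this code through BitBakeXMLRPCClient(observer_only=True); connect() then falls back
    # to the special "observer" token, which BitBakeXMLRPCRequestHandler.do_POST() accepts
    # and maps to a read-only command channel.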
def removeClient(self):
if not self.observer_only:
self.connection.removeClient()
def terminate(self):
# Don't wait for server indefinitely
import socket
socket.setdefaulttimeout(2)
try:
self.events.system_quit()
except:
pass
try:
self.connection.removeClient()
except:
pass
class BitBakeServer(BitBakeBaseServer):
def initServer(self, interface = ("localhost", 0)):
self.interface = interface
self.serverImpl = XMLRPCServer(interface)
def detach(self):
daemonize.createDaemon(self.serverImpl.serve_forever, "bitbake-cookerdaemon.log")
del self.cooker
def establishConnection(self, featureset):
self.connection = BitBakeXMLRPCServerConnection(self.serverImpl, self.interface, False, featureset)
return self.connection.connect()
def set_connection_token(self, token):
self.connection.transport.set_connection_token(token)
class BitBakeXMLRPCClient(BitBakeBaseServer):
def __init__(self, observer_only = False, token = None):
self.token = token
self.observer_only = observer_only
# if we need extra caches, just tell the server to load them all
pass
def saveConnectionDetails(self, remote):
self.remote = remote
def establishConnection(self, featureset):
# The format of "remote" must be "server:port"
try:
[host, port] = self.remote.split(":")
port = int(port)
except Exception as e:
bb.warn("Failed to read remote definition (%s)" % str(e))
raise e
# We need our IP for the server connection. We get the IP
# by trying to connect with the server
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((host, port))
ip = s.getsockname()[0]
s.close()
except Exception as e:
bb.warn("Could not create socket for %s:%s (%s)" % (host, port, str(e)))
raise e
try:
self.serverImpl = XMLRPCProxyServer(host, port)
self.connection = BitBakeXMLRPCServerConnection(self.serverImpl, (ip, 0), self.observer_only, featureset)
return self.connection.connect(self.token)
except Exception as e:
bb.warn("Could not connect to server at %s:%s (%s)" % (host, port, str(e)))
raise e
def endSession(self):
self.connection.removeClient()
| gpl-2.0 | -5,845,057,423,331,369,000 | 33.871053 | 117 | 0.622066 | false |
bplancher/odoo | addons/stock/stock.py | 1 | 300413 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import date, datetime
from dateutil import relativedelta
import json
import time
import sets
import openerp
from openerp.osv import fields, osv
from openerp.tools.float_utils import float_compare, float_round
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
from openerp import SUPERUSER_ID, api, models
import openerp.addons.decimal_precision as dp
from openerp.addons.procurement import procurement
import logging
from openerp.exceptions import UserError
_logger = logging.getLogger(__name__)
#----------------------------------------------------------
# Incoterms
#----------------------------------------------------------
class stock_incoterms(osv.osv):
_name = "stock.incoterms"
_description = "Incoterms"
_columns = {
        'name': fields.char('Name', required=True, help="Incoterms are a series of sales terms. They are used to divide transaction costs and responsibilities between buyer and seller and reflect state-of-the-art transportation practices."),
'code': fields.char('Code', size=3, required=True, help="Incoterm Standard Code"),
'active': fields.boolean('Active', help="By unchecking the active field, you may hide an INCOTERM you will not use."),
}
_defaults = {
'active': True,
}
#----------------------------------------------------------
# Stock Location
#----------------------------------------------------------
class stock_location(osv.osv):
_name = "stock.location"
_description = "Inventory Locations"
_parent_name = "location_id"
_parent_store = True
_parent_order = 'name'
_order = 'parent_left'
_rec_name = 'complete_name'
def _location_owner(self, cr, uid, location, context=None):
''' Return the company owning the location if any '''
return location and (location.usage == 'internal') and location.company_id or False
def _complete_name(self, cr, uid, ids, name, args, context=None):
""" Forms complete name of location from parent location to child location.
@return: Dictionary of values
"""
res = {}
for m in self.browse(cr, uid, ids, context=context):
res[m.id] = m.name
parent = m.location_id
while parent:
res[m.id] = parent.name + '/' + res[m.id]
parent = parent.location_id
return res
def _get_sublocations(self, cr, uid, ids, context=None):
""" return all sublocations of the given stock locations (included) """
if context is None:
context = {}
context_with_inactive = context.copy()
context_with_inactive['active_test'] = False
return self.search(cr, uid, [('id', 'child_of', ids)], context=context_with_inactive)
def _name_get(self, cr, uid, location, context=None):
name = location.name
while location.location_id and location.usage != 'view':
location = location.location_id
name = location.name + '/' + name
return name
def name_get(self, cr, uid, ids, context=None):
res = []
for location in self.browse(cr, uid, ids, context=context):
res.append((location.id, self._name_get(cr, uid, location, context=context)))
return res
_columns = {
'name': fields.char('Location Name', required=True, translate=True),
'active': fields.boolean('Active', help="By unchecking the active field, you may hide a location without deleting it."),
'usage': fields.selection([
('supplier', 'Vendor Location'),
('view', 'View'),
('internal', 'Internal Location'),
('customer', 'Customer Location'),
('inventory', 'Inventory Loss'),
('procurement', 'Procurement'),
('production', 'Production'),
('transit', 'Transit Location')],
'Location Type', required=True,
help="""* Vendor Location: Virtual location representing the source location for products coming from your vendors
\n* View: Virtual location used to create a hierarchical structures for your warehouse, aggregating its child locations ; can't directly contain products
\n* Internal Location: Physical locations inside your own warehouses,
\n* Customer Location: Virtual location representing the destination location for products sent to your customers
\n* Inventory Loss: Virtual location serving as counterpart for inventory operations used to correct stock levels (Physical inventories)
\n* Procurement: Virtual location serving as temporary counterpart for procurement operations when the source (vendor or production) is not known yet. This location should be empty when the procurement scheduler has finished running.
\n* Production: Virtual counterpart location for production operations: this location consumes the raw material and produces finished products
\n* Transit Location: Counterpart location that should be used in inter-companies or inter-warehouses operations
""", select=True),
'complete_name': fields.function(_complete_name, type='char', string="Full Location Name",
store={'stock.location': (_get_sublocations, ['name', 'location_id', 'active'], 10)}),
'location_id': fields.many2one('stock.location', 'Parent Location', select=True, ondelete='cascade'),
'child_ids': fields.one2many('stock.location', 'location_id', 'Contains'),
'partner_id': fields.many2one('res.partner', 'Owner', help="Owner of the location if not internal"),
'comment': fields.text('Additional Information'),
'posx': fields.integer('Corridor (X)', help="Optional localization details, for information purpose only"),
'posy': fields.integer('Shelves (Y)', help="Optional localization details, for information purpose only"),
'posz': fields.integer('Height (Z)', help="Optional localization details, for information purpose only"),
'parent_left': fields.integer('Left Parent', select=1),
'parent_right': fields.integer('Right Parent', select=1),
'company_id': fields.many2one('res.company', 'Company', select=1, help='Let this field empty if this location is shared between companies'),
'scrap_location': fields.boolean('Is a Scrap Location?', help='Check this box to allow using this location to put scrapped/damaged goods.'),
'return_location': fields.boolean('Is a Return Location?', help='Check this box to allow using this location as a return location.'),
'removal_strategy_id': fields.many2one('product.removal', 'Removal Strategy', help="Defines the default method used for suggesting the exact location (shelf) where to take the products from, which lot etc. for this location. This method can be enforced at the product category level, and a fallback is made on the parent locations if none is set here."),
'putaway_strategy_id': fields.many2one('product.putaway', 'Put Away Strategy', help="Defines the default method used for suggesting the exact location (shelf) where to store the products. This method can be enforced at the product category level, and a fallback is made on the parent locations if none is set here."),
'barcode': fields.char('Barcode', copy=False, oldname='loc_barcode'),
}
_defaults = {
'active': True,
'usage': 'internal',
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.location', context=c),
'posx': 0,
'posy': 0,
'posz': 0,
'scrap_location': False,
}
_sql_constraints = [('barcode_company_uniq', 'unique (barcode,company_id)', 'The barcode for a location must be unique per company !')]
def create(self, cr, uid, default, context=None):
if not default.get('barcode', False):
default.update({'barcode': default.get('complete_name', False)})
return super(stock_location, self).create(cr, uid, default, context=context)
def get_putaway_strategy(self, cr, uid, location, product, context=None):
''' Returns the location where the product has to be put, if any compliant putaway strategy is found. Otherwise returns None.'''
putaway_obj = self.pool.get('product.putaway')
loc = location
while loc:
if loc.putaway_strategy_id:
res = putaway_obj.putaway_apply(cr, uid, loc.putaway_strategy_id, product, context=context)
if res:
return res
loc = loc.location_id
def _default_removal_strategy(self, cr, uid, context=None):
return 'fifo'
def get_removal_strategy(self, cr, uid, qty, move, ops=False, context=None):
''' Returns the removal strategy to consider for the given move/ops
:rtype: char
'''
product = move.product_id
location = move.location_id
if product.categ_id.removal_strategy_id:
return product.categ_id.removal_strategy_id.method
loc = location
while loc:
if loc.removal_strategy_id:
return loc.removal_strategy_id.method
loc = loc.location_id
return self._default_removal_strategy(cr, uid, context=context)
def get_warehouse(self, cr, uid, location, context=None):
"""
Returns warehouse id of warehouse that contains location
:param location: browse record (stock.location)
"""
wh_obj = self.pool.get("stock.warehouse")
whs = wh_obj.search(cr, uid, [('view_location_id.parent_left', '<=', location.parent_left),
('view_location_id.parent_right', '>=', location.parent_left)], context=context)
return whs and whs[0] or False
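        # Illustrative note (not part of the original method): the domain above relies on the
        # nested-set (parent_left/parent_right) encoding of stock.location. For example, a
        # warehouse whose view location spans the interval [10, 25] contains any location
        # whose parent_left falls inside that interval, e.g. a shelf with parent_left = 14.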
#----------------------------------------------------------
# Routes
#----------------------------------------------------------
class stock_location_route(osv.osv):
_name = 'stock.location.route'
_description = "Inventory Routes"
_order = 'sequence'
_columns = {
'name': fields.char('Route Name', required=True, translate=True),
'sequence': fields.integer('Sequence'),
'pull_ids': fields.one2many('procurement.rule', 'route_id', 'Procurement Rules', copy=True),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the route without removing it."),
'push_ids': fields.one2many('stock.location.path', 'route_id', 'Push Rules', copy=True),
'product_selectable': fields.boolean('Applicable on Product', help="When checked, the route will be selectable in the Inventory tab of the Product form. It will take priority over the Warehouse route. "),
'product_categ_selectable': fields.boolean('Applicable on Product Category', help="When checked, the route will be selectable on the Product Category. It will take priority over the Warehouse route. "),
'warehouse_selectable': fields.boolean('Applicable on Warehouse', help="When a warehouse is selected for this route, this route should be seen as the default route when products pass through this warehouse. This behaviour can be overridden by the routes on the Product/Product Categories or by the Preferred Routes on the Procurement"),
'supplied_wh_id': fields.many2one('stock.warehouse', 'Supplied Warehouse'),
'supplier_wh_id': fields.many2one('stock.warehouse', 'Supplying Warehouse'),
'company_id': fields.many2one('res.company', 'Company', select=1, help='Leave this field empty if this route is shared between all companies'),
#Reverse many2many fields:
'product_ids': fields.many2many('product.template', 'stock_route_product', 'route_id', 'product_id', 'Products'),
'categ_ids': fields.many2many('product.category', 'stock_location_route_categ', 'route_id', 'categ_id', 'Product Categories'),
'warehouse_ids': fields.many2many('stock.warehouse', 'stock_route_warehouse', 'route_id', 'warehouse_id', 'Warehouses'),
}
_defaults = {
'sequence': lambda self, cr, uid, ctx: 0,
'active': True,
'product_selectable': True,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.location.route', context=c),
}
def write(self, cr, uid, ids, vals, context=None):
'''when a route is deactivated, deactivate also its pull and push rules'''
if isinstance(ids, (int, long)):
ids = [ids]
res = super(stock_location_route, self).write(cr, uid, ids, vals, context=context)
if 'active' in vals:
push_ids = []
pull_ids = []
for route in self.browse(cr, uid, ids, context=context):
if route.push_ids:
push_ids += [r.id for r in route.push_ids if r.active != vals['active']]
if route.pull_ids:
pull_ids += [r.id for r in route.pull_ids if r.active != vals['active']]
if push_ids:
self.pool.get('stock.location.path').write(cr, uid, push_ids, {'active': vals['active']}, context=context)
if pull_ids:
self.pool.get('procurement.rule').write(cr, uid, pull_ids, {'active': vals['active']}, context=context)
return res
def view_product_ids(self, cr, uid, ids, context=None):
return {
'name': _('Products'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'product.template',
'type': 'ir.actions.act_window',
'domain': [('route_ids', 'in', ids[0])],
}
def view_categ_ids(self, cr, uid, ids, context=None):
return {
'name': _('Product Categories'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'product.category',
'type': 'ir.actions.act_window',
'domain': [('route_ids', 'in', ids[0])],
}
#----------------------------------------------------------
# Quants
#----------------------------------------------------------
class stock_quant(osv.osv):
"""
Quants are the smallest unit of stock physical instances
"""
_name = "stock.quant"
_description = "Quants"
def _get_quant_name(self, cr, uid, ids, name, args, context=None):
""" Forms complete name of location from parent location to child location.
@return: Dictionary of values
"""
res = {}
for q in self.browse(cr, uid, ids, context=context):
res[q.id] = q.product_id.code or ''
if q.lot_id:
res[q.id] = q.lot_id.name
res[q.id] += ': ' + str(q.qty) + q.product_id.uom_id.name
return res
def _calc_inventory_value(self, cr, uid, ids, name, attr, context=None):
context = dict(context or {})
res = {}
uid_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
for quant in self.browse(cr, uid, ids, context=context):
context.pop('force_company', None)
if quant.company_id.id != uid_company_id:
#if the company of the quant is different than the current user company, force the company in the context
#then re-do a browse to read the property fields for the good company.
context['force_company'] = quant.company_id.id
quant = self.browse(cr, uid, quant.id, context=context)
res[quant.id] = self._get_inventory_value(cr, uid, quant, context=context)
return res
def _get_inventory_value(self, cr, uid, quant, context=None):
return quant.product_id.standard_price * quant.qty
_columns = {
'name': fields.function(_get_quant_name, type='char', string='Identifier'),
'product_id': fields.many2one('product.product', 'Product', required=True, ondelete="restrict", readonly=True, select=True),
'location_id': fields.many2one('stock.location', 'Location', required=True, ondelete="restrict", readonly=True, select=True, auto_join=True),
'qty': fields.float('Quantity', required=True, help="Quantity of products in this quant, in the default unit of measure of the product", readonly=True, select=True),
'product_uom_id': fields.related('product_id', 'uom_id', type='many2one', relation="product.uom", string='Unit of Measure', readonly=True),
'package_id': fields.many2one('stock.quant.package', string='Package', help="The package containing this quant", readonly=True, select=True),
'packaging_type_id': fields.related('package_id', 'packaging_id', type='many2one', relation='product.packaging', string='Type of packaging', readonly=True, store=True),
'reservation_id': fields.many2one('stock.move', 'Reserved for Move', help="The move the quant is reserved for", readonly=True, select=True),
'lot_id': fields.many2one('stock.production.lot', 'Lot', readonly=True, select=True, ondelete="restrict"),
'cost': fields.float('Unit Cost'),
'owner_id': fields.many2one('res.partner', 'Owner', help="This is the owner of the quant", readonly=True, select=True),
'create_date': fields.datetime('Creation Date', readonly=True),
'in_date': fields.datetime('Incoming Date', readonly=True, select=True),
'history_ids': fields.many2many('stock.move', 'stock_quant_move_rel', 'quant_id', 'move_id', 'Moves', help='Moves that operate(d) on this quant', copy=False),
'company_id': fields.many2one('res.company', 'Company', help="The company to which the quants belong", required=True, readonly=True, select=True),
'inventory_value': fields.function(_calc_inventory_value, string="Inventory Value", type='float', readonly=True),
# Used for negative quants to reconcile after compensated by a new positive one
'propagated_from_id': fields.many2one('stock.quant', 'Linked Quant', help='The negative quant this is coming from', readonly=True, select=True),
'negative_move_id': fields.many2one('stock.move', 'Move Negative Quant', help='If this is a negative quant, this will be the move that caused this negative quant.', readonly=True),
'negative_dest_location_id': fields.related('negative_move_id', 'location_dest_id', type='many2one', relation='stock.location', string="Negative Destination Location", readonly=True,
help="Technical field used to record the destination location of a move that created a negative quant"),
}
_defaults = {
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.quant', context=c),
}
def init(self, cr):
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('stock_quant_product_location_index',))
if not cr.fetchone():
cr.execute('CREATE INDEX stock_quant_product_location_index ON stock_quant (product_id, location_id, company_id, qty, in_date, reservation_id)')
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
''' Overwrite the read_group in order to sum the function field 'inventory_value' in group by'''
res = super(stock_quant, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby, lazy=lazy)
if 'inventory_value' in fields:
for line in res:
if '__domain' in line:
lines = self.search(cr, uid, line['__domain'], context=context)
inv_value = 0.0
for line2 in self.browse(cr, uid, lines, context=context):
inv_value += line2.inventory_value
line['inventory_value'] = inv_value
return res
def action_view_quant_history(self, cr, uid, ids, context=None):
'''
This function returns an action that display the history of the quant, which
mean all the stock moves that lead to this quant creation with this quant quantity.
'''
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'stock', 'action_move_form2')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context={})[0]
move_ids = []
for quant in self.browse(cr, uid, ids, context=context):
move_ids += [move.id for move in quant.history_ids]
result['domain'] = "[('id','in',[" + ','.join(map(str, move_ids)) + "])]"
return result
def quants_reserve(self, cr, uid, quants, move, link=False, context=None):
'''This function reserves quants for the given move (and optionally given link). If the total of quantity reserved is enough, the move's state
is also set to 'assigned'
:param quants: list of tuple(quant browse record or None, qty to reserve). If None is given as first tuple element, the item will be ignored. Negative quants should not be received as argument
:param move: browse record
:param link: browse record (stock.move.operation.link)
'''
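        # Illustrative sketch (not part of the original method): callers typically feed this
        # the output of quants_get_preferred_domain(), e.g.
        #
        #   quants = quant_obj.quants_get_preferred_domain(cr, uid, move.product_qty, move, context=context)
        #   quant_obj.quants_reserve(cr, uid, quants, move, context=context)
        #
        # where each element of quants is a (quant browse record or None, qty) tuple.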
toreserve = []
reserved_availability = move.reserved_availability
#split quants if needed
for quant, qty in quants:
if qty <= 0.0 or (quant and quant.qty <= 0.0):
raise UserError(_('You can not reserve a negative quantity or a negative quant.'))
if not quant:
continue
self._quant_split(cr, uid, quant, qty, context=context)
toreserve.append(quant.id)
reserved_availability += quant.qty
#reserve quants
if toreserve:
self.write(cr, SUPERUSER_ID, toreserve, {'reservation_id': move.id}, context=context)
#check if move'state needs to be set as 'assigned'
rounding = move.product_id.uom_id.rounding
if float_compare(reserved_availability, move.product_qty, precision_rounding=rounding) == 0 and move.state in ('confirmed', 'waiting') :
self.pool.get('stock.move').write(cr, uid, [move.id], {'state': 'assigned'}, context=context)
elif float_compare(reserved_availability, 0, precision_rounding=rounding) > 0 and not move.partially_available:
self.pool.get('stock.move').write(cr, uid, [move.id], {'partially_available': True}, context=context)
def quants_move(self, cr, uid, quants, move, location_to, location_from=False, lot_id=False, owner_id=False, src_package_id=False, dest_package_id=False, entire_pack=False, context=None):
"""Moves all given stock.quant in the given destination location. Unreserve from current move.
:param quants: list of tuple(browse record(stock.quant) or None, quantity to move)
:param move: browse record (stock.move)
:param location_to: browse record (stock.location) depicting where the quants have to be moved
:param location_from: optional browse record (stock.location) explaining where the quant has to be taken (may differ from the move source location in case a removal strategy applied). This parameter is only used to pass to _quant_create if a negative quant must be created
:param lot_id: ID of the lot that must be set on the quants to move
:param owner_id: ID of the partner that must own the quants to move
:param src_package_id: ID of the package that contains the quants to move
:param dest_package_id: ID of the package that must be set on the moved quant
"""
quants_reconcile = []
to_move_quants = []
self._check_location(cr, uid, location_to, context=context)
check_lot = False
for quant, qty in quants:
if not quant:
#If quant is None, we will create a quant to move (and potentially a negative counterpart too)
quant = self._quant_create(cr, uid, qty, move, lot_id=lot_id, owner_id=owner_id, src_package_id=src_package_id, dest_package_id=dest_package_id, force_location_from=location_from, force_location_to=location_to, context=context)
check_lot = True
else:
self._quant_split(cr, uid, quant, qty, context=context)
to_move_quants.append(quant)
quants_reconcile.append(quant)
if to_move_quants:
to_recompute_move_ids = [x.reservation_id.id for x in to_move_quants if x.reservation_id and x.reservation_id.id != move.id]
self.move_quants_write(cr, uid, to_move_quants, move, location_to, dest_package_id, lot_id=lot_id, entire_pack=entire_pack, context=context)
self.pool.get('stock.move').recalculate_move_state(cr, uid, to_recompute_move_ids, context=context)
if location_to.usage == 'internal':
# Do manual search for quant to avoid full table scan (order by id)
cr.execute("""
SELECT 0 FROM stock_quant, stock_location WHERE product_id = %s AND stock_location.id = stock_quant.location_id AND
((stock_location.parent_left >= %s AND stock_location.parent_left < %s) OR stock_location.id = %s) AND qty < 0.0 LIMIT 1
""", (move.product_id.id, location_to.parent_left, location_to.parent_right, location_to.id))
if cr.fetchone():
for quant in quants_reconcile:
self._quant_reconcile_negative(cr, uid, quant, move, context=context)
# In case of serial tracking, check if the product does not exist somewhere internally already
# Checking that a positive quant already exists in an internal location is too restrictive.
# Indeed, if a warehouse is configured with several steps (e.g. "Pick + Pack + Ship") and
# one step is forced (creates a quant of qty = -1.0), it is not possible afterwards to
# correct the inventory unless the product leaves the stock.
picking_type = move.picking_id and move.picking_id.picking_type_id or False
if check_lot and lot_id and move.product_id.tracking == 'serial' and (not picking_type or (picking_type.use_create_lots or picking_type.use_existing_lots)):
other_quants = self.search(cr, uid, [('product_id', '=', move.product_id.id), ('lot_id', '=', lot_id),
('location_id.usage', '=', 'internal')], context=context)
if other_quants:
# We raise an error if:
# - the total quantity is strictly larger than 1.0
# - there are more than one negative quant, to avoid situations where the user would
# force the quantity at several steps of the process
other_quants = self.browse(cr, uid, other_quants, context=context)
if sum(other_quants.mapped('qty')) > 1.0 or len([q for q in other_quants.mapped('qty') if q < 0]) > 1:
lot_name = self.pool['stock.production.lot'].browse(cr, uid, lot_id, context=context).name
raise UserError(_('The serial number %s is already in stock.') % lot_name + _("Otherwise make sure the right stock/owner is set."))
def move_quants_write(self, cr, uid, quants, move, location_dest_id, dest_package_id, lot_id = False, entire_pack=False, context=None):
context=context or {}
vals = {'location_id': location_dest_id.id,
'history_ids': [(4, move.id)],
'reservation_id': False}
if lot_id and any(x.id for x in quants if not x.lot_id.id):
vals['lot_id'] = lot_id
if not entire_pack:
vals.update({'package_id': dest_package_id})
self.write(cr, SUPERUSER_ID, [q.id for q in quants], vals, context=context)
def quants_get_preferred_domain(self, cr, uid, qty, move, ops=False, lot_id=False, domain=None, preferred_domain_list=[], context=None):
''' This function tries to find quants for the given domain and move/ops, by trying to first limit
the choice on the quants that match the first item of preferred_domain_list as well. But if the qty requested is not reached
it tries to find the remaining quantity by looping on the preferred_domain_list (tries with the second item and so on).
Make sure the quants aren't found twice => all the domains of preferred_domain_list should be orthogonal
'''
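        # Illustrative example (not part of the original method): for a move restricted to a
        # given lot, an orthogonal preferred_domain_list could be
        #
        #   [[('lot_id', '=', restrict_lot_id)], [('lot_id', '=', False)]]
        #
        # so quants carrying the requested lot are reserved first and untracked quants only
        # make up the remainder.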
context = context or {}
domain = domain or [('qty', '>', 0.0)]
domain = list(domain)
quants = [(None, qty)]
if ops:
restrict_lot_id = lot_id
location = ops.location_id
if ops.owner_id:
domain += [('owner_id', '=', ops.owner_id.id)]
if ops.package_id and not ops.product_id:
domain += [('package_id', 'child_of', ops.package_id.id)]
elif ops.package_id and ops.product_id:
domain += [('package_id', '=', ops.package_id.id)]
else:
domain += [('package_id', '=', False)]
domain += [('location_id', '=', ops.location_id.id)]
else:
restrict_lot_id = move.restrict_lot_id.id
location = move.location_id
if move.restrict_partner_id:
domain += [('owner_id', '=', move.restrict_partner_id.id)]
domain += [('location_id', 'child_of', move.location_id.id)]
if context.get('force_company'):
domain += [('company_id', '=', context.get('force_company'))]
else:
domain += [('company_id', '=', move.company_id.id)]
removal_strategy = self.pool.get('stock.location').get_removal_strategy(cr, uid, qty, move, ops=ops, context=context)
product = move.product_id
domain += [('product_id', '=', move.product_id.id)]
#don't look for quants in location that are of type production, supplier or inventory.
if location.usage in ['inventory', 'production', 'supplier']:
return quants
res_qty = qty
if restrict_lot_id:
if not preferred_domain_list:
preferred_domain_list = [[('lot_id', '=', restrict_lot_id)], [('lot_id', '=', False)]]
else:
lot_list = []
no_lot_list = []
for pref_domain in preferred_domain_list:
pref_lot_domain = pref_domain + [('lot_id', '=', restrict_lot_id)]
pref_no_lot_domain = pref_domain + [('lot_id', '=', False)]
lot_list.append(pref_lot_domain)
no_lot_list.append(pref_no_lot_domain)
preferred_domain_list = lot_list + no_lot_list
if not preferred_domain_list:
return self.quants_get(cr, uid, qty, move, ops=ops, domain=domain, removal_strategy=removal_strategy, context=context)
for preferred_domain in preferred_domain_list:
res_qty_cmp = float_compare(res_qty, 0, precision_rounding=product.uom_id.rounding)
if res_qty_cmp > 0:
#try to replace the last tuple (None, res_qty) with something that wasn't chosen at first because of the preferred order
quants.pop()
tmp_quants = self.quants_get(cr, uid, res_qty, move, ops=ops, domain=domain + preferred_domain,
removal_strategy=removal_strategy, context=context)
for quant in tmp_quants:
if quant[0]:
res_qty -= quant[1]
quants += tmp_quants
return quants
def quants_get(self, cr, uid, qty, move, ops=False, domain=None, removal_strategy='fifo', context=None):
"""
Use the removal strategies of product to search for the correct quants
If you inherit, put the super at the end of your method.
:location: browse record of the parent location where the quants have to be found
:product: browse record of the product to find
:qty in UoM of product
"""
domain = domain or [('qty', '>', 0.0)]
return self.apply_removal_strategy(cr, uid, qty, move, ops=ops, domain=domain, removal_strategy=removal_strategy, context=context)
def apply_removal_strategy(self, cr, uid, quantity, move, ops=False, domain=None, removal_strategy='fifo', context=None):
if removal_strategy == 'fifo':
order = 'in_date, id'
return self._quants_get_order(cr, uid, quantity, move, ops=ops, domain=domain, orderby=order, context=context)
elif removal_strategy == 'lifo':
order = 'in_date desc, id desc'
return self._quants_get_order(cr, uid, quantity, move, ops=ops, domain=domain, orderby=order, context=context)
raise UserError(_('Removal strategy %s not implemented.') % (removal_strategy,))
def _quant_create(self, cr, uid, qty, move, lot_id=False, owner_id=False, src_package_id=False, dest_package_id=False,
force_location_from=False, force_location_to=False, context=None):
'''Create a quant in the destination location and create a negative quant in the source location if it's an internal location.
'''
if context is None:
context = {}
price_unit = self.pool.get('stock.move').get_price_unit(cr, uid, move, context=context)
location = force_location_to or move.location_dest_id
rounding = move.product_id.uom_id.rounding
vals = {
'product_id': move.product_id.id,
'location_id': location.id,
'qty': float_round(qty, precision_rounding=rounding),
'cost': price_unit,
'history_ids': [(4, move.id)],
'in_date': datetime.now().strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'company_id': move.company_id.id,
'lot_id': lot_id,
'owner_id': owner_id,
'package_id': dest_package_id,
}
if move.location_id.usage == 'internal':
#if we were trying to move something from an internal location and reach here (quant creation),
#it means that a negative quant has to be created as well.
negative_vals = vals.copy()
negative_vals['location_id'] = force_location_from and force_location_from.id or move.location_id.id
negative_vals['qty'] = float_round(-qty, precision_rounding=rounding)
negative_vals['cost'] = price_unit
negative_vals['negative_move_id'] = move.id
negative_vals['package_id'] = src_package_id
negative_quant_id = self.create(cr, SUPERUSER_ID, negative_vals, context=context)
vals.update({'propagated_from_id': negative_quant_id})
picking_type = move.picking_id and move.picking_id.picking_type_id or False
if lot_id and move.product_id.tracking == 'serial' and (not picking_type or (picking_type.use_create_lots or picking_type.use_existing_lots)):
if qty != 1.0:
raise UserError(_('You should only receive by the piece with the same serial number'))
#create the quant as superuser, because we want to restrict the creation of quant manually: we should always use this method to create quants
quant_id = self.create(cr, SUPERUSER_ID, vals, context=context)
return self.browse(cr, uid, quant_id, context=context)
def _quant_split(self, cr, uid, quant, qty, context=None):
context = context or {}
rounding = quant.product_id.uom_id.rounding
if float_compare(abs(quant.qty), abs(qty), precision_rounding=rounding) <= 0: # if quant <= qty in abs, take it entirely
return False
qty_round = float_round(qty, precision_rounding=rounding)
new_qty_round = float_round(quant.qty - qty, precision_rounding=rounding)
# Fetch the history_ids manually as it will not do a join with the stock moves then (=> a lot faster)
cr.execute("""SELECT move_id FROM stock_quant_move_rel WHERE quant_id = %s""", (quant.id,))
res = cr.fetchall()
new_quant = self.copy(cr, SUPERUSER_ID, quant.id, default={'qty': new_qty_round, 'history_ids': [(4, x[0]) for x in res]}, context=context)
self.write(cr, SUPERUSER_ID, quant.id, {'qty': qty_round}, context=context)
return self.browse(cr, uid, new_quant, context=context)
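    # Illustrative sketch (quantities hypothetical): splitting a quant of 10.0 with
    # _quant_split(cr, uid, quant, 4.0) shrinks the original quant to 4.0 and
    # returns a new quant of 6.0 sharing the same history moves; if the requested
    # qty covers the whole quant, nothing is split and False is returned.
    #
    #   leftover = self._quant_split(cr, uid, quant, 4.0, context=context)
    #   if leftover:
    #       # quant.qty is now 4.0, leftover.qty is 6.0
    #       pass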
def _get_latest_move(self, cr, uid, quant, context=None):
move = False
for m in quant.history_ids:
if not move or m.date > move.date:
move = m
return move
@api.cr_uid_ids_context
def _quants_merge(self, cr, uid, solved_quant_ids, solving_quant, context=None):
path = []
for move in solving_quant.history_ids:
path.append((4, move.id))
self.write(cr, SUPERUSER_ID, solved_quant_ids, {'history_ids': path}, context=context)
def _search_quants_to_reconcile(self, cr, uid, quant, context=None):
"""
        Searches for negative quants that can be reconciled with the given quant, at the location where it is put
"""
dom = [('qty', '<', 0)]
order = 'in_date'
dom += [('location_id', 'child_of', quant.location_id.id), ('product_id', '=', quant.product_id.id),
('owner_id', '=', quant.owner_id.id)]
if quant.package_id.id:
dom += [('package_id', '=', quant.package_id.id)]
if quant.lot_id:
dom += ['|', ('lot_id', '=', False), ('lot_id', '=', quant.lot_id.id)]
order = 'lot_id, in_date'
# Do not let the quant eat itself, or it will kill its history (e.g. returns / Stock -> Stock)
dom += [('id', '!=', quant.propagated_from_id.id)]
quants_search = self.search(cr, uid, dom, order=order, context=context)
product = quant.product_id
quants = []
quantity = quant.qty
for quant in self.browse(cr, uid, quants_search, context=context):
rounding = product.uom_id.rounding
if float_compare(quantity, abs(quant.qty), precision_rounding=rounding) >= 0:
quants += [(quant, abs(quant.qty))]
quantity -= abs(quant.qty)
elif float_compare(quantity, 0.0, precision_rounding=rounding) != 0:
quants += [(quant, quantity)]
quantity = 0
break
return quants
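    # Illustrative note (sketch, quantities hypothetical): the result is a list of
    # (negative_quant, qty_to_take) pairs consumed greedily up to the quantity of
    # the incoming quant. For an incoming quant of 7.0 and negative quants of -5.0
    # and -4.0 this would be roughly:
    #
    #   [(quant_neg_5, 5.0), (quant_neg_4, 2.0)]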
def _quant_reconcile_negative(self, cr, uid, quant, move, context=None):
"""
When new quant arrive in a location, try to reconcile it with
negative quants. If it's possible, apply the cost of the new
quant to the counterpart of the negative quant.
"""
context = context or {}
context = dict(context)
context.update({'force_unlink': True})
solving_quant = quant
quants = self._search_quants_to_reconcile(cr, uid, quant, context=context)
product_uom_rounding = quant.product_id.uom_id.rounding
for quant_neg, qty in quants:
if not quant_neg or not solving_quant:
continue
to_solve_quant_ids = self.search(cr, uid, [('propagated_from_id', '=', quant_neg.id)], context=context)
if not to_solve_quant_ids:
continue
solving_qty = qty
solved_quant_ids = []
for to_solve_quant in self.browse(cr, uid, to_solve_quant_ids, context=context):
if float_compare(solving_qty, 0, precision_rounding=product_uom_rounding) <= 0:
continue
solved_quant_ids.append(to_solve_quant.id)
self._quant_split(cr, uid, to_solve_quant, min(solving_qty, to_solve_quant.qty), context=context)
solving_qty -= min(solving_qty, to_solve_quant.qty)
remaining_solving_quant = self._quant_split(cr, uid, solving_quant, qty, context=context)
remaining_neg_quant = self._quant_split(cr, uid, quant_neg, -qty, context=context)
#if the reconciliation was not complete, we need to link together the remaining parts
if remaining_neg_quant:
remaining_to_solve_quant_ids = self.search(cr, uid, [('propagated_from_id', '=', quant_neg.id), ('id', 'not in', solved_quant_ids)], context=context)
if remaining_to_solve_quant_ids:
self.write(cr, SUPERUSER_ID, remaining_to_solve_quant_ids, {'propagated_from_id': remaining_neg_quant.id}, context=context)
if solving_quant.propagated_from_id and solved_quant_ids:
self.write(cr, SUPERUSER_ID, solved_quant_ids, {'propagated_from_id': solving_quant.propagated_from_id.id}, context=context)
            #delete the reconciled negative quant, as it is replaced by the solved quants
self.unlink(cr, SUPERUSER_ID, [quant_neg.id], context=context)
if solved_quant_ids:
#price update + accounting entries adjustments
self._price_update(cr, uid, solved_quant_ids, solving_quant.cost, context=context)
#merge history (and cost?)
self._quants_merge(cr, uid, solved_quant_ids, solving_quant, context=context)
self.unlink(cr, SUPERUSER_ID, [solving_quant.id], context=context)
solving_quant = remaining_solving_quant
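    # Illustrative note (sketch): for each matching negative quant, reconciliation
    # splits both sides to the common quantity, re-points the quants that were
    # propagated from the negative quant, transfers the cost and the move history
    # to them, and finally unlinks both the negative quant and the consumed part of
    # the incoming quant (which is why 'force_unlink' is set in the context above).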
def _price_update(self, cr, uid, ids, newprice, context=None):
self.write(cr, SUPERUSER_ID, ids, {'cost': newprice}, context=context)
def quants_unreserve(self, cr, uid, move, context=None):
related_quants = [x.id for x in move.reserved_quant_ids]
if related_quants:
#if move has a picking_id, write on that picking that pack_operation might have changed and need to be recomputed
if move.partially_available:
self.pool.get("stock.move").write(cr, uid, [move.id], {'partially_available': False}, context=context)
self.write(cr, SUPERUSER_ID, related_quants, {'reservation_id': False}, context=context)
def _quants_get_order(self, cr, uid, quantity, move, ops=False, domain=[], orderby='in_date', context=None):
''' Implementation of removal strategies
        If the full quantity cannot be reserved, the result ends with a tuple (None, remaining_qty)
'''
if context is None:
context = {}
product = move.product_id
res = []
offset = 0
while float_compare(quantity, 0, precision_rounding=product.uom_id.rounding) > 0:
quants = self.search(cr, uid, domain, order=orderby, limit=10, offset=offset, context=context)
if not quants:
res.append((None, quantity))
break
for quant in self.browse(cr, uid, quants, context=context):
rounding = product.uom_id.rounding
if float_compare(quantity, abs(quant.qty), precision_rounding=rounding) >= 0:
res += [(quant, abs(quant.qty))]
quantity -= abs(quant.qty)
elif float_compare(quantity, 0.0, precision_rounding=rounding) != 0:
res += [(quant, quantity)]
quantity = 0
break
offset += 10
return res
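    # Illustrative note (sketch, quantities hypothetical): the returned list pairs
    # each quant with the quantity to take from it and ends with (None, missing_qty)
    # when there is not enough stock. For a requested quantity of 12.0 with two
    # matching quants of 5.0 and 4.0:
    #
    #   [(quant_a, 5.0), (quant_b, 4.0), (None, 3.0)]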
def _check_location(self, cr, uid, location, context=None):
if location.usage == 'view':
raise UserError(_('You cannot move to a location of type view %s.') % (location.name))
return True
def unlink(self, cr, uid, ids, context=None):
context = context or {}
if not context.get('force_unlink'):
raise UserError(_('Under no circumstances should you delete or change quants yourselves!'))
super(stock_quant, self).unlink(cr, uid, ids, context=context)
#----------------------------------------------------------
# Stock Picking
#----------------------------------------------------------
class stock_picking(models.Model):
_name = "stock.picking"
_inherit = ['mail.thread']
_description = "Transfer"
_order = "priority desc, date asc, id desc"
def _set_min_date(self, cr, uid, id, field, value, arg, context=None):
move_obj = self.pool.get("stock.move")
if value:
move_ids = [move.id for move in self.browse(cr, uid, id, context=context).move_lines]
move_obj.write(cr, uid, move_ids, {'date_expected': value}, context=context)
def _set_priority(self, cr, uid, id, field, value, arg, context=None):
move_obj = self.pool.get("stock.move")
if value:
move_ids = [move.id for move in self.browse(cr, uid, id, context=context).move_lines]
move_obj.write(cr, uid, move_ids, {'priority': value}, context=context)
def get_min_max_date(self, cr, uid, ids, field_name, arg, context=None):
""" Finds minimum and maximum dates for picking.
@return: Dictionary of values
"""
res = {}
for id in ids:
res[id] = {'min_date': False, 'max_date': False, 'priority': '1'}
if not ids:
return res
cr.execute("""select
picking_id,
min(date_expected),
max(date_expected),
max(priority)
from
stock_move
where
picking_id IN %s
group by
picking_id""", (tuple(ids),))
for pick, dt1, dt2, prio in cr.fetchall():
res[pick]['min_date'] = dt1
res[pick]['max_date'] = dt2
res[pick]['priority'] = prio
return res
def create(self, cr, user, vals, context=None):
context = context or {}
if ('name' not in vals) or (vals.get('name') in ('/', False)):
ptype_id = vals.get('picking_type_id', context.get('default_picking_type_id', False))
sequence_id = self.pool.get('stock.picking.type').browse(cr, user, ptype_id, context=context).sequence_id.id
vals['name'] = self.pool.get('ir.sequence').next_by_id(cr, user, sequence_id, context=context)
# As the on_change in one2many list is WIP, we will overwrite the locations on the stock moves here
# As it is a create the format will be a list of (0, 0, dict)
if vals.get('move_lines') and vals.get('location_id') and vals.get('location_dest_id'):
for move in vals['move_lines']:
if len(move) == 3:
move[2]['location_id'] = vals['location_id']
move[2]['location_dest_id'] = vals['location_dest_id']
return super(stock_picking, self).create(cr, user, vals, context)
def write(self, cr, uid, ids, vals, context=None):
res = super(stock_picking, self).write(cr, uid, ids, vals, context=context)
after_vals = {}
if vals.get('location_id'):
after_vals['location_id'] = vals['location_id']
if vals.get('location_dest_id'):
after_vals['location_dest_id'] = vals['location_dest_id']
# Change locations of moves if those of the picking change
if after_vals:
moves = []
for pick in self.browse(cr, uid, ids, context=context):
moves += [x.id for x in pick.move_lines if not x.scrapped]
if moves:
self.pool['stock.move'].write(cr, uid, moves, after_vals, context=context)
return res
def _state_get(self, cr, uid, ids, field_name, arg, context=None):
'''The state of a picking depends on the state of its related stock.move
draft: the picking has no line or any one of the lines is draft
done, draft, cancel: all lines are done / draft / cancel
confirmed, waiting, assigned, partially_available depends on move_type (all at once or partial)
'''
res = {}
for pick in self.browse(cr, uid, ids, context=context):
if not pick.move_lines:
res[pick.id] = pick.launch_pack_operations and 'assigned' or 'draft'
continue
if any([x.state == 'draft' for x in pick.move_lines]):
res[pick.id] = 'draft'
continue
if all([x.state == 'cancel' for x in pick.move_lines]):
res[pick.id] = 'cancel'
continue
if all([x.state in ('cancel', 'done') for x in pick.move_lines]):
res[pick.id] = 'done'
continue
order = {'confirmed': 0, 'waiting': 1, 'assigned': 2}
order_inv = {0: 'confirmed', 1: 'waiting', 2: 'assigned'}
lst = [order[x.state] for x in pick.move_lines if x.state not in ('cancel', 'done')]
if pick.move_type == 'one':
res[pick.id] = order_inv[min(lst)]
else:
                #we are in the case of partial delivery, so if all moves are assigned, the picking
                #should be assigned too; if one of the moves is assigned or partially available, the picking should
                #be in the partially available state; otherwise the picking is in waiting or confirmed state
res[pick.id] = order_inv[max(lst)]
if not all(x == 2 for x in lst):
if any(x == 2 for x in lst):
res[pick.id] = 'partially_available'
else:
#if all moves aren't assigned, check if we have one product partially available
for move in pick.move_lines:
if move.partially_available:
res[pick.id] = 'partially_available'
break
return res
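    # Illustrative note (sketch): with move_type 'one' the picking takes the state
    # of its least advanced move, while with 'direct' it takes the most advanced
    # one (downgraded to 'partially_available' if not every move is assigned).
    # For moves in states ('assigned', 'confirmed'):
    #
    #   move_type == 'one'    -> picking state 'confirmed'
    #   move_type == 'direct' -> picking state 'partially_available'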
def _get_pickings(self, cr, uid, ids, context=None):
res = set()
for move in self.browse(cr, uid, ids, context=context):
if move.picking_id:
res.add(move.picking_id.id)
return list(res)
def _get_pickings_dates_priority(self, cr, uid, ids, context=None):
res = set()
for move in self.browse(cr, uid, ids, context=context):
if move.picking_id and (not (move.picking_id.min_date < move.date_expected < move.picking_id.max_date) or move.priority > move.picking_id.priority):
res.add(move.picking_id.id)
return list(res)
def _get_pack_operation_exist(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for pick in self.browse(cr, uid, ids, context=context):
res[pick.id] = False
if pick.pack_operation_ids:
res[pick.id] = True
return res
def _get_quant_reserved_exist(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for pick in self.browse(cr, uid, ids, context=context):
res[pick.id] = False
for move in pick.move_lines:
if move.reserved_quant_ids:
res[pick.id] = True
continue
return res
def action_assign_owner(self, cr, uid, ids, context=None):
for picking in self.browse(cr, uid, ids, context=context):
packop_ids = [op.id for op in picking.pack_operation_ids]
self.pool.get('stock.pack.operation').write(cr, uid, packop_ids, {'owner_id': picking.owner_id.id}, context=context)
def onchange_picking_type(self, cr, uid, ids, picking_type_id, partner_id, context=None):
res = {}
if picking_type_id:
picking_type = self.pool['stock.picking.type'].browse(cr, uid, picking_type_id, context=context)
if not picking_type.default_location_src_id:
if partner_id:
partner = self.pool['res.partner'].browse(cr, uid, partner_id, context=context)
location_id = partner.property_stock_supplier.id
else:
customerloc, supplierloc = self.pool['stock.warehouse']._get_partner_locations(cr, uid, [], context=context)
location_id = supplierloc.id
else:
location_id = picking_type.default_location_src_id.id
if not picking_type.default_location_dest_id:
if partner_id:
partner = self.pool['res.partner'].browse(cr, uid, partner_id, context=context)
location_dest_id = partner.property_stock_customer.id
else:
customerloc, supplierloc = self.pool['stock.warehouse']._get_partner_locations(cr, uid, [], context=context)
location_dest_id = customerloc.id
else:
location_dest_id = picking_type.default_location_dest_id.id
res['value'] = {'location_id': location_id,
'location_dest_id': location_dest_id,
'picking_type_code': picking_type.code,}
return res
def _default_location_destination(self):
# retrieve picking type from context; if none this returns an empty recordset
picking_type_id = self._context.get('default_picking_type_id')
picking_type = self.env['stock.picking.type'].browse(picking_type_id)
return picking_type.default_location_dest_id
def _default_location_source(self):
# retrieve picking type from context; if none this returns an empty recordset
picking_type_id = self._context.get('default_picking_type_id')
picking_type = self.env['stock.picking.type'].browse(picking_type_id)
return picking_type.default_location_src_id
@api.model
def default_get(self, fields):
res = super(stock_picking, self).default_get(fields)
if self._context.get('default_picking_type_id') and 'picking_type_id' in fields:
picking_type = self.env['stock.picking.type'].browse(res['picking_type_id'])
res['picking_type_code'] = picking_type.code
return res
_columns = {
'name': fields.char('Reference', select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, copy=False),
'origin': fields.char('Source Document', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="Reference of the document", select=True),
'backorder_id': fields.many2one('stock.picking', 'Back Order of', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="If this shipment was split, then this field links to the shipment which contains the already processed part.", select=True, copy=False),
'note': fields.text('Notes'),
        'move_type': fields.selection([('direct', 'Partial'), ('one', 'All at once')], 'Delivery Method', required=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="It specifies goods to be delivered partially or all at once"),
'state': fields.function(_state_get, type="selection", copy=False,
store={
'stock.picking': (lambda self, cr, uid, ids, ctx: ids, ['move_type', 'launch_pack_operations'], 20),
'stock.move': (_get_pickings, ['state', 'picking_id', 'partially_available'], 20)},
selection=[
('draft', 'Draft'),
('cancel', 'Cancelled'),
('waiting', 'Waiting Another Operation'),
('confirmed', 'Waiting Availability'),
('partially_available', 'Partially Available'),
('assigned', 'Available'),
('done', 'Done'),
], string='Status', readonly=True, select=True, track_visibility='onchange',
help="""
* Draft: not confirmed yet and will not be scheduled until confirmed\n
* Waiting Another Operation: waiting for another move to proceed before it becomes automatically available (e.g. in Make-To-Order flows)\n
* Waiting Availability: still waiting for the availability of products\n
* Partially Available: some products are available and reserved\n
* Ready to Transfer: products reserved, simply waiting for confirmation.\n
* Transferred: has been processed, can't be modified or cancelled anymore\n
* Cancelled: has been cancelled, can't be confirmed anymore"""
),
'location_id': fields.many2one('stock.location', required=True, string="Source Location Zone",
default=_default_location_source, readonly=True, states={'draft': [('readonly', False)]}),
'location_dest_id': fields.many2one('stock.location', required=True,string="Destination Location Zone",
default=_default_location_destination, readonly=True, states={'draft': [('readonly', False)]}),
'move_lines': fields.one2many('stock.move', 'picking_id', string="Stock Moves", copy=True),
'move_lines_related': fields.related('move_lines', type='one2many', relation='stock.move', string="Move Lines"),
'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, required=True),
'picking_type_code': fields.related('picking_type_id', 'code', type='selection', selection=[('incoming', 'Suppliers'), ('outgoing', 'Customers'), ('internal', 'Internal')]),
'picking_type_entire_packs': fields.related('picking_type_id', 'show_entire_packs', type='boolean'),
'priority': fields.function(get_min_max_date, multi="min_max_date", fnct_inv=_set_priority, type='selection', selection=procurement.PROCUREMENT_PRIORITIES, string='Priority',
store={'stock.move': (_get_pickings_dates_priority, ['priority', 'picking_id'], 20)}, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, select=1, help="Priority for this picking. Setting manually a value here would set it as priority for all the moves",
track_visibility='onchange', required=True),
'min_date': fields.function(get_min_max_date, multi="min_max_date", fnct_inv=_set_min_date,
store={'stock.move': (_get_pickings_dates_priority, ['date_expected', 'picking_id'], 20)}, type='datetime', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, string='Scheduled Date', select=1, help="Scheduled time for the first part of the shipment to be processed. Setting manually a value here would set it as expected date for all the stock moves.", track_visibility='onchange'),
'max_date': fields.function(get_min_max_date, multi="min_max_date",
store={'stock.move': (_get_pickings_dates_priority, ['date_expected', 'picking_id'], 20)}, type='datetime', string='Max. Expected Date', select=2, help="Scheduled time for the last part of the shipment to be processed"),
'date': fields.datetime('Creation Date', help="Creation Date, usually the time of the order", select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, track_visibility='onchange'),
'date_done': fields.datetime('Date of Transfer', help="Completion Date of Transfer", readonly=True, copy=False),
        'quant_reserved_exist': fields.function(_get_quant_reserved_exist, type='boolean', string='Has quants already reserved', help='Check the existence of quants linked to this picking'),
'partner_id': fields.many2one('res.partner', 'Partner', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}),
'company_id': fields.many2one('res.company', 'Company', required=True, select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}),
'pack_operation_ids': fields.one2many('stock.pack.operation', 'picking_id', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, string='Related Packing Operations'),
'pack_operation_product_ids': fields.one2many('stock.pack.operation', 'picking_id', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, domain=[('product_id', '!=', False)], string='Non pack'),
'pack_operation_pack_ids': fields.one2many('stock.pack.operation', 'picking_id', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, domain=[('product_id', '=', False)], string='Pack'),
# technical field for attrs in view
        'pack_operation_exist': fields.function(_get_pack_operation_exist, type='boolean', string='Has Pack Operations', help='Check the existence of pack operations on the picking'),
'owner_id': fields.many2one('res.partner', 'Owner', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="Default Owner"),
'printed': fields.boolean('Printed'),
# Used to search on pickings
'product_id': fields.related('move_lines', 'product_id', type='many2one', relation='product.product', string='Product'),
'recompute_pack_op': fields.boolean('Recompute pack operation?', help='True if reserved quants changed, which mean we might need to recompute the package operations', copy=False),
'group_id': fields.related('move_lines', 'group_id', type='many2one', relation='procurement.group', string='Procurement Group', readonly=True,
store={
'stock.picking': (lambda self, cr, uid, ids, ctx: ids, ['move_lines'], 10),
'stock.move': (_get_pickings, ['group_id', 'picking_id'], 10),
}),
'launch_pack_operations': fields.boolean("Launch Pack Operations", copy=False),
}
_defaults = {
'name': '/',
'state': 'draft',
'move_type': 'direct',
'printed': False,
'priority': '1', # normal
'date': fields.datetime.now,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.picking', context=c),
'recompute_pack_op': False,
'launch_pack_operations': False,
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Reference must be unique per company!'),
]
def do_print_picking(self, cr, uid, ids, context=None):
'''This function prints the picking list'''
context = dict(context or {}, active_ids=ids)
self.write(cr, uid, ids, {'printed': True}, context=context)
return self.pool.get("report").get_action(cr, uid, ids, 'stock.report_picking', context=context)
def launch_packops(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'launch_pack_operations': True}, context=context)
def action_confirm(self, cr, uid, ids, context=None):
todo = []
todo_force_assign = []
for picking in self.browse(cr, uid, ids, context=context):
if not picking.move_lines:
self.launch_packops(cr, uid, [picking.id], context=context)
if picking.location_id.usage in ('supplier', 'inventory', 'production'):
todo_force_assign.append(picking.id)
for r in picking.move_lines:
if r.state == 'draft':
todo.append(r.id)
if len(todo):
self.pool.get('stock.move').action_confirm(cr, uid, todo, context=context)
if todo_force_assign:
self.force_assign(cr, uid, todo_force_assign, context=context)
return True
def action_assign(self, cr, uid, ids, context=None):
""" Check availability of picking moves.
        This has the effect of changing the state and reserving quants on available moves, and may
also impact the state of the picking as it is computed based on move's states.
@return: True
"""
for pick in self.browse(cr, uid, ids, context=context):
if pick.state == 'draft':
self.action_confirm(cr, uid, [pick.id], context=context)
#skip the moves that don't need to be checked
move_ids = [x.id for x in pick.move_lines if x.state not in ('draft', 'cancel', 'done')]
if not move_ids:
raise UserError(_('Nothing to check the availability for.'))
self.pool.get('stock.move').action_assign(cr, uid, move_ids, context=context)
return True
def force_assign(self, cr, uid, ids, context=None):
""" Changes state of picking to available if moves are confirmed or waiting.
@return: True
"""
pickings = self.browse(cr, uid, ids, context=context)
for pick in pickings:
move_ids = [x.id for x in pick.move_lines if x.state in ['confirmed', 'waiting']]
self.pool.get('stock.move').force_assign(cr, uid, move_ids, context=context)
return True
def action_cancel(self, cr, uid, ids, context=None):
for pick in self.browse(cr, uid, ids, context=context):
ids2 = [move.id for move in pick.move_lines]
self.pool.get('stock.move').action_cancel(cr, uid, ids2, context)
return True
def action_done(self, cr, uid, ids, context=None):
"""Changes picking state to done by processing the Stock Moves of the Picking
Normally that happens when the button "Done" is pressed on a Picking view.
@return: True
"""
for pick in self.browse(cr, uid, ids, context=context):
todo = []
for move in pick.move_lines:
if move.state == 'draft':
todo.extend(self.pool.get('stock.move').action_confirm(cr, uid, [move.id], context=context))
elif move.state in ('assigned', 'confirmed'):
todo.append(move.id)
if len(todo):
self.pool.get('stock.move').action_done(cr, uid, todo, context=context)
return True
def unlink(self, cr, uid, ids, context=None):
        #on picking deletion, cancel its moves, then unlink them too
move_obj = self.pool.get('stock.move')
context = context or {}
for pick in self.browse(cr, uid, ids, context=context):
move_ids = [move.id for move in pick.move_lines]
move_obj.action_cancel(cr, uid, move_ids, context=context)
move_obj.unlink(cr, uid, move_ids, context=context)
return super(stock_picking, self).unlink(cr, uid, ids, context=context)
def _create_backorder(self, cr, uid, picking, backorder_moves=[], context=None):
""" Move all non-done lines into a new backorder picking. If the key 'do_only_split' is given in the context, then move all lines not in context.get('split', []) instead of all non-done lines.
"""
if not backorder_moves:
backorder_moves = picking.move_lines
backorder_move_ids = [x.id for x in backorder_moves if x.state not in ('done', 'cancel')]
if 'do_only_split' in context and context['do_only_split']:
backorder_move_ids = [x.id for x in backorder_moves if x.id not in context.get('split', [])]
if backorder_move_ids:
backorder_id = self.copy(cr, uid, picking.id, {
'name': '/',
'move_lines': [],
'pack_operation_ids': [],
'backorder_id': picking.id,
})
backorder = self.browse(cr, uid, backorder_id, context=context)
self.message_post(cr, uid, picking.id, body=_("Back order <em>%s</em> <b>created</b>.") % (backorder.name), context=context)
move_obj = self.pool.get("stock.move")
move_obj.write(cr, uid, backorder_move_ids, {'picking_id': backorder_id}, context=context)
if not picking.date_done:
self.write(cr, uid, [picking.id], {'date_done': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context)
self.action_confirm(cr, uid, [backorder_id], context=context)
self.action_assign(cr, uid, [backorder_id], context=context)
return backorder_id
return False
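    # Illustrative sketch (hypothetical move ids): with 'do_only_split' in the
    # context, only the moves *not* listed under context['split'] go to the
    # backorder, which is how do_split() reuses this method:
    #
    #   ctx = dict(context, do_only_split=True, split=[move_a.id, move_b.id])
    #   backorder_id = self._create_backorder(cr, uid, picking, context=ctx)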
@api.cr_uid_ids_context
def recheck_availability(self, cr, uid, picking_ids, context=None):
self.action_assign(cr, uid, picking_ids, context=context)
self.do_prepare_partial(cr, uid, picking_ids, context=context)
def _get_top_level_packages(self, cr, uid, quants_suggested_locations, context=None):
"""This method searches for the higher level packages that can be moved as a single operation, given a list of quants
to move and their suggested destination, and returns the list of matching packages.
"""
# Try to find as much as possible top-level packages that can be moved
pack_obj = self.pool.get("stock.quant.package")
quant_obj = self.pool.get("stock.quant")
top_lvl_packages = set()
quants_to_compare = quants_suggested_locations.keys()
for pack in list(set([x.package_id for x in quants_suggested_locations.keys() if x and x.package_id])):
loop = True
test_pack = pack
good_pack = False
pack_destination = False
while loop:
pack_quants = pack_obj.get_content(cr, uid, [test_pack.id], context=context)
all_in = True
for quant in quant_obj.browse(cr, uid, pack_quants, context=context):
# If the quant is not in the quants to compare and not in the common location
if not quant in quants_to_compare:
all_in = False
break
else:
                        #if a putaway strategy applies, the destination location of each quant may be different (and thus the package should not be taken as a single operation)
if not pack_destination:
pack_destination = quants_suggested_locations[quant]
elif pack_destination != quants_suggested_locations[quant]:
all_in = False
break
if all_in:
good_pack = test_pack
if test_pack.parent_id:
test_pack = test_pack.parent_id
else:
#stop the loop when there's no parent package anymore
loop = False
else:
#stop the loop when the package test_pack is not totally reserved for moves of this picking
#(some quants may be reserved for other picking or not reserved at all)
loop = False
if good_pack:
top_lvl_packages.add(good_pack)
return list(top_lvl_packages)
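    # Illustrative note (sketch): a parent package is kept only if every quant it
    # contains belongs to the quants to move and all of them share the same
    # suggested destination; otherwise the loop stops at the largest sub-package
    # (or at the individual quants) satisfying that condition.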
def _prepare_pack_ops(self, cr, uid, picking, quants, forced_qties, context=None):
""" returns a list of dict, ready to be used in create() of stock.pack.operation.
:param picking: browse record (stock.picking)
:param quants: browse record list (stock.quant). List of quants associated to the picking
:param forced_qties: dictionary showing for each product (keys) its corresponding quantity (value) that is not covered by the quants associated to the picking
"""
def _picking_putaway_apply(product):
location = False
# Search putaway strategy
if product_putaway_strats.get(product.id):
location = product_putaway_strats[product.id]
else:
location = self.pool.get('stock.location').get_putaway_strategy(cr, uid, picking.location_dest_id, product, context=context)
product_putaway_strats[product.id] = location
return location or picking.location_dest_id.id
        # If we encounter a UoM that is smaller than the default UoM or the one already chosen, use the new one instead.
product_uom = {} # Determines UoM used in pack operations
location_dest_id = None
location_id = None
for move in [x for x in picking.move_lines if x.state not in ('done', 'cancel')]:
if not product_uom.get(move.product_id.id):
product_uom[move.product_id.id] = move.product_id.uom_id
if move.product_uom.id != move.product_id.uom_id.id and move.product_uom.factor > product_uom[move.product_id.id].factor:
product_uom[move.product_id.id] = move.product_uom
if not move.scrapped:
if location_dest_id and move.location_dest_id.id != location_dest_id:
raise UserError(_('The destination location must be the same for all the moves of the picking.'))
location_dest_id = move.location_dest_id.id
if location_id and move.location_id.id != location_id:
raise UserError(_('The source location must be the same for all the moves of the picking.'))
location_id = move.location_id.id
pack_obj = self.pool.get("stock.quant.package")
quant_obj = self.pool.get("stock.quant")
vals = []
qtys_grouped = {}
lots_grouped = {}
#for each quant of the picking, find the suggested location
quants_suggested_locations = {}
product_putaway_strats = {}
for quant in quants:
if quant.qty <= 0:
continue
suggested_location_id = _picking_putaway_apply(quant.product_id)
quants_suggested_locations[quant] = suggested_location_id
        #find the packages we can move as a whole
top_lvl_packages = self._get_top_level_packages(cr, uid, quants_suggested_locations, context=context)
# and then create pack operations for the top-level packages found
for pack in top_lvl_packages:
pack_quant_ids = pack_obj.get_content(cr, uid, [pack.id], context=context)
pack_quants = quant_obj.browse(cr, uid, pack_quant_ids, context=context)
vals.append({
'picking_id': picking.id,
'package_id': pack.id,
'product_qty': 1.0,
'location_id': pack.location_id.id,
'location_dest_id': quants_suggested_locations[pack_quants[0]],
'owner_id': pack.owner_id.id,
})
#remove the quants inside the package so that they are excluded from the rest of the computation
for quant in pack_quants:
del quants_suggested_locations[quant]
# Go through all remaining reserved quants and group by product, package, owner, source location and dest location
# Lots will go into pack operation lot object
for quant, dest_location_id in quants_suggested_locations.items():
key = (quant.product_id.id, quant.package_id.id, quant.owner_id.id, quant.location_id.id, dest_location_id)
if qtys_grouped.get(key):
qtys_grouped[key] += quant.qty
else:
qtys_grouped[key] = quant.qty
if quant.product_id.tracking != 'none' and quant.lot_id:
lots_grouped.setdefault(key, {}).setdefault(quant.lot_id.id, 0.0)
lots_grouped[key][quant.lot_id.id] += quant.qty
        # Do the same for the forced quantities (in cases of force_assign or incoming shipment for example)
for product, qty in forced_qties.items():
if qty <= 0:
continue
suggested_location_id = _picking_putaway_apply(product)
key = (product.id, False, picking.owner_id.id, picking.location_id.id, suggested_location_id)
if qtys_grouped.get(key):
qtys_grouped[key] += qty
else:
qtys_grouped[key] = qty
# Create the necessary operations for the grouped quants and remaining qtys
uom_obj = self.pool.get('product.uom')
prevals = {}
for key, qty in qtys_grouped.items():
product = self.pool.get("product.product").browse(cr, uid, key[0], context=context)
uom_id = product.uom_id.id
qty_uom = qty
if product_uom.get(key[0]):
uom_id = product_uom[key[0]].id
qty_uom = uom_obj._compute_qty(cr, uid, product.uom_id.id, qty, uom_id)
pack_lot_ids = []
if lots_grouped.get(key):
for lot in lots_grouped[key].keys():
pack_lot_ids += [(0, 0, {'lot_id': lot, 'qty': 0.0, 'qty_todo': lots_grouped[key][lot]})]
val_dict = {
'picking_id': picking.id,
'product_qty': qty_uom,
'product_id': key[0],
'package_id': key[1],
'owner_id': key[2],
'location_id': key[3],
'location_dest_id': key[4],
'product_uom_id': uom_id,
'pack_lot_ids': pack_lot_ids,
}
if key[0] in prevals:
prevals[key[0]].append(val_dict)
else:
prevals[key[0]] = [val_dict]
        # prevals var holds the operations in order to create them in the same order as the picking stock moves if possible
processed_products = set()
for move in [x for x in picking.move_lines if x.state not in ('done', 'cancel')]:
if move.product_id.id not in processed_products:
vals += prevals.get(move.product_id.id, [])
processed_products.add(move.product_id.id)
return vals
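    # Illustrative note (sketch, values hypothetical): remaining quants are grouped
    # by (product, package, owner, source location, destination location) and each
    # group becomes one pack operation dict, for example:
    #
    #   {'picking_id': picking.id, 'product_id': 42, 'package_id': False,
    #    'owner_id': False, 'location_id': 12, 'location_dest_id': 15,
    #    'product_qty': 8.0, 'product_uom_id': 1, 'pack_lot_ids': []}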
@api.cr_uid_ids_context
def do_prepare_partial(self, cr, uid, picking_ids, context=None):
context = context or {}
pack_operation_obj = self.pool.get('stock.pack.operation')
#get list of existing operations and delete them
existing_package_ids = pack_operation_obj.search(cr, uid, [('picking_id', 'in', picking_ids)], context=context)
if existing_package_ids:
pack_operation_obj.unlink(cr, uid, existing_package_ids, context)
for picking in self.browse(cr, uid, picking_ids, context=context):
forced_qties = {} # Quantity remaining after calculating reserved quants
picking_quants = []
#Calculate packages, reserved quants, qtys of this picking's moves
for move in picking.move_lines:
if move.state not in ('assigned', 'confirmed', 'waiting'):
continue
move_quants = move.reserved_quant_ids
picking_quants += move_quants
forced_qty = (move.state == 'assigned') and move.product_qty - sum([x.qty for x in move_quants]) or 0
#if we used force_assign() on the move, or if the move is incoming, forced_qty > 0
if float_compare(forced_qty, 0, precision_rounding=move.product_id.uom_id.rounding) > 0:
if forced_qties.get(move.product_id):
forced_qties[move.product_id] += forced_qty
else:
forced_qties[move.product_id] = forced_qty
for vals in self._prepare_pack_ops(cr, uid, picking, picking_quants, forced_qties, context=context):
vals['fresh_record'] = False
pack_operation_obj.create(cr, uid, vals, context=context)
#recompute the remaining quantities all at once
self.do_recompute_remaining_quantities(cr, uid, picking_ids, context=context)
self.write(cr, uid, picking_ids, {'recompute_pack_op': False}, context=context)
@api.cr_uid_ids_context
def do_unreserve(self, cr, uid, picking_ids, context=None):
"""
        Will unreserve all quants and remove the pack operations of the pickings in picking_ids
"""
moves_to_unreserve = []
pack_line_to_unreserve = []
for picking in self.browse(cr, uid, picking_ids, context=context):
moves_to_unreserve += [m.id for m in picking.move_lines if m.state not in ('done', 'cancel')]
pack_line_to_unreserve += [p.id for p in picking.pack_operation_ids]
if moves_to_unreserve:
if pack_line_to_unreserve:
self.pool.get('stock.pack.operation').unlink(cr, uid, pack_line_to_unreserve, context=context)
self.pool.get('stock.move').do_unreserve(cr, uid, moves_to_unreserve, context=context)
def recompute_remaining_qty(self, cr, uid, picking, done_qtys=False, context=None):
def _create_link_for_index(operation_id, index, product_id, qty_to_assign, quant_id=False):
move_dict = prod2move_ids[product_id][index]
qty_on_link = min(move_dict['remaining_qty'], qty_to_assign)
self.pool.get('stock.move.operation.link').create(cr, uid, {'move_id': move_dict['move'].id, 'operation_id': operation_id, 'qty': qty_on_link, 'reserved_quant_id': quant_id}, context=context)
if move_dict['remaining_qty'] == qty_on_link:
prod2move_ids[product_id].pop(index)
else:
move_dict['remaining_qty'] -= qty_on_link
return qty_on_link
def _create_link_for_quant(operation_id, quant, qty):
"""create a link for given operation and reserved move of given quant, for the max quantity possible, and returns this quantity"""
if not quant.reservation_id.id:
return _create_link_for_product(operation_id, quant.product_id.id, qty)
qty_on_link = 0
for i in range(0, len(prod2move_ids[quant.product_id.id])):
if prod2move_ids[quant.product_id.id][i]['move'].id != quant.reservation_id.id:
continue
qty_on_link = _create_link_for_index(operation_id, i, quant.product_id.id, qty, quant_id=quant.id)
break
return qty_on_link
def _create_link_for_product(operation_id, product_id, qty):
'''method that creates the link between a given operation and move(s) of given product, for the given quantity.
Returns True if it was possible to create links for the requested quantity (False if there was not enough quantity on stock moves)'''
qty_to_assign = qty
prod_obj = self.pool.get("product.product")
product = prod_obj.browse(cr, uid, product_id)
rounding = product.uom_id.rounding
qtyassign_cmp = float_compare(qty_to_assign, 0.0, precision_rounding=rounding)
if prod2move_ids.get(product_id):
while prod2move_ids[product_id] and qtyassign_cmp > 0:
qty_on_link = _create_link_for_index(operation_id, 0, product_id, qty_to_assign, quant_id=False)
qty_to_assign -= qty_on_link
qtyassign_cmp = float_compare(qty_to_assign, 0.0, precision_rounding=rounding)
return qtyassign_cmp == 0
uom_obj = self.pool.get('product.uom')
package_obj = self.pool.get('stock.quant.package')
quant_obj = self.pool.get('stock.quant')
link_obj = self.pool.get('stock.move.operation.link')
quants_in_package_done = set()
prod2move_ids = {}
still_to_do = []
#make a dictionary giving for each product, the moves and related quantity that can be used in operation links
moves = sorted([x for x in picking.move_lines if x.state not in ('done', 'cancel')], key=lambda x: (((x.state == 'assigned') and -2 or 0) + (x.partially_available and -1 or 0)))
for move in moves:
if not prod2move_ids.get(move.product_id.id):
prod2move_ids[move.product_id.id] = [{'move': move, 'remaining_qty': move.product_qty}]
else:
prod2move_ids[move.product_id.id].append({'move': move, 'remaining_qty': move.product_qty})
need_rereserve = False
#sort the operations in order to give higher priority to those with a package, then a serial number
operations = picking.pack_operation_ids
operations = sorted(operations, key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.pack_lot_ids and -1 or 0))
#delete existing operations to start again from scratch
links = link_obj.search(cr, uid, [('operation_id', 'in', [x.id for x in operations])], context=context)
if links:
link_obj.unlink(cr, uid, links, context=context)
#1) first, try to create links when quants can be identified without any doubt
for ops in operations:
lot_qty = {}
for packlot in ops.pack_lot_ids:
lot_qty[packlot.lot_id.id] = uom_obj._compute_qty(cr, uid, ops.product_uom_id.id, packlot.qty, ops.product_id.uom_id.id)
#for each operation, create the links with the stock move by seeking on the matching reserved quants,
            #and defer the operation if there is some ambiguity on the move to select
if ops.package_id and not ops.product_id and (not done_qtys or ops.qty_done):
#entire package
quant_ids = package_obj.get_content(cr, uid, [ops.package_id.id], context=context)
for quant in quant_obj.browse(cr, uid, quant_ids, context=context):
remaining_qty_on_quant = quant.qty
if quant.reservation_id:
#avoid quants being counted twice
quants_in_package_done.add(quant.id)
qty_on_link = _create_link_for_quant(ops.id, quant, quant.qty)
remaining_qty_on_quant -= qty_on_link
if remaining_qty_on_quant:
still_to_do.append((ops, quant.product_id.id, remaining_qty_on_quant))
need_rereserve = True
elif ops.product_id.id:
#Check moves with same product
product_qty = ops.qty_done if done_qtys else ops.product_qty
qty_to_assign = uom_obj._compute_qty_obj(cr, uid, ops.product_uom_id, product_qty, ops.product_id.uom_id, context=context)
precision_rounding = ops.product_id.uom_id.rounding
for move_dict in prod2move_ids.get(ops.product_id.id, []):
move = move_dict['move']
for quant in move.reserved_quant_ids:
if float_compare(qty_to_assign, 0, precision_rounding=precision_rounding) != 1:
break
if quant.id in quants_in_package_done:
continue
#check if the quant is matching the operation details
if ops.package_id:
flag = quant.package_id == ops.package_id
else:
flag = not quant.package_id.id
flag = flag and (ops.owner_id.id == quant.owner_id.id)
if flag:
if not lot_qty:
max_qty_on_link = min(quant.qty, qty_to_assign)
qty_on_link = _create_link_for_quant(ops.id, quant, max_qty_on_link)
qty_to_assign -= qty_on_link
else:
if lot_qty.get(quant.lot_id.id): #if there is still some qty left
max_qty_on_link = min(quant.qty, qty_to_assign, lot_qty[quant.lot_id.id])
qty_on_link = _create_link_for_quant(ops.id, quant, max_qty_on_link)
qty_to_assign -= qty_on_link
lot_qty[quant.lot_id.id] -= qty_on_link
qty_assign_cmp = float_compare(qty_to_assign, 0, precision_rounding=precision_rounding)
if qty_assign_cmp > 0:
#qty reserved is less than qty put in operations. We need to create a link but it's deferred after we processed
                    #all the quants (because they leave no choice on their related move and need to be processed with higher priority)
still_to_do += [(ops, ops.product_id.id, qty_to_assign)]
need_rereserve = True
#2) then, process the remaining part
all_op_processed = True
for ops, product_id, remaining_qty in still_to_do:
all_op_processed = _create_link_for_product(ops.id, product_id, remaining_qty) and all_op_processed
return (need_rereserve, all_op_processed)
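    # Illustrative note (sketch): the returned pair is (need_rereserve,
    # all_op_processed); need_rereserve is True when some operations could not be
    # matched exactly with reserved quants, and all_op_processed is False when part
    # of an operation quantity could not be linked to any move. do_transfer() below
    # acts on these flags roughly as:
    #
    #   need_rereserve, all_op_processed = self.recompute_remaining_qty(cr, uid, picking, context=context)
    #   if not all_op_processed:
    #       extra_move_ids = self._create_extra_moves(cr, uid, picking, context=context)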
def picking_recompute_remaining_quantities(self, cr, uid, picking, done_qtys=False, context=None):
need_rereserve = False
all_op_processed = True
if picking.pack_operation_ids:
need_rereserve, all_op_processed = self.recompute_remaining_qty(cr, uid, picking, done_qtys=done_qtys, context=context)
return need_rereserve, all_op_processed
@api.cr_uid_ids_context
def do_recompute_remaining_quantities(self, cr, uid, picking_ids, done_qtys=False, context=None):
for picking in self.browse(cr, uid, picking_ids, context=context):
if picking.pack_operation_ids:
self.recompute_remaining_qty(cr, uid, picking, done_qtys=done_qtys, context=context)
def _prepare_values_extra_move(self, cr, uid, op, product, remaining_qty, context=None):
"""
Creates an extra move when there is no corresponding original move to be copied
"""
uom_obj = self.pool.get("product.uom")
uom_id = product.uom_id.id
qty = remaining_qty
if op.product_id and op.product_uom_id and op.product_uom_id.id != product.uom_id.id:
            if op.product_uom_id.factor > product.uom_id.factor: #If the pack operation's UoM is a smaller unit
uom_id = op.product_uom_id.id
#HALF-UP rounding as only rounding errors will be because of propagation of error from default UoM
qty = uom_obj._compute_qty_obj(cr, uid, product.uom_id, remaining_qty, op.product_uom_id, rounding_method='HALF-UP')
picking = op.picking_id
ref = product.default_code
name = '[' + ref + ']' + ' ' + product.name if ref else product.name
proc_id = False
for m in op.linked_move_operation_ids:
if m.move_id.procurement_id:
proc_id = m.move_id.procurement_id.id
break
res = {
'picking_id': picking.id,
'location_id': picking.location_id.id,
'location_dest_id': picking.location_dest_id.id,
'product_id': product.id,
'procurement_id': proc_id,
'product_uom': uom_id,
'product_uom_qty': qty,
'name': _('Extra Move: ') + name,
'state': 'draft',
'restrict_partner_id': op.owner_id.id,
'group_id': picking.group_id.id,
}
return res
def _create_extra_moves(self, cr, uid, picking, context=None):
'''This function creates move lines on a picking, at the time of do_transfer, based on
unexpected product transfers (or exceeding quantities) found in the pack operations.
'''
move_obj = self.pool.get('stock.move')
operation_obj = self.pool.get('stock.pack.operation')
moves = []
for op in picking.pack_operation_ids:
for product, remaining_qty in operation_obj._get_remaining_prod_quantities(cr, uid, op, context=context).items():
if float_compare(remaining_qty, 0, precision_rounding=product.uom_id.rounding) > 0:
vals = self._prepare_values_extra_move(cr, uid, op, product, remaining_qty, context=context)
moves.append(move_obj.create(cr, uid, vals, context=context))
if moves:
move_obj.action_confirm(cr, uid, moves, context=context)
return moves
def rereserve_pick(self, cr, uid, ids, context=None):
"""
This can be used to provide a button that rereserves taking into account the existing pack operations
"""
for pick in self.browse(cr, uid, ids, context=context):
self.rereserve_quants(cr, uid, pick, move_ids = [x.id for x in pick.move_lines
if x.state not in ('done', 'cancel')], context=context)
def rereserve_quants(self, cr, uid, picking, move_ids=[], context=None):
""" Unreserve quants then try to reassign quants."""
if context is None:
context = {}
stock_move_obj = self.pool.get('stock.move')
if not move_ids:
self.do_unreserve(cr, uid, [picking.id], context=context)
self.action_assign(cr, uid, [picking.id], context=context)
else:
if 'no_state_change' in context:
move = stock_move_obj.browse(cr, uid, move_ids, context=context)
stock_move_obj.do_unreserve(cr, uid, [m.id for m in move if m.reserved_quant_ids], context=context)
else:
stock_move_obj.do_unreserve(cr, uid, move_ids, context=context)
stock_move_obj.action_assign(cr, uid, move_ids, no_prepare=True, context=context)
def do_new_transfer(self, cr, uid, ids, context=None):
pack_op_obj = self.pool['stock.pack.operation']
data_obj = self.pool['ir.model.data']
for pick in self.browse(cr, uid, ids, context=context):
to_delete = []
if not pick.move_lines and not pick.pack_operation_ids:
raise UserError(_('Please create some Initial Demand or Mark as Todo and create some Operations. '))
# In draft or with no pack operations edited yet, ask if we can just do everything
if pick.state == 'draft' or all([x.qty_done == 0.0 for x in pick.pack_operation_ids]):
# If no lots when needed, raise error
picking_type = pick.picking_type_id
if (picking_type.use_create_lots or picking_type.use_existing_lots):
for pack in pick.pack_operation_ids:
if pack.product_id and pack.product_id.tracking != 'none':
raise UserError(_('Some products require lots, so you need to specify those first!'))
view = data_obj.xmlid_to_res_id(cr, uid, 'stock.view_immediate_transfer')
wiz_id = self.pool['stock.immediate.transfer'].create(cr, uid, {'pick_id': pick.id}, context=context)
return {
'name': _('Immediate Transfer?'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'stock.immediate.transfer',
'views': [(view, 'form')],
'view_id': view,
'target': 'new',
'res_id': wiz_id,
'context': context,
}
# Check backorder should check for other barcodes
if self.check_backorder(cr, uid, pick, context=context):
view = data_obj.xmlid_to_res_id(cr, uid, 'stock.view_backorder_confirmation')
wiz_id = self.pool['stock.backorder.confirmation'].create(cr, uid, {'pick_id': pick.id}, context=context)
return {
'name': _('Create Backorder?'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'stock.backorder.confirmation',
'views': [(view, 'form')],
'view_id': view,
'target': 'new',
'res_id': wiz_id,
'context': context,
}
for operation in pick.pack_operation_ids:
if operation.qty_done < 0:
raise UserError(_('No negative quantities allowed'))
if operation.qty_done > 0:
pack_op_obj.write(cr, uid, operation.id, {'product_qty': operation.qty_done}, context=context)
else:
to_delete.append(operation.id)
if to_delete:
pack_op_obj.unlink(cr, uid, to_delete, context=context)
self.do_transfer(cr, uid, ids, context=context)
return
def check_backorder(self, cr, uid, picking, context=None):
need_rereserve, all_op_processed = self.picking_recompute_remaining_quantities(cr, uid, picking, done_qtys=True, context=context)
for move in picking.move_lines:
if float_compare(move.remaining_qty, 0, precision_rounding = move.product_id.uom_id.rounding) != 0:
return True
return False
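    # Illustrative note (sketch, quantities hypothetical): check_backorder() asks
    # whether a backorder would be needed by recomputing remaining quantities from
    # the done quantities; any move left with a non-zero remaining_qty makes it
    # return True, e.g. a move of 10.0 with only 6.0 processed leaves
    # remaining_qty == 4.0.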
def create_lots_for_picking(self, cr, uid, ids, context=None):
lot_obj = self.pool['stock.production.lot']
opslot_obj = self.pool['stock.pack.operation.lot']
to_unlink = []
for picking in self.browse(cr, uid, ids, context=context):
for ops in picking.pack_operation_ids:
for opslot in ops.pack_lot_ids:
if not opslot.lot_id:
lot_id = lot_obj.create(cr, uid, {'name': opslot.lot_name, 'product_id': ops.product_id.id}, context=context)
opslot_obj.write(cr, uid, [opslot.id], {'lot_id':lot_id}, context=context)
#Unlink pack operations where qty = 0
to_unlink += [x.id for x in ops.pack_lot_ids if x.qty == 0.0]
opslot_obj.unlink(cr, uid, to_unlink, context=context)
def do_transfer(self, cr, uid, ids, context=None):
"""
If no pack operation, we do simple action_done of the picking
Otherwise, do the pack operations
"""
if not context:
context = {}
notrack_context = dict(context, mail_notrack=True)
stock_move_obj = self.pool.get('stock.move')
self.create_lots_for_picking(cr, uid, ids, context=context)
for picking in self.browse(cr, uid, ids, context=context):
if not picking.pack_operation_ids:
self.action_done(cr, uid, [picking.id], context=context)
continue
else:
need_rereserve, all_op_processed = self.picking_recompute_remaining_quantities(cr, uid, picking, context=context)
#create extra moves in the picking (unexpected product moves coming from pack operations)
todo_move_ids = []
if not all_op_processed:
todo_move_ids += self._create_extra_moves(cr, uid, picking, context=context)
if need_rereserve or not all_op_processed:
moves_reassign = any(x.origin_returned_move_id or x.move_orig_ids for x in picking.move_lines if x.state not in ['done', 'cancel'])
if moves_reassign and (picking.location_id.usage not in ("supplier", "production", "inventory")):
ctx = dict(context)
ctx['reserve_only_ops'] = True #unnecessary to assign other quants than those involved with pack operations as they will be unreserved anyways.
ctx['no_state_change'] = True
self.rereserve_quants(cr, uid, picking, move_ids=picking.move_lines.ids, context=ctx)
self.do_recompute_remaining_quantities(cr, uid, [picking.id], context=context)
#split move lines if needed
toassign_move_ids = []
for move in picking.move_lines:
remaining_qty = move.remaining_qty
if move.state in ('done', 'cancel'):
#ignore stock moves cancelled or already done
continue
elif move.state == 'draft':
toassign_move_ids.append(move.id)
if float_compare(remaining_qty, 0, precision_rounding = move.product_id.uom_id.rounding) == 0:
if move.state in ('draft', 'assigned', 'confirmed'):
todo_move_ids.append(move.id)
elif float_compare(remaining_qty,0, precision_rounding = move.product_id.uom_id.rounding) > 0 and \
float_compare(remaining_qty, move.product_qty, precision_rounding = move.product_id.uom_id.rounding) < 0:
new_move = stock_move_obj.split(cr, uid, move, remaining_qty, context=notrack_context)
todo_move_ids.append(move.id)
#Assign move as it was assigned before
toassign_move_ids.append(new_move)
todo_move_ids = list(set(todo_move_ids))
if todo_move_ids and not context.get('do_only_split'):
self.pool.get('stock.move').action_done(cr, uid, todo_move_ids, context=context)
elif context.get('do_only_split'):
context = dict(context, split=todo_move_ids)
self._create_backorder(cr, uid, picking, context=context)
return True
@api.cr_uid_ids_context
def do_split(self, cr, uid, picking_ids, context=None):
""" just split the picking (create a backorder) without making it 'done' """
if context is None:
context = {}
ctx = context.copy()
ctx['do_only_split'] = True
return self.do_transfer(cr, uid, picking_ids, context=ctx)
def put_in_pack(self, cr, uid, ids, context=None):
stock_move_obj = self.pool["stock.move"]
stock_operation_obj = self.pool["stock.pack.operation"]
package_obj = self.pool["stock.quant.package"]
package_id = False
for pick in self.browse(cr, uid, ids, context=context):
operations = [x for x in pick.pack_operation_ids if x.qty_done > 0 and (not x.result_package_id)]
pack_operation_ids = []
for operation in operations:
                #If we haven't done all the qty in the operation, we have to split it into 2 operations
op = operation
if operation.qty_done < operation.product_qty:
new_operation = stock_operation_obj.copy(cr, uid, operation.id, {'product_qty': operation.qty_done,'qty_done': operation.qty_done}, context=context)
stock_operation_obj.write(cr, uid, operation.id, {'product_qty': operation.product_qty - operation.qty_done,'qty_done': 0}, context=context)
if operation.pack_lot_ids:
packlots_transfer = [(4, x.id) for x in operation.pack_lot_ids]
stock_operation_obj.write(cr, uid, [new_operation], {'pack_lot_ids': packlots_transfer}, context=context)
# the stock.pack.operation.lot records now belong to the new, packaged stock.pack.operation
# we have to create new ones with new quantities for our original, unfinished stock.pack.operation
stock_operation_obj._copy_remaining_pack_lot_ids(cr, uid, new_operation, operation.id, context=context)
op = stock_operation_obj.browse(cr, uid, new_operation, context=context)
pack_operation_ids.append(op.id)
if operations:
stock_operation_obj.check_tracking(cr, uid, pack_operation_ids, context=context)
package_id = package_obj.create(cr, uid, {}, context=context)
stock_operation_obj.write(cr, uid, pack_operation_ids, {'result_package_id': package_id}, context=context)
else:
raise UserError(_('Please process some quantities to put in the pack first!'))
return package_id
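    # Illustrative sketch (hypothetical variables): put_in_pack() splits partially
    # processed operations, creates an empty stock.quant.package and sets it as
    # result_package_id on the processed operations, returning the new package id:
    #
    #   package_id = self.pool['stock.picking'].put_in_pack(cr, uid, [picking.id], context=context)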
class stock_production_lot(osv.osv):
_name = 'stock.production.lot'
_inherit = ['mail.thread']
_description = 'Lot/Serial'
_columns = {
'name': fields.char('Serial Number', required=True, help="Unique Serial Number"),
'ref': fields.char('Internal Reference', help="Internal reference number in case it differs from the manufacturer's serial number"),
'product_id': fields.many2one('product.product', 'Product', required=True, domain=[('type', 'in', ['product', 'consu'])]),
'quant_ids': fields.one2many('stock.quant', 'lot_id', 'Quants', readonly=True),
'create_date': fields.datetime('Creation Date'),
}
_defaults = {
'name': lambda x, y, z, c: x.pool.get('ir.sequence').next_by_code(y, z, 'stock.lot.serial'),
'product_id': lambda x, y, z, c: c.get('product_id', False),
}
_sql_constraints = [
('name_ref_uniq', 'unique (name, product_id)', 'The combination of serial number and product must be unique !'),
]
def action_traceability(self, cr, uid, ids, context=None):
""" It traces the information of lots
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return: A dictionary of values
"""
quant_obj = self.pool.get("stock.quant")
quants = quant_obj.search(cr, uid, [('lot_id', 'in', ids)], context=context)
moves = set()
for quant in quant_obj.browse(cr, uid, quants, context=context):
moves |= {move.id for move in quant.history_ids}
if moves:
return {
'domain': "[('id','in',[" + ','.join(map(str, list(moves))) + "])]",
'name': _('Traceability'),
'view_mode': 'tree,form',
'view_type': 'form',
'context': {'tree_view_ref': 'stock.view_move_tree'},
'res_model': 'stock.move',
'type': 'ir.actions.act_window',
}
return False
# ----------------------------------------------------
# Move
# ----------------------------------------------------
class stock_move(osv.osv):
_name = "stock.move"
_description = "Stock Move"
_order = 'picking_id, sequence, id'
def get_price_unit(self, cr, uid, move, context=None):
""" Returns the unit price to store on the quant """
return move.price_unit or move.product_id.standard_price
def name_get(self, cr, uid, ids, context=None):
res = []
for line in self.browse(cr, uid, ids, context=context):
name = line.location_id.name + ' > ' + line.location_dest_id.name
if line.product_id.code:
name = line.product_id.code + ': ' + name
if line.picking_id.origin:
name = line.picking_id.origin + '/ ' + name
res.append((line.id, name))
return res
def _quantity_normalize(self, cr, uid, ids, name, args, context=None):
uom_obj = self.pool.get('product.uom')
res = {}
for m in self.browse(cr, uid, ids, context=context):
res[m.id] = uom_obj._compute_qty_obj(cr, uid, m.product_uom, m.product_uom_qty, m.product_id.uom_id, context=context)
return res
def _get_remaining_qty(self, cr, uid, ids, field_name, args, context=None):
uom_obj = self.pool.get('product.uom')
res = {}
for move in self.browse(cr, uid, ids, context=context):
qty = move.product_qty
for record in move.linked_move_operation_ids:
qty -= record.qty
# Keeping in product default UoM
res[move.id] = float_round(qty, precision_rounding=move.product_id.uom_id.rounding)
return res
def _get_lot_ids(self, cr, uid, ids, field_name, args, context=None):
res = dict.fromkeys(ids, False)
for move in self.browse(cr, uid, ids, context=context):
if move.state == 'done':
res[move.id] = [q.lot_id.id for q in move.quant_ids if q.lot_id]
else:
res[move.id] = [q.lot_id.id for q in move.reserved_quant_ids if q.lot_id]
return res
def _get_product_availability(self, cr, uid, ids, field_name, args, context=None):
quant_obj = self.pool.get('stock.quant')
res = dict.fromkeys(ids, False)
for move in self.browse(cr, uid, ids, context=context):
if move.state == 'done':
res[move.id] = move.product_qty
else:
sublocation_ids = self.pool.get('stock.location').search(cr, uid, [('id', 'child_of', [move.location_id.id])], context=context)
quant_ids = quant_obj.search(cr, uid, [('location_id', 'in', sublocation_ids), ('product_id', '=', move.product_id.id), ('reservation_id', '=', False)], context=context)
availability = 0
for quant in quant_obj.browse(cr, uid, quant_ids, context=context):
availability += quant.qty
res[move.id] = min(move.product_qty, availability)
return res
def _get_string_qty_information(self, cr, uid, ids, field_name, args, context=None):
uom_obj = self.pool.get('product.uom')
res = dict.fromkeys(ids, '')
precision = self.pool['decimal.precision'].precision_get(cr, uid, 'Product Unit of Measure')
for move in self.browse(cr, uid, ids, context=context):
if move.state in ('draft', 'done', 'cancel') or move.location_id.usage != 'internal':
res[move.id] = '' # 'not applicable' or 'n/a' could work too
continue
total_available = min(move.product_qty, move.reserved_availability + move.availability)
total_available = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, total_available, move.product_uom, round=False, context=context)
total_available = float_round(total_available, precision_digits=precision)
info = str(total_available)
#look in the settings if we need to display the UoM name or not
if self.pool.get('res.users').has_group(cr, uid, 'product.group_uom'):
info += ' ' + move.product_uom.name
if move.reserved_availability:
if move.reserved_availability != total_available:
#some of the available quantity is assigned and some are available but not reserved
reserved_available = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, move.reserved_availability, move.product_uom, round=False, context=context)
reserved_available = float_round(reserved_available, precision_digits=precision)
info += _(' (%s reserved)') % str(reserved_available)
else:
#all available quantity is assigned
info += _(' (reserved)')
res[move.id] = info
return res
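    # Illustrative sketch (assumption, not taken from the original source): possible values of
    # the availability string computed above, for a move of 5 units of a product whose UoM is
    # named 'Units', with 2 units reserved and a user belonging to 'product.group_uom':
    #   string_availability_info == '5.0 Units (2.0 reserved)'
    # and, when everything available is already reserved:
    #   string_availability_info == '5.0 Units (reserved)'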
def _get_reserved_availability(self, cr, uid, ids, field_name, args, context=None):
res = dict.fromkeys(ids, 0)
for move in self.browse(cr, uid, ids, context=context):
res[move.id] = sum([quant.qty for quant in move.reserved_quant_ids])
return res
def _get_move(self, cr, uid, ids, context=None):
res = set()
for quant in self.browse(cr, uid, ids, context=context):
if quant.reservation_id:
res.add(quant.reservation_id.id)
return list(res)
def _get_move_ids(self, cr, uid, ids, context=None):
res = []
for picking in self.browse(cr, uid, ids, context=context):
res += [x.id for x in picking.move_lines]
return res
def _get_moves_from_prod(self, cr, uid, ids, context=None):
if ids:
return self.pool.get('stock.move').search(cr, uid, [('product_id', 'in', ids)], context=context)
return []
def _set_product_qty(self, cr, uid, id, field, value, arg, context=None):
""" The meaning of product_qty field changed lately and is now a functional field computing the quantity
in the default product UoM. This code has been added to raise an error if a write is made given a value
for `product_qty`, where the same write should set the `product_uom_qty` field instead, in order to
detect errors.
"""
raise UserError(_('The requested operation cannot be processed because of a programming error setting the `product_qty` field instead of the `product_uom_qty`.'))
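    # Hedged usage note (illustrative only, `move_id` is a hypothetical id): because `product_qty`
    # is a computed field whose inverse deliberately raises, callers are expected to write the
    # quantity through `product_uom_qty` instead, e.g.:
    #   self.pool['stock.move'].write(cr, uid, [move_id], {'product_uom_qty': 5.0})
    # whereas passing {'product_qty': 5.0} would trigger the UserError above.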
_columns = {
'sequence': fields.integer('Sequence'),
'name': fields.char('Description', required=True, select=True),
'priority': fields.selection(procurement.PROCUREMENT_PRIORITIES, 'Priority'),
'create_date': fields.datetime('Creation Date', readonly=True, select=True),
'date': fields.datetime('Date', required=True, select=True, help="Move date: scheduled date until move is done, then date of actual move processing", states={'done': [('readonly', True)]}),
'date_expected': fields.datetime('Expected Date', states={'done': [('readonly', True)]}, required=True, select=True, help="Scheduled date for the processing of this move"),
'product_id': fields.many2one('product.product', 'Product', required=True, select=True, domain=[('type', 'in', ['product', 'consu'])], states={'done': [('readonly', True)]}),
'product_qty': fields.function(_quantity_normalize, fnct_inv=_set_product_qty, type='float', digits=0, store={
_name: (lambda self, cr, uid, ids, c={}: ids, ['product_id', 'product_uom', 'product_uom_qty'], 10),
}, string='Quantity',
help='Quantity in the default UoM of the product'),
'product_uom_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'),
required=True, states={'done': [('readonly', True)]},
help="This is the quantity of products from an inventory "
"point of view. For moves in the state 'done', this is the "
"quantity of products that were actually moved. For other "
"moves, this is the quantity of product that is planned to "
"be moved. Lowering this quantity does not generate a "
"backorder. Changing this quantity on assigned moves affects "
"the product reservation, and should be done with care."
),
'product_uom': fields.many2one('product.uom', 'Unit of Measure', required=True, states={'done': [('readonly', True)]}),
'product_tmpl_id': fields.related('product_id', 'product_tmpl_id', type='many2one', relation='product.template', string='Product Template'),
        'product_packaging': fields.many2one('product.packaging', 'Preferred Packaging', help="It specifies attributes of packaging like type, quantity of packaging, etc."),
'location_id': fields.many2one('stock.location', 'Source Location', required=True, select=True, auto_join=True,
states={'done': [('readonly', True)]}, help="Sets a location if you produce at a fixed location. This can be a partner location if you subcontract the manufacturing operations."),
'location_dest_id': fields.many2one('stock.location', 'Destination Location', required=True, states={'done': [('readonly', True)]}, select=True,
auto_join=True, help="Location where the system will stock the finished products."),
'partner_id': fields.many2one('res.partner', 'Destination Address ', states={'done': [('readonly', True)]}, help="Optional address where goods are to be delivered, specifically used for allotment"),
'picking_partner_id': fields.related('picking_id', 'partner_id', type='many2one', relation='res.partner', string='Transfer Destination Address'),
'move_dest_id': fields.many2one('stock.move', 'Destination Move', help="Optional: next stock move when chaining them", select=True, copy=False),
'move_orig_ids': fields.one2many('stock.move', 'move_dest_id', 'Original Move', help="Optional: previous stock move when chaining them", select=True),
'picking_id': fields.many2one('stock.picking', 'Transfer Reference', select=True, states={'done': [('readonly', True)]}),
'note': fields.text('Notes'),
'state': fields.selection([('draft', 'New'),
('cancel', 'Cancelled'),
('waiting', 'Waiting Another Move'),
('confirmed', 'Waiting Availability'),
('assigned', 'Available'),
('done', 'Done'),
], 'Status', readonly=True, select=True, copy=False,
help= "* New: When the stock move is created and not yet confirmed.\n"\
"* Waiting Another Move: This state can be seen when a move is waiting for another one, for example in a chained flow.\n"\
"* Waiting Availability: This state is reached when the procurement resolution is not straight forward. It may need the scheduler to run, a component to me manufactured...\n"\
"* Available: When products are reserved, it is set to \'Available\'.\n"\
"* Done: When the shipment is processed, the state is \'Done\'."),
'partially_available': fields.boolean('Partially Available', readonly=True, help="Checks if the move has some stock reserved", copy=False),
'price_unit': fields.float('Unit Price', help="Technical field used to record the product cost set by the user during a picking confirmation (when costing method used is 'average price' or 'real'). Value given in company currency and in product uom."), # as it's a technical field, we intentionally don't provide the digits attribute
'company_id': fields.many2one('res.company', 'Company', required=True, select=True),
'split_from': fields.many2one('stock.move', string="Move Split From", help="Technical field used to track the origin of a split move, which can be useful in case of debug", copy=False),
'backorder_id': fields.related('picking_id', 'backorder_id', type='many2one', relation="stock.picking", string="Back Order of", select=True),
'origin': fields.char("Source Document"),
'procure_method': fields.selection([('make_to_stock', 'Default: Take From Stock'), ('make_to_order', 'Advanced: Apply Procurement Rules')], 'Supply Method', required=True,
help="""By default, the system will take from the stock in the source location and passively wait for availability. The other possibility allows you to directly create a procurement on the source location (and thus ignore its current stock) to gather products. If we want to chain moves and have this one to wait for the previous, this second option should be chosen."""),
# used for colors in tree views:
'scrapped': fields.related('location_dest_id', 'scrap_location', type='boolean', relation='stock.location', string='Scrapped', readonly=True),
'quant_ids': fields.many2many('stock.quant', 'stock_quant_move_rel', 'move_id', 'quant_id', 'Moved Quants', copy=False),
'reserved_quant_ids': fields.one2many('stock.quant', 'reservation_id', 'Reserved quants'),
'linked_move_operation_ids': fields.one2many('stock.move.operation.link', 'move_id', string='Linked Operations', readonly=True, help='Operations that impact this move for the computation of the remaining quantities'),
'remaining_qty': fields.function(_get_remaining_qty, type='float', string='Remaining Quantity', digits=0,
states={'done': [('readonly', True)]}, help="Remaining Quantity in default UoM according to operations matched with this move"),
'procurement_id': fields.many2one('procurement.order', 'Procurement'),
'group_id': fields.many2one('procurement.group', 'Procurement Group'),
'rule_id': fields.many2one('procurement.rule', 'Procurement Rule', help='The procurement rule that created this stock move'),
'push_rule_id': fields.many2one('stock.location.path', 'Push Rule', help='The push rule that created this stock move'),
'propagate': fields.boolean('Propagate cancel and split', help='If checked, when this move is cancelled, cancel the linked move too'),
'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type'),
'inventory_id': fields.many2one('stock.inventory', 'Inventory'),
'lot_ids': fields.function(_get_lot_ids, type='many2many', relation='stock.production.lot', string='Lots'),
'origin_returned_move_id': fields.many2one('stock.move', 'Origin return move', help='move that created the return move', copy=False),
'returned_move_ids': fields.one2many('stock.move', 'origin_returned_move_id', 'All returned moves', help='Optional: all returned moves created from this move'),
'reserved_availability': fields.function(_get_reserved_availability, type='float', string='Quantity Reserved', readonly=True, help='Quantity that has already been reserved for this move'),
'availability': fields.function(_get_product_availability, type='float', string='Forecasted Quantity', readonly=True, help='Quantity in stock that can still be reserved for this move'),
'string_availability_info': fields.function(_get_string_qty_information, type='text', string='Availability', readonly=True, help='Show various information on stock availability for this move'),
'restrict_lot_id': fields.many2one('stock.production.lot', 'Lot', help="Technical field used to depict a restriction on the lot of quants to consider when marking this move as 'done'"),
'restrict_partner_id': fields.many2one('res.partner', 'Owner ', help="Technical field used to depict a restriction on the ownership of quants to consider when marking this move as 'done'"),
'route_ids': fields.many2many('stock.location.route', 'stock_location_route_move', 'move_id', 'route_id', 'Destination route', help="Preferred route to be followed by the procurement order"),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', help="Technical field depicting the warehouse to consider for the route selection on the next procurement (if any)."),
}
def _default_destination_address(self, cr, uid, context=None):
return False
def _default_group_id(self, cr, uid, context=None):
context = context or {}
if context.get('default_picking_id', False):
picking = self.pool.get('stock.picking').browse(cr, uid, context['default_picking_id'], context=context)
return picking.group_id.id
return False
_defaults = {
'partner_id': _default_destination_address,
'state': 'draft',
'priority': '1',
'product_uom_qty': 1.0,
'sequence': 10,
'scrapped': False,
'date': fields.datetime.now,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.move', context=c),
'date_expected': fields.datetime.now,
'procure_method': 'make_to_stock',
'propagate': True,
'partially_available': False,
'group_id': _default_group_id,
}
def _check_uom(self, cr, uid, ids, context=None):
for move in self.browse(cr, uid, ids, context=context):
if move.product_id.uom_id.category_id.id != move.product_uom.category_id.id:
return False
return True
_constraints = [
(_check_uom,
         'You are trying to move a product using a UoM that is not compatible with the UoM of the product moved. Please use a UoM in the same UoM category.',
['product_uom']),
]
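    # Illustrative example of the constraint above (hypothetical data): moving a product whose
    # default UoM is 'kg' with a move UoM of 'g' passes (both belong to the 'Weight' category),
    # whereas using 'Unit(s)' as the move UoM would violate the constraint and block the write.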
def init(self, cr):
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('stock_move_product_location_index',))
if not cr.fetchone():
cr.execute('CREATE INDEX stock_move_product_location_index ON stock_move (product_id, location_id, location_dest_id, company_id, state)')
@api.cr_uid_ids_context
def do_unreserve(self, cr, uid, move_ids, context=None):
quant_obj = self.pool.get("stock.quant")
for move in self.browse(cr, uid, move_ids, context=context):
if move.state in ('done', 'cancel'):
raise UserError(_('Cannot unreserve a done move'))
quant_obj.quants_unreserve(cr, uid, move, context=context)
if not context.get('no_state_change'):
if self.find_move_ancestors(cr, uid, move, context=context):
self.write(cr, uid, [move.id], {'state': 'waiting'}, context=context)
else:
self.write(cr, uid, [move.id], {'state': 'confirmed'}, context=context)
def _prepare_procurement_from_move(self, cr, uid, move, context=None):
origin = (move.group_id and (move.group_id.name + ":") or "") + (move.rule_id and move.rule_id.name or move.origin or move.picking_id.name or "/")
group_id = move.group_id and move.group_id.id or False
if move.rule_id:
if move.rule_id.group_propagation_option == 'fixed' and move.rule_id.group_id:
group_id = move.rule_id.group_id.id
elif move.rule_id.group_propagation_option == 'none':
group_id = False
return {
'name': move.rule_id and move.rule_id.name or "/",
'origin': origin,
'company_id': move.company_id and move.company_id.id or False,
'date_planned': move.date,
'product_id': move.product_id.id,
'product_qty': move.product_uom_qty,
'product_uom': move.product_uom.id,
'location_id': move.location_id.id,
'move_dest_id': move.id,
'group_id': group_id,
'route_ids': [(4, x.id) for x in move.route_ids],
'warehouse_id': move.warehouse_id.id or (move.picking_type_id and move.picking_type_id.warehouse_id.id or False),
'priority': move.priority,
}
def _push_apply(self, cr, uid, moves, context=None):
push_obj = self.pool.get("stock.location.path")
for move in moves:
#1) if the move is already chained, there is no need to check push rules
#2) if the move is a returned move, we don't want to check push rules, as returning a returned move is the only decent way
# to receive goods without triggering the push rules again (which would duplicate chained operations)
if not move.move_dest_id:
domain = [('location_from_id', '=', move.location_dest_id.id)]
#priority goes to the route defined on the product and product category
route_ids = [x.id for x in move.product_id.route_ids + move.product_id.categ_id.total_route_ids]
rules = push_obj.search(cr, uid, domain + [('route_id', 'in', route_ids)], order='route_sequence, sequence', context=context)
if not rules:
#then we search on the warehouse if a rule can apply
wh_route_ids = []
if move.warehouse_id:
wh_route_ids = [x.id for x in move.warehouse_id.route_ids]
elif move.picking_id.picking_type_id.warehouse_id:
wh_route_ids = [x.id for x in move.picking_id.picking_type_id.warehouse_id.route_ids]
if wh_route_ids:
rules = push_obj.search(cr, uid, domain + [('route_id', 'in', wh_route_ids)], order='route_sequence, sequence', context=context)
if not rules:
#if no specialized push rule has been found yet, we try to find a general one (without route)
rules = push_obj.search(cr, uid, domain + [('route_id', '=', False)], order='sequence', context=context)
if rules:
rule = push_obj.browse(cr, uid, rules[0], context=context)
# Make sure it is not returning the return
if (not move.origin_returned_move_id or move.origin_returned_move_id.location_dest_id.id != rule.location_dest_id.id):
push_obj._apply(cr, uid, rule, move, context=context)
return True
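    # Hedged summary of the push-rule resolution above (no new behaviour implied): for an
    # unchained move the rules are searched in this order, stopping at the first non-empty result:
    #   1. rules on the routes of the product / product category
    #   2. rules on the routes of the move's (or picking type's) warehouse
    #   3. generic rules without a route
    # The matched rule is only applied if it would not undo a return.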
def _create_procurement(self, cr, uid, move, context=None):
""" This will create a procurement order """
return self.pool.get("procurement.order").create(cr, uid, self._prepare_procurement_from_move(cr, uid, move, context=context), context=context)
def _create_procurements(self, cr, uid, moves, context=None):
res = []
for move in moves:
res.append(self._create_procurement(cr, uid, move, context=context))
# Run procurements immediately when generated from multiple moves
self.pool['procurement.order'].run(cr, uid, res, context=context)
return res
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
picking_obj = self.pool['stock.picking']
track = not context.get('mail_notrack') and vals.get('picking_id')
if track:
picking = picking_obj.browse(cr, uid, vals['picking_id'], context=context)
initial_values = {picking.id: {'state': picking.state}}
res = super(stock_move, self).create(cr, uid, vals, context=context)
if track:
picking_obj.message_track(cr, uid, [vals['picking_id']], picking_obj.fields_get(cr, uid, ['state'], context=context), initial_values, context=context)
return res
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
picking_obj = self.pool['stock.picking']
# Check that we do not modify a stock.move which is done
frozen_fields = set(['product_qty', 'product_uom', 'location_id', 'location_dest_id', 'product_id'])
moves = self.browse(cr, uid, ids, context=context)
for move in moves:
if move.state == 'done':
if frozen_fields.intersection(vals):
raise UserError(_('Quantities, Units of Measure, Products and Locations cannot be modified on stock moves that have already been processed (except by the Administrator).'))
propagated_changes_dict = {}
#propagation of quantity change
if vals.get('product_uom_qty'):
propagated_changes_dict['product_uom_qty'] = vals['product_uom_qty']
if vals.get('product_uom_id'):
propagated_changes_dict['product_uom_id'] = vals['product_uom_id']
if vals.get('product_uos_qty'):
propagated_changes_dict['product_uos_qty'] = vals['product_uos_qty']
if vals.get('product_uos_id'):
propagated_changes_dict['product_uos_id'] = vals['product_uos_id']
#propagation of expected date:
propagated_date_field = False
if vals.get('date_expected'):
#propagate any manual change of the expected date
propagated_date_field = 'date_expected'
elif (vals.get('state', '') == 'done' and vals.get('date')):
#propagate also any delta observed when setting the move as done
propagated_date_field = 'date'
if not context.get('do_not_propagate', False) and (propagated_date_field or propagated_changes_dict):
#any propagation is (maybe) needed
for move in self.browse(cr, uid, ids, context=context):
if move.move_dest_id and move.propagate:
if 'date_expected' in propagated_changes_dict:
propagated_changes_dict.pop('date_expected')
if propagated_date_field:
current_date = datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT)
new_date = datetime.strptime(vals.get(propagated_date_field), DEFAULT_SERVER_DATETIME_FORMAT)
delta = new_date - current_date
if abs(delta.days) >= move.company_id.propagation_minimum_delta:
old_move_date = datetime.strptime(move.move_dest_id.date_expected, DEFAULT_SERVER_DATETIME_FORMAT)
new_move_date = (old_move_date + relativedelta.relativedelta(days=delta.days or 0)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
propagated_changes_dict['date_expected'] = new_move_date
#For pushed moves as well as for pulled moves, propagate by recursive call of write().
#Note that, for pulled moves we intentionally don't propagate on the procurement.
if propagated_changes_dict:
self.write(cr, uid, [move.move_dest_id.id], propagated_changes_dict, context=context)
track_pickings = not context.get('mail_notrack') and any(field in vals for field in ['state', 'picking_id', 'partially_available'])
if track_pickings:
to_track_picking_ids = set([move.picking_id.id for move in moves if move.picking_id])
if vals.get('picking_id'):
to_track_picking_ids.add(vals['picking_id'])
to_track_picking_ids = list(to_track_picking_ids)
pickings = picking_obj.browse(cr, uid, to_track_picking_ids, context=context)
initial_values = dict((picking.id, {'state': picking.state}) for picking in pickings)
res = super(stock_move, self).write(cr, uid, ids, vals, context=context)
if track_pickings:
picking_obj.message_track(cr, uid, to_track_picking_ids, picking_obj.fields_get(cr, uid, ['state'], context=context), initial_values, context=context)
return res
def onchange_quantity(self, cr, uid, ids, product_id, product_qty, product_uom):
""" On change of product quantity finds UoM
@param product_id: Product id
@param product_qty: Changed Quantity of product
@param product_uom: Unit of measure of product
@return: Dictionary of values
"""
warning = {}
result = {}
if (not product_id) or (product_qty <= 0.0):
result['product_qty'] = 0.0
return {'value': result}
product_obj = self.pool.get('product.product')
# Warn if the quantity was decreased
if ids:
for move in self.read(cr, uid, ids, ['product_qty']):
if product_qty < move['product_qty']:
warning.update({
'title': _('Information'),
'message': _("By changing this quantity here, you accept the "
"new quantity as complete: Odoo will not "
"automatically generate a back order.")})
break
return {'warning': warning}
def onchange_product_id(self, cr, uid, ids, prod_id=False, loc_id=False, loc_dest_id=False, partner_id=False):
""" On change of product id, if finds UoM, quantity
@param prod_id: Changed Product id
@param loc_id: Source location id
@param loc_dest_id: Destination location id
@param partner_id: Address id of partner
@return: Dictionary of values
"""
if not prod_id:
return {'domain': {'product_uom': []}}
user = self.pool.get('res.users').browse(cr, uid, uid)
lang = user and user.lang or False
if partner_id:
addr_rec = self.pool.get('res.partner').browse(cr, uid, partner_id)
if addr_rec:
lang = addr_rec and addr_rec.lang or False
ctx = {'lang': lang}
product = self.pool.get('product.product').browse(cr, uid, [prod_id], context=ctx)[0]
result = {
'name': product.partner_ref,
'product_uom': product.uom_id.id,
'product_uom_qty': 1.00,
}
if loc_id:
result['location_id'] = loc_id
if loc_dest_id:
result['location_dest_id'] = loc_dest_id
res = {'value': result,
'domain': {'product_uom': [('category_id', '=', product.uom_id.category_id.id)]}
}
return res
def _prepare_picking_assign(self, cr, uid, move, context=None):
""" Prepares a new picking for this move as it could not be assigned to
another picking. This method is designed to be inherited.
"""
values = {
'origin': move.origin,
'company_id': move.company_id and move.company_id.id or False,
'move_type': move.group_id and move.group_id.move_type or 'direct',
'partner_id': move.partner_id.id or False,
'picking_type_id': move.picking_type_id and move.picking_type_id.id or False,
'location_id': move.location_id.id,
'location_dest_id': move.location_dest_id.id,
}
return values
@api.cr_uid_ids_context
def _picking_assign(self, cr, uid, move_ids, context=None):
"""Try to assign the moves to an existing picking
that has not been reserved yet and has the same
procurement group, locations and picking type (moves should already have them identical)
Otherwise, create a new picking to assign them to.
"""
move = self.browse(cr, uid, move_ids, context=context)[0]
pick_obj = self.pool.get("stock.picking")
picks = pick_obj.search(cr, uid, [
('group_id', '=', move.group_id.id),
('location_id', '=', move.location_id.id),
('location_dest_id', '=', move.location_dest_id.id),
('picking_type_id', '=', move.picking_type_id.id),
('printed', '=', False),
('state', 'in', ['draft', 'confirmed', 'waiting', 'partially_available', 'assigned'])], limit=1, context=context)
if picks:
pick = picks[0]
else:
values = self._prepare_picking_assign(cr, uid, move, context=context)
pick = pick_obj.create(cr, uid, values, context=context)
return self.write(cr, uid, move_ids, {'picking_id': pick}, context=context)
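    # Illustrative sketch (hypothetical values): two confirmed moves sharing the same procurement
    # group, source/destination locations and picking type are attached to the same open picking
    # found by the search above; if none exists, _prepare_picking_assign() provides the values
    # used to create a new one.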
def onchange_date(self, cr, uid, ids, date, date_expected, context=None):
""" On change of Scheduled Date gives a Move date.
@param date_expected: Scheduled Date
@param date: Move Date
@return: Move Date
"""
if not date_expected:
date_expected = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return {'value': {'date': date_expected}}
def attribute_price(self, cr, uid, move, context=None):
"""
Attribute price to move, important in inter-company moves or receipts with only one partner
"""
if not move.price_unit:
price = move.product_id.standard_price
self.write(cr, uid, [move.id], {'price_unit': price})
def action_confirm(self, cr, uid, ids, context=None):
""" Confirms stock move or put it in waiting if it's linked to another move.
@return: List of ids.
"""
if not context:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
states = {
'confirmed': [],
'waiting': []
}
to_assign = {}
for move in self.browse(cr, uid, ids, context=context):
self.attribute_price(cr, uid, move, context=context)
state = 'confirmed'
            #if the move is preceded, then it's waiting (if the preceding move is done, then action_assign has been called already and its state is already available)
if move.move_orig_ids:
state = 'waiting'
            #if the move is split and some of its ancestors were preceded, then it's waiting as well
elif move.split_from:
move2 = move.split_from
while move2 and state != 'waiting':
if move2.move_orig_ids:
state = 'waiting'
move2 = move2.split_from
states[state].append(move.id)
if not move.picking_id and move.picking_type_id:
key = (move.group_id.id, move.location_id.id, move.location_dest_id.id)
if key not in to_assign:
to_assign[key] = []
to_assign[key].append(move.id)
moves = [move for move in self.browse(cr, uid, states['confirmed'], context=context) if move.procure_method == 'make_to_order']
self._create_procurements(cr, uid, moves, context=context)
for move in moves:
states['waiting'].append(move.id)
states['confirmed'].remove(move.id)
for state, write_ids in states.items():
if len(write_ids):
self.write(cr, uid, write_ids, {'state': state}, context=context)
#assign picking in batch for all confirmed move that share the same details
for key, move_ids in to_assign.items():
self._picking_assign(cr, uid, move_ids, context=context)
moves = self.browse(cr, uid, ids, context=context)
self._push_apply(cr, uid, moves, context=context)
return ids
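    # Hedged walk-through of action_confirm (illustrative, not normative):
    #   * a move with ancestors (move_orig_ids), or split from such a move, goes to 'waiting'
    #   * a 'make_to_order' move gets a procurement created and is then also set to 'waiting'
    #   * any other move goes to 'confirmed'
    # Moves without a picking but with a picking type are then grouped per
    # (group, source location, destination location) and assigned to a picking,
    # after which push rules are applied.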
def force_assign(self, cr, uid, ids, context=None):
""" Changes the state to assigned.
@return: True
"""
res = self.write(cr, uid, ids, {'state': 'assigned'}, context=context)
self.check_recompute_pack_op(cr, uid, ids, context=context)
return res
def check_tracking(self, cr, uid, move, ops, context=None):
""" Checks if serial number is assigned to stock move or not and raise an error if it had to.
"""
if move.picking_id and (move.picking_id.picking_type_id.use_existing_lots or move.picking_id.picking_type_id.use_create_lots) and \
move.product_id.tracking != 'none':
if not (move.restrict_lot_id or (ops and (ops.product_id and ops.pack_lot_ids)) or (ops and not ops.product_id)):
raise UserError(_('You need to provide a Lot/Serial Number for product %s') % move.product_id.name)
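        # Illustrative example (hypothetical data): a picking type with use_existing_lots=True
        # moving a product tracked by serial number, where neither the move's restrict_lot_id nor
        # the operation's pack_lot_ids are set, would trigger the UserError above.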
def check_recompute_pack_op(self, cr, uid, ids, context=None):
pickings = list(set([x.picking_id for x in self.browse(cr, uid, ids, context=context) if x.picking_id]))
pickings_partial = []
pickings_write = []
pick_obj = self.pool['stock.picking']
for pick in pickings:
if pick.state in ('waiting', 'confirmed'): #In case of 'all at once' delivery method it should not prepare pack operations
continue
# Check if someone was treating the picking already
if not any([x.qty_done > 0 for x in pick.pack_operation_ids]):
pickings_partial.append(pick.id)
else:
pickings_write.append(pick.id)
if pickings_partial:
pick_obj.do_prepare_partial(cr, uid, pickings_partial, context=context)
if pickings_write:
pick_obj.write(cr, uid, pickings_write, {'recompute_pack_op': True}, context=context)
def action_assign(self, cr, uid, ids, no_prepare=False, context=None):
""" Checks the product type and accordingly writes the state.
"""
context = context or {}
quant_obj = self.pool.get("stock.quant")
uom_obj = self.pool['product.uom']
to_assign_moves = set()
main_domain = {}
todo_moves = []
operations = set()
ancestors_list = {}
self.do_unreserve(cr, uid, [x.id for x in self.browse(cr, uid, ids, context=context) if x.reserved_quant_ids and x.state in ['confirmed', 'waiting', 'assigned']], context=context)
for move in self.browse(cr, uid, ids, context=context):
if move.state not in ('confirmed', 'waiting', 'assigned'):
continue
if move.location_id.usage in ('supplier', 'inventory', 'production'):
to_assign_moves.add(move.id)
#in case the move is returned, we want to try to find quants before forcing the assignment
if not move.origin_returned_move_id:
continue
if move.product_id.type == 'consu':
to_assign_moves.add(move.id)
continue
else:
todo_moves.append(move)
#we always search for yet unassigned quants
main_domain[move.id] = [('reservation_id', '=', False), ('qty', '>', 0)]
                #if the move is preceded, restrict the choice of quants to the ones moved previously by the original move
ancestors = self.find_move_ancestors(cr, uid, move, context=context)
ancestors_list[move.id] = True if ancestors else False
if move.state == 'waiting' and not ancestors:
                    #if the waiting move doesn't have any ancestor yet (PO/MO not confirmed yet), don't find any quant available in stock
main_domain[move.id] += [('id', '=', False)]
elif ancestors:
main_domain[move.id] += [('history_ids', 'in', ancestors)]
#if the move is returned from another, restrict the choice of quants to the ones that follow the returned move
if move.origin_returned_move_id:
main_domain[move.id] += [('history_ids', 'in', move.origin_returned_move_id.id)]
for link in move.linked_move_operation_ids:
operations.add(link.operation_id)
        # Check all ops and sort them: we want to process the packages first, then operations with lots, then the rest
operations = list(operations)
operations.sort(key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.pack_lot_ids and -1 or 0))
for ops in operations:
            #first try to find quants based on specific domains given by linked operations for the case where we want to re-reserve according to existing pack operations
if not (ops.product_id and ops.pack_lot_ids):
for record in ops.linked_move_operation_ids:
move = record.move_id
if move.id in main_domain:
qty = record.qty
domain = main_domain[move.id]
if qty:
quants = quant_obj.quants_get_preferred_domain(cr, uid, qty, move, ops=ops, domain=domain, preferred_domain_list=[], context=context)
quant_obj.quants_reserve(cr, uid, quants, move, record, context=context)
else:
lot_qty = {}
rounding = ops.product_id.uom_id.rounding
for pack_lot in ops.pack_lot_ids:
lot_qty[pack_lot.lot_id.id] = uom_obj._compute_qty(cr, uid, ops.product_uom_id.id, pack_lot.qty, ops.product_id.uom_id.id)
for record in ops.linked_move_operation_ids.filtered(lambda x: x.move_id.id in main_domain):
move_qty = record.qty
move = record.move_id
domain = main_domain[move.id]
for lot in lot_qty:
if float_compare(lot_qty[lot], 0, precision_rounding=rounding) > 0 and float_compare(move_qty, 0, precision_rounding=rounding) > 0:
qty = min(lot_qty[lot], move_qty)
quants = quant_obj.quants_get_preferred_domain(cr, uid, qty, move, ops=ops, lot_id=lot, domain=domain, preferred_domain_list=[], context=context)
quant_obj.quants_reserve(cr, uid, quants, move, record, context=context)
lot_qty[lot] -= qty
move_qty -= qty
# Sort moves to reserve first the ones with ancestors, in case the same product is listed in
# different stock moves.
todo_moves.sort(key=lambda x: -1 if ancestors_list.get(x.id) else 0)
for move in todo_moves:
#then if the move isn't totally assigned, try to find quants without any specific domain
if (move.state != 'assigned') and not context.get("reserve_only_ops"):
qty_already_assigned = move.reserved_availability
qty = move.product_qty - qty_already_assigned
quants = quant_obj.quants_get_preferred_domain(cr, uid, qty, move, domain=main_domain[move.id], preferred_domain_list=[], context=context)
quant_obj.quants_reserve(cr, uid, quants, move, context=context)
        #force assignment of consumable products and incoming moves from supplier/inventory/production
# Do not take force_assign as it would create pack operations
if to_assign_moves:
self.write(cr, uid, list(to_assign_moves), {'state': 'assigned'}, context=context)
if not no_prepare:
self.check_recompute_pack_op(cr, uid, ids, context=context)
def action_cancel(self, cr, uid, ids, context=None):
""" Cancels the moves and if all moves are cancelled it cancels the picking.
@return: True
"""
procurement_obj = self.pool.get('procurement.order')
context = context or {}
procs_to_check = set()
for move in self.browse(cr, uid, ids, context=context):
if move.state == 'done':
raise UserError(_('You cannot cancel a stock move that has been set to \'Done\'.'))
if move.reserved_quant_ids:
self.pool.get("stock.quant").quants_unreserve(cr, uid, move, context=context)
if context.get('cancel_procurement'):
if move.propagate:
procurement_ids = procurement_obj.search(cr, uid, [('move_dest_id', '=', move.id)], context=context)
procurement_obj.cancel(cr, uid, procurement_ids, context=context)
else:
if move.move_dest_id:
if move.propagate:
self.action_cancel(cr, uid, [move.move_dest_id.id], context=context)
elif move.move_dest_id.state == 'waiting':
#If waiting, the chain will be broken and we are not sure if we can still wait for it (=> could take from stock instead)
self.write(cr, uid, [move.move_dest_id.id], {'state': 'confirmed'}, context=context)
if move.procurement_id:
# Does the same as procurement check, only eliminating a refresh
procs_to_check.add(move.procurement_id.id)
res = self.write(cr, uid, ids, {'state': 'cancel', 'move_dest_id': False}, context=context)
if procs_to_check:
procurement_obj.check(cr, uid, list(procs_to_check), context=context)
return res
def _check_package_from_moves(self, cr, uid, ids, context=None):
pack_obj = self.pool.get("stock.quant.package")
packs = set()
for move in self.browse(cr, uid, ids, context=context):
packs |= set([q.package_id for q in move.quant_ids if q.package_id and q.qty > 0])
return pack_obj._check_location_constraint(cr, uid, list(packs), context=context)
def find_move_ancestors(self, cr, uid, move, context=None):
'''Find the first level ancestors of given move '''
ancestors = []
move2 = move
while move2:
ancestors += [x.id for x in move2.move_orig_ids]
            #loop on split_from to find the ancestors of split moves only if the move has no direct ancestor (priority goes to them)
move2 = not move2.move_orig_ids and move2.split_from or False
return ancestors
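    # Illustrative chain (hypothetical moves): move C split from move B, which was created from
    # move A (A is in B.move_orig_ids). find_move_ancestors(C) walks C.split_from -> B (since C
    # has no move_orig_ids of its own) and returns [A.id].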
@api.cr_uid_ids_context
def recalculate_move_state(self, cr, uid, move_ids, context=None):
        '''Recompute the state of the given moves because their reserved quants were used to fulfill another operation'''
for move in self.browse(cr, uid, move_ids, context=context):
vals = {}
reserved_quant_ids = move.reserved_quant_ids
if len(reserved_quant_ids) > 0 and not move.partially_available:
vals['partially_available'] = True
if len(reserved_quant_ids) == 0 and move.partially_available:
vals['partially_available'] = False
if move.state == 'assigned':
if self.find_move_ancestors(cr, uid, move, context=context):
vals['state'] = 'waiting'
else:
vals['state'] = 'confirmed'
if vals:
self.write(cr, uid, [move.id], vals, context=context)
def _move_quants_by_lot(self, cr, uid, ops, lot_qty, quants_taken, false_quants, lot_move_qty, quant_dest_package_id, context=None):
"""
        Processes all the pack operation lots of a pack operation.
        For every move:
            First, handle the quants that were already reserved with a lot (those are already subtracted from the lots to do)
            Then go through all the lots still to process:
                Assign the reserved quants that have no lot, lot by lot
                Then look for quants that are not reserved, or reserved elsewhere, with that lot or without lot (using the traditional method)
"""
quant_obj = self.pool['stock.quant']
fallback_domain = [('reservation_id', '=', False)]
fallback_domain2 = ['&', ('reservation_id', 'not in', [x for x in lot_move_qty.keys()]), ('reservation_id', '!=', False)]
preferred_domain_list = [fallback_domain] + [fallback_domain2]
rounding = ops.product_id.uom_id.rounding
for move in lot_move_qty:
move_quants_dict = {}
move_rec = self.pool['stock.move'].browse(cr, uid, move, context=context)
# Assign quants already reserved with lot to the correct
for quant in quants_taken:
if quant[0] <= move_rec.reserved_quant_ids:
move_quants_dict.setdefault(quant[0].lot_id.id, [])
move_quants_dict[quant[0].lot_id.id] += [quant]
false_quants_move = [x for x in false_quants if x[0].reservation_id.id == move]
for lot in lot_qty:
move_quants_dict.setdefault(lot, [])
redo_false_quants = False
# Take remaining reserved quants with no lot first
# (This will be used mainly when incoming had no lot and you do outgoing with)
while false_quants_move and float_compare(lot_qty[lot], 0, precision_rounding=rounding) > 0 and float_compare(lot_move_qty[move], 0, precision_rounding=rounding) > 0:
qty_min = min(lot_qty[lot], lot_move_qty[move])
if false_quants_move[0].qty > qty_min:
move_quants_dict[lot] += [(false_quants_move[0], qty_min)]
qty = qty_min
redo_false_quants = True
else:
qty = false_quants_move[0].qty
move_quants_dict[lot] += [(false_quants_move[0], qty)]
false_quants_move.pop(0)
lot_qty[lot] -= qty
lot_move_qty[move] -= qty
# Search other with first matching lots and then without lots
if float_compare(lot_move_qty[move], 0, precision_rounding=rounding) > 0 and float_compare(lot_qty[lot], 0, precision_rounding=rounding) > 0:
# Search if we can find quants with that lot
domain = [('qty', '>', 0)]
qty = min(lot_qty[lot], lot_move_qty[move])
quants = quant_obj.quants_get_preferred_domain(cr, uid, qty, move_rec, ops=ops, lot_id=lot, domain=domain,
preferred_domain_list=preferred_domain_list, context=context)
move_quants_dict[lot] += quants
lot_qty[lot] -= qty
lot_move_qty[move] -= qty
#Move all the quants related to that lot/move
if move_quants_dict[lot]:
quant_obj.quants_move(cr, uid, move_quants_dict[lot], move_rec, ops.location_dest_id, location_from=ops.location_id,
lot_id=lot, owner_id=ops.owner_id.id, src_package_id=ops.package_id.id,
dest_package_id=quant_dest_package_id, context=context)
if redo_false_quants:
move_rec = self.pool['stock.move'].browse(cr, uid, move, context=context)
false_quants_move = [x for x in move_rec.reserved_quant_ids if (not x.lot_id) and (x.owner_id.id == ops.owner_id.id) \
and (x.location_id.id == ops.location_id.id) and (x.package_id.id == ops.package_id.id)]
def action_done(self, cr, uid, ids, context=None):
""" Process completely the moves given as ids and if all moves are done, it will finish the picking.
"""
context = context or {}
picking_obj = self.pool.get("stock.picking")
quant_obj = self.pool.get("stock.quant")
uom_obj = self.pool.get("product.uom")
todo = [move.id for move in self.browse(cr, uid, ids, context=context) if move.state == "draft"]
if todo:
ids = self.action_confirm(cr, uid, todo, context=context)
pickings = set()
procurement_ids = set()
#Search operations that are linked to the moves
operations = set()
move_qty = {}
for move in self.browse(cr, uid, ids, context=context):
if move.picking_id:
pickings.add(move.picking_id.id)
move_qty[move.id] = move.product_qty
for link in move.linked_move_operation_ids:
operations.add(link.operation_id)
#Sort operations according to entire packages first, then package + lot, package only, lot only
operations = list(operations)
operations.sort(key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.pack_lot_ids and -1 or 0))
for ops in operations:
if ops.picking_id:
pickings.add(ops.picking_id.id)
entire_pack=False
if ops.product_id:
#If a product is given, the result is always put immediately in the result package (if it is False, they are without package)
quant_dest_package_id = ops.result_package_id.id
else:
                # When a pack is moved entirely, nothing should be written on the quants for the destination package
quant_dest_package_id = False
entire_pack=True
lot_qty = {}
tot_qty = 0.0
for pack_lot in ops.pack_lot_ids:
qty = uom_obj._compute_qty(cr, uid, ops.product_uom_id.id, pack_lot.qty, ops.product_id.uom_id.id)
lot_qty[pack_lot.lot_id.id] = qty
tot_qty += pack_lot.qty
if ops.pack_lot_ids and ops.product_id and float_compare(tot_qty, ops.product_qty, precision_rounding=ops.product_uom_id.rounding) != 0.0:
raise UserError(_('You have a difference between the quantity on the operation and the quantities specified for the lots. '))
quants_taken = []
false_quants = []
lot_move_qty = {}
#Group links by move first
move_qty_ops = {}
for record in ops.linked_move_operation_ids:
move = record.move_id
if not move_qty_ops.get(move):
move_qty_ops[move] = record.qty
else:
move_qty_ops[move] += record.qty
#Process every move only once for every pack operation
for move in move_qty_ops:
main_domain = [('qty', '>', 0)]
self.check_tracking(cr, uid, move, ops, context=context)
preferred_domain = [('reservation_id', '=', move.id)]
fallback_domain = [('reservation_id', '=', False)]
fallback_domain2 = ['&', ('reservation_id', '!=', move.id), ('reservation_id', '!=', False)]
if not ops.pack_lot_ids:
preferred_domain_list = [preferred_domain] + [fallback_domain] + [fallback_domain2]
quants = quant_obj.quants_get_preferred_domain(cr, uid, move_qty_ops[move], move, ops=ops, domain=main_domain,
preferred_domain_list=preferred_domain_list, context=context)
quant_obj.quants_move(cr, uid, quants, move, ops.location_dest_id, location_from=ops.location_id,
lot_id=False, owner_id=ops.owner_id.id, src_package_id=ops.package_id.id,
dest_package_id=quant_dest_package_id, entire_pack=entire_pack, context=context)
else:
# Check what you can do with reserved quants already
qty_on_link = move_qty_ops[move]
rounding = ops.product_id.uom_id.rounding
for reserved_quant in move.reserved_quant_ids:
if (reserved_quant.owner_id.id != ops.owner_id.id) or (reserved_quant.location_id.id != ops.location_id.id) or \
(reserved_quant.package_id.id != ops.package_id.id):
continue
if not reserved_quant.lot_id:
false_quants += [reserved_quant]
elif float_compare(lot_qty.get(reserved_quant.lot_id.id, 0), 0, precision_rounding=rounding) > 0:
if float_compare(lot_qty[reserved_quant.lot_id.id], reserved_quant.qty, precision_rounding=rounding) >= 0:
lot_qty[reserved_quant.lot_id.id] -= reserved_quant.qty
quants_taken += [(reserved_quant, reserved_quant.qty)]
qty_on_link -= reserved_quant.qty
else:
quants_taken += [(reserved_quant, lot_qty[reserved_quant.lot_id.id])]
lot_qty[reserved_quant.lot_id.id] = 0
qty_on_link -= lot_qty[reserved_quant.lot_id.id]
lot_move_qty[move.id] = qty_on_link
if not move_qty.get(move.id):
raise UserError(_("The roundings of your Unit of Measures %s on the move vs. %s on the product don't allow to do these operations or you are not transferring the picking at once. ") % (move.product_uom.name, move.product_id.uom_id.name))
move_qty[move.id] -= move_qty_ops[move]
#Handle lots separately
if ops.pack_lot_ids:
self._move_quants_by_lot(cr, uid, ops, lot_qty, quants_taken, false_quants, lot_move_qty, quant_dest_package_id, context=context)
# Handle pack in pack
if not ops.product_id and ops.package_id and ops.result_package_id.id != ops.package_id.parent_id.id:
self.pool.get('stock.quant.package').write(cr, SUPERUSER_ID, [ops.package_id.id], {'parent_id': ops.result_package_id.id}, context=context)
#Check for remaining qtys and unreserve/check move_dest_id in
move_dest_ids = set()
for move in self.browse(cr, uid, ids, context=context):
move_qty_cmp = float_compare(move_qty[move.id], 0, precision_rounding=move.product_id.uom_id.rounding)
if move_qty_cmp > 0: # (=In case no pack operations in picking)
main_domain = [('qty', '>', 0)]
preferred_domain = [('reservation_id', '=', move.id)]
fallback_domain = [('reservation_id', '=', False)]
fallback_domain2 = ['&', ('reservation_id', '!=', move.id), ('reservation_id', '!=', False)]
preferred_domain_list = [preferred_domain] + [fallback_domain] + [fallback_domain2]
self.check_tracking(cr, uid, move, False, context=context)
qty = move_qty[move.id]
quants = quant_obj.quants_get_preferred_domain(cr, uid, qty, move, domain=main_domain, preferred_domain_list=preferred_domain_list, context=context)
quant_obj.quants_move(cr, uid, quants, move, move.location_dest_id, lot_id=move.restrict_lot_id.id, owner_id=move.restrict_partner_id.id, context=context)
# If the move has a destination, add it to the list to reserve
if move.move_dest_id and move.move_dest_id.state in ('waiting', 'confirmed'):
move_dest_ids.add(move.move_dest_id.id)
if move.procurement_id:
procurement_ids.add(move.procurement_id.id)
#unreserve the quants and make them available for other operations/moves
quant_obj.quants_unreserve(cr, uid, move, context=context)
# Check the packages have been placed in the correct locations
self._check_package_from_moves(cr, uid, ids, context=context)
#set the move as done
self.write(cr, uid, ids, {'state': 'done', 'date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context)
self.pool.get('procurement.order').check(cr, uid, list(procurement_ids), context=context)
#assign destination moves
if move_dest_ids:
self.action_assign(cr, uid, list(move_dest_ids), context=context)
#check picking state to set the date_done is needed
done_picking = []
for picking in picking_obj.browse(cr, uid, list(pickings), context=context):
if picking.state == 'done' and not picking.date_done:
done_picking.append(picking.id)
if done_picking:
picking_obj.write(cr, uid, done_picking, {'date_done': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context)
return True
def unlink(self, cr, uid, ids, context=None):
context = context or {}
for move in self.browse(cr, uid, ids, context=context):
if move.state not in ('draft', 'cancel'):
raise UserError(_('You can only delete draft moves.'))
return super(stock_move, self).unlink(cr, uid, ids, context=context)
def action_scrap(self, cr, uid, ids, quantity, location_id, restrict_lot_id=False, restrict_partner_id=False, context=None):
""" Move the scrap/damaged product into scrap location
@param cr: the database cursor
@param uid: the user id
@param ids: ids of stock move object to be scrapped
@param quantity : specify scrap qty
@param location_id : specify scrap location
@param context: context arguments
        @return: Scrapped lines
"""
quant_obj = self.pool.get("stock.quant")
#quantity should be given in MOVE UOM
if quantity <= 0:
raise UserError(_('Please provide a positive quantity to scrap.'))
res = []
for move in self.browse(cr, uid, ids, context=context):
source_location = move.location_id
if move.state == 'done':
source_location = move.location_dest_id
            #Previously used to prevent scrapping from a virtual location but not necessary anymore
#if source_location.usage != 'internal':
#restrict to scrap from a virtual location because it's meaningless and it may introduce errors in stock ('creating' new products from nowhere)
#raise UserError(_('Forbidden operation: it is not allowed to scrap products from a virtual location.'))
move_qty = move.product_qty
default_val = {
'location_id': source_location.id,
'product_uom_qty': quantity,
'state': move.state,
'scrapped': True,
'location_dest_id': location_id,
'restrict_lot_id': restrict_lot_id,
'restrict_partner_id': restrict_partner_id,
}
new_move = self.copy(cr, uid, move.id, default_val)
res += [new_move]
product_obj = self.pool.get('product.product')
for product in product_obj.browse(cr, uid, [move.product_id.id], context=context):
if move.picking_id:
uom = product.uom_id.name if product.uom_id else ''
message = _("%s %s %s has been <b>moved to</b> scrap.") % (quantity, uom, product.name)
move.picking_id.message_post(body=message)
# We "flag" the quant from which we want to scrap the products. To do so:
# - we select the quants related to the move we scrap from
# - we reserve the quants with the scrapped move
            # See self.action_done, and particularly how the "preferred_domain" is defined, for clarification
scrap_move = self.browse(cr, uid, new_move, context=context)
if move.state == 'done' and scrap_move.location_id.usage not in ('supplier', 'inventory', 'production'):
domain = [('qty', '>', 0), ('history_ids', 'in', [move.id])]
# We use scrap_move data since a reservation makes sense for a move not already done
quants = quant_obj.quants_get_preferred_domain(cr, uid, quantity, scrap_move, domain=domain, context=context)
quant_obj.quants_reserve(cr, uid, quants, scrap_move, context=context)
self.action_done(cr, uid, res, context=context)
return res
def split(self, cr, uid, move, qty, restrict_lot_id=False, restrict_partner_id=False, context=None):
""" Splits qty from move move into a new move
:param move: browse record
:param qty: float. quantity to split (given in product UoM)
:param restrict_lot_id: optional production lot that can be given in order to force the new move to restrict its choice of quants to this lot.
:param restrict_partner_id: optional partner that can be given in order to force the new move to restrict its choice of quants to the ones belonging to this partner.
        :param context: dictionary. can contain the special key 'source_location_id' in order to force the source location when copying the move
returns the ID of the backorder move created
"""
if move.state in ('done', 'cancel'):
            raise UserError(_('You cannot split a move that is done or cancelled.'))
if move.state == 'draft':
#we restrict the split of a draft move because if not confirmed yet, it may be replaced by several other moves in
#case of phantom bom (with mrp module). And we don't want to deal with this complexity by copying the product that will explode.
raise UserError(_('You cannot split a draft move. It needs to be confirmed first.'))
if move.product_qty <= qty or qty == 0:
return move.id
uom_obj = self.pool.get('product.uom')
context = context or {}
        #HALF-UP rounding, as any rounding errors will only come from the propagation of the default UoM's rounding error
uom_qty = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, qty, move.product_uom, rounding_method='HALF-UP', context=context)
defaults = {
'product_uom_qty': uom_qty,
'procure_method': 'make_to_stock',
'restrict_lot_id': restrict_lot_id,
'split_from': move.id,
'procurement_id': move.procurement_id.id,
'move_dest_id': move.move_dest_id.id,
'origin_returned_move_id': move.origin_returned_move_id.id,
}
if restrict_partner_id:
defaults['restrict_partner_id'] = restrict_partner_id
if context.get('source_location_id'):
defaults['location_id'] = context['source_location_id']
new_move = self.copy(cr, uid, move.id, defaults, context=context)
ctx = context.copy()
ctx['do_not_propagate'] = True
self.write(cr, uid, [move.id], {
'product_uom_qty': move.product_uom_qty - uom_qty,
}, context=ctx)
if move.move_dest_id and move.propagate and move.move_dest_id.state not in ('done', 'cancel'):
new_move_prop = self.split(cr, uid, move.move_dest_id, qty, context=context)
self.write(cr, uid, [new_move], {'move_dest_id': new_move_prop}, context=context)
#returning the first element of list returned by action_confirm is ok because we checked it wouldn't be exploded (and
        #thus the result of action_confirm should always be a list of length 1)
return self.action_confirm(cr, uid, [new_move], context=context)[0]
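    # Illustrative example (hypothetical quantities): splitting 3.0 units off a confirmed move of
    # 10.0 units copies the move with product_uom_qty=3.0 (procure_method forced to
    # 'make_to_stock'), lowers the original move to 7.0, propagates the split to move_dest_id
    # when 'propagate' is set, and returns the id of the newly confirmed move.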
def get_code_from_locs(self, cr, uid, move, location_id=False, location_dest_id=False, context=None):
"""
Returns the code the picking type should have. This can easily be used
to check if a move is internal or not
move, location_id and location_dest_id are browse records
"""
code = 'internal'
src_loc = location_id or move.location_id
dest_loc = location_dest_id or move.location_dest_id
if src_loc.usage == 'internal' and dest_loc.usage != 'internal':
code = 'outgoing'
if src_loc.usage != 'internal' and dest_loc.usage == 'internal':
code = 'incoming'
return code
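    # Illustrative mapping (hypothetical locations): internal -> customer gives 'outgoing',
    # supplier -> internal gives 'incoming', internal -> internal (or any other combination)
    # falls back to 'internal'.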
def show_picking(self, cr, uid, ids, context=None):
assert len(ids) > 0
picking_id = self.browse(cr, uid, ids[0], context=context).picking_id.id
if picking_id:
data_obj = self.pool['ir.model.data']
view = data_obj.xmlid_to_res_id(cr, uid, 'stock.view_picking_form')
return {
'name': _('Transfer'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'stock.picking',
'views': [(view, 'form')],
'view_id': view,
'target': 'new',
'res_id': picking_id,
}
class stock_inventory(osv.osv):
_name = "stock.inventory"
_description = "Inventory"
def _get_move_ids_exist(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for inv in self.browse(cr, uid, ids, context=context):
res[inv.id] = False
if inv.move_ids:
res[inv.id] = True
return res
def _get_available_filters(self, cr, uid, context=None):
"""
        This function will return the list of filters allowed according to the options checked
in 'Settings\Warehouse'.
:rtype: list of tuple
"""
#default available choices
res_filter = [('none', _('All products')), ('partial', _('Select products manually')), ('product', _('One product only'))]
if self.pool.get('res.users').has_group(cr, uid, 'stock.group_tracking_owner'):
res_filter.append(('owner', _('One owner only')))
res_filter.append(('product_owner', _('One product for a specific owner')))
if self.pool.get('res.users').has_group(cr, uid, 'stock.group_production_lot'):
res_filter.append(('lot', _('One Lot/Serial Number')))
if self.pool.get('res.users').has_group(cr, uid, 'stock.group_tracking_lot'):
res_filter.append(('pack', _('A Pack')))
return res_filter
def _get_total_qty(self, cr, uid, ids, field_name, args, context=None):
res = {}
for inv in self.browse(cr, uid, ids, context=context):
res[inv.id] = sum([x.product_qty for x in inv.line_ids])
return res
INVENTORY_STATE_SELECTION = [
('draft', 'Draft'),
('cancel', 'Cancelled'),
('confirm', 'In Progress'),
('done', 'Validated'),
]
_columns = {
'name': fields.char('Inventory Reference', required=True, readonly=True, states={'draft': [('readonly', False)]}, help="Inventory Name."),
'date': fields.datetime('Inventory Date', required=True, readonly=True, help="The date that will be used for the stock level check of the products and the validation of the stock move related to this inventory."),
'line_ids': fields.one2many('stock.inventory.line', 'inventory_id', 'Inventories', readonly=False, states={'done': [('readonly', True)]}, help="Inventory Lines.", copy=True),
'move_ids': fields.one2many('stock.move', 'inventory_id', 'Created Moves', help="Inventory Moves.", states={'done': [('readonly', True)]}),
'state': fields.selection(INVENTORY_STATE_SELECTION, 'Status', readonly=True, select=True, copy=False),
'company_id': fields.many2one('res.company', 'Company', required=True, select=True, readonly=True, states={'draft': [('readonly', False)]}),
'location_id': fields.many2one('stock.location', 'Inventoried Location', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_id': fields.many2one('product.product', 'Inventoried Product', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Product to focus your inventory on a particular Product."),
'package_id': fields.many2one('stock.quant.package', 'Inventoried Pack', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Pack to focus your inventory on a particular Pack."),
'partner_id': fields.many2one('res.partner', 'Inventoried Owner', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Owner to focus your inventory on a particular Owner."),
'lot_id': fields.many2one('stock.production.lot', 'Inventoried Lot/Serial Number', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Lot/Serial Number to focus your inventory on a particular Lot/Serial Number.", copy=False),
# technical field for attrs in view
        'move_ids_exist': fields.function(_get_move_ids_exist, type='boolean', string='Has Stock Moves', help='Check the existence of stock moves linked to this inventory'),
'filter': fields.selection(_get_available_filters, 'Inventory of', required=True,
help="If you do an entire inventory, you can choose 'All Products' and it will prefill the inventory with the current stock. If you only do some products "\
"(e.g. Cycle Counting) you can choose 'Manual Selection of Products' and the system won't propose anything. You can also let the "\
"system propose for a single product / lot /... "),
'total_qty': fields.function(_get_total_qty, type="float"),
}
def _default_stock_location(self, cr, uid, context=None):
try:
warehouse = self.pool.get('ir.model.data').get_object(cr, uid, 'stock', 'warehouse0')
return warehouse.lot_stock_id.id
except:
return False
_defaults = {
'date': fields.datetime.now,
'state': 'draft',
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.inventory', context=c),
'location_id': _default_stock_location,
'filter': 'none',
}
@api.onchange('location_id')
def onchange_location_id(self):
if self.location_id.company_id:
self.company_id = self.location_id.company_id
def reset_real_qty(self, cr, uid, ids, context=None):
inventory = self.browse(cr, uid, ids[0], context=context)
line_ids = [line.id for line in inventory.line_ids]
self.pool.get('stock.inventory.line').write(cr, uid, line_ids, {'product_qty': 0})
return True
def action_done(self, cr, uid, ids, context=None):
""" Finish the inventory
@return: True
"""
for inv in self.browse(cr, uid, ids, context=context):
for inventory_line in inv.line_ids:
if inventory_line.product_qty < 0 and inventory_line.product_qty != inventory_line.theoretical_qty:
raise UserError(_('You cannot set a negative product quantity in an inventory line:\n\t%s - qty: %s') % (inventory_line.product_id.name, inventory_line.product_qty))
self.action_check(cr, uid, [inv.id], context=context)
self.write(cr, uid, [inv.id], {'state': 'done'}, context=context)
self.post_inventory(cr, uid, inv, context=context)
return True
def post_inventory(self, cr, uid, inv, context=None):
        #The inventory is posted as a single step, which means quants cannot be moved from one internal location to another using an inventory:
        #they will be moved to inventory loss, and other quants will be created in the encoded quant location. This is normal behavior,
        #as quants cannot be reused from the inventory location (users can still manually move the products before/after the inventory if they want).
move_obj = self.pool.get('stock.move')
move_obj.action_done(cr, uid, [x.id for x in inv.move_ids if x.state != 'done'], context=context)
def action_check(self, cr, uid, ids, context=None):
""" Checks the inventory and computes the stock move to do
@return: True
"""
inventory_line_obj = self.pool.get('stock.inventory.line')
stock_move_obj = self.pool.get('stock.move')
for inventory in self.browse(cr, uid, ids, context=context):
#first remove the existing stock moves linked to this inventory
move_ids = [move.id for move in inventory.move_ids]
stock_move_obj.unlink(cr, uid, move_ids, context=context)
for line in inventory.line_ids:
                #compare the checked quantities on inventory lines to the theoretical ones
stock_move = inventory_line_obj._resolve_inventory_line(cr, uid, line, context=context)
def action_cancel_draft(self, cr, uid, ids, context=None):
""" Cancels the stock move and change inventory state to draft.
@return: True
"""
for inv in self.browse(cr, uid, ids, context=context):
self.write(cr, uid, [inv.id], {'line_ids': [(5,)]}, context=context)
self.pool.get('stock.move').action_cancel(cr, uid, [x.id for x in inv.move_ids], context=context)
self.write(cr, uid, [inv.id], {'state': 'draft'}, context=context)
return True
def action_cancel_inventory(self, cr, uid, ids, context=None):
self.action_cancel_draft(cr, uid, ids, context=context)
def prepare_inventory(self, cr, uid, ids, context=None):
inventory_line_obj = self.pool.get('stock.inventory.line')
for inventory in self.browse(cr, uid, ids, context=context):
# If there are inventory lines already (e.g. from import), respect those and set their theoretical qty
line_ids = [line.id for line in inventory.line_ids]
if not line_ids and inventory.filter != 'partial':
#compute the inventory lines and create them
vals = self._get_inventory_lines(cr, uid, inventory, context=context)
for product_line in vals:
inventory_line_obj.create(cr, uid, product_line, context=context)
return self.write(cr, uid, ids, {'state': 'confirm', 'date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)})
def _get_inventory_lines(self, cr, uid, inventory, context=None):
location_obj = self.pool.get('stock.location')
product_obj = self.pool.get('product.product')
location_ids = location_obj.search(cr, uid, [('id', 'child_of', [inventory.location_id.id])], context=context)
domain = ' location_id in %s'
args = (tuple(location_ids),)
if inventory.company_id.id:
domain += ' and company_id = %s'
args += (inventory.company_id.id,)
if inventory.partner_id:
domain += ' and owner_id = %s'
args += (inventory.partner_id.id,)
if inventory.lot_id:
domain += ' and lot_id = %s'
args += (inventory.lot_id.id,)
if inventory.product_id:
domain += ' and product_id = %s'
args += (inventory.product_id.id,)
if inventory.package_id:
domain += ' and package_id = %s'
args += (inventory.package_id.id,)
cr.execute('''
SELECT product_id, sum(qty) as product_qty, location_id, lot_id as prod_lot_id, package_id, owner_id as partner_id
FROM stock_quant WHERE''' + domain + '''
GROUP BY product_id, location_id, lot_id, package_id, partner_id
''', args)
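        # Editor's note (illustrative, not from the original code): each row returned
        # by cr.dictfetchall() below is expected to look like
        #   {'product_id': 42, 'product_qty': 10.0, 'location_id': 12,
        #    'prod_lot_id': None, 'package_id': None, 'partner_id': None}
        # the None values are normalised to False in the loop that follows.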
vals = []
for product_line in cr.dictfetchall():
            #replace the None values in the dictionary by False, because falsy values are tested later on
for key, value in product_line.items():
if not value:
product_line[key] = False
product_line['inventory_id'] = inventory.id
product_line['theoretical_qty'] = product_line['product_qty']
if product_line['product_id']:
product = product_obj.browse(cr, uid, product_line['product_id'], context=context)
product_line['product_uom_id'] = product.uom_id.id
vals.append(product_line)
return vals
def _check_filter_product(self, cr, uid, ids, context=None):
for inventory in self.browse(cr, uid, ids, context=context):
if inventory.filter == 'none' and inventory.product_id and inventory.location_id and inventory.lot_id:
return True
if inventory.filter not in ('product', 'product_owner') and inventory.product_id:
return False
if inventory.filter != 'lot' and inventory.lot_id:
return False
if inventory.filter not in ('owner', 'product_owner') and inventory.partner_id:
return False
if inventory.filter != 'pack' and inventory.package_id:
return False
return True
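    # Editor's illustration (not original code): the check above means that e.g. an
    # inventory with filter='product' may carry a product_id but must leave lot_id,
    # partner_id and package_id empty, while filter='product_owner' additionally
    # allows partner_id.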
def onchange_filter(self, cr, uid, ids, filter, context=None):
to_clean = { 'value': {} }
if filter not in ('product', 'product_owner'):
to_clean['value']['product_id'] = False
if filter != 'lot':
to_clean['value']['lot_id'] = False
if filter not in ('owner', 'product_owner'):
to_clean['value']['partner_id'] = False
if filter != 'pack':
to_clean['value']['package_id'] = False
return to_clean
_constraints = [
(_check_filter_product, 'The selected inventory options are not coherent.',
['filter', 'product_id', 'lot_id', 'partner_id', 'package_id']),
]
class stock_inventory_line(osv.osv):
_name = "stock.inventory.line"
_description = "Inventory Line"
_order = "inventory_id, location_name, product_code, product_name, prodlot_name"
def _get_product_name_change(self, cr, uid, ids, context=None):
return self.pool.get('stock.inventory.line').search(cr, uid, [('product_id', 'in', ids)], context=context)
def _get_location_change(self, cr, uid, ids, context=None):
return self.pool.get('stock.inventory.line').search(cr, uid, [('location_id', 'in', ids)], context=context)
def _get_prodlot_change(self, cr, uid, ids, context=None):
return self.pool.get('stock.inventory.line').search(cr, uid, [('prod_lot_id', 'in', ids)], context=context)
def _get_theoretical_qty(self, cr, uid, ids, name, args, context=None):
res = {}
quant_obj = self.pool["stock.quant"]
uom_obj = self.pool["product.uom"]
for line in self.browse(cr, uid, ids, context=context):
quant_ids = self._get_quants(cr, uid, line, context=context)
quants = quant_obj.browse(cr, uid, quant_ids, context=context)
tot_qty = sum([x.qty for x in quants])
if line.product_uom_id and line.product_id.uom_id.id != line.product_uom_id.id:
tot_qty = uom_obj._compute_qty_obj(cr, uid, line.product_id.uom_id, tot_qty, line.product_uom_id, context=context)
res[line.id] = tot_qty
return res
_columns = {
'inventory_id': fields.many2one('stock.inventory', 'Inventory', ondelete='cascade', select=True),
'location_id': fields.many2one('stock.location', 'Location', required=True, select=True),
'product_id': fields.many2one('product.product', 'Product', required=True, select=True),
'package_id': fields.many2one('stock.quant.package', 'Pack', select=True),
'product_uom_id': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'product_qty': fields.float('Checked Quantity', digits_compute=dp.get_precision('Product Unit of Measure')),
'company_id': fields.related('inventory_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, select=True, readonly=True),
'prod_lot_id': fields.many2one('stock.production.lot', 'Serial Number', domain="[('product_id','=',product_id)]"),
'state': fields.related('inventory_id', 'state', type='char', string='Status', readonly=True),
'theoretical_qty': fields.function(_get_theoretical_qty, type='float', digits_compute=dp.get_precision('Product Unit of Measure'),
store={'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['location_id', 'product_id', 'package_id', 'product_uom_id', 'company_id', 'prod_lot_id', 'partner_id'], 20),},
readonly=True, string="Theoretical Quantity"),
'partner_id': fields.many2one('res.partner', 'Owner'),
'product_name': fields.related('product_id', 'name', type='char', string='Product Name', store={
'product.product': (_get_product_name_change, ['name', 'default_code'], 20),
'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['product_id'], 20),}),
'product_code': fields.related('product_id', 'default_code', type='char', string='Product Code', store={
'product.product': (_get_product_name_change, ['name', 'default_code'], 20),
'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['product_id'], 20),}),
'location_name': fields.related('location_id', 'complete_name', type='char', string='Location Name', store={
'stock.location': (_get_location_change, ['name', 'location_id', 'active'], 20),
'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['location_id'], 20),}),
'prodlot_name': fields.related('prod_lot_id', 'name', type='char', string='Serial Number Name', store={
'stock.production.lot': (_get_prodlot_change, ['name'], 20),
'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['prod_lot_id'], 20),}),
}
_defaults = {
'product_qty': 0,
'product_uom_id': lambda self, cr, uid, ctx=None: self.pool['ir.model.data'].get_object_reference(cr, uid, 'product', 'product_uom_unit')[1]
}
def create(self, cr, uid, values, context=None):
product_obj = self.pool.get('product.product')
dom = [('product_id', '=', values.get('product_id')), ('inventory_id.state', '=', 'confirm'),
('location_id', '=', values.get('location_id')), ('partner_id', '=', values.get('partner_id')),
('package_id', '=', values.get('package_id')), ('prod_lot_id', '=', values.get('prod_lot_id'))]
res = self.search(cr, uid, dom, context=context)
if res:
location = self.pool['stock.location'].browse(cr, uid, values.get('location_id'), context=context)
product = product_obj.browse(cr, uid, values.get('product_id'), context=context)
raise UserError(_("You cannot have two inventory adjustements in state 'in Progess' with the same product(%s), same location(%s), same package, same owner and same lot. Please first validate the first inventory adjustement with this product before creating another one.") % (product.name, location.name))
if 'product_id' in values and not 'product_uom_id' in values:
values['product_uom_id'] = product_obj.browse(cr, uid, values.get('product_id'), context=context).uom_id.id
return super(stock_inventory_line, self).create(cr, uid, values, context=context)
def _get_quants(self, cr, uid, line, context=None):
quant_obj = self.pool["stock.quant"]
dom = [('company_id', '=', line.company_id.id), ('location_id', '=', line.location_id.id), ('lot_id', '=', line.prod_lot_id.id),
('product_id','=', line.product_id.id), ('owner_id', '=', line.partner_id.id), ('package_id', '=', line.package_id.id)]
quants = quant_obj.search(cr, uid, dom, context=context)
return quants
def onchange_createline(self, cr, uid, ids, location_id=False, product_id=False, uom_id=False, package_id=False, prod_lot_id=False, partner_id=False, company_id=False, context=None):
quant_obj = self.pool["stock.quant"]
uom_obj = self.pool["product.uom"]
res = {'value': {}}
        # If no UoM is set yet, use the default UoM of the product
if product_id:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
uom = self.pool['product.uom'].browse(cr, uid, uom_id, context=context)
if product.uom_id.category_id.id != uom.category_id.id:
res['value']['product_uom_id'] = product.uom_id.id
res['domain'] = {'product_uom_id': [('category_id','=',product.uom_id.category_id.id)]}
uom_id = product.uom_id.id
# Calculate theoretical quantity by searching the quants as in quants_get
if product_id and location_id:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
if not company_id:
company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
dom = [('company_id', '=', company_id), ('location_id', '=', location_id), ('lot_id', '=', prod_lot_id),
('product_id','=', product_id), ('owner_id', '=', partner_id), ('package_id', '=', package_id)]
quants = quant_obj.search(cr, uid, dom, context=context)
th_qty = sum([x.qty for x in quant_obj.browse(cr, uid, quants, context=context)])
if product_id and uom_id and product.uom_id.id != uom_id:
th_qty = uom_obj._compute_qty(cr, uid, product.uom_id.id, th_qty, uom_id)
res['value']['theoretical_qty'] = th_qty
res['value']['product_qty'] = th_qty
return res
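    # Editor's note (illustrative assumption, not original code): onchange_createline()
    # above typically returns something like
    #   {'value': {'theoretical_qty': 5.0, 'product_qty': 5.0},
    #    'domain': {'product_uom_id': [('category_id', '=', 1)]}}
    # so the client pre-fills the counted quantity with the theoretical one.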
def _resolve_inventory_line(self, cr, uid, inventory_line, context=None):
stock_move_obj = self.pool.get('stock.move')
quant_obj = self.pool.get('stock.quant')
diff = inventory_line.theoretical_qty - inventory_line.product_qty
if not diff:
return
        #each line where the difference between the theoretical and checked quantities is not 0 needs a stock move to be created
vals = {
'name': _('INV:') + (inventory_line.inventory_id.name or ''),
'product_id': inventory_line.product_id.id,
'product_uom': inventory_line.product_uom_id.id,
'date': inventory_line.inventory_id.date,
'company_id': inventory_line.inventory_id.company_id.id,
'inventory_id': inventory_line.inventory_id.id,
'state': 'confirmed',
'restrict_lot_id': inventory_line.prod_lot_id.id,
'restrict_partner_id': inventory_line.partner_id.id,
}
inventory_location_id = inventory_line.product_id.property_stock_inventory.id
if diff < 0:
#found more than expected
vals['location_id'] = inventory_location_id
vals['location_dest_id'] = inventory_line.location_id.id
vals['product_uom_qty'] = -diff
else:
#found less than expected
vals['location_id'] = inventory_line.location_id.id
vals['location_dest_id'] = inventory_location_id
vals['product_uom_qty'] = diff
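        # Worked example (editor's illustration only): with theoretical_qty = 10 and a
        # counted product_qty = 12, diff = -2, so a move of 2 units is created from the
        # inventory loss location into the counted location; with a count of 7, diff = 3
        # and 3 units move out of the counted location to inventory loss.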
move_id = stock_move_obj.create(cr, uid, vals, context=context)
move = stock_move_obj.browse(cr, uid, move_id, context=context)
if diff > 0:
domain = [('qty', '>', 0.0), ('package_id', '=', inventory_line.package_id.id), ('lot_id', '=', inventory_line.prod_lot_id.id), ('location_id', '=', inventory_line.location_id.id)]
preferred_domain_list = [[('reservation_id', '=', False)], [('reservation_id.inventory_id', '!=', inventory_line.inventory_id.id)]]
quants = quant_obj.quants_get_preferred_domain(cr, uid, move.product_qty, move, domain=domain, preferred_domain_list=preferred_domain_list)
quant_obj.quants_reserve(cr, uid, quants, move, context=context)
elif inventory_line.package_id:
stock_move_obj.action_done(cr, uid, move_id, context=context)
quants = [x.id for x in move.quant_ids]
quant_obj.write(cr, SUPERUSER_ID, quants, {'package_id': inventory_line.package_id.id}, context=context)
res = quant_obj.search(cr, uid, [('qty', '<', 0.0), ('product_id', '=', move.product_id.id),
('location_id', '=', move.location_dest_id.id), ('package_id', '!=', False)], limit=1, context=context)
if res:
for quant in move.quant_ids:
                    if quant.location_id.id == move.location_dest_id.id: #To avoid taking a quant that was already reconciled
quant_obj._quant_reconcile_negative(cr, uid, quant, move, context=context)
return move_id
# Should be left out in next version
def restrict_change(self, cr, uid, ids, theoretical_qty, context=None):
return {}
# Should be left out in next version
def on_change_product_id(self, cr, uid, ids, product, uom, theoretical_qty, context=None):
""" Changes UoM
@param location_id: Location id
@param product: Changed product_id
@param uom: UoM product
@return: Dictionary of changed values
"""
if not product:
return {'value': {'product_uom_id': False}}
obj_product = self.pool.get('product.product').browse(cr, uid, product, context=context)
return {'value': {'product_uom_id': uom or obj_product.uom_id.id}}
#----------------------------------------------------------
# Stock Warehouse
#----------------------------------------------------------
class stock_warehouse(osv.osv):
_name = "stock.warehouse"
_description = "Warehouse"
_columns = {
'name': fields.char('Warehouse Name', required=True, select=True),
'company_id': fields.many2one('res.company', 'Company', required=True, readonly=True, select=True),
'partner_id': fields.many2one('res.partner', 'Address'),
'view_location_id': fields.many2one('stock.location', 'View Location', required=True, domain=[('usage', '=', 'view')]),
'lot_stock_id': fields.many2one('stock.location', 'Location Stock', domain=[('usage', '=', 'internal')], required=True),
'code': fields.char('Short Name', size=5, required=True, help="Short name used to identify your warehouse"),
        'route_ids': fields.many2many('stock.location.route', 'stock_route_warehouse', 'warehouse_id', 'route_id', 'Routes', domain="[('warehouse_selectable', '=', True)]", help='Default routes through the warehouse'),
'reception_steps': fields.selection([
('one_step', 'Receive goods directly in stock (1 step)'),
('two_steps', 'Unload in input location then go to stock (2 steps)'),
('three_steps', 'Unload in input location, go through a quality control before being admitted in stock (3 steps)')], 'Incoming Shipments',
help="Default incoming route to follow", required=True),
'delivery_steps': fields.selection([
('ship_only', 'Ship directly from stock (Ship only)'),
('pick_ship', 'Bring goods to output location before shipping (Pick + Ship)'),
            ('pick_pack_ship', 'Make packages into a dedicated location, then bring them to the output location for shipping (Pick + Pack + Ship)')], 'Outgoing Shipments',
help="Default outgoing route to follow", required=True),
'wh_input_stock_loc_id': fields.many2one('stock.location', 'Input Location'),
'wh_qc_stock_loc_id': fields.many2one('stock.location', 'Quality Control Location'),
'wh_output_stock_loc_id': fields.many2one('stock.location', 'Output Location'),
'wh_pack_stock_loc_id': fields.many2one('stock.location', 'Packing Location'),
'mto_pull_id': fields.many2one('procurement.rule', 'MTO rule'),
'pick_type_id': fields.many2one('stock.picking.type', 'Pick Type'),
'pack_type_id': fields.many2one('stock.picking.type', 'Pack Type'),
'out_type_id': fields.many2one('stock.picking.type', 'Out Type'),
'in_type_id': fields.many2one('stock.picking.type', 'In Type'),
'int_type_id': fields.many2one('stock.picking.type', 'Internal Type'),
'crossdock_route_id': fields.many2one('stock.location.route', 'Crossdock Route'),
'reception_route_id': fields.many2one('stock.location.route', 'Receipt Route'),
'delivery_route_id': fields.many2one('stock.location.route', 'Delivery Route'),
'resupply_from_wh': fields.boolean('Resupply From Other Warehouses', help='Unused field'),
'resupply_wh_ids': fields.many2many('stock.warehouse', 'stock_wh_resupply_table', 'supplied_wh_id', 'supplier_wh_id', 'Resupply Warehouses'),
'resupply_route_ids': fields.one2many('stock.location.route', 'supplied_wh_id', 'Resupply Routes',
help="Routes will be created for these resupply warehouses and you can select them on products and product categories"),
'default_resupply_wh_id': fields.many2one('stock.warehouse', 'Default Resupply Warehouse', help="Goods will always be resupplied from this warehouse"),
}
def onchange_filter_default_resupply_wh_id(self, cr, uid, ids, default_resupply_wh_id, resupply_wh_ids, context=None):
resupply_wh_ids = set([x['id'] for x in (self.resolve_2many_commands(cr, uid, 'resupply_wh_ids', resupply_wh_ids, ['id']))])
if default_resupply_wh_id: #If we are removing the default resupply, we don't have default_resupply_wh_id
resupply_wh_ids.add(default_resupply_wh_id)
resupply_wh_ids = list(resupply_wh_ids)
return {'value': {'resupply_wh_ids': resupply_wh_ids}}
def _get_external_transit_location(self, cr, uid, warehouse, context=None):
''' returns browse record of inter company transit location, if found'''
data_obj = self.pool.get('ir.model.data')
location_obj = self.pool.get('stock.location')
try:
inter_wh_loc = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_inter_wh')[1]
except:
return False
return location_obj.browse(cr, uid, inter_wh_loc, context=context)
def _get_inter_wh_route(self, cr, uid, warehouse, wh, context=None):
return {
'name': _('%s: Supply Product from %s') % (warehouse.name, wh.name),
'warehouse_selectable': False,
'product_selectable': True,
'product_categ_selectable': True,
'supplied_wh_id': warehouse.id,
'supplier_wh_id': wh.id,
}
def _create_resupply_routes(self, cr, uid, warehouse, supplier_warehouses, default_resupply_wh, context=None):
route_obj = self.pool.get('stock.location.route')
pull_obj = self.pool.get('procurement.rule')
#create route selectable on the product to resupply the warehouse from another one
external_transit_location = self._get_external_transit_location(cr, uid, warehouse, context=context)
internal_transit_location = warehouse.company_id.internal_transit_location_id
input_loc = warehouse.wh_input_stock_loc_id
if warehouse.reception_steps == 'one_step':
input_loc = warehouse.lot_stock_id
for wh in supplier_warehouses:
transit_location = wh.company_id.id == warehouse.company_id.id and internal_transit_location or external_transit_location
if transit_location:
output_loc = wh.wh_output_stock_loc_id
if wh.delivery_steps == 'ship_only':
output_loc = wh.lot_stock_id
# Create extra MTO rule (only for 'ship only' because in the other cases MTO rules already exists)
mto_pull_vals = self._get_mto_pull_rule(cr, uid, wh, [(output_loc, transit_location, wh.out_type_id.id)], context=context)[0]
pull_obj.create(cr, uid, mto_pull_vals, context=context)
inter_wh_route_vals = self._get_inter_wh_route(cr, uid, warehouse, wh, context=context)
inter_wh_route_id = route_obj.create(cr, uid, vals=inter_wh_route_vals, context=context)
values = [(output_loc, transit_location, wh.out_type_id.id, wh), (transit_location, input_loc, warehouse.in_type_id.id, warehouse)]
pull_rules_list = self._get_supply_pull_rules(cr, uid, wh.id, values, inter_wh_route_id, context=context)
for pull_rule in pull_rules_list:
pull_obj.create(cr, uid, vals=pull_rule, context=context)
#if the warehouse is also set as default resupply method, assign this route automatically to the warehouse
if default_resupply_wh and default_resupply_wh.id == wh.id:
self.write(cr, uid, [warehouse.id, wh.id], {'route_ids': [(4, inter_wh_route_id)]}, context=context)
_defaults = {
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.inventory', context=c),
'reception_steps': 'one_step',
'delivery_steps': 'ship_only',
}
_sql_constraints = [
('warehouse_name_uniq', 'unique(name, company_id)', 'The name of the warehouse must be unique per company!'),
('warehouse_code_uniq', 'unique(code, company_id)', 'The code of the warehouse must be unique per company!'),
]
def _get_partner_locations(self, cr, uid, ids, context=None):
''' returns a tuple made of the browse record of customer location and the browse record of supplier location'''
data_obj = self.pool.get('ir.model.data')
location_obj = self.pool.get('stock.location')
try:
customer_loc = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_customers')[1]
supplier_loc = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_suppliers')[1]
except:
customer_loc = location_obj.search(cr, uid, [('usage', '=', 'customer')], context=context)
customer_loc = customer_loc and customer_loc[0] or False
supplier_loc = location_obj.search(cr, uid, [('usage', '=', 'supplier')], context=context)
supplier_loc = supplier_loc and supplier_loc[0] or False
if not (customer_loc and supplier_loc):
raise UserError(_('Can\'t find any customer or supplier location.'))
return location_obj.browse(cr, uid, [customer_loc, supplier_loc], context=context)
def _location_used(self, cr, uid, location_id, warehouse, context=None):
pull_obj = self.pool['procurement.rule']
push_obj = self.pool['stock.location.path']
domain = ['&', ('route_id', 'not in', [x.id for x in warehouse.route_ids]),
'|', ('location_src_id', '=', location_id), # noqa
('location_id', '=', location_id)
]
pulls = pull_obj.search_count(cr, uid, domain, context=context)
domain = ['&', ('route_id', 'not in', [x.id for x in warehouse.route_ids]),
'|', ('location_from_id', '=', location_id), # noqa
('location_dest_id', '=', location_id)
]
pushs = push_obj.search_count(cr, uid, domain, context=context)
if pulls or pushs:
return True
return False
def switch_location(self, cr, uid, ids, warehouse, new_reception_step=False, new_delivery_step=False, context=None):
location_obj = self.pool.get('stock.location')
new_reception_step = new_reception_step or warehouse.reception_steps
new_delivery_step = new_delivery_step or warehouse.delivery_steps
if warehouse.reception_steps != new_reception_step:
if not self._location_used(cr, uid, warehouse.wh_input_stock_loc_id.id, warehouse, context=context):
location_obj.write(cr, uid, [warehouse.wh_input_stock_loc_id.id, warehouse.wh_qc_stock_loc_id.id], {'active': False}, context=context)
if new_reception_step != 'one_step':
location_obj.write(cr, uid, warehouse.wh_input_stock_loc_id.id, {'active': True}, context=context)
if new_reception_step == 'three_steps':
location_obj.write(cr, uid, warehouse.wh_qc_stock_loc_id.id, {'active': True}, context=context)
if warehouse.delivery_steps != new_delivery_step:
if not self._location_used(cr, uid, warehouse.wh_output_stock_loc_id.id, warehouse, context=context):
location_obj.write(cr, uid, [warehouse.wh_output_stock_loc_id.id], {'active': False}, context=context)
if not self._location_used(cr, uid, warehouse.wh_pack_stock_loc_id.id, warehouse, context=context):
location_obj.write(cr, uid, [warehouse.wh_pack_stock_loc_id.id], {'active': False}, context=context)
if new_delivery_step != 'ship_only':
location_obj.write(cr, uid, warehouse.wh_output_stock_loc_id.id, {'active': True}, context=context)
if new_delivery_step == 'pick_pack_ship':
location_obj.write(cr, uid, warehouse.wh_pack_stock_loc_id.id, {'active': True}, context=context)
return True
def _get_reception_delivery_route(self, cr, uid, warehouse, route_name, context=None):
return {
'name': self._format_routename(cr, uid, warehouse, route_name, context=context),
'product_categ_selectable': True,
'product_selectable': False,
'sequence': 10,
}
def _get_supply_pull_rules(self, cr, uid, supply_warehouse, values, new_route_id, context=None):
pull_rules_list = []
for from_loc, dest_loc, pick_type_id, warehouse in values:
pull_rules_list.append({
'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context),
'location_src_id': from_loc.id,
'location_id': dest_loc.id,
'route_id': new_route_id,
'action': 'move',
'picking_type_id': pick_type_id,
                'procure_method': warehouse.lot_stock_id.id != from_loc.id and 'make_to_order' or 'make_to_stock',  # first part of the resupply route is MTS
'warehouse_id': warehouse.id,
'propagate_warehouse_id': supply_warehouse,
})
return pull_rules_list
def _get_push_pull_rules(self, cr, uid, warehouse, active, values, new_route_id, context=None):
first_rule = True
push_rules_list = []
pull_rules_list = []
for from_loc, dest_loc, pick_type_id in values:
push_rules_list.append({
'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context),
'location_from_id': from_loc.id,
'location_dest_id': dest_loc.id,
'route_id': new_route_id,
'auto': 'manual',
'picking_type_id': pick_type_id,
'active': active,
'warehouse_id': warehouse.id,
})
pull_rules_list.append({
'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context),
'location_src_id': from_loc.id,
'location_id': dest_loc.id,
'route_id': new_route_id,
'action': 'move',
'picking_type_id': pick_type_id,
'procure_method': first_rule is True and 'make_to_stock' or 'make_to_order',
'active': active,
'warehouse_id': warehouse.id,
})
first_rule = False
return push_rules_list, pull_rules_list
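    # Editor's note (illustration, not original code): because 'first_rule' starts as
    # True, only the first (from_loc, dest_loc) pair of a multi-step route gets
    # 'make_to_stock'; every later hop is 'make_to_order'. For a 'pick_ship' delivery,
    # the Stock -> Output pull rule is MTS and Output -> Customers is MTO.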
def _get_mto_route(self, cr, uid, context=None):
route_obj = self.pool.get('stock.location.route')
data_obj = self.pool.get('ir.model.data')
try:
mto_route_id = data_obj.get_object_reference(cr, uid, 'stock', 'route_warehouse0_mto')[1]
except:
mto_route_id = route_obj.search(cr, uid, [('name', 'like', _('Make To Order'))], context=context)
mto_route_id = mto_route_id and mto_route_id[0] or False
if not mto_route_id:
raise UserError(_('Can\'t find any generic Make To Order route.'))
return mto_route_id
def _check_remove_mto_resupply_rules(self, cr, uid, warehouse, context=None):
""" Checks that the moves from the different """
pull_obj = self.pool.get('procurement.rule')
mto_route_id = self._get_mto_route(cr, uid, context=context)
rules = pull_obj.search(cr, uid, ['&', ('location_src_id', '=', warehouse.lot_stock_id.id), ('location_id.usage', '=', 'transit')], context=context)
pull_obj.unlink(cr, uid, rules, context=context)
def _get_mto_pull_rule(self, cr, uid, warehouse, values, context=None):
mto_route_id = self._get_mto_route(cr, uid, context=context)
res = []
for value in values:
from_loc, dest_loc, pick_type_id = value
res += [{
'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context) + _(' MTO'),
'location_src_id': from_loc.id,
'location_id': dest_loc.id,
'route_id': mto_route_id,
'action': 'move',
'picking_type_id': pick_type_id,
'procure_method': 'make_to_order',
'active': True,
'warehouse_id': warehouse.id,
}]
return res
def _get_crossdock_route(self, cr, uid, warehouse, route_name, context=None):
return {
'name': self._format_routename(cr, uid, warehouse, route_name, context=context),
'warehouse_selectable': False,
'product_selectable': True,
'product_categ_selectable': True,
'active': warehouse.delivery_steps != 'ship_only' and warehouse.reception_steps != 'one_step',
'sequence': 20,
}
def create_routes(self, cr, uid, ids, warehouse, context=None):
wh_route_ids = []
route_obj = self.pool.get('stock.location.route')
pull_obj = self.pool.get('procurement.rule')
push_obj = self.pool.get('stock.location.path')
routes_dict = self.get_routes_dict(cr, uid, ids, warehouse, context=context)
#create reception route and rules
route_name, values = routes_dict[warehouse.reception_steps]
route_vals = self._get_reception_delivery_route(cr, uid, warehouse, route_name, context=context)
reception_route_id = route_obj.create(cr, uid, route_vals, context=context)
wh_route_ids.append((4, reception_route_id))
push_rules_list, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, reception_route_id, context=context)
#create the push/procurement rules
for push_rule in push_rules_list:
push_obj.create(cr, uid, vals=push_rule, context=context)
for pull_rule in pull_rules_list:
#all procurement rules in reception route are mto, because we don't want to wait for the scheduler to trigger an orderpoint on input location
pull_rule['procure_method'] = 'make_to_order'
pull_obj.create(cr, uid, vals=pull_rule, context=context)
#create MTS route and procurement rules for delivery and a specific route MTO to be set on the product
route_name, values = routes_dict[warehouse.delivery_steps]
route_vals = self._get_reception_delivery_route(cr, uid, warehouse, route_name, context=context)
#create the route and its procurement rules
delivery_route_id = route_obj.create(cr, uid, route_vals, context=context)
wh_route_ids.append((4, delivery_route_id))
dummy, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, delivery_route_id, context=context)
for pull_rule in pull_rules_list:
pull_obj.create(cr, uid, vals=pull_rule, context=context)
#create MTO procurement rule and link it to the generic MTO route
mto_pull_vals = self._get_mto_pull_rule(cr, uid, warehouse, values, context=context)[0]
mto_pull_id = pull_obj.create(cr, uid, mto_pull_vals, context=context)
#create a route for cross dock operations, that can be set on products and product categories
route_name, values = routes_dict['crossdock']
crossdock_route_vals = self._get_crossdock_route(cr, uid, warehouse, route_name, context=context)
crossdock_route_id = route_obj.create(cr, uid, vals=crossdock_route_vals, context=context)
wh_route_ids.append((4, crossdock_route_id))
dummy, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, warehouse.delivery_steps != 'ship_only' and warehouse.reception_steps != 'one_step', values, crossdock_route_id, context=context)
for pull_rule in pull_rules_list:
# Fixed cross-dock is logically mto
pull_rule['procure_method'] = 'make_to_order'
pull_obj.create(cr, uid, vals=pull_rule, context=context)
#create route selectable on the product to resupply the warehouse from another one
self._create_resupply_routes(cr, uid, warehouse, warehouse.resupply_wh_ids, warehouse.default_resupply_wh_id, context=context)
#return routes and mto procurement rule to store on the warehouse
return {
'route_ids': wh_route_ids,
'mto_pull_id': mto_pull_id,
'reception_route_id': reception_route_id,
'delivery_route_id': delivery_route_id,
'crossdock_route_id': crossdock_route_id,
}
def change_route(self, cr, uid, ids, warehouse, new_reception_step=False, new_delivery_step=False, context=None):
picking_type_obj = self.pool.get('stock.picking.type')
pull_obj = self.pool.get('procurement.rule')
push_obj = self.pool.get('stock.location.path')
route_obj = self.pool.get('stock.location.route')
new_reception_step = new_reception_step or warehouse.reception_steps
new_delivery_step = new_delivery_step or warehouse.delivery_steps
#change the default source and destination location and (de)activate picking types
input_loc = warehouse.wh_input_stock_loc_id
if new_reception_step == 'one_step':
input_loc = warehouse.lot_stock_id
output_loc = warehouse.wh_output_stock_loc_id
if new_delivery_step == 'ship_only':
output_loc = warehouse.lot_stock_id
picking_type_obj.write(cr, uid, warehouse.in_type_id.id, {'default_location_dest_id': input_loc.id}, context=context)
picking_type_obj.write(cr, uid, warehouse.out_type_id.id, {'default_location_src_id': output_loc.id}, context=context)
picking_type_obj.write(cr, uid, warehouse.pick_type_id.id, {
'active': new_delivery_step != 'ship_only',
'default_location_dest_id': output_loc.id if new_delivery_step == 'pick_ship' else warehouse.wh_pack_stock_loc_id.id,
}, context=context)
picking_type_obj.write(cr, uid, warehouse.pack_type_id.id, {'active': new_delivery_step == 'pick_pack_ship'}, context=context)
routes_dict = self.get_routes_dict(cr, uid, ids, warehouse, context=context)
#update delivery route and rules: unlink the existing rules of the warehouse delivery route and recreate it
pull_obj.unlink(cr, uid, [pu.id for pu in warehouse.delivery_route_id.pull_ids], context=context)
route_name, values = routes_dict[new_delivery_step]
route_obj.write(cr, uid, warehouse.delivery_route_id.id, {'name': self._format_routename(cr, uid, warehouse, route_name, context=context)}, context=context)
dummy, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, warehouse.delivery_route_id.id, context=context)
#create the procurement rules
for pull_rule in pull_rules_list:
pull_obj.create(cr, uid, vals=pull_rule, context=context)
#update receipt route and rules: unlink the existing rules of the warehouse receipt route and recreate it
pull_obj.unlink(cr, uid, [pu.id for pu in warehouse.reception_route_id.pull_ids], context=context)
push_obj.unlink(cr, uid, [pu.id for pu in warehouse.reception_route_id.push_ids], context=context)
route_name, values = routes_dict[new_reception_step]
route_obj.write(cr, uid, warehouse.reception_route_id.id, {'name': self._format_routename(cr, uid, warehouse, route_name, context=context)}, context=context)
push_rules_list, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, warehouse.reception_route_id.id, context=context)
#create the push/procurement rules
for push_rule in push_rules_list:
push_obj.create(cr, uid, vals=push_rule, context=context)
for pull_rule in pull_rules_list:
#all procurement rules in receipt route are mto, because we don't want to wait for the scheduler to trigger an orderpoint on input location
pull_rule['procure_method'] = 'make_to_order'
pull_obj.create(cr, uid, vals=pull_rule, context=context)
route_obj.write(cr, uid, warehouse.crossdock_route_id.id, {'active': new_reception_step != 'one_step' and new_delivery_step != 'ship_only'}, context=context)
#change MTO rule
dummy, values = routes_dict[new_delivery_step]
mto_pull_vals = self._get_mto_pull_rule(cr, uid, warehouse, values, context=context)[0]
pull_obj.write(cr, uid, warehouse.mto_pull_id.id, mto_pull_vals, context=context)
return True
def create_sequences_and_picking_types(self, cr, uid, warehouse, context=None):
seq_obj = self.pool.get('ir.sequence')
picking_type_obj = self.pool.get('stock.picking.type')
#create new sequences
in_seq_id = seq_obj.create(cr, SUPERUSER_ID, {'name': warehouse.name + _(' Sequence in'), 'prefix': warehouse.code + '/IN/', 'padding': 5}, context=context)
out_seq_id = seq_obj.create(cr, SUPERUSER_ID, {'name': warehouse.name + _(' Sequence out'), 'prefix': warehouse.code + '/OUT/', 'padding': 5}, context=context)
pack_seq_id = seq_obj.create(cr, SUPERUSER_ID, {'name': warehouse.name + _(' Sequence packing'), 'prefix': warehouse.code + '/PACK/', 'padding': 5}, context=context)
pick_seq_id = seq_obj.create(cr, SUPERUSER_ID, {'name': warehouse.name + _(' Sequence picking'), 'prefix': warehouse.code + '/PICK/', 'padding': 5}, context=context)
int_seq_id = seq_obj.create(cr, SUPERUSER_ID, {'name': warehouse.name + _(' Sequence internal'), 'prefix': warehouse.code + '/INT/', 'padding': 5}, context=context)
wh_stock_loc = warehouse.lot_stock_id
wh_input_stock_loc = warehouse.wh_input_stock_loc_id
wh_output_stock_loc = warehouse.wh_output_stock_loc_id
wh_pack_stock_loc = warehouse.wh_pack_stock_loc_id
#create in, out, internal picking types for warehouse
input_loc = wh_input_stock_loc
if warehouse.reception_steps == 'one_step':
input_loc = wh_stock_loc
output_loc = wh_output_stock_loc
if warehouse.delivery_steps == 'ship_only':
output_loc = wh_stock_loc
#choose the next available color for the picking types of this warehouse
color = 0
available_colors = [0, 3, 4, 5, 6, 7, 8, 1, 2] # put white color first
all_used_colors = self.pool.get('stock.picking.type').search_read(cr, uid, [('warehouse_id', '!=', False), ('color', '!=', False)], ['color'], order='color')
#don't use sets to preserve the list order
for x in all_used_colors:
if x['color'] in available_colors:
available_colors.remove(x['color'])
if available_colors:
color = available_colors[0]
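        # Editor's illustration (assumption): if other warehouses already use colors 3
        # and 4, those values are removed from the list above and this warehouse falls
        # back to color 0 (white), the first remaining entry.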
        #order the picking types with a sequence allowing to have the following order for each warehouse: reception, internal, pick, pack, ship.
max_sequence = self.pool.get('stock.picking.type').search_read(cr, uid, [], ['sequence'], order='sequence desc')
max_sequence = max_sequence and max_sequence[0]['sequence'] or 0
internal_active_false = (warehouse.reception_steps == 'one_step') and (warehouse.delivery_steps == 'ship_only')
internal_active_false = internal_active_false and not self.user_has_groups(cr, uid, 'stock.group_locations')
in_type_id = picking_type_obj.create(cr, uid, vals={
'name': _('Receipts'),
'warehouse_id': warehouse.id,
'code': 'incoming',
'use_create_lots': True,
'use_existing_lots': False,
'sequence_id': in_seq_id,
'default_location_src_id': False,
'default_location_dest_id': input_loc.id,
'sequence': max_sequence + 1,
'color': color}, context=context)
out_type_id = picking_type_obj.create(cr, uid, vals={
'name': _('Delivery Orders'),
'warehouse_id': warehouse.id,
'code': 'outgoing',
'use_create_lots': False,
'use_existing_lots': True,
'sequence_id': out_seq_id,
'return_picking_type_id': in_type_id,
'default_location_src_id': output_loc.id,
'default_location_dest_id': False,
'sequence': max_sequence + 4,
'color': color}, context=context)
picking_type_obj.write(cr, uid, [in_type_id], {'return_picking_type_id': out_type_id}, context=context)
int_type_id = picking_type_obj.create(cr, uid, vals={
'name': _('Internal Transfers'),
'warehouse_id': warehouse.id,
'code': 'internal',
'use_create_lots': False,
'use_existing_lots': True,
'sequence_id': int_seq_id,
'default_location_src_id': wh_stock_loc.id,
'default_location_dest_id': wh_stock_loc.id,
'active': not internal_active_false,
'sequence': max_sequence + 2,
'color': color}, context=context)
pack_type_id = picking_type_obj.create(cr, uid, vals={
'name': _('Pack'),
'warehouse_id': warehouse.id,
'code': 'internal',
'use_create_lots': False,
'use_existing_lots': True,
'sequence_id': pack_seq_id,
'default_location_src_id': wh_pack_stock_loc.id,
'default_location_dest_id': output_loc.id,
'active': warehouse.delivery_steps == 'pick_pack_ship',
'sequence': max_sequence + 3,
'color': color}, context=context)
pick_type_id = picking_type_obj.create(cr, uid, vals={
'name': _('Pick'),
'warehouse_id': warehouse.id,
'code': 'internal',
'use_create_lots': False,
'use_existing_lots': True,
'sequence_id': pick_seq_id,
'default_location_src_id': wh_stock_loc.id,
'default_location_dest_id': output_loc.id if warehouse.delivery_steps == 'pick_ship' else wh_pack_stock_loc.id,
'active': warehouse.delivery_steps != 'ship_only',
'sequence': max_sequence + 2,
'color': color}, context=context)
#write picking types on WH
vals = {
'in_type_id': in_type_id,
'out_type_id': out_type_id,
'pack_type_id': pack_type_id,
'pick_type_id': pick_type_id,
'int_type_id': int_type_id,
}
super(stock_warehouse, self).write(cr, uid, warehouse.id, vals=vals, context=context)
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if vals is None:
vals = {}
data_obj = self.pool.get('ir.model.data')
seq_obj = self.pool.get('ir.sequence')
picking_type_obj = self.pool.get('stock.picking.type')
location_obj = self.pool.get('stock.location')
#create view location for warehouse
loc_vals = {
'name': _(vals.get('code')),
'usage': 'view',
'location_id': data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_locations')[1],
}
if vals.get('company_id'):
loc_vals['company_id'] = vals.get('company_id')
wh_loc_id = location_obj.create(cr, uid, loc_vals, context=context)
vals['view_location_id'] = wh_loc_id
#create all location
def_values = self.default_get(cr, uid, ['reception_steps', 'delivery_steps'])
reception_steps = vals.get('reception_steps', def_values['reception_steps'])
delivery_steps = vals.get('delivery_steps', def_values['delivery_steps'])
context_with_inactive = context.copy()
context_with_inactive['active_test'] = False
sub_locations = [
{'name': _('Stock'), 'active': True, 'field': 'lot_stock_id'},
{'name': _('Input'), 'active': reception_steps != 'one_step', 'field': 'wh_input_stock_loc_id'},
{'name': _('Quality Control'), 'active': reception_steps == 'three_steps', 'field': 'wh_qc_stock_loc_id'},
{'name': _('Output'), 'active': delivery_steps != 'ship_only', 'field': 'wh_output_stock_loc_id'},
{'name': _('Packing Zone'), 'active': delivery_steps == 'pick_pack_ship', 'field': 'wh_pack_stock_loc_id'},
]
for values in sub_locations:
loc_vals = {
'name': values['name'],
'usage': 'internal',
'location_id': wh_loc_id,
'active': values['active'],
}
if vals.get('company_id'):
loc_vals['company_id'] = vals.get('company_id')
location_id = location_obj.create(cr, uid, loc_vals, context=context_with_inactive)
vals[values['field']] = location_id
#create WH
new_id = super(stock_warehouse, self).create(cr, uid, vals=vals, context=context)
warehouse = self.browse(cr, uid, new_id, context=context)
self.create_sequences_and_picking_types(cr, uid, warehouse, context=context)
#create routes and push/procurement rules
new_objects_dict = self.create_routes(cr, uid, new_id, warehouse, context=context)
self.write(cr, uid, warehouse.id, new_objects_dict, context=context)
# If partner assigned
if vals.get('partner_id'):
comp_obj = self.pool['res.company']
if vals.get('company_id'):
transit_loc = comp_obj.browse(cr, uid, vals.get('company_id'), context=context).internal_transit_location_id.id
else:
transit_loc = comp_obj.browse(cr, uid, comp_obj._company_default_get(cr, uid, 'stock.warehouse', context=context)).internal_transit_location_id.id
self.pool['res.partner'].write(cr, uid, [vals['partner_id']], {'property_stock_customer': transit_loc,
'property_stock_supplier': transit_loc}, context=context)
return new_id
def _format_rulename(self, cr, uid, obj, from_loc, dest_loc, context=None):
return obj.code + ': ' + from_loc.name + ' -> ' + dest_loc.name
def _format_routename(self, cr, uid, obj, name, context=None):
return obj.name + ': ' + name
def get_routes_dict(self, cr, uid, ids, warehouse, context=None):
#fetch customer and supplier locations, for references
customer_loc, supplier_loc = self._get_partner_locations(cr, uid, ids, context=context)
return {
'one_step': (_('Receipt in 1 step'), []),
'two_steps': (_('Receipt in 2 steps'), [(warehouse.wh_input_stock_loc_id, warehouse.lot_stock_id, warehouse.int_type_id.id)]),
'three_steps': (_('Receipt in 3 steps'), [(warehouse.wh_input_stock_loc_id, warehouse.wh_qc_stock_loc_id, warehouse.int_type_id.id), (warehouse.wh_qc_stock_loc_id, warehouse.lot_stock_id, warehouse.int_type_id.id)]),
'crossdock': (_('Cross-Dock'), [(warehouse.wh_input_stock_loc_id, warehouse.wh_output_stock_loc_id, warehouse.int_type_id.id), (warehouse.wh_output_stock_loc_id, customer_loc, warehouse.out_type_id.id)]),
'ship_only': (_('Ship Only'), [(warehouse.lot_stock_id, customer_loc, warehouse.out_type_id.id)]),
'pick_ship': (_('Pick + Ship'), [(warehouse.lot_stock_id, warehouse.wh_output_stock_loc_id, warehouse.pick_type_id.id), (warehouse.wh_output_stock_loc_id, customer_loc, warehouse.out_type_id.id)]),
'pick_pack_ship': (_('Pick + Pack + Ship'), [(warehouse.lot_stock_id, warehouse.wh_pack_stock_loc_id, warehouse.pick_type_id.id), (warehouse.wh_pack_stock_loc_id, warehouse.wh_output_stock_loc_id, warehouse.pack_type_id.id), (warehouse.wh_output_stock_loc_id, customer_loc, warehouse.out_type_id.id)]),
}
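    # Editor's reading of the dictionary above (illustrative, not original code): for a
    # 'pick_pack_ship' warehouse the goods flow is
    #   Stock -> Packing Zone (pick type) -> Output (pack type) -> Customers (out type),
    # and each tuple in a list feeds _get_push_pull_rules() or _get_mto_pull_rule()
    # to build the corresponding rules.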
def _handle_renaming(self, cr, uid, warehouse, name, code, context=None):
location_obj = self.pool.get('stock.location')
route_obj = self.pool.get('stock.location.route')
pull_obj = self.pool.get('procurement.rule')
push_obj = self.pool.get('stock.location.path')
#rename location
location_id = warehouse.lot_stock_id.location_id.id
location_obj.write(cr, uid, location_id, {'name': code}, context=context)
#rename route and push-procurement rules
for route in warehouse.route_ids:
route_obj.write(cr, uid, route.id, {'name': route.name.replace(warehouse.name, name, 1)}, context=context)
for pull in route.pull_ids:
pull_obj.write(cr, uid, pull.id, {'name': pull.name.replace(warehouse.name, name, 1)}, context=context)
for push in route.push_ids:
push_obj.write(cr, uid, push.id, {'name': push.name.replace(warehouse.name, name, 1)}, context=context)
#change the mto procurement rule name
if warehouse.mto_pull_id.id:
pull_obj.write(cr, uid, warehouse.mto_pull_id.id, {'name': warehouse.mto_pull_id.name.replace(warehouse.name, name, 1)}, context=context)
def _check_delivery_resupply(self, cr, uid, warehouse, new_location, change_to_multiple, context=None):
""" Will check if the resupply routes from this warehouse follow the changes of number of delivery steps """
#Check routes that are being delivered by this warehouse and change the rule going to transit location
route_obj = self.pool.get("stock.location.route")
pull_obj = self.pool.get("procurement.rule")
routes = route_obj.search(cr, uid, [('supplier_wh_id','=', warehouse.id)], context=context)
pulls = pull_obj.search(cr, uid, ['&', ('route_id', 'in', routes), ('location_id.usage', '=', 'transit')], context=context)
if pulls:
pull_obj.write(cr, uid, pulls, {'location_src_id': new_location, 'procure_method': change_to_multiple and "make_to_order" or "make_to_stock"}, context=context)
# Create or clean MTO rules
mto_route_id = self._get_mto_route(cr, uid, context=context)
if not change_to_multiple:
# If single delivery we should create the necessary MTO rules for the resupply
# pulls = pull_obj.search(cr, uid, ['&', ('route_id', '=', mto_route_id), ('location_id.usage', '=', 'transit'), ('location_src_id', '=', warehouse.lot_stock_id.id)], context=context)
pull_recs = pull_obj.browse(cr, uid, pulls, context=context)
transfer_locs = list(set([x.location_id for x in pull_recs]))
vals = [(warehouse.lot_stock_id , x, warehouse.out_type_id.id) for x in transfer_locs]
mto_pull_vals = self._get_mto_pull_rule(cr, uid, warehouse, vals, context=context)
for mto_pull_val in mto_pull_vals:
pull_obj.create(cr, uid, mto_pull_val, context=context)
else:
            # We need to delete all the MTO procurement rules, otherwise they risk being used in the system
pulls = pull_obj.search(cr, uid, ['&', ('route_id', '=', mto_route_id), ('location_id.usage', '=', 'transit'), ('location_src_id', '=', warehouse.lot_stock_id.id)], context=context)
if pulls:
pull_obj.unlink(cr, uid, pulls, context=context)
def _check_reception_resupply(self, cr, uid, warehouse, new_location, context=None):
"""
        Check that the resupply routes to this warehouse follow the changes in the number of receipt steps
"""
#Check routes that are being delivered by this warehouse and change the rule coming from transit location
route_obj = self.pool.get("stock.location.route")
pull_obj = self.pool.get("procurement.rule")
routes = route_obj.search(cr, uid, [('supplied_wh_id','=', warehouse.id)], context=context)
        pulls = pull_obj.search(cr, uid, ['&', ('route_id', 'in', routes), ('location_src_id.usage', '=', 'transit')], context=context)
if pulls:
pull_obj.write(cr, uid, pulls, {'location_id': new_location}, context=context)
def _check_resupply(self, cr, uid, warehouse, reception_new, delivery_new, context=None):
if reception_new:
old_val = warehouse.reception_steps
new_val = reception_new
change_to_one = (old_val != 'one_step' and new_val == 'one_step')
change_to_multiple = (old_val == 'one_step' and new_val != 'one_step')
if change_to_one or change_to_multiple:
new_location = change_to_one and warehouse.lot_stock_id.id or warehouse.wh_input_stock_loc_id.id
self._check_reception_resupply(cr, uid, warehouse, new_location, context=context)
if delivery_new:
old_val = warehouse.delivery_steps
new_val = delivery_new
change_to_one = (old_val != 'ship_only' and new_val == 'ship_only')
change_to_multiple = (old_val == 'ship_only' and new_val != 'ship_only')
if change_to_one or change_to_multiple:
new_location = change_to_one and warehouse.lot_stock_id.id or warehouse.wh_output_stock_loc_id.id
self._check_delivery_resupply(cr, uid, warehouse, new_location, change_to_multiple, context=context)
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
seq_obj = self.pool.get('ir.sequence')
route_obj = self.pool.get('stock.location.route')
context_with_inactive = context.copy()
context_with_inactive['active_test'] = False
for warehouse in self.browse(cr, uid, ids, context=context_with_inactive):
#first of all, check if we need to delete and recreate route
if vals.get('reception_steps') or vals.get('delivery_steps'):
#activate and deactivate location according to reception and delivery option
self.switch_location(cr, uid, warehouse.id, warehouse, vals.get('reception_steps', False), vals.get('delivery_steps', False), context=context)
# switch between route
self.change_route(cr, uid, ids, warehouse, vals.get('reception_steps', False), vals.get('delivery_steps', False), context=context_with_inactive)
# Check if we need to change something to resupply warehouses and associated MTO rules
self._check_resupply(cr, uid, warehouse, vals.get('reception_steps'), vals.get('delivery_steps'), context=context)
if vals.get('code') or vals.get('name'):
name = warehouse.name
#rename sequence
if vals.get('name'):
name = vals.get('name', warehouse.name)
self._handle_renaming(cr, uid, warehouse, name, vals.get('code', warehouse.code), context=context_with_inactive)
if warehouse.in_type_id:
seq_obj.write(cr, uid, warehouse.in_type_id.sequence_id.id, {'name': name + _(' Sequence in'), 'prefix': vals.get('code', warehouse.code) + '/IN/'}, context=context)
if warehouse.out_type_id:
seq_obj.write(cr, uid, warehouse.out_type_id.sequence_id.id, {'name': name + _(' Sequence out'), 'prefix': vals.get('code', warehouse.code) + '/OUT/'}, context=context)
if warehouse.pack_type_id:
seq_obj.write(cr, uid, warehouse.pack_type_id.sequence_id.id, {'name': name + _(' Sequence packing'), 'prefix': vals.get('code', warehouse.code) + '/PACK/'}, context=context)
if warehouse.pick_type_id:
seq_obj.write(cr, uid, warehouse.pick_type_id.sequence_id.id, {'name': name + _(' Sequence picking'), 'prefix': vals.get('code', warehouse.code) + '/PICK/'}, context=context)
if warehouse.int_type_id:
seq_obj.write(cr, uid, warehouse.int_type_id.sequence_id.id, {'name': name + _(' Sequence internal'), 'prefix': vals.get('code', warehouse.code) + '/INT/'}, context=context)
if vals.get('resupply_wh_ids') and not vals.get('resupply_route_ids'):
for cmd in vals.get('resupply_wh_ids'):
if cmd[0] == 6:
new_ids = set(cmd[2])
old_ids = set([wh.id for wh in warehouse.resupply_wh_ids])
to_add_wh_ids = new_ids - old_ids
if to_add_wh_ids:
supplier_warehouses = self.browse(cr, uid, list(to_add_wh_ids), context=context)
self._create_resupply_routes(cr, uid, warehouse, supplier_warehouses, warehouse.default_resupply_wh_id, context=context)
to_remove_wh_ids = old_ids - new_ids
if to_remove_wh_ids:
to_remove_route_ids = route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id), ('supplier_wh_id', 'in', list(to_remove_wh_ids))], context=context)
if to_remove_route_ids:
route_obj.unlink(cr, uid, to_remove_route_ids, context=context)
else:
#not implemented
pass
if 'default_resupply_wh_id' in vals:
if vals.get('default_resupply_wh_id') == warehouse.id:
raise UserError(_('The default resupply warehouse should be different than the warehouse itself!'))
if warehouse.default_resupply_wh_id:
#remove the existing resupplying route on the warehouse
to_remove_route_ids = route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id), ('supplier_wh_id', '=', warehouse.default_resupply_wh_id.id)], context=context)
for inter_wh_route_id in to_remove_route_ids:
self.write(cr, uid, [warehouse.id], {'route_ids': [(3, inter_wh_route_id)]})
if vals.get('default_resupply_wh_id'):
#assign the new resupplying route on all products
to_assign_route_ids = route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id), ('supplier_wh_id', '=', vals.get('default_resupply_wh_id'))], context=context)
for inter_wh_route_id in to_assign_route_ids:
self.write(cr, uid, [warehouse.id], {'route_ids': [(4, inter_wh_route_id)]})
# If another partner assigned
if vals.get('partner_id'):
if not vals.get('company_id'):
company = self.browse(cr, uid, ids[0], context=context).company_id
else:
company = self.pool['res.company'].browse(cr, uid, vals['company_id'])
transit_loc = company.internal_transit_location_id.id
self.pool['res.partner'].write(cr, uid, [vals['partner_id']], {'property_stock_customer': transit_loc,
'property_stock_supplier': transit_loc}, context=context)
return super(stock_warehouse, self).write(cr, uid, ids, vals=vals, context=context)
def get_all_routes_for_wh(self, cr, uid, warehouse, context=None):
route_obj = self.pool.get("stock.location.route")
all_routes = [route.id for route in warehouse.route_ids]
all_routes += route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id)], context=context)
all_routes += [warehouse.mto_pull_id.route_id.id]
return all_routes
def view_all_routes_for_wh(self, cr, uid, ids, context=None):
all_routes = []
for wh in self.browse(cr, uid, ids, context=context):
all_routes += self.get_all_routes_for_wh(cr, uid, wh, context=context)
domain = [('id', 'in', all_routes)]
return {
'name': _('Warehouse\'s Routes'),
'domain': domain,
'res_model': 'stock.location.route',
'type': 'ir.actions.act_window',
'view_id': False,
'view_mode': 'tree,form',
'view_type': 'form',
'limit': 20
}
class stock_location_path(osv.osv):
_name = "stock.location.path"
_description = "Pushed Flows"
_order = "name"
def _get_rules(self, cr, uid, ids, context=None):
res = []
for route in self.browse(cr, uid, ids, context=context):
res += [x.id for x in route.push_ids]
return res
_columns = {
'name': fields.char('Operation Name', required=True),
'company_id': fields.many2one('res.company', 'Company'),
'route_id': fields.many2one('stock.location.route', 'Route'),
'location_from_id': fields.many2one('stock.location', 'Source Location', ondelete='cascade', select=1, required=True,
help="This rule can be applied when a move is confirmed that has this location as destination location"),
'location_dest_id': fields.many2one('stock.location', 'Destination Location', ondelete='cascade', select=1, required=True,
help="The new location where the goods need to go"),
'delay': fields.integer('Delay (days)', help="Number of days needed to transfer the goods"),
'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type', required=True,
help="This is the picking type that will be put on the stock moves"),
'auto': fields.selection(
[('auto','Automatic Move'), ('manual','Manual Operation'),('transparent','Automatic No Step Added')],
'Automatic Move',
required=True, select=1,
help="The 'Automatic Move' / 'Manual Operation' value will create a stock move after the current one. " \
"With 'Automatic No Step Added', the location is replaced in the original move."
),
'propagate': fields.boolean('Propagate cancel and split', help='If checked, when the previous move is cancelled or split, the move generated by this move will too'),
'active': fields.boolean('Active'),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse'),
'route_sequence': fields.related('route_id', 'sequence', string='Route Sequence',
store={
'stock.location.route': (_get_rules, ['sequence'], 10),
'stock.location.path': (lambda self, cr, uid, ids, c={}: ids, ['route_id'], 10),
}),
'sequence': fields.integer('Sequence'),
}
_defaults = {
'auto': 'auto',
'delay': 0,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'procurement.order', context=c),
'propagate': True,
'active': True,
}
def _prepare_push_apply(self, cr, uid, rule, move, context=None):
newdate = (datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta.relativedelta(days=rule.delay or 0)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return {
'origin': move.origin or move.picking_id.name or "/",
'location_id': move.location_dest_id.id,
'location_dest_id': rule.location_dest_id.id,
'date': newdate,
'company_id': rule.company_id and rule.company_id.id or False,
'date_expected': newdate,
'picking_id': False,
'picking_type_id': rule.picking_type_id and rule.picking_type_id.id or False,
'propagate': rule.propagate,
'push_rule_id': rule.id,
'warehouse_id': rule.warehouse_id and rule.warehouse_id.id or False,
'procurement_id': False,
}
def _apply(self, cr, uid, rule, move, context=None):
move_obj = self.pool.get('stock.move')
newdate = (datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta.relativedelta(days=rule.delay or 0)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
if rule.auto == 'transparent':
old_dest_location = move.location_dest_id.id
move_obj.write(cr, uid, [move.id], {
'date': newdate,
'date_expected': newdate,
'location_dest_id': rule.location_dest_id.id
})
#avoid looping if a push rule is not well configured
if rule.location_dest_id.id != old_dest_location:
#call again push_apply to see if a next step is defined
move_obj._push_apply(cr, uid, [move], context=context)
else:
vals = self._prepare_push_apply(cr, uid, rule, move, context=context)
move_id = move_obj.copy(cr, uid, move.id, vals, context=context)
move_obj.write(cr, uid, [move.id], {
'move_dest_id': move_id,
})
move_obj.action_confirm(cr, uid, [move_id], context=None)
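    # Illustrative note (not part of the original code): for a push rule going
    # from a made-up 'Input' location to 'Quality Control', a confirmed move
    # ending in 'Input' gets a new chained move 'Input' -> 'Quality Control'
    # when auto is 'auto' or 'manual', whereas auto='transparent' simply
    # rewrites the destination of the original move.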
# -------------------------
# Packaging related stuff
# -------------------------
from openerp.report import report_sxw
class stock_package(osv.osv):
"""
These are the packages, containing quants and/or other packages
"""
_name = "stock.quant.package"
_description = "Physical Packages"
_parent_name = "parent_id"
_parent_store = True
_parent_order = 'name'
_order = 'parent_left'
def name_get(self, cr, uid, ids, context=None):
res = self._complete_name(cr, uid, ids, 'complete_name', None, context=context)
return res.items()
def _complete_name(self, cr, uid, ids, name, args, context=None):
""" Forms complete name of location from parent location to child location.
@return: Dictionary of values
"""
res = {}
for m in self.browse(cr, uid, ids, context=context):
res[m.id] = m.name
parent = m.parent_id
while parent:
res[m.id] = parent.name + ' / ' + res[m.id]
parent = parent.parent_id
return res
def _get_packages(self, cr, uid, ids, context=None):
"""Returns packages from quants for store"""
res = set()
for quant in self.browse(cr, uid, ids, context=context):
pack = quant.package_id
while pack:
res.add(pack.id)
pack = pack.parent_id
return list(res)
def _get_package_info(self, cr, uid, ids, name, args, context=None):
quant_obj = self.pool.get("stock.quant")
default_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
res = dict((res_id, {'location_id': False, 'company_id': default_company_id, 'owner_id': False}) for res_id in ids)
for pack in self.browse(cr, uid, ids, context=context):
quants = quant_obj.search(cr, uid, [('package_id', 'child_of', pack.id)], context=context)
if quants:
quant = quant_obj.browse(cr, uid, quants[0], context=context)
res[pack.id]['location_id'] = quant.location_id.id
res[pack.id]['owner_id'] = quant.owner_id.id
res[pack.id]['company_id'] = quant.company_id.id
else:
res[pack.id]['location_id'] = False
res[pack.id]['owner_id'] = False
res[pack.id]['company_id'] = False
return res
def _get_packages_to_relocate(self, cr, uid, ids, context=None):
res = set()
for pack in self.browse(cr, uid, ids, context=context):
res.add(pack.id)
if pack.parent_id:
res.add(pack.parent_id.id)
return list(res)
_columns = {
'name': fields.char('Package Reference', select=True, copy=False),
'complete_name': fields.function(_complete_name, type='char', string="Package Name",),
'parent_left': fields.integer('Left Parent', select=1),
'parent_right': fields.integer('Right Parent', select=1),
        'packaging_id': fields.many2one('product.packaging', 'Packaging', help="This field should be completed only if everything inside the package shares the same product, otherwise it doesn't really make sense.", select=True),
'location_id': fields.function(_get_package_info, type='many2one', relation='stock.location', string='Location', multi="package",
store={
'stock.quant': (_get_packages, ['location_id'], 10),
'stock.quant.package': (_get_packages_to_relocate, ['quant_ids', 'children_ids', 'parent_id'], 10),
}, readonly=True, select=True),
'quant_ids': fields.one2many('stock.quant', 'package_id', 'Bulk Content', readonly=True),
'parent_id': fields.many2one('stock.quant.package', 'Parent Package', help="The package containing this item", ondelete='restrict', readonly=True),
'children_ids': fields.one2many('stock.quant.package', 'parent_id', 'Contained Packages', readonly=True),
'company_id': fields.function(_get_package_info, type="many2one", relation='res.company', string='Company', multi="package",
store={
'stock.quant': (_get_packages, ['company_id'], 10),
'stock.quant.package': (_get_packages_to_relocate, ['quant_ids', 'children_ids', 'parent_id'], 10),
}, readonly=True, select=True),
'owner_id': fields.function(_get_package_info, type='many2one', relation='res.partner', string='Owner', multi="package",
store={
'stock.quant': (_get_packages, ['owner_id'], 10),
'stock.quant.package': (_get_packages_to_relocate, ['quant_ids', 'children_ids', 'parent_id'], 10),
}, readonly=True, select=True),
}
_defaults = {
'name': lambda self, cr, uid, context: self.pool.get('ir.sequence').next_by_code(cr, uid, 'stock.quant.package') or _('Unknown Pack')
}
def _check_location_constraint(self, cr, uid, packs, context=None):
'''checks that all quants in a package are stored in the same location. This function cannot be used
as a constraint because it needs to be checked on pack operations (they may not call write on the
package)
'''
quant_obj = self.pool.get('stock.quant')
for pack in packs:
parent = pack
while parent.parent_id:
parent = parent.parent_id
quant_ids = self.get_content(cr, uid, [parent.id], context=context)
quants = [x for x in quant_obj.browse(cr, uid, quant_ids, context=context) if x.qty > 0]
location_id = quants and quants[0].location_id.id or False
            if not all(quant.location_id.id == location_id for quant in quants):
raise UserError(_('Everything inside a package should be in the same location'))
return True
def action_print(self, cr, uid, ids, context=None):
context = dict(context or {}, active_ids=ids)
return self.pool.get("report").get_action(cr, uid, ids, 'stock.report_package_barcode_small', context=context)
def unpack(self, cr, uid, ids, context=None):
quant_obj = self.pool.get('stock.quant')
for package in self.browse(cr, uid, ids, context=context):
quant_ids = [quant.id for quant in package.quant_ids]
quant_obj.write(cr, SUPERUSER_ID, quant_ids, {'package_id': package.parent_id.id or False}, context=context)
children_package_ids = [child_package.id for child_package in package.children_ids]
self.write(cr, uid, children_package_ids, {'parent_id': package.parent_id.id or False}, context=context)
return self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'stock', 'action_package_view', context=context)
def get_content(self, cr, uid, ids, context=None):
child_package_ids = self.search(cr, uid, [('id', 'child_of', ids)], context=context)
return self.pool.get('stock.quant').search(cr, uid, [('package_id', 'in', child_package_ids)], context=context)
def get_content_package(self, cr, uid, ids, context=None):
quants_ids = self.get_content(cr, uid, ids, context=context)
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'stock', 'quantsact', context=context)
res['domain'] = [('id', 'in', quants_ids)]
return res
def _get_product_total_qty(self, cr, uid, package_record, product_id, context=None):
        '''Find the total quantity of the given product 'product_id' inside the given package 'package_record'.'''
quant_obj = self.pool.get('stock.quant')
all_quant_ids = self.get_content(cr, uid, [package_record.id], context=context)
total = 0
for quant in quant_obj.browse(cr, uid, all_quant_ids, context=context):
if quant.product_id.id == product_id:
total += quant.qty
return total
def _get_all_products_quantities(self, cr, uid, package_id, context=None):
'''This function computes the different product quantities for the given package
'''
quant_obj = self.pool.get('stock.quant')
res = {}
for quant in quant_obj.browse(cr, uid, self.get_content(cr, uid, package_id, context=context)):
if quant.product_id not in res:
res[quant.product_id] = 0
res[quant.product_id] += quant.qty
return res
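    # Illustrative note (not part of the original code): the dictionary built
    # above is keyed by product browse records rather than integer ids, so a
    # package holding 5 units of one product and 2 of another would look like
    # {product_a: 5.0, product_b: 2.0}, where product_a/product_b are examples.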
#Remove me?
def copy_pack(self, cr, uid, id, default_pack_values=None, default=None, context=None):
stock_pack_operation_obj = self.pool.get('stock.pack.operation')
if default is None:
default = {}
new_package_id = self.copy(cr, uid, id, default_pack_values, context=context)
default['result_package_id'] = new_package_id
op_ids = stock_pack_operation_obj.search(cr, uid, [('result_package_id', '=', id)], context=context)
for op_id in op_ids:
stock_pack_operation_obj.copy(cr, uid, op_id, default, context=context)
class stock_pack_operation(osv.osv):
_name = "stock.pack.operation"
_description = "Packing Operation"
_order = "result_package_id desc, id"
def _get_remaining_prod_quantities(self, cr, uid, operation, context=None):
'''Get the remaining quantities per product on an operation with a package. This function returns a dictionary'''
#if the operation doesn't concern a package, it's not relevant to call this function
if not operation.package_id or operation.product_id:
return {operation.product_id: operation.remaining_qty}
#get the total of products the package contains
res = self.pool.get('stock.quant.package')._get_all_products_quantities(cr, uid, operation.package_id.id, context=context)
#reduce by the quantities linked to a move
for record in operation.linked_move_operation_ids:
if record.move_id.product_id.id not in res:
res[record.move_id.product_id] = 0
res[record.move_id.product_id] -= record.qty
return res
def _get_remaining_qty(self, cr, uid, ids, name, args, context=None):
uom_obj = self.pool.get('product.uom')
res = {}
for ops in self.browse(cr, uid, ids, context=context):
res[ops.id] = 0
if ops.package_id and not ops.product_id:
#dont try to compute the remaining quantity for packages because it's not relevant (a package could include different products).
#should use _get_remaining_prod_quantities instead
continue
else:
qty = ops.product_qty
if ops.product_uom_id:
qty = uom_obj._compute_qty_obj(cr, uid, ops.product_uom_id, ops.product_qty, ops.product_id.uom_id, context=context)
for record in ops.linked_move_operation_ids:
qty -= record.qty
res[ops.id] = float_round(qty, precision_rounding=ops.product_id.uom_id.rounding)
return res
def product_id_change(self, cr, uid, ids, product_id, product_uom_id, product_qty, context=None):
res = self.on_change_tests(cr, uid, ids, product_id, product_uom_id, product_qty, context=context)
uom_obj = self.pool['product.uom']
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
if product_id and not product_uom_id or uom_obj.browse(cr, uid, product_uom_id, context=context).category_id.id != product.uom_id.category_id.id:
res['value']['product_uom_id'] = product.uom_id.id
if product:
res['value']['lots_visible'] = (product.tracking != 'none')
res['domain'] = {'product_uom_id': [('category_id','=',product.uom_id.category_id.id)]}
else:
res['domain'] = {'product_uom_id': []}
return res
def on_change_tests(self, cr, uid, ids, product_id, product_uom_id, product_qty, context=None):
res = {'value': {}}
uom_obj = self.pool.get('product.uom')
if product_id:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
product_uom_id = product_uom_id or product.uom_id.id
selected_uom = uom_obj.browse(cr, uid, product_uom_id, context=context)
if selected_uom.category_id.id != product.uom_id.category_id.id:
res['warning'] = {
'title': _('Warning: wrong UoM!'),
                    'message': _('The selected UoM for product %s is not compatible with the UoM set on the product form. \nPlease choose a UoM within the same UoM category.') % (product.name)
}
if product_qty and 'warning' not in res:
rounded_qty = uom_obj._compute_qty(cr, uid, product_uom_id, product_qty, product_uom_id, round=True)
if rounded_qty != product_qty:
res['warning'] = {
'title': _('Warning: wrong quantity!'),
'message': _('The chosen quantity for product %s is not compatible with the UoM rounding. It will be automatically converted at confirmation') % (product.name)
}
return res
def _compute_location_description(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for op in self.browse(cr, uid, ids, context=context):
from_name = op.location_id.name
to_name = op.location_dest_id.name
if op.package_id and op.product_id:
from_name += " : " + op.package_id.name
if op.result_package_id:
to_name += " : " + op.result_package_id.name
res[op.id] = {'from_loc': from_name,
'to_loc': to_name}
return res
def _get_bool(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for pack in self.browse(cr, uid, ids, context=context):
res[pack.id] = (pack.qty_done > 0.0)
return res
def _set_processed_qty(self, cr, uid, id, field_name, field_value, arg, context=None):
op = self.browse(cr, uid, id, context=context)
if not op.product_id:
if field_value and op.qty_done == 0:
self.write(cr, uid, [id], {'qty_done': 1.0}, context=context)
if not field_value and op.qty_done != 0:
self.write(cr, uid, [id], {'qty_done': 0.0}, context=context)
return True
def _compute_lots_visible(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for pack in self.browse(cr, uid, ids, context=context):
if pack.pack_lot_ids:
res[pack.id] = True
continue
pick = pack.picking_id
product_requires = (pack.product_id.tracking != 'none')
if pick.picking_type_id:
res[pack.id] = (pick.picking_type_id.use_existing_lots or pick.picking_type_id.use_create_lots) and product_requires
else:
res[pack.id] = product_requires
return res
def _get_default_from_loc(self, cr, uid, context=None):
default_loc = context.get('default_location_id')
if default_loc:
return self.pool['stock.location'].browse(cr, uid, default_loc, context=context).name
def _get_default_to_loc(self, cr, uid, context=None):
default_loc = context.get('default_location_dest_id')
if default_loc:
return self.pool['stock.location'].browse(cr, uid, default_loc, context=context).name
_columns = {
'picking_id': fields.many2one('stock.picking', 'Stock Picking', help='The stock operation where the packing has been made', required=True),
'product_id': fields.many2one('product.product', 'Product', ondelete="CASCADE"), # 1
'product_uom_id': fields.many2one('product.uom', 'Unit of Measure'),
'product_qty': fields.float('To Do', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'qty_done': fields.float('Done', digits_compute=dp.get_precision('Product Unit of Measure')),
'processed_boolean': fields.function(_get_bool, fnct_inv=_set_processed_qty, type='boolean', string='Done'),
'package_id': fields.many2one('stock.quant.package', 'Source Package'), # 2
'pack_lot_ids': fields.one2many('stock.pack.operation.lot', 'operation_id', 'Lots Used'),
'result_package_id': fields.many2one('stock.quant.package', 'Destination Package', help="If set, the operations are packed into this package", required=False, ondelete='cascade'),
'date': fields.datetime('Date', required=True),
'owner_id': fields.many2one('res.partner', 'Owner', help="Owner of the quants"),
'linked_move_operation_ids': fields.one2many('stock.move.operation.link', 'operation_id', string='Linked Moves', readonly=True, help='Moves impacted by this operation for the computation of the remaining quantities'),
'remaining_qty': fields.function(_get_remaining_qty, type='float', digits = 0, string="Remaining Qty", help="Remaining quantity in default UoM according to moves matched with this operation. "),
'location_id': fields.many2one('stock.location', 'Source Location', required=True),
'location_dest_id': fields.many2one('stock.location', 'Destination Location', required=True),
'picking_source_location_id': fields.related('picking_id', 'location_id', type='many2one', relation='stock.location'),
'picking_destination_location_id': fields.related('picking_id', 'location_dest_id', type='many2one', relation='stock.location'),
'from_loc': fields.function(_compute_location_description, type='char', string='From', multi='loc'),
'to_loc': fields.function(_compute_location_description, type='char', string='To', multi='loc'),
'fresh_record': fields.boolean('Newly created pack operation'),
'lots_visible': fields.function(_compute_lots_visible, type='boolean'),
'state': fields.related('picking_id', 'state', type='selection', selection=[
('draft', 'Draft'),
('cancel', 'Cancelled'),
('waiting', 'Waiting Another Operation'),
('confirmed', 'Waiting Availability'),
('partially_available', 'Partially Available'),
('assigned', 'Available'),
('done', 'Done'),
]),
}
_defaults = {
'date': fields.date.context_today,
'qty_done': 0.0,
'product_qty': 0.0,
'processed_boolean': lambda *a: False,
'fresh_record': True,
'from_loc': _get_default_from_loc,
'to_loc': _get_default_to_loc,
}
def split_quantities(self, cr, uid, ids, context=None):
for pack in self.browse(cr, uid, ids, context=context):
if pack.product_qty - pack.qty_done > 0.0 and pack.qty_done < pack.product_qty:
pack2 = self.copy(cr, uid, pack.id, default={'qty_done': 0.0, 'product_qty': pack.product_qty - pack.qty_done}, context=context)
self.write(cr, uid, [pack.id], {'product_qty': pack.qty_done}, context=context)
self._copy_remaining_pack_lot_ids(cr, uid, pack.id, pack2, context=context)
else:
raise UserError(_('The quantity to split should be smaller than the quantity To Do. '))
return True
def write(self, cr, uid, ids, vals, context=None):
vals['fresh_record'] = False
context = context or {}
res = super(stock_pack_operation, self).write(cr, uid, ids, vals, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
if any([x.state in ('done', 'cancel') for x in self.browse(cr, uid, ids, context=context)]):
raise UserError(_('You can not delete pack operations of a done picking'))
return super(stock_pack_operation, self).unlink(cr, uid, ids, context=context)
def check_tracking(self, cr, uid, ids, context=None):
""" Checks if serial number is assigned to stock move or not and raise an error if it had to.
"""
operations = self.browse(cr, uid, ids, context=context)
for ops in operations:
if ops.picking_id and (ops.picking_id.picking_type_id.use_existing_lots or ops.picking_id.picking_type_id.use_create_lots) and \
ops.product_id and ops.product_id.tracking != 'none' and ops.qty_done > 0.0:
if not ops.pack_lot_ids:
raise UserError(_('You need to provide a Lot/Serial Number for product %s') % ops.product_id.name)
if ops.product_id.tracking == 'serial':
for opslot in ops.pack_lot_ids:
if opslot.qty not in (1.0, 0.0):
raise UserError(_('You should provide a different serial number for each piece'))
def save(self, cr, uid, ids, context=None):
for pack in self.browse(cr, uid, ids, context=context):
if pack.product_id.tracking != 'none':
qty_done = sum([x.qty for x in pack.pack_lot_ids])
self.pool['stock.pack.operation'].write(cr, uid, [pack.id], {'qty_done': qty_done}, context=context)
return {'type': 'ir.actions.act_window_close'}
def split_lot(self, cr, uid, ids, context=None):
context = context or {}
ctx=context.copy()
assert len(ids) > 0
data_obj = self.pool['ir.model.data']
pack = self.browse(cr, uid, ids[0], context=context)
picking_type = pack.picking_id.picking_type_id
serial = (pack.product_id.tracking == 'serial')
view = data_obj.xmlid_to_res_id(cr, uid, 'stock.view_pack_operation_lot_form')
# If it's a returned stock move, we do not want to create a lot
returned_move = pack.linked_move_operation_ids.mapped('move_id').mapped('origin_returned_move_id')
only_create = picking_type.use_create_lots and not picking_type.use_existing_lots and not returned_move
show_reserved = any([x for x in pack.pack_lot_ids if x.qty_todo > 0.0])
ctx.update({'serial': serial,
'only_create': only_create,
'create_lots': picking_type.use_create_lots,
'state_done': pack.picking_id.state == 'done',
'show_reserved': show_reserved})
return {
'name': _('Lot Details'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'stock.pack.operation',
'views': [(view, 'form')],
'view_id': view,
'target': 'new',
'res_id': pack.id,
'context': ctx,
}
def show_details(self, cr, uid, ids, context=None):
data_obj = self.pool['ir.model.data']
view = data_obj.xmlid_to_res_id(cr, uid, 'stock.view_pack_operation_details_form_save')
pack = self.browse(cr, uid, ids[0], context=context)
return {
'name': _('Operation Details'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'stock.pack.operation',
'views': [(view, 'form')],
'view_id': view,
'target': 'new',
'res_id': pack.id,
'context': context,
}
def _copy_remaining_pack_lot_ids(self, cr, uid, id, new_operation_id, context=None):
stock_pack_operation_lot_obj = self.pool["stock.pack.operation.lot"]
old_operation = self.browse(cr, uid, id, context=context)
for pack_lot_id in old_operation.pack_lot_ids:
new_qty_todo = pack_lot_id.qty_todo - pack_lot_id.qty
if float_compare(new_qty_todo, 0, precision_rounding=old_operation.product_uom_id.rounding) > 0:
stock_pack_operation_lot_obj.copy(cr, uid, pack_lot_id.id, {'operation_id': new_operation_id,
'qty_todo': new_qty_todo,
'qty': 0}, context=context)
class stock_pack_operation_lot(osv.osv):
_name = "stock.pack.operation.lot"
_description = "Specifies lot/serial number for pack operations that need it"
def _get_plus(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for packlot in self.browse(cr, uid, ids, context=context):
if packlot.operation_id.product_id.tracking == 'serial':
res[packlot.id] = (packlot.qty == 0.0)
else:
res[packlot.id] = (packlot.qty_todo == 0.0) or (packlot.qty < packlot.qty_todo)
return res
_columns = {
'operation_id': fields.many2one('stock.pack.operation'),
'qty': fields.float('Done'),
'lot_id': fields.many2one('stock.production.lot', 'Lot/Serial Number'),
'lot_name': fields.char('Lot Name'),
'qty_todo': fields.float('To Do'),
'plus_visible': fields.function(_get_plus, type='boolean'),
}
_defaults = {
'qty': lambda cr, uid, ids, c: 1.0,
'qty_todo': lambda cr, uid, ids, c: 0.0,
'plus_visible': True,
}
def _check_lot(self, cr, uid, ids, context=None):
for packlot in self.browse(cr, uid, ids, context=context):
if not packlot.lot_name and not packlot.lot_id:
return False
return True
_constraints = [
(_check_lot,
'Lot is required',
['lot_id', 'lot_name']),
]
_sql_constraints = [
('qty', 'CHECK(qty >= 0.0)','Quantity must be greater than or equal to 0.0!'),
('uniq_lot_id', 'unique(operation_id, lot_id)', 'You have already mentioned this lot in another line'),
('uniq_lot_name', 'unique(operation_id, lot_name)', 'You have already mentioned this lot name in another line')]
def do_plus(self, cr, uid, ids, context=None):
pack_obj = self.pool['stock.pack.operation']
for packlot in self.browse(cr, uid, ids, context=context):
self.write(cr, uid, [packlot.id], {'qty': packlot.qty + 1}, context=context)
pack = self.browse(cr, uid, ids[0], context=context).operation_id
qty_done = sum([x.qty for x in pack.pack_lot_ids])
pack_obj.write(cr, uid, [pack.id], {'qty_done': qty_done}, context=context)
return pack_obj.split_lot(cr, uid, [pack.id], context=context)
def do_minus(self, cr, uid, ids, context=None):
pack_obj = self.pool['stock.pack.operation']
for packlot in self.browse(cr, uid, ids, context=context):
self.write(cr, uid, [packlot.id], {'qty': packlot.qty - 1}, context=context)
pack = self.browse(cr, uid, ids[0], context=context).operation_id
qty_done = sum([x.qty for x in pack.pack_lot_ids])
pack_obj.write(cr, uid, [pack.id], {'qty_done': qty_done}, context=context)
return pack_obj.split_lot(cr, uid, [pack.id], context=context)
class stock_move_operation_link(osv.osv):
"""
Table making the link between stock.moves and stock.pack.operations to compute the remaining quantities on each of these objects
"""
_name = "stock.move.operation.link"
_description = "Link between stock moves and pack operations"
_columns = {
'qty': fields.float('Quantity', help="Quantity of products to consider when talking about the contribution of this pack operation towards the remaining quantity of the move (and inverse). Given in the product main uom."),
'operation_id': fields.many2one('stock.pack.operation', 'Operation', required=True, ondelete="cascade"),
'move_id': fields.many2one('stock.move', 'Move', required=True, ondelete="cascade"),
'reserved_quant_id': fields.many2one('stock.quant', 'Reserved Quant', help="Technical field containing the quant that created this link between an operation and a stock move. Used at the stock_move_obj.action_done() time to avoid seeking a matching quant again"),
}
class stock_warehouse_orderpoint(osv.osv):
"""
Defines Minimum stock rules.
"""
_name = "stock.warehouse.orderpoint"
_description = "Minimum Inventory Rule"
def subtract_procurements_from_orderpoints(self, cr, uid, orderpoint_ids, context=None):
        '''This function returns the quantity of product that needs to be deducted from the orderpoint computed quantity because there's already a procurement created with the aim of fulfilling it.
'''
cr.execute("""select op.id, p.id, p.product_uom, p.product_qty, pt.uom_id, sm.product_qty from procurement_order as p left join stock_move as sm ON sm.procurement_id = p.id,
stock_warehouse_orderpoint op, product_product pp, product_template pt
WHERE p.orderpoint_id = op.id AND p.state not in ('done', 'cancel') AND (sm.state IS NULL OR sm.state not in ('draft'))
AND pp.id = p.product_id AND pp.product_tmpl_id = pt.id
AND op.id IN %s
ORDER BY op.id, p.id
""", (tuple(orderpoint_ids),))
results = cr.fetchall()
current_proc = False
current_op = False
uom_obj = self.pool.get("product.uom")
op_qty = 0
res = dict.fromkeys(orderpoint_ids, 0.0)
for move_result in results:
op = move_result[0]
if current_op != op:
if current_op:
res[current_op] = op_qty
current_op = op
op_qty = 0
proc = move_result[1]
if proc != current_proc:
op_qty += uom_obj._compute_qty(cr, uid, move_result[2], move_result[3], move_result[4], round=False)
current_proc = proc
if move_result[5]: #If a move is associated (is move qty)
op_qty -= move_result[5]
if current_op:
res[current_op] = op_qty
return res
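    # Illustrative shape of the result (made-up numbers, not original code):
    # {orderpoint_id: qty_already_procured}, e.g. {7: 25.0} when 25 units of
    # the orderpoint's product are already covered by open procurements and
    # should therefore be deducted from the computed need.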
def _check_product_uom(self, cr, uid, ids, context=None):
'''
Check if the UoM has the same category as the product standard UoM
'''
if not context:
context = {}
for rule in self.browse(cr, uid, ids, context=context):
if rule.product_id.uom_id.category_id.id != rule.product_uom.category_id.id:
return False
return True
_columns = {
'name': fields.char('Name', required=True, copy=False),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the orderpoint without removing it."),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', required=True, ondelete="cascade"),
'location_id': fields.many2one('stock.location', 'Location', required=True, ondelete="cascade"),
'product_id': fields.many2one('product.product', 'Product', required=True, ondelete='cascade', domain=[('type', '=', 'product')]),
'product_uom': fields.related('product_id', 'uom_id', type='many2one', relation='product.uom', string='Product Unit of Measure', readonly=True, required=True),
'product_min_qty': fields.float('Minimum Quantity', required=True,
digits_compute=dp.get_precision('Product Unit of Measure'),
help="When the virtual stock goes below the Min Quantity specified for this field, Odoo generates "\
"a procurement to bring the forecasted quantity to the Max Quantity."),
'product_max_qty': fields.float('Maximum Quantity', required=True,
digits_compute=dp.get_precision('Product Unit of Measure'),
help="When the virtual stock goes below the Min Quantity, Odoo generates "\
"a procurement to bring the forecasted quantity to the Quantity specified as Max Quantity."),
'qty_multiple': fields.float('Qty Multiple', required=True,
digits_compute=dp.get_precision('Product Unit of Measure'),
help="The procurement quantity will be rounded up to this multiple. If it is 0, the exact quantity will be used. "),
'procurement_ids': fields.one2many('procurement.order', 'orderpoint_id', 'Created Procurements'),
'group_id': fields.many2one('procurement.group', 'Procurement Group', help="Moves created through this orderpoint will be put in this procurement group. If none is given, the moves generated by procurement rules will be grouped into one big picking.", copy=False),
'company_id': fields.many2one('res.company', 'Company', required=True),
'lead_days': fields.integer('Lead Time', help="Number of days after the orderpoint is triggered to receive the products or to order to the vendor"),
'lead_type': fields.selection([
('net', 'Day(s) to get the products'),
('supplier', 'Day(s) to purchase')
], 'Lead Type', required=True)
}
_defaults = {
'active': lambda *a: 1,
'lead_days': lambda *a: 1,
'lead_type': lambda *a: 'supplier',
'qty_multiple': lambda *a: 1,
'name': lambda self, cr, uid, context: self.pool.get('ir.sequence').next_by_code(cr, uid, 'stock.orderpoint') or '',
'product_uom': lambda self, cr, uid, context: context.get('product_uom', False),
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.warehouse.orderpoint', context=context)
}
_sql_constraints = [
('qty_multiple_check', 'CHECK( qty_multiple >= 0 )', 'Qty Multiple must be greater than or equal to zero.'),
]
_constraints = [
(_check_product_uom, 'You have to select a product unit of measure in the same category than the default unit of measure of the product', ['product_id', 'product_uom']),
]
def default_get(self, cr, uid, fields, context=None):
warehouse_obj = self.pool.get('stock.warehouse')
res = super(stock_warehouse_orderpoint, self).default_get(cr, uid, fields, context)
# default 'warehouse_id' and 'location_id'
if 'warehouse_id' not in res:
warehouse_ids = res.get('company_id') and warehouse_obj.search(cr, uid, [('company_id', '=', res['company_id'])], limit=1, context=context) or []
res['warehouse_id'] = warehouse_ids and warehouse_ids[0] or False
if 'location_id' not in res:
res['location_id'] = res.get('warehouse_id') and warehouse_obj.browse(cr, uid, res['warehouse_id'], context).lot_stock_id.id or False
return res
def onchange_warehouse_id(self, cr, uid, ids, warehouse_id, context=None):
""" Finds location id for changed warehouse.
@param warehouse_id: Changed id of warehouse.
@return: Dictionary of values.
"""
if warehouse_id:
w = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id, context=context)
v = {'location_id': w.lot_stock_id.id}
return {'value': v}
return {}
def onchange_product_id(self, cr, uid, ids, product_id, context=None):
""" Finds UoM for changed product.
@param product_id: Changed id of product.
@return: Dictionary of values.
"""
if product_id:
prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
d = {'product_uom': [('category_id', '=', prod.uom_id.category_id.id)]}
v = {'product_uom': prod.uom_id.id}
return {'value': v, 'domain': d}
return {'domain': {'product_uom': []}}
class stock_picking_type(osv.osv):
_name = "stock.picking.type"
_description = "The picking type determines the picking view"
_order = 'sequence'
def open_barcode_interface(self, cr, uid, ids, context=None):
final_url = "/stock/barcode/#action=stock.ui&picking_type_id=" + str(ids[0]) if len(ids) else '0'
return {'type': 'ir.actions.act_url', 'url': final_url, 'target': 'self'}
def _get_tristate_values(self, cr, uid, ids, field_name, arg, context=None):
picking_obj = self.pool.get('stock.picking')
res = {}
for picking_type_id in ids:
#get last 10 pickings of this type
picking_ids = picking_obj.search(cr, uid, [('picking_type_id', '=', picking_type_id), ('state', '=', 'done')], order='date_done desc', limit=10, context=context)
tristates = []
for picking in picking_obj.browse(cr, uid, picking_ids, context=context):
if picking.date_done > picking.date:
                    tristates.insert(0, {'tooltip': (picking.name or '') + ": " + _('Late'), 'value': -1})
                elif picking.backorder_id:
                    tristates.insert(0, {'tooltip': (picking.name or '') + ": " + _('Backorder exists'), 'value': 0})
                else:
                    tristates.insert(0, {'tooltip': (picking.name or '') + ": " + _('OK'), 'value': 1})
res[picking_type_id] = json.dumps(tristates)
return res
def _get_picking_count(self, cr, uid, ids, field_names, arg, context=None):
obj = self.pool.get('stock.picking')
domains = {
'count_picking_draft': [('state', '=', 'draft')],
'count_picking_waiting': [('state', 'in', ('confirmed', 'waiting'))],
'count_picking_ready': [('state', 'in', ('assigned', 'partially_available'))],
'count_picking': [('state', 'in', ('assigned', 'waiting', 'confirmed', 'partially_available'))],
'count_picking_late': [('min_date', '<', time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)), ('state', 'in', ('assigned', 'waiting', 'confirmed', 'partially_available'))],
'count_picking_backorders': [('backorder_id', '!=', False), ('state', 'in', ('confirmed', 'assigned', 'waiting', 'partially_available'))],
}
result = {}
for field in domains:
data = obj.read_group(cr, uid, domains[field] +
[('state', 'not in', ('done', 'cancel')), ('picking_type_id', 'in', ids)],
['picking_type_id'], ['picking_type_id'], context=context)
count = dict(map(lambda x: (x['picking_type_id'] and x['picking_type_id'][0], x['picking_type_id_count']), data))
for tid in ids:
result.setdefault(tid, {})[field] = count.get(tid, 0)
for tid in ids:
if result[tid]['count_picking']:
result[tid]['rate_picking_late'] = result[tid]['count_picking_late'] * 100 / result[tid]['count_picking']
result[tid]['rate_picking_backorders'] = result[tid]['count_picking_backorders'] * 100 / result[tid]['count_picking']
else:
result[tid]['rate_picking_late'] = 0
result[tid]['rate_picking_backorders'] = 0
return result
def _get_action(self, cr, uid, ids, action, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.xmlid_to_res_id(cr, uid, action, raise_if_not_found=True)
result = act_obj.read(cr, uid, [result], context=context)[0]
if ids:
picking_type = self.browse(cr, uid, ids[0], context=context)
result['display_name'] = picking_type.display_name
return result
def get_action_picking_tree_late(self, cr, uid, ids, context=None):
return self._get_action(cr, uid, ids, 'stock.action_picking_tree_late', context=context)
def get_action_picking_tree_backorder(self, cr, uid, ids, context=None):
return self._get_action(cr, uid, ids, 'stock.action_picking_tree_backorder', context=context)
def get_action_picking_tree_waiting(self, cr, uid, ids, context=None):
return self._get_action(cr, uid, ids, 'stock.action_picking_tree_waiting', context=context)
def get_action_picking_tree_ready(self, cr, uid, ids, context=None):
return self._get_action(cr, uid, ids, 'stock.action_picking_tree_ready', context=context)
def get_stock_picking_action_picking_type(self, cr, uid, ids, context=None):
return self._get_action(cr, uid, ids, 'stock.stock_picking_action_picking_type', context=context)
def onchange_picking_code(self, cr, uid, ids, picking_code=False):
if not picking_code:
return False
obj_data = self.pool.get('ir.model.data')
stock_loc = obj_data.xmlid_to_res_id(cr, uid, 'stock.stock_location_stock')
result = {
'default_location_src_id': stock_loc,
'default_location_dest_id': stock_loc,
}
if picking_code == 'incoming':
result['default_location_src_id'] = obj_data.xmlid_to_res_id(cr, uid, 'stock.stock_location_suppliers')
elif picking_code == 'outgoing':
result['default_location_dest_id'] = obj_data.xmlid_to_res_id(cr, uid, 'stock.stock_location_customers')
return {'value': result}
def _get_name(self, cr, uid, ids, field_names, arg, context=None):
return dict(self.name_get(cr, uid, ids, context=context))
def name_get(self, cr, uid, ids, context=None):
"""Overides orm name_get method to display 'Warehouse_name: PickingType_name' """
if context is None:
context = {}
if not isinstance(ids, list):
ids = [ids]
res = []
if not ids:
return res
for record in self.browse(cr, uid, ids, context=context):
name = record.name
if record.warehouse_id:
name = record.warehouse_id.name + ': ' +name
res.append((record.id, name))
return res
@api.model
def name_search(self, name, args=None, operator='ilike', limit=100):
args = args or []
domain = []
if name:
domain = ['|', ('name', operator, name), ('warehouse_id.name', operator, name)]
picks = self.search(domain + args, limit=limit)
return picks.name_get()
def _default_warehouse(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context)
res = self.pool.get('stock.warehouse').search(cr, uid, [('company_id', '=', user.company_id.id)], limit=1, context=context)
return res and res[0] or False
_columns = {
'name': fields.char('Picking Type Name', translate=True, required=True),
'complete_name': fields.function(_get_name, type='char', string='Name'),
'color': fields.integer('Color'),
'sequence': fields.integer('Sequence', help="Used to order the 'All Operations' kanban view"),
'sequence_id': fields.many2one('ir.sequence', 'Reference Sequence', required=True),
'default_location_src_id': fields.many2one('stock.location', 'Default Source Location', help="This is the default source location when you create a picking manually with this picking type. It is possible however to change it or that the routes put another location. If it is empty, it will check for the supplier location on the partner. "),
'default_location_dest_id': fields.many2one('stock.location', 'Default Destination Location', help="This is the default destination location when you create a picking manually with this picking type. It is possible however to change it or that the routes put another location. If it is empty, it will check for the customer location on the partner. "),
'code': fields.selection([('incoming', 'Suppliers'), ('outgoing', 'Customers'), ('internal', 'Internal')], 'Type of Operation', required=True),
'return_picking_type_id': fields.many2one('stock.picking.type', 'Picking Type for Returns'),
'show_entire_packs': fields.boolean('Allow moving packs', help="If checked, this shows the packs to be moved as a whole in the Operations tab all the time, even if there was no entire pack reserved."),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', ondelete='cascade'),
'active': fields.boolean('Active'),
'use_create_lots': fields.boolean('Create New Lots', help="If this is checked only, it will suppose you want to create new Serial Numbers / Lots, so you can provide them in a text field. "),
'use_existing_lots': fields.boolean('Use Existing Lots', help="If this is checked, you will be able to choose the Serial Number / Lots. You can also decide to not put lots in this picking type. This means it will create stock with no lot or not put a restriction on the lot taken. "),
# Statistics for the kanban view
'last_done_picking': fields.function(_get_tristate_values,
type='char',
string='Last 10 Done Pickings'),
'count_picking_draft': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'count_picking_ready': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'count_picking': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'count_picking_waiting': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'count_picking_late': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'count_picking_backorders': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'rate_picking_late': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'rate_picking_backorders': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
# Barcode nomenclature
'barcode_nomenclature_id': fields.many2one('barcode.nomenclature','Barcode Nomenclature', help='A barcode nomenclature'),
}
_defaults = {
'warehouse_id': _default_warehouse,
'active': True,
'use_existing_lots': True,
'use_create_lots': True,
}
class barcode_rule(models.Model):
_inherit = 'barcode.rule'
def _get_type_selection(self):
types = sets.Set(super(barcode_rule,self)._get_type_selection())
types.update([
('weight', _('Weighted Product')),
('location', _('Location')),
('lot', _('Lot')),
('package', _('Package'))
])
return list(types)
class StockPackOperation(models.Model):
_inherit = 'stock.pack.operation'
@api.onchange('pack_lot_ids')
def _onchange_packlots(self):
self.qty_done = sum([x.qty for x in self.pack_lot_ids])
| agpl-3.0 | -7,653,917,894,173,250,000 | 58.807486 | 420 | 0.595567 | false |
TheDSCPL/SSRE_2017-2018_group8 | Projeto/Python/cryptopy/crypto/passwords/passwordfactory.py | 1 | 3751 | # -*- coding: utf-8 -*-
""" crypto.passwords.passwordfactory
Python classes to create and recover passwords. Currently contains
simple password generation. <need to merge the dictionary based pws>
Copyright © (c) 2002 by Paul A. Lambert
Read LICENSE.txt for license information.
August 14, 2002
"""
from random import Random
from sha import sha # the SHA1 algorithm for cryptographic hashing
from math import log, ceil
#from binascii_plus import b2a_p
class PasswordFactory:
""" Make passwords using pseudo random seeds.
Also used to recover passwords by using same pwSeed.
If the seed is not saved, the password can not be recovered!!
"""
def __init__(self, pwFactorySeed, minSize=10, maxSize=10 ):
""" An abstract class to create passwords """
self._factorySeed = pwFactorySeed
self.minSize = minSize
self.maxSize = maxSize
self.rand = Random( self._factorySeed )
def getPassword(self, pwSeed):
raise "MUST be overloaded"
def __call__(self, pwSeed):
""" Create a new password as a 'call' """
return self.getPassword(pwSeed)
def entropy(self):
""" Calculate the security of the password generation as a power of 2 """
total = 0
for pwSize in range(self.minSize, self.maxSize+1):
total = total + self.passwordsForSize(pwSize)
return powof2( total )
def powof2(x):
""" Convert x to a power of 2 """
return log(x)/log(2)
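# Worked example (illustration only, not part of the original module):
# PasswordFactorySimple below draws each character from 44 reduced letters
# plus 8 reduced digits, i.e. 52 symbols, so a 10-character password has
# 52**10 possibilities and powof2(52**10) = 10 * log(52)/log(2), roughly
# 57 bits of entropy.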
class PasswordFactorySimple(PasswordFactory):
""" This class implements a very secure but simple selection of numbers and letters.
Some characters have been removed to prevent confusion between similar shapes
    The removed characters are: (O, 0, o), (l, 1, I), (u, v), (U, V)
"""
def __init__(self, pwFactorySeed, minSize=10, maxSize=10 ):
""" Initialize password generation """
PasswordFactory.__init__(self, pwFactorySeed, minSize, maxSize )
self.lettersReduced = 'abcdefghijkmnpqrstwxyzABCDEFGHJKLMNPQRSTWXYZ'
self.digitsReduced = '23456789'
self.specialCharacters = '#%*+$'
def getPassword(self, pwSeed):
""" Create a new password from pwSeed. """
self.rand.seed( pwSeed + 'getPassword' + self._factorySeed ) # reset prf sequence
self.passwordSize = self.rand.randrange(self.minSize, self.maxSize+1)
password = ''
for i in range(self.passwordSize):
password = password + self.rand.choice(self.lettersReduced+self.digitsReduced)
return password
def passwordsForSize(self,pwSize):
return (len(self.lettersReduced)+len(self.digitsReduced))**pwSize
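# Illustrative usage sketch (seeds and sizes below are made-up example values):
#
#     factory = PasswordFactorySimple('my factory seed', minSize=10, maxSize=12)
#     pw1 = factory.getPassword('accountA')
#     pw2 = factory('accountA')    # __call__ delegates to getPassword
#     assert pw1 == pw2            # same seeds recreate the same password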
consonants_01 = 'bcdfghjklmnpqrstvwxz'
vowels_01 = 'aeiouy'
class PasswordFactoryReadable_01(PasswordFactory):
""" Readable passwords created by alternating consonate/vowel/consonate ... etc.
"""
def getPassword(self, pwSeed):
""" Create a new password. Also used to recover passwords by using same pwSeed """
#self.rand.seed( 'getPassword'+self.__factorySeed+pwSeed ) # reset prf sequence
self.passwordSize = self.rand.randrange(self.minSize, self.maxSize+1)
password = ''
for i in range(self.passwordSize):
if i == 0 :
password = password + self.rand.choice(consonants_01)
else:
if password[-1] in consonants_01 :
password = password + self.rand.choice(vowels_01)
else:
password = password + self.rand.choice(consonants_01)
return password
def passwordsForSize(self,pwSize):
return (len(vowels_01)**(pwSize/2))*(len(consonants_01)**ceil(pwSize/2))
| mit | -202,067,140,215,519,550 | 38.484211 | 94 | 0.647561 | false |
mirjalil/ml-visual-recognition | codes/logisticRegression.py | 1 | 3262 | import numpy as np
import pandas
import scipy, scipy.spatial
import sklearn
import sys
from sklearn import linear_model
from sklearn.metrics import precision_score, recall_score, f1_score
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument('train', help='Training Data')
parser.add_argument('labels', help='Training Labels')
parser.add_argument('test', help='Test Data')
parser.add_argument('data_cv', help='Data for CrossValidation')
parser.add_argument('label_cv', help='Labels for CrossValidation')
parser.add_argument('plab', type=int, help='The class to be predicted')
parser.add_argument('cost', type=float, help='The cost variable (C)')
parser.add_argument('out', help='Output file name')
args = parser.parse_args()
y_all = pandas.read_table(args.labels, header=None, sep=' ')
print(y_all.head())
ndim = pandas.read_table(args.train, sep=' ', header=None, nrows=3).shape[1]
featstat = pandas.read_csv('data/feat_stats.csv')
print(featstat.head())
# ## Logistic Regression
clf = linear_model.LogisticRegression(penalty='l2', dual=False, tol=0.00001, C=args.cost, \
fit_intercept=True, intercept_scaling=1, class_weight=None, \
random_state=None, solver='liblinear', max_iter=10000)
y = np.empty(shape=y_all.shape[0], dtype=int)
ic = args.plab
y[np.where(y_all[0] != ic)[0]] = -1
y[np.where(y_all[0] == ic)[0]] = 1
print('Training set: %d Pos: %d Neg: %d'%(y.shape[0], np.sum(y==1), np.sum(y==-1)))
chunks=500000
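    # The training data is streamed in chunks of `chunks` rows and standardised
    # with the precomputed feature statistics before fitting; note that the
    # `break` at the end of the inner loop means only the first chunk is
    # actually used to fit the classifier.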
for i in range(1):
sys.stdout.write('%d '%(i))
n = 0
for df in pandas.read_table(args.train, sep=' ', header=None, iterator=True, chunksize=chunks):
n0, n1 = n*chunks, (n+1)*chunks
if n1 > y.shape[0]:
                n1 = y.shape[0]
ysub = y[n0:n1]
#sys.stdout.write('%d (%d-%d) %d\t'%(n, n0, n1, ysub.shape[0]))
df = (df - featstat['mean']) / featstat['sigma']
clf.fit(df, ysub)
n += 1
break
### Reading cross-validation set
Xcv = pandas.read_table(args.data_cv, sep=' ', header=None)
print(ic, Xcv.shape)
ycv = pandas.read_table(args.label_cv, sep=' ', header=None)[0].values
ycv[np.where(ycv != ic)[0]] = -1
ycv[np.where(ycv == ic)[0]] = 1
print('CrossValidation %d %d for label=%d ==>\tPos: %d Neg: %d' \
%(Xcv.shape[0], ycv.shape[0], ic, np.sum(ycv == 1), np.sum(ycv == -1)))
ypred_cv = clf.predict(Xcv)
prec = precision_score(ycv, ypred_cv)
rec = recall_score(ycv, ypred_cv)
f1score = f1_score(ycv, ypred_cv)
print('Precision=%.3f Recall=%.3f F1Score=%.3f'%(prec, rec, f1score))
print('CrossVal: ==> TP+FP=%d \t TP+FN=%d'%(np.sum(ypred_cv == 1), np.sum(ycv == 1)))
n = 0
for Xtest in pandas.read_table(args.test, sep=' ', header=None, iterator=True, chunksize=10000):
ypred = clf.predict(Xtest)
print('TestSet part %d ==> pos-predicted=%d '%(n, np.sum(ypred == 1)))
if n==0:
mode='w'
else:
mode = 'a'
pandas.DataFrame({'pred':ypred}).to_csv(args.out, mode=mode, header='%.3f %.3f %.3f'%(prec, rec, f1score))
n += 1
if __name__ == '__main__':
main()
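# Example invocation (illustrative only; the file names are placeholders):
#   python logisticRegression.py train.dat train.lab test.dat cv.dat cv.lab 3 1.0 preds_class3.csv
# This trains a one-vs-rest logistic regression for class label 3 with C=1.0,
# reports precision/recall/F1 on the cross-validation split and writes the
# test-set predictions to preds_class3.csv.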
| apache-2.0 | -325,843,194,361,663,000 | 28.926606 | 107 | 0.598712 | false |
mitodl/bootcamp-ecommerce | cms/migrations/0025_add_resource_pages_settings.py | 1 | 2756 | # Generated by Django 2.2.13 on 2020-06-29 18:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("wagtailcore", "0045_assign_unlock_grouppagepermission"),
("cms", "0024_lettertemplatepage"),
]
operations = [
migrations.CreateModel(
name="ResourcePagesSettings",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"about_us_page",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="wagtailcore.Page",
),
),
(
"apply_page",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="wagtailcore.Page",
),
),
(
"bootcamps_programs_page",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="wagtailcore.Page",
),
),
(
"privacy_policy_page",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="wagtailcore.Page",
),
),
(
"site",
models.OneToOneField(
editable=False,
on_delete=django.db.models.deletion.CASCADE,
to="wagtailcore.Site",
),
),
(
"terms_of_service_page",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="wagtailcore.Page",
),
),
],
options={"verbose_name": "Resource Pages"},
)
]
| bsd-3-clause | -725,346,538,183,442,300 | 32.204819 | 69 | 0.359216 | false |
cedadev/ndg_security_common | ndg/security/common/openssl.py | 1 | 20390 | """OpenSSL utilities module - contains OpenSSLConfig class for
parsing OpenSSL configuration files
NERC Data Grid Project
"""
__author__ = "P J Kershaw"
__date__ = "08/02/07"
__copyright__ = "(C) 2009 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "[email protected]"
__revision__ = '$Id$'
import re
import os
from ConfigParser import SafeConfigParser
def m2_get_dn_field(dn, field_name, field_sep=None, name_val_sep=None):
'''Convenience utility for parsing fields from X.509 subject name returned
from M2Crypto API'''
if field_sep is None:
field_sep = ','
if name_val_sep is None:
name_val_sep = '='
for f in dn.split(field_sep):
name, val = f.strip().split(name_val_sep)
if name.upper() == field_name:
return val
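# Illustrative call (made-up values): with
#   dn = 'C=UK, O=eScience, OU=CLRC, CN=A N Other'
# m2_get_dn_field(dn, 'CN') returns 'A N Other', using the default ',' field
# separator and '=' name/value separator.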
def m2_get_cert_ext_values(cert, ext_name, field_sep=None, field_prefix=None):
'''Get subject alt names from M2Crypto.X509.X509 cert object -
return None if none found
e.g.
    ``m2_get_cert_ext_values(cert, 'subjectAltName', field_prefix="DNS:", field_sep=",")``
'''
if field_prefix is None:
field_prefix = '' # 'DNS:' for subject alt names prefix
    for i in range(cert.get_ext_count()):
ext = cert.get_ext_at(i)
if ext.get_name() == ext_name:
val = ext.get_value()
if field_sep is None:
yield val
else:
                for field in val.split(field_sep):
                    yield field.strip()[len(field_prefix):]
class X500DNError(Exception):
"""Exception handling for NDG X.500 DN class."""
class X500DN(object):
"Manipulation of X.500 Distinguished name / X.509 subject names"
    # Class attribute - look-up mapping long name attributes to their short
    # name equivalents
# * private *
SHORT_NAME_LUT = {
'commonName': 'CN',
'organisationalUnitName': 'OU',
'organisation': 'O',
'countryName': 'C',
'emailAddress': 'EMAILADDRESS',
'localityName': 'L',
'stateOrProvinceName': 'ST',
'streetAddress': 'STREET',
'domainComponent': 'DC',
'userid': 'UID'
}
SLASH_PARSER_RE_STR = '/(%s)=' % '|'.join(SHORT_NAME_LUT.keys() +
SHORT_NAME_LUT.values())
SLASH_PARSER_RE = re.compile(SLASH_PARSER_RE_STR)
COMMA_PARSER_RE_STR = '[,]?\s*(%s)=' % '|'.join(SHORT_NAME_LUT.keys() +
SHORT_NAME_LUT.values())
COMMA_PARSER_RE = re.compile(COMMA_PARSER_RE_STR)
def __init__(self, dn=None, separator=None):
"""Create a new X.500 Distinguished Name
@type dn: basestring
@param dn: initialise using a distinguished name string
@type separator: basestring
        @param separator: separator used to delimit dn fields - usually '/'
or ','. If dn is input and separator is omitted the separator
character will be automatically parsed from the dn string.
"""
# Private key data
self.__dat = {}.fromkeys(self.__class__.SHORT_NAME_LUT.values())
self.__separator = None
# Check for separator from input
if separator is not None:
if not isinstance(separator, basestring):
raise X500DNError("dn Separator must be a valid string")
separator_ = separator.lstrip()
# Check for single character but allow trailing space chars
if len(separator_) != 1:
raise X500DNError("dn separator must be a single character")
self.__separator = separator_
if dn is not None:
# Separator can be parsed from the input DN string - only attempt
# if no explicit separator was input
if self.__separator is None:
self.__separator = self.parse_separator(dn)
# Split Distinguished name string into constituent fields
self.deserialise(dn)
@classmethod
def from_string(cls, dn):
"""Convenience method for parsing DN string into a new instance
"""
return cls(dn=dn)
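    # Illustrative usage (example DN only; field order in the serialised
    # output follows the internal dictionary and is not guaranteed):
    #
    #     dn = X500DN.from_string('/C=UK/O=eScience/OU=CLRC/CN=A N Other')
    #     dn['CN']                      # -> 'A N Other'
    #     dn.serialise(separator=',')   # e.g. 'CN=A N Other,C=UK,...'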
def __repr__(self):
"""Give representation based on underlying dict object"""
return repr(self.__dat)
def __str__(self):
"""Behaviour for print and string statements - convert DN into
serialised format."""
return self.serialise()
def __eq__(self, x500dn):
"""Return true if the all the fields of the two DNs are equal"""
if not isinstance(x500dn, X500DN):
return False
return self.__dat.items() == x500dn.items()
def __ne__(self, x500dn):
"""Return true if the all the fields of the two DNs are equal"""
if not isinstance(x500dn, X500DN):
return False
return self.__dat.items() != x500dn.items()
def __delitem__(self, key):
"""Prevent keys from being deleted."""
raise NotImplementedError()
def __getitem__(self, key):
# Check input key
if self.__dat.has_key(key):
# key recognised
return self.__dat[key]
        elif X500DN.SHORT_NAME_LUT.has_key(key):
# key not recognised - but a long name version of the key may
# have been passed
shortName = X500DN.__shortNameLUT[key]
return self.__dat[shortName]
else:
# key not recognised as a short or long name version
raise KeyError('Key "' + key + '" not recognised for X500DN')
def __setitem__(self, key, item):
# Check input key
if self.__dat.has_key(key):
# key recognised
self.__dat[key] = item
        elif X500DN.SHORT_NAME_LUT.has_key(key):
            # key not recognised - but a long name version of the key may
            # have been passed
            shortName = X500DN.SHORT_NAME_LUT[key]
self.__dat[shortName] = item
else:
# key not recognised as a short or long name version
raise KeyError('Key "' + key + '" not recognised for X500DN')
def clear(self):
raise NotImplementedError()
def copy(self):
import copy
return copy.copy(self)
def keys(self):
return self.__dat.keys()
def items(self):
return self.__dat.items()
def values(self):
return self.__dat.values()
def has_key(self, key):
return self.__dat.has_key(key)
# 'in' operator
def __contains__(self, key):
return self.has_key(key)
def get(self, *arg):
return self.__dat.get(*arg)
def serialise(self, separator=None):
"""Combine fields in Distinguished Name into a single string."""
if separator:
if not isinstance(separator, basestring):
raise X500DNError("Separator must be a valid string")
else:
# Default to / if no separator is set
separator = '/'
# If using '/' then prepend DN with an initial '/' char
if separator == '/':
sDN = separator
else:
sDN = ''
dnList = []
for (key, val) in self.__dat.items():
if val:
if isinstance(val, tuple):
dnList += [separator.join(["%s=%s" % (key, valSub) \
for valSub in val])]
else:
dnList += ["%s=%s" % (key, val)]
sDN += separator.join(dnList)
return sDN
serialize = serialise
def deserialise(self, dn, separator=None):
"""Break up a DN string into it's constituent fields and use to
update the object's dictionary"""
if separator:
if not isinstance(separator, basestring):
raise X500DNError("Separator must be a valid string")
else:
separator = self.__separator
# If no separator has been set, parse if from the DN string
if separator is None:
separator = self.parse_separator(dn)
if separator == '/':
parserRe = self.__class__.SLASH_PARSER_RE
elif separator == ',':
parserRe = self.__class__.COMMA_PARSER_RE
else:
raise X500DNError("DN field separator %r not recognised" %
self.__separator)
try:
dnFields = parserRe.split(dn)
if len(dnFields) < 2:
raise X500DNError("Error parsing DN string: \"%s\"" % dn)
items = zip(dnFields[1::2], dnFields[2::2])
# Reset existing dictionary values
            self.__dat = dict.fromkeys(self.__dat, '')
# Strip leading and trailing space chars and convert into a
# dictionary
parsedDN = {}
for key, val in items:
key = key.strip()
if key in parsedDN:
if isinstance(parsedDN[key], tuple):
parsedDN[key] = tuple(list(parsedDN[key]) + [val])
else:
parsedDN[key] = (parsedDN[key], val)
else:
parsedDN[key] = val
# Copy matching DN fields
for key, val in parsedDN.items():
if (key not in self.__dat and
key not in self.__class__.SHORT_NAME_LUT):
raise X500DNError('Invalid field "%s" in input DN string' %
key)
                self[key] = val
except Exception, excep:
raise X500DNError("Error de-serialising DN \"%s\": %s" %
(dn, str(excep)))
deserialize = deserialise
def parse_separator(self, dn):
"""Attempt to parse the separator character from a given input
DN string. If not found, return None
        DN strings don't all use the same field separator e.g.
        /C=UK/O=eScience/OU=CLRC/L=DL/CN=AN Other
        CN=SUM Oneelse,L=Didcot, O=RAL,OU=SSTD
        This method isolates and identifies the separator character - in the
        above examples, '/' and ',' respectively"""
# Make a regular expression containing all the possible field
# identifiers with equal sign appended and 'or'ed together. \W should
        # match the separator which precedes the field name. \s* allows any
# whitespace between field name and field separator to be taken into
# account.
#
# The resulting match should be a list. The first character in each
# element in the list should be the field separator and should be the
# same
regExpr = '|'.join(['\W\s*'+i+'=' for i in self.__dat.keys()])
match = re.findall(regExpr, dn)
# In the first example above, the resulting match is:
# ['/C=', '/O=', '/OU=', '/L=']
# In each element the first character is the separator
sepList = [i[0:1] for i in match]
        # All separators should be the same character - return None if they
        # don't match or if no separator was found at all
        if sepList and not [i for i in sepList if i != sepList[0]]:
            return sepList[0]
else:
return None
@classmethod
def parse(cls, dn):
"""Convenience method to create an X500DN object from a DN string
@type dn: basestring
@param dn: Distinguished Name
"""
return cls(dn=dn)
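# Example usage of X500DN (sketch; the DN string below is illustrative only):
#
#     dn = X500DN.from_string('/C=UK/O=eScience/OU=CLRC/L=DL/CN=AN Other')
#     dn['CN']          # -> 'AN Other'
#     dn['commonName']  # long-name keys resolve to their short-name fields
#     str(dn)           # -> re-serialised '/'-separated form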
class OpenSSLConfigError(Exception):
"""Exceptions related to OpenSSLConfig class"""
class OpenSSLConfig(SafeConfigParser, object):
"""Wrapper to OpenSSL Configuration file to allow extraction of
required distinguished name used for making certificate requests
    @type _certReqDNParamName: list
    @cvar _certReqDNParamName: permissible keys for Distinguished Name. This
    is used in __setReqDN to check input
@type _caDirPat: string
@cvar _caDirPat: sub-directory path to CA config directory
@type __gridCASubDir: string
@cvar __gridCASubDir: sub-directory of globus user for CA settings"""
_certReqDNParamName = [
'C',
'serialNumber',
'organizationName',
'CN',
'SP',
'commonName',
'L',
'stateOrProvinceName',
'ST',
'emailAddress',
'O',
'localityName',
'GN',
'surname',
'OU',
'givenName',
'Email',
'organizationUnitName',
'SN'
]
_caDirPat = re.compile('\$dir')
__gridCASubDir = os.path.join(".globus", "simpleCA")
def __init__(self, filePath=None, caDir=None):
"""Initial OpenSSL configuration optionally setting a file path to
read from
@type filePath: string
@param filePath: path to OpenSSL configuration file
@type caDir: string
@param caDir: directory for SimpleCA. This is substituted for $dir
in OpenSSL config file where present. caDir can be left out in
which case the substitution is not done"""
SafeConfigParser.__init__(self)
self.__reqDN = None
self.__setFilePath(filePath)
# Set-up CA directory
self.setCADir(caDir)
def __setFilePath(self, filePath):
"""Set property method
@type filePath: string
@param filePath: path for OpenSSL configuration file"""
if filePath is not None:
if not isinstance(filePath, basestring):
raise OpenSSLConfigError, \
"Input OpenSSL config file path must be a string"
try:
if not os.access(filePath, os.R_OK):
raise OpenSSLConfigError, "not found or no read access"
except Exception, e:
raise OpenSSLConfigError, \
"OpenSSL config file path is not valid: \"%s\": %s" % \
(filePath, str(e))
self.__filePath = filePath
def __getFilePath(self):
"""Get property method
@rtype: string
@return: file path for OpenSSL configuration file"""
return self.__filePath
filePath = property(fget=__getFilePath,
fset=__setFilePath,
doc="file path for configuration file")
def setCADir(self, caDir):
"""Set property method
@type caDir: string
@param caDir: path for OpenSSL configuration file"""
if caDir is None:
# Try to set default from 'HOME' env variable
homeDir = os.environ.get('HOME')
if homeDir:
self.__caDir = os.path.join(os.environ['HOME'],
self.__gridCASubDir)
else:
self.__caDir = None
else:
if not isinstance(caDir, basestring):
raise OpenSSLConfigError, \
"Input OpenSSL CA directory path must be a string"
try:
if not os.access(caDir, os.R_OK):
raise OpenSSLConfigError, "not found or no read access"
except Exception, e:
raise OpenSSLConfigError, \
"OpenSSL CA directory path is not valid: \"%s\": %s" % \
(caDir, str(e))
self.__caDir = caDir
def __getCADir(self):
"""Get property method
@rtype caDir: string
@return caDir: directory path for CA configuration files"""
return self.__caDir
caDir = property(fget=__getCADir,
fset=setCADir,
doc="directory path for CA configuration files")
def __getReqDN(self):
"""Get property method
@rtype reqDN: dict
@return reqDN: Distinguished Name for certificate request"""
return self.__reqDN
def __setReqDN(self, reqDN):
"""Set property method
@type reqDN: dict
@param reqDN: Distinguished Name for certificate request"""
if not isinstance(reqDN, dict):
raise AttributeError, "Distinguished Name must be dict type"
        invalidKw = [k for k in reqDN \
                     if k not in self.__class__._certReqDNParamName]
if invalidKw:
raise AttributeError, \
"Invalid certificate request keyword(s): %s. Valid keywords are: %s" % \
(', '.join(invalidKw), ', '.join(self.__class__._certReqDNParamName))
self.__reqDN = reqDN
reqDN = property(fget=__getReqDN,
fset=__setReqDN,
doc="Distinguished Name for certificate request")
def read(self):
"""Override base class version to avoid parsing error with the first
'RANDFILE = ...' part of the openssl file. Also, reformat _sections
to allow for the style of SSL config files where section headings can
have spaces either side of the brackets e.g.
[ sectionName ]
and comments can occur on the same line as an option e.g.
option = blah # This is option blah
        """
try:
file_ = open(self.__filePath)
fileTxt = file_.read()
except Exception, e:
raise OpenSSLConfigError, \
"Error reading OpenSSL config file \"%s\": %s" % \
(self.__filePath, str(e))
idx = re.search('\[\s*\w*\s*\]', fileTxt).span()[0]
file_.seek(idx)
SafeConfigParser.readfp(self, file_)
        # Filter section names and remove comments from options
        for section, val in self._sections.items():
            sectionName = section.strip()
            filteredVal = dict([(opt, self._filtOptVal(optVal))
                                for opt, optVal in val.items()])
            # Only delete the original key if stripping actually changed the
            # section name, otherwise the entry just set would be removed
            if sectionName != section:
                del self._sections[section]
            self._sections[sectionName] = filteredVal
self._set_required_dn_params()
def _filtOptVal(self, optVal):
"""For option value, filter out comments and substitute $dir with
the CA directory location
@type optVal: string
@param optVal: option value"""
filtVal = optVal.split('#')[0].strip()
if self.__caDir:
# Replace $dir with CA directory path
return self.__class__._caDirPat.sub(self.__caDir, filtVal)
else:
# Leave $dir in place as no CA directory has been set
return filtVal
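    # Example (sketch): with caDir set to '/home/user/.globus/simpleCA', an
    # option value of '$dir/certs  # CA certificates' filters down to
    # '/home/user/.globus/simpleCA/certs'.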
def readfp(self, fp):
"""Set to not implemented as using a file object could be problematic
given read() has to seek ahead to the first actual section to avoid
parsing errors"""
        raise NotImplementedError, "Use read method instead"
def _set_required_dn_params(self):
"""Set Required DN parameters from the configuration file returning
them in a dictionary"""
# Nb. Match over line boundaries
try:
self.__reqDN = \
{
'O': self.get('req_distinguished_name',
'0.organizationName_default'),
'OU': self.get('req_distinguished_name',
'0.organizationalUnitName_default')
}
except Exception, e:
raise OpenSSLConfigError, \
'Error setting content of Distinguished Name from file "%s": %s'%\
                (self.__filePath, str(e))
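# Example usage of OpenSSLConfig (sketch; the paths are illustrative, and the
# config file is assumed to define 0.organizationName_default /
# 0.organizationalUnitName_default in its [req_distinguished_name] section):
#
#     ssl_config = OpenSSLConfig(filePath='/etc/ssl/openssl.cnf',
#                                caDir='/home/user/.globus/simpleCA')
#     ssl_config.read()
#     ssl_config.reqDN   # -> {'O': ..., 'OU': ...}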
churchlab/millstone | genome_designer/conf/global_settings.py | 1 | 16756
# Django settings for genome_designer project.
import logging
import os
from django.conf import global_settings
# EntrezGene wants an email to use its API.
EMAIL = "[email protected]"
# The absolute path of the project directory (the parent of this settings
# file's directory). Useful for settings that require absolute paths like
# templates.
PWD = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')
# Absolute path to the third-party tools dir where setup.py stores
# downloaded tools that are used internally.
TOOLS_DIR = os.path.join(PWD, 'tools')
# Django DEBUG flag.
DEBUG = True
# Whether to run the app in "Demo" mode (no entity modify).
# Setting DEMO_MODE = True does the following:
# * Automatic login to with demo account (redirects regular login page)
# * Views restricted to conf/demo_settings.DEMO_SAFE_VIEWS
DEMO_MODE = False
DEMO_MODE__USERNAME = 'gmcdev'
DEMO_MODE__PASSWORD = 'g3n3d3z'
# Default URL of demo splash page to include.
DEMO_SPLASH = 'demo_splash.html'
# A boolean that turns on/off template debug mode.
# https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'gdv2db',
'USER': 'gdv2dbuser',
'PASSWORD': 'g3n3d3z',
'HOST': 'localhost',
'PORT': '',
'OS_USER': 'postgres'
}
# Uncomment to debug with Sqlite.
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': 'temp.db',
# 'USER': '',
# 'PASSWORD': '',
# 'HOST': '',
# 'PORT': '',
# }
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#allowed-hosts
ALLOWED_HOSTS = [
'127.0.0.1',
'localhost',
]
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PWD, 'temp_data')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Generate SECRET_KEY. This stores the key in secret_key.py, which should
# not be committed to a public repository.
try:
from secret_key import *
except ImportError:
def _generate_secret_key(dest):
from django.utils.crypto import get_random_string
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
new_secret_key = get_random_string(50, chars)
with open(dest, 'w') as secret_key_fh:
secret_key_fh.write('SECRET_KEY = \'%s\'' % new_secret_key)
SETTINGS_DIR = os.path.abspath(os.path.dirname(__file__))
_generate_secret_key(os.path.join(SETTINGS_DIR, 'secret_key.py'))
from secret_key import *
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'main.middleware.DisabledInDemoModeMiddleware', # only active when DEMO_MODE = True
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'genome_designer.urls'
TEMPLATE_DIRS = (
os.path.join(PWD, 'main/templates')
)
INSTALLED_APPS = (
# django built-ins
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# async queue
'djcelery',
# third-party apps
'registration',
# our apps
'main',
# database migrations,
'south',
# Testing
'django_nose',
'djcelery_testworker'
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'console':{
'level':'DEBUG',
'class':'logging.StreamHandler',
'formatter': 'simple'
},
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': 'default.log', # override in local_settings.py
'formatter': 'simple'
},
},
'loggers': {
'django': {
'handlers':['console'],
'level':'INFO',
'propagate': True,
},
's3': {
'handlers':['console'],
'level':'INFO',
'propagate': True,
},
# Uncomment to see SQL logs on the console.
# 'django.db.backends': {
# 'handlers':['console'],
# 'level':'DEBUG',
# 'propagate': False,
# },
'debug_logger': {
'handlers':['file'],
'level':'DEBUG',
'propagate': False,
},
}
}
###############################################################################
# Custom Config
###############################################################################
# We use a separate UserProfile from the build-in Django User model so that we
# have the option of extending it.
AUTH_PROFILE_MODULE = 'main.UserProfile'
# Custom template context processors.
TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
'main.context_processors.common_data',
'main.context_processors.aws_settings',
'main.context_processors.demo_settings',
)
###############################################################################
# Registration / Accounts
###############################################################################
# django-registration
# One-week activation window.
ACCOUNT_ACTIVATION_DAYS = 7
LOGIN_REDIRECT_URL = '/'
# Override to True to enable multiple users to register.
# With this as False, the initial user (i.e. admin) can:
#     1) Clone our amazon ami
#     2) Go to the url and register
# and then no one else can register,
# all w/o requiring the user to touch settings.py
REGISTRATION_OPEN = False
###############################################################################
# Django Celery - async task queue management
###############################################################################
import djcelery
djcelery.setup_loader()
# RabbitMQ settings
BROKER_URL = 'amqp://guest:guest@localhost:5672//'
# Must register any modules with celery tasks here.
CELERY_IMPORTS = (
'pipeline.pipeline_runner',
'pipeline.read_alignment',
'pipeline.variant_calling',
'pipeline.variant_calling.freebayes',
'utils.import_util',
'genome_finish.assembly_runner'
)
# When True, forces synchronous behavior so that it's not necessary
# to have a celery server running.
CELERY_ALWAYS_EAGER = False
###############################################################################
# External tools
###############################################################################
BASH_PATH = '/bin/bash'
###############################################################################
# JBrowse
###############################################################################
# Root of the JBrowse installation.
JBROWSE_ROOT = os.path.abspath(os.path.join(PWD, '../jbrowse'))
# Name of the symlink from within JBrowse to the data dir.
# See JBROWSE_DATA_URL_ROOT description below.
JBROWSE_DATA_SYMLINK_NAME = 'gd_data'
# Full path to the JBrowse symlink (links back to the app data dir).
JBROWSE_DATA_SYMLINK_PATH = os.path.join(JBROWSE_ROOT,
JBROWSE_DATA_SYMLINK_NAME)
# The url root to data that JBrowse displays.
# The app admin should create a symlink from the actual data root to this
# location inside of the jbrowse/ dir. For example, the way we display bam
# files is configuring the trackList.json file with a track with the following
# key-value: "urlTemplate" : "/jbrowse/gd_data/users/8fc1f831/projects/58a62c7d/genomes/8dc829ec/align.bam"
JBROWSE_DATA_URL_ROOT= '/jbrowse/' + JBROWSE_DATA_SYMLINK_NAME + '/'
# Set to True if you want to force JBrowse links to be from localhost and thus
# go through nginx. Default of False causes JBrowse to serve through Django in
# dev.
DEBUG_FORCE_JBROWSE_NGINX = False
# How big of a window to view when looking at a single pos in JBrowse
JBROWSE_DEFAULT_VIEW_WINDOW = 100
# What Genbank types to display on Jbrowse
# got this default list by doing:
# grep -Po '^ [^0-9 ]+ ' mg1655.genbank | sort | uniq -c | sort -n
# and skipping gene, mat_peptide, source, and remark (not in that list)
JBROWSE_GBK_TYPES_TO_DISPLAY = ','.join([
'CDS','repeat_region','tRNA','ncRNA',
'mobile_element','misc_feature','tmRNA'])
JBROWSE_DEFAULT_TRACKS = ['DNA','gbk']
# Number of BAM alignment tracks to display - if more than this, then
# display none and warn on mouseover.
JBROWSE_MAX_ALIGN_TRACKS = 5
# Number of BAM coverage tracks to display - if more than this, then
# display none and warn on mouseover.
JBROWSE_MAX_COVERAGE_TRACKS = 10
###############################################################################
# Variant Calling
###############################################################################
ENABLED_VARIANT_CALLERS = [
'freebayes',
# 'lumpy',
# 'pindel',
]
# Path to snpeff java jar.
SNPEFF_JAR_PATH = os.path.abspath(os.path.join(PWD, 'tools','snpEff',
'snpEff.jar'))
# Path to snpeff config template.
SNPEFF_CFG_TEMPLATE_PATH = os.path.join(PWD, 'main',
'templates','snpeff.tmpl.config')
# Upstream/downstream interval within which SNPs near a gene are tagged. Needs
# to be smaller than the default for bacterial genomes.
SNPEFF_UD_INTERVAL_LENGTH = 50
# Run freebayes in parallel across multiple smaller regions, then merge the
# results.
FREEBAYES_PARALLEL = True
# Size of regions to split into when using Freebayes in
# parallel mode. If your genome is smaller than this number
# multiplied by the number of CPUs, you will not be using the full
# capabilities of parallelization.
# TODO: perhaps this should be determined dynamically based on genome size.
FREEBAYES_REGION_SIZE = 200000
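# Worked example (sketch): with FREEBAYES_REGION_SIZE = 200000 and 8 worker
# CPUs, a genome needs to be at least 200000 * 8 = 1.6 Mb for every core to get
# its own region; a 4.6 Mb E. coli genome would be split into ~23 regions.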
# SNPEff can be multithreaded but for simplicity, let's always keep this at 1.
SNPEFF_THREADS = 1
# If we're debugging snpeff, print the output
SNPEFF_BUILD_DEBUG = True
# Names of SnpEff summary files, which we want to delete after running.
SNPEFF_SUMMARY_FILES = ['snpEff_genes.txt', 'snpEff_summary.html']
###############################################################################
# Callable Loci
###############################################################################
# Minimum mapping quality (out of 60) to be a 'good' call
CL__MIN_MAPQ = 20
# Maximum depth before complaining of excessive depth
CL__MAX_DEPTH = 1000
# Minimum depth for low coverage
CL__MIN_DEPTH = 4
# minimum depth to allow for calling low map quality
CL__MIN_LOWMAPQ_DEPTH = 8
# minimum read fraction that has low map quality for call
CL__MAX_LOWMAP_FRAC = 0.5
# Distance between adjacent features, below which to merge them
CL__MERGE_DIST = 25
###############################################################################
# Coverage-based Deletion Detection
###############################################################################
COVDEL_CUTOFF = 5
COVDEL_CUTOFF_PCT = 0.1
# Region Smoothing:
# no region smoothing if the coverage between regions is greater than this
COVDEL_SMOOTHING_COV_CUTOFF = 3
# coverage smoothing decay rate: join if coverage between regions is less than:
# mean genome coverage * 2 ^ (-dist_between_deletions / decay_half_life)
# (or less than SMOOTHING_COV_CUTOFF)
COVDEL_EXP_COV_DECAY_HALF_LIFE = 500
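# Worked example (sketch): with a mean genome coverage of 50x and the default
# half-life of 500 bp, two candidate deletions 1000 bp apart are only joined if
# the coverage between them is below 50 * 2 ** (-1000 / 500.0) = 12.5x (or
# below COVDEL_SMOOTHING_COV_CUTOFF).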
# Automatically join if both regions are large (>LARGE_DEL_MIN_DEL_LEN) and
# distance between is small (<LARGE_DEL_MAX_SMOOTH_DIST)
COVDEL_LARGE_DEL_MAX_SMOOTH_DIST = 1000
COVDEL_LARGE_DEL_MIN_DEL_LEN = 2000
# Throw away coverage-based deletions below this size cutoff
# (they should be found by SNV tools like Freebayes instead)
COVDEL_SMOOTHED_SIZE_CUTOFF = 15
###############################################################################
# Feature Flags
###############################################################################
FLAG__PRINT_MAGE_OLIGOS_ENABLED = True
FLAG__GENERATE_NEW_REFERENCE_GENOME_ENABLED = True
FLAG__GENOME_FINISHING_ENABLED = True
###############################################################################
# S3
###############################################################################
# Check if we are running on an EC2 instance.
# Ref: http://stackoverflow.com/questions/10907418/how-to-check-application-runs-in-aws-ec2-instance
def is_ec2():
import socket
try:
socket.gethostbyname('instance-data.ec2.internal.')
return True
except socket.gaierror:
return False
RUNNING_ON_EC2 = is_ec2()
# Allows user to create an S3 backed project.
# Set this to False if you want to run on EC2, but not allow
# data to be stored to S3.
# S3_ENABLED = RUNNING_ON_EC2 or False
# NOTE: For now, default is no S3 support.
S3_ENABLED = False
# Don't perform any API call that changes anything on S3.
S3_DRY_RUN = False
# Get them from https://console.aws.amazon.com/iam/home?#security_credential
AWS_CLIENT_SECRET_KEY = ''
AWS_SERVER_PUBLIC_KEY = ''
AWS_SERVER_SECRET_KEY = ''
# Name of S3 bucket to which all files will be uploaded
S3_BUCKET = 'genome-designer-upload'
# Run S3 tests on S3_TEST_BUCKET; it's fine to use S3_BUCKET for S3_TEST_BUCKET,
# as all test operations will run inside s3://S3_TEST_BUCKET/__tests__
S3_TEST_BUCKET = S3_BUCKET
# Maximum file size for user upload
S3_FILE_MAX_SIZE = 1024 ** 3 # 1GB
###############################################################################
# Testing
###############################################################################
TEST_RUNNER = 'test_suite_runner.CustomTestSuiteRunner'
TEST_FILESYSTEM_DIR = os.path.join(PWD, 'temp_test_data')
TEST_S3 = False
# Don't show south DEBUG logs.
south_logger = logging.getLogger('south')
south_logger.setLevel(logging.INFO)
###############################################################################
# Code Profiling
###############################################################################
# Directory where profiler logs will be stored. See README.md.
PROFILE_LOG_BASE = None