walterbender/portfolio | odf/draw.py
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from .namespaces import DRAWNS, STYLENS, PRESENTATIONNS
from .element import Element
def StyleRefElement(stylename=None, classnames=None, **args):
qattrs = {}
if stylename is not None:
f = stylename.getAttrNS(STYLENS, 'family')
if f == 'graphic':
qattrs[(DRAWNS, u'style-name')] = stylename
elif f == 'presentation':
qattrs[(PRESENTATIONNS, u'style-name')] = stylename
else:
raise ValueError(
"Style's family must be either 'graphic' or 'presentation'")
if classnames is not None:
f = classnames[0].getAttrNS(STYLENS, 'family')
if f == 'graphic':
qattrs[(DRAWNS, u'class-names')] = classnames
elif f == 'presentation':
qattrs[(PRESENTATIONNS, u'class-names')] = classnames
else:
raise ValueError(
"Style's family must be either 'graphic' or 'presentation'")
return Element(qattributes=qattrs, **args)
def DrawElement(name=None, **args):
e = Element(name=name, **args)
if 'displayname' not in args:
e.setAttrNS(DRAWNS, 'display-name', name)
return e
# Autogenerated
def A(**args):
return Element(qname=(DRAWNS, 'a'), **args)
def Applet(**args):
return Element(qname=(DRAWNS, 'applet'), **args)
def AreaCircle(**args):
return Element(qname=(DRAWNS, 'area-circle'), **args)
def AreaPolygon(**args):
return Element(qname=(DRAWNS, 'area-polygon'), **args)
def AreaRectangle(**args):
return Element(qname=(DRAWNS, 'area-rectangle'), **args)
def Caption(**args):
return StyleRefElement(qname=(DRAWNS, 'caption'), **args)
def Circle(**args):
return StyleRefElement(qname=(DRAWNS, 'circle'), **args)
def Connector(**args):
return StyleRefElement(qname=(DRAWNS, 'connector'), **args)
def ContourPath(**args):
return Element(qname=(DRAWNS, 'contour-path'), **args)
def ContourPolygon(**args):
return Element(qname=(DRAWNS, 'contour-polygon'), **args)
def Control(**args):
return StyleRefElement(qname=(DRAWNS, 'control'), **args)
def CustomShape(**args):
return StyleRefElement(qname=(DRAWNS, 'custom-shape'), **args)
def Ellipse(**args):
return StyleRefElement(qname=(DRAWNS, 'ellipse'), **args)
def EnhancedGeometry(**args):
return Element(qname=(DRAWNS, 'enhanced-geometry'), **args)
def Equation(**args):
return Element(qname=(DRAWNS, 'equation'), **args)
def FillImage(**args):
return DrawElement(qname=(DRAWNS, 'fill-image'), **args)
def FloatingFrame(**args):
return Element(qname=(DRAWNS, 'floating-frame'), **args)
def Frame(**args):
return StyleRefElement(qname=(DRAWNS, 'frame'), **args)
def G(**args):
return StyleRefElement(qname=(DRAWNS, 'g'), **args)
def GluePoint(**args):
return Element(qname=(DRAWNS, 'glue-point'), **args)
def Gradient(**args):
return DrawElement(qname=(DRAWNS, 'gradient'), **args)
def Handle(**args):
return Element(qname=(DRAWNS, 'handle'), **args)
def Hatch(**args):
return DrawElement(qname=(DRAWNS, 'hatch'), **args)
def Image(**args):
return Element(qname=(DRAWNS, 'image'), **args)
def ImageMap(**args):
return Element(qname=(DRAWNS, 'image-map'), **args)
def Layer(**args):
return Element(qname=(DRAWNS, 'layer'), **args)
def LayerSet(**args):
return Element(qname=(DRAWNS, 'layer-set'), **args)
def Line(**args):
return StyleRefElement(qname=(DRAWNS, 'line'), **args)
def Marker(**args):
return DrawElement(qname=(DRAWNS, 'marker'), **args)
def Measure(**args):
return StyleRefElement(qname=(DRAWNS, 'measure'), **args)
def Object(**args):
return Element(qname=(DRAWNS, 'object'), **args)
def ObjectOle(**args):
return Element(qname=(DRAWNS, 'object-ole'), **args)
def Opacity(**args):
return DrawElement(qname=(DRAWNS, 'opacity'), **args)
def Page(**args):
return Element(qname=(DRAWNS, 'page'), **args)
def PageThumbnail(**args):
return StyleRefElement(qname=(DRAWNS, 'page-thumbnail'), **args)
def Param(**args):
return Element(qname=(DRAWNS, 'param'), **args)
def Path(**args):
return StyleRefElement(qname=(DRAWNS, 'path'), **args)
def Plugin(**args):
return Element(qname=(DRAWNS, 'plugin'), **args)
def Polygon(**args):
return StyleRefElement(qname=(DRAWNS, 'polygon'), **args)
def Polyline(**args):
return StyleRefElement(qname=(DRAWNS, 'polyline'), **args)
def Rect(**args):
return StyleRefElement(qname=(DRAWNS, 'rect'), **args)
def RegularPolygon(**args):
return StyleRefElement(qname=(DRAWNS, 'regular-polygon'), **args)
def StrokeDash(**args):
return DrawElement(qname=(DRAWNS, 'stroke-dash'), **args)
def TextBox(**args):
return Element(qname=(DRAWNS, 'text-box'), **args)
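# Illustrative usage sketch (not part of the original module): the element
# constructors above are normally combined with the rest of the odfpy package,
# e.g. to put a draw:text-box inside a draw:frame in a text document.  The
# imports, sizes and file name below are example values and assume the
# standard odfpy layout (odf.opendocument, odf.text, odf.draw).
#
#     from odf.opendocument import OpenDocumentText
#     from odf.text import P
#     from odf.draw import Frame, TextBox
#
#     doc = OpenDocumentText()
#     frame = Frame(width="5cm", height="3cm", anchortype="paragraph")
#     textbox = TextBox()
#     textbox.addElement(P(text="Hello from a text box"))
#     frame.addElement(textbox)
#     paragraph = P()
#     paragraph.addElement(frame)
#     doc.text.addElement(paragraph)
#     doc.save("textbox-example", True)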
laiqiqi886/kbengine | kbe/src/lib/python/Lib/concurrent/futures/thread.py
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ThreadPoolExecutor."""
__author__ = 'Brian Quinlan ([email protected])'
import atexit
from concurrent.futures import _base
import queue
import threading
import weakref
# Workers are created as daemon threads. This is done to allow the interpreter
# to exit when there are still idle threads in a ThreadPoolExecutor's thread
# pool (i.e. shutdown() was not called). However, allowing workers to die with
# the interpreter has two undesirable properties:
# - The workers would still be running during interpreter shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads finish.
_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False
def _python_exit():
global _shutdown
_shutdown = True
items = list(_threads_queues.items())
for t, q in items:
q.put(None)
for t, q in items:
t.join()
atexit.register(_python_exit)
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
def run(self):
if not self.future.set_running_or_notify_cancel():
return
try:
result = self.fn(*self.args, **self.kwargs)
except BaseException as e:
self.future.set_exception(e)
else:
self.future.set_result(result)
def _worker(executor_reference, work_queue):
try:
while True:
work_item = work_queue.get(block=True)
if work_item is not None:
work_item.run()
# Delete references to object. See issue16284
del work_item
continue
executor = executor_reference()
# Exit if:
# - The interpreter is shutting down OR
# - The executor that owns the worker has been collected OR
# - The executor that owns the worker has been shutdown.
if _shutdown or executor is None or executor._shutdown:
# Notice other workers
work_queue.put(None)
return
del executor
except BaseException:
_base.LOGGER.critical('Exception in worker', exc_info=True)
class ThreadPoolExecutor(_base.Executor):
def __init__(self, max_workers):
"""Initializes a new ThreadPoolExecutor instance.
Args:
max_workers: The maximum number of threads that can be used to
execute the given calls.
"""
self._max_workers = max_workers
self._work_queue = queue.Queue()
self._threads = set()
self._shutdown = False
self._shutdown_lock = threading.Lock()
def submit(self, fn, *args, **kwargs):
with self._shutdown_lock:
if self._shutdown:
raise RuntimeError('cannot schedule new futures after shutdown')
f = _base.Future()
w = _WorkItem(f, fn, args, kwargs)
self._work_queue.put(w)
self._adjust_thread_count()
return f
submit.__doc__ = _base.Executor.submit.__doc__
def _adjust_thread_count(self):
# When the executor gets lost, the weakref callback will wake up
# the worker threads.
def weakref_cb(_, q=self._work_queue):
q.put(None)
# TODO(bquinlan): Should avoid creating new threads if there are more
# idle threads than items in the work queue.
if len(self._threads) < self._max_workers:
t = threading.Thread(target=_worker,
args=(weakref.ref(self, weakref_cb),
self._work_queue))
t.daemon = True
t.start()
self._threads.add(t)
_threads_queues[t] = self._work_queue
def shutdown(self, wait=True):
with self._shutdown_lock:
self._shutdown = True
self._work_queue.put(None)
if wait:
for t in self._threads:
t.join()
shutdown.__doc__ = _base.Executor.shutdown.__doc__
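# Illustrative usage sketch (not part of the original module): submit a few
# callables to a ThreadPoolExecutor and collect their results.  The helper
# function and the worker count below are arbitrary example values.
if __name__ == '__main__':
    import time

    def slow_square(n):
        time.sleep(0.1)  # simulate blocking work
        return n * n

    # The executor is used as a context manager so shutdown(wait=True) is
    # called automatically when the block exits.
    with ThreadPoolExecutor(max_workers=4) as executor:
        futures = [executor.submit(slow_square, n) for n in range(8)]
        print([f.result() for f in futures])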
TravisCG/SI_scripts | samerec.py
#!/usr/bin/python
"""
Read a pair of fastq files and filter out reads which IDs not found in both files
"""
import sys
fq1 = open(sys.argv[1])
fq2 = open(sys.argv[2])
out1 = open(sys.argv[3], "w")
out2 = open(sys.argv[4], "w")
stack1 = dict()
stack2 = dict()
counter = 0
same = False
while True:
read1 = fq1.readline()
read2 = fq2.readline()
if not read1 and not read2:
break
#TODO remove /1 and /2 from the end of the line
if counter % 4 == 0:
same = False
if read1 == read2:
same = True
else:
id1 = read1
id2 = read2
for k in stack1.keys():
if k in stack1 and k in stack2:
out1.write(k)
out2.write(k)
out1.write("".join(stack1[k]))
out2.write("".join(stack2[k]))
del stack1[k]
del stack2[k]
else:
if not same:
stack1[id1] = list()
stack2[id2] = list()
stack1[id1].append(read1)
stack2[id2].append(read2)
if same:
out1.write(read1)
out2.write(read2)
counter += 1
if counter % 1000000 == 0:
print counter
if k in stack1 and k in stack2:
out1.write(k)
out2.write(k)
out1.write("".join(stack1[k]))
out2.write("".join(stack2[k]))
del stack1[k]
del stack2[k]
out1.close()
out2.close()
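# Example invocation (illustrative; the file names are placeholders):
#   python samerec.py reads_1.fastq reads_2.fastq paired_1.fastq paired_2.fastq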
firerszd/kbengine | kbe/src/lib/python/Lib/tkinter/test/test_ttk/test_widgets.py
import unittest
import tkinter
from tkinter import ttk
from test.support import requires
import sys
from tkinter.test.test_ttk.test_functions import MockTclObj
from tkinter.test.support import (AbstractTkTest, tcl_version, get_tk_patchlevel,
simulate_mouse_click)
from tkinter.test.widget_tests import (add_standard_options, noconv,
AbstractWidgetTest, StandardOptionsTests, IntegerSizeTests, PixelSizeTests,
setUpModule)
requires('gui')
class StandardTtkOptionsTests(StandardOptionsTests):
def test_class(self):
widget = self.create()
self.assertEqual(widget['class'], '')
errmsg='attempt to change read-only option'
if get_tk_patchlevel() < (8, 6, 0): # actually this was changed in 8.6b3
errmsg='Attempt to change read-only option'
self.checkInvalidParam(widget, 'class', 'Foo', errmsg=errmsg)
widget2 = self.create(class_='Foo')
self.assertEqual(widget2['class'], 'Foo')
def test_padding(self):
widget = self.create()
self.checkParam(widget, 'padding', 0, expected=('0',))
self.checkParam(widget, 'padding', 5, expected=('5',))
self.checkParam(widget, 'padding', (5, 6), expected=('5', '6'))
self.checkParam(widget, 'padding', (5, 6, 7),
expected=('5', '6', '7'))
self.checkParam(widget, 'padding', (5, 6, 7, 8),
expected=('5', '6', '7', '8'))
self.checkParam(widget, 'padding', ('5p', '6p', '7p', '8p'))
self.checkParam(widget, 'padding', (), expected='')
def test_style(self):
widget = self.create()
self.assertEqual(widget['style'], '')
errmsg = 'Layout Foo not found'
if hasattr(self, 'default_orient'):
errmsg = ('Layout %s.Foo not found' %
getattr(self, 'default_orient').title())
self.checkInvalidParam(widget, 'style', 'Foo',
errmsg=errmsg)
widget2 = self.create(class_='Foo')
self.assertEqual(widget2['class'], 'Foo')
# XXX
pass
class WidgetTest(AbstractTkTest, unittest.TestCase):
"""Tests methods available in every ttk widget."""
def setUp(self):
super().setUp()
self.widget = ttk.Button(self.root, width=0, text="Text")
self.widget.pack()
self.widget.wait_visibility()
def test_identify(self):
self.widget.update_idletasks()
self.assertEqual(self.widget.identify(
int(self.widget.winfo_width() / 2),
int(self.widget.winfo_height() / 2)
), "label")
self.assertEqual(self.widget.identify(-1, -1), "")
self.assertRaises(tkinter.TclError, self.widget.identify, None, 5)
self.assertRaises(tkinter.TclError, self.widget.identify, 5, None)
self.assertRaises(tkinter.TclError, self.widget.identify, 5, '')
def test_widget_state(self):
# XXX not sure about the portability of all these tests
self.assertEqual(self.widget.state(), ())
self.assertEqual(self.widget.instate(['!disabled']), True)
# changing from !disabled to disabled
self.assertEqual(self.widget.state(['disabled']), ('!disabled', ))
# no state change
self.assertEqual(self.widget.state(['disabled']), ())
# change back to !disable but also active
self.assertEqual(self.widget.state(['!disabled', 'active']),
('!active', 'disabled'))
# no state changes, again
self.assertEqual(self.widget.state(['!disabled', 'active']), ())
self.assertEqual(self.widget.state(['active', '!disabled']), ())
def test_cb(arg1, **kw):
return arg1, kw
self.assertEqual(self.widget.instate(['!disabled'],
test_cb, "hi", **{"msg": "there"}),
('hi', {'msg': 'there'}))
# attempt to set invalid statespec
currstate = self.widget.state()
self.assertRaises(tkinter.TclError, self.widget.instate,
['badstate'])
self.assertRaises(tkinter.TclError, self.widget.instate,
['disabled', 'badstate'])
# verify that widget didn't change its state
self.assertEqual(currstate, self.widget.state())
# ensuring that passing None as state doesn't modify current state
self.widget.state(['active', '!disabled'])
self.assertEqual(self.widget.state(), ('active', ))
class AbstractToplevelTest(AbstractWidgetTest, PixelSizeTests):
_conv_pixels = noconv
@add_standard_options(StandardTtkOptionsTests)
class FrameTest(AbstractToplevelTest, unittest.TestCase):
OPTIONS = (
'borderwidth', 'class', 'cursor', 'height',
'padding', 'relief', 'style', 'takefocus',
'width',
)
def create(self, **kwargs):
return ttk.Frame(self.root, **kwargs)
@add_standard_options(StandardTtkOptionsTests)
class LabelFrameTest(AbstractToplevelTest, unittest.TestCase):
OPTIONS = (
'borderwidth', 'class', 'cursor', 'height',
'labelanchor', 'labelwidget',
'padding', 'relief', 'style', 'takefocus',
'text', 'underline', 'width',
)
def create(self, **kwargs):
return ttk.LabelFrame(self.root, **kwargs)
def test_labelanchor(self):
widget = self.create()
self.checkEnumParam(widget, 'labelanchor',
'e', 'en', 'es', 'n', 'ne', 'nw', 's', 'se', 'sw', 'w', 'wn', 'ws',
errmsg='Bad label anchor specification {}')
self.checkInvalidParam(widget, 'labelanchor', 'center')
def test_labelwidget(self):
widget = self.create()
label = ttk.Label(self.root, text='Mupp', name='foo')
self.checkParam(widget, 'labelwidget', label, expected='.foo')
label.destroy()
class AbstractLabelTest(AbstractWidgetTest):
def checkImageParam(self, widget, name):
image = tkinter.PhotoImage(master=self.root, name='image1')
image2 = tkinter.PhotoImage(master=self.root, name='image2')
self.checkParam(widget, name, image, expected=('image1',))
self.checkParam(widget, name, 'image1', expected=('image1',))
self.checkParam(widget, name, (image,), expected=('image1',))
self.checkParam(widget, name, (image, 'active', image2),
expected=('image1', 'active', 'image2'))
self.checkParam(widget, name, 'image1 active image2',
expected=('image1', 'active', 'image2'))
self.checkInvalidParam(widget, name, 'spam',
errmsg='image "spam" doesn\'t exist')
def test_compound(self):
widget = self.create()
self.checkEnumParam(widget, 'compound',
'none', 'text', 'image', 'center',
'top', 'bottom', 'left', 'right')
def test_state(self):
widget = self.create()
self.checkParams(widget, 'state', 'active', 'disabled', 'normal')
def test_width(self):
widget = self.create()
self.checkParams(widget, 'width', 402, -402, 0)
@add_standard_options(StandardTtkOptionsTests)
class LabelTest(AbstractLabelTest, unittest.TestCase):
OPTIONS = (
'anchor', 'background',
'class', 'compound', 'cursor', 'font', 'foreground',
'image', 'justify', 'padding', 'relief', 'state', 'style',
'takefocus', 'text', 'textvariable',
'underline', 'width', 'wraplength',
)
_conv_pixels = noconv
def create(self, **kwargs):
return ttk.Label(self.root, **kwargs)
def test_font(self):
widget = self.create()
self.checkParam(widget, 'font',
'-Adobe-Helvetica-Medium-R-Normal--*-120-*-*-*-*-*-*')
@add_standard_options(StandardTtkOptionsTests)
class ButtonTest(AbstractLabelTest, unittest.TestCase):
OPTIONS = (
'class', 'command', 'compound', 'cursor', 'default',
'image', 'state', 'style', 'takefocus', 'text', 'textvariable',
'underline', 'width',
)
def create(self, **kwargs):
return ttk.Button(self.root, **kwargs)
def test_default(self):
widget = self.create()
self.checkEnumParam(widget, 'default', 'normal', 'active', 'disabled')
def test_invoke(self):
success = []
btn = ttk.Button(self.root, command=lambda: success.append(1))
btn.invoke()
self.assertTrue(success)
@add_standard_options(StandardTtkOptionsTests)
class CheckbuttonTest(AbstractLabelTest, unittest.TestCase):
OPTIONS = (
'class', 'command', 'compound', 'cursor',
'image',
'offvalue', 'onvalue',
'state', 'style',
'takefocus', 'text', 'textvariable',
'underline', 'variable', 'width',
)
def create(self, **kwargs):
return ttk.Checkbutton(self.root, **kwargs)
def test_offvalue(self):
widget = self.create()
self.checkParams(widget, 'offvalue', 1, 2.3, '', 'any string')
def test_onvalue(self):
widget = self.create()
self.checkParams(widget, 'onvalue', 1, 2.3, '', 'any string')
def test_invoke(self):
success = []
def cb_test():
success.append(1)
return "cb test called"
cbtn = ttk.Checkbutton(self.root, command=cb_test)
# the variable automatically created by ttk.Checkbutton is actually
# undefined till we invoke the Checkbutton
self.assertEqual(cbtn.state(), ('alternate', ))
self.assertRaises(tkinter.TclError, cbtn.tk.globalgetvar,
cbtn['variable'])
res = cbtn.invoke()
self.assertEqual(res, "cb test called")
self.assertEqual(cbtn['onvalue'],
cbtn.tk.globalgetvar(cbtn['variable']))
self.assertTrue(success)
cbtn['command'] = ''
res = cbtn.invoke()
self.assertFalse(str(res))
self.assertLessEqual(len(success), 1)
self.assertEqual(cbtn['offvalue'],
cbtn.tk.globalgetvar(cbtn['variable']))
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class ComboboxTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'cursor', 'exportselection', 'height',
'justify', 'postcommand', 'state', 'style',
'takefocus', 'textvariable', 'values', 'width',
)
def setUp(self):
super().setUp()
self.combo = self.create()
def create(self, **kwargs):
return ttk.Combobox(self.root, **kwargs)
def test_height(self):
widget = self.create()
self.checkParams(widget, 'height', 100, 101.2, 102.6, -100, 0, '1i')
def test_state(self):
widget = self.create()
self.checkParams(widget, 'state', 'active', 'disabled', 'normal')
def _show_drop_down_listbox(self):
width = self.combo.winfo_width()
self.combo.event_generate('<ButtonPress-1>', x=width - 5, y=5)
self.combo.event_generate('<ButtonRelease-1>', x=width - 5, y=5)
self.combo.update_idletasks()
def test_virtual_event(self):
success = []
self.combo['values'] = [1]
self.combo.bind('<<ComboboxSelected>>',
lambda evt: success.append(True))
self.combo.pack()
self.combo.wait_visibility()
height = self.combo.winfo_height()
self._show_drop_down_listbox()
self.combo.update()
self.combo.event_generate('<Return>')
self.combo.update()
self.assertTrue(success)
def test_postcommand(self):
success = []
self.combo['postcommand'] = lambda: success.append(True)
self.combo.pack()
self.combo.wait_visibility()
self._show_drop_down_listbox()
self.assertTrue(success)
# testing postcommand removal
self.combo['postcommand'] = ''
self._show_drop_down_listbox()
self.assertEqual(len(success), 1)
def test_values(self):
def check_get_current(getval, currval):
self.assertEqual(self.combo.get(), getval)
self.assertEqual(self.combo.current(), currval)
self.assertEqual(self.combo['values'],
() if tcl_version < (8, 5) else '')
check_get_current('', -1)
self.checkParam(self.combo, 'values', 'mon tue wed thur',
expected=('mon', 'tue', 'wed', 'thur'))
self.checkParam(self.combo, 'values', ('mon', 'tue', 'wed', 'thur'))
self.checkParam(self.combo, 'values', (42, 3.14, '', 'any string'))
self.checkParam(self.combo, 'values', '', expected=())
self.combo['values'] = ['a', 1, 'c']
self.combo.set('c')
check_get_current('c', 2)
self.combo.current(0)
check_get_current('a', 0)
self.combo.set('d')
check_get_current('d', -1)
# testing values with empty string
self.combo.set('')
self.combo['values'] = (1, 2, '', 3)
check_get_current('', 2)
# testing values with empty string set through configure
self.combo.configure(values=[1, '', 2])
self.assertEqual(self.combo['values'],
('1', '', '2') if self.wantobjects else
'1 {} 2')
# testing values with spaces
self.combo['values'] = ['a b', 'a\tb', 'a\nb']
self.assertEqual(self.combo['values'],
('a b', 'a\tb', 'a\nb') if self.wantobjects else
'{a b} {a\tb} {a\nb}')
# testing values with special characters
self.combo['values'] = [r'a\tb', '"a"', '} {']
self.assertEqual(self.combo['values'],
(r'a\tb', '"a"', '} {') if self.wantobjects else
r'a\\tb {"a"} \}\ \{')
# out of range
self.assertRaises(tkinter.TclError, self.combo.current,
len(self.combo['values']))
# it expects an integer (or something that can be converted to int)
self.assertRaises(tkinter.TclError, self.combo.current, '')
# testing creating combobox with empty string in values
combo2 = ttk.Combobox(self.root, values=[1, 2, ''])
self.assertEqual(combo2['values'],
('1', '2', '') if self.wantobjects else '1 2 {}')
combo2.destroy()
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class EntryTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'background', 'class', 'cursor',
'exportselection', 'font',
'invalidcommand', 'justify',
'show', 'state', 'style', 'takefocus', 'textvariable',
'validate', 'validatecommand', 'width', 'xscrollcommand',
)
def setUp(self):
super().setUp()
self.entry = self.create()
def create(self, **kwargs):
return ttk.Entry(self.root, **kwargs)
def test_invalidcommand(self):
widget = self.create()
self.checkCommandParam(widget, 'invalidcommand')
def test_show(self):
widget = self.create()
self.checkParam(widget, 'show', '*')
self.checkParam(widget, 'show', '')
self.checkParam(widget, 'show', ' ')
def test_state(self):
widget = self.create()
self.checkParams(widget, 'state',
'disabled', 'normal', 'readonly')
def test_validate(self):
widget = self.create()
self.checkEnumParam(widget, 'validate',
'all', 'key', 'focus', 'focusin', 'focusout', 'none')
def test_validatecommand(self):
widget = self.create()
self.checkCommandParam(widget, 'validatecommand')
def test_bbox(self):
self.assertIsBoundingBox(self.entry.bbox(0))
self.assertRaises(tkinter.TclError, self.entry.bbox, 'noindex')
self.assertRaises(tkinter.TclError, self.entry.bbox, None)
def test_identify(self):
self.entry.pack()
self.entry.wait_visibility()
self.entry.update_idletasks()
self.assertEqual(self.entry.identify(5, 5), "textarea")
self.assertEqual(self.entry.identify(-1, -1), "")
self.assertRaises(tkinter.TclError, self.entry.identify, None, 5)
self.assertRaises(tkinter.TclError, self.entry.identify, 5, None)
self.assertRaises(tkinter.TclError, self.entry.identify, 5, '')
def test_validation_options(self):
success = []
test_invalid = lambda: success.append(True)
self.entry['validate'] = 'none'
self.entry['validatecommand'] = lambda: False
self.entry['invalidcommand'] = test_invalid
self.entry.validate()
self.assertTrue(success)
self.entry['invalidcommand'] = ''
self.entry.validate()
self.assertEqual(len(success), 1)
self.entry['invalidcommand'] = test_invalid
self.entry['validatecommand'] = lambda: True
self.entry.validate()
self.assertEqual(len(success), 1)
self.entry['validatecommand'] = ''
self.entry.validate()
self.assertEqual(len(success), 1)
self.entry['validatecommand'] = True
self.assertRaises(tkinter.TclError, self.entry.validate)
def test_validation(self):
validation = []
def validate(to_insert):
if not 'a' <= to_insert.lower() <= 'z':
validation.append(False)
return False
validation.append(True)
return True
self.entry['validate'] = 'key'
self.entry['validatecommand'] = self.entry.register(validate), '%S'
self.entry.insert('end', 1)
self.entry.insert('end', 'a')
self.assertEqual(validation, [False, True])
self.assertEqual(self.entry.get(), 'a')
def test_revalidation(self):
def validate(content):
for letter in content:
if not 'a' <= letter.lower() <= 'z':
return False
return True
self.entry['validatecommand'] = self.entry.register(validate), '%P'
self.entry.insert('end', 'avocado')
self.assertEqual(self.entry.validate(), True)
self.assertEqual(self.entry.state(), ())
self.entry.delete(0, 'end')
self.assertEqual(self.entry.get(), '')
self.entry.insert('end', 'a1b')
self.assertEqual(self.entry.validate(), False)
self.assertEqual(self.entry.state(), ('invalid', ))
self.entry.delete(1)
self.assertEqual(self.entry.validate(), True)
self.assertEqual(self.entry.state(), ())
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class PanedWindowTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'cursor', 'height',
'orient', 'style', 'takefocus', 'width',
)
def setUp(self):
super().setUp()
self.paned = self.create()
def create(self, **kwargs):
return ttk.PanedWindow(self.root, **kwargs)
def test_orient(self):
widget = self.create()
self.assertEqual(str(widget['orient']), 'vertical')
errmsg='attempt to change read-only option'
if get_tk_patchlevel() < (8, 6, 0): # actually this was changed in 8.6b3
errmsg='Attempt to change read-only option'
self.checkInvalidParam(widget, 'orient', 'horizontal',
errmsg=errmsg)
widget2 = self.create(orient='horizontal')
self.assertEqual(str(widget2['orient']), 'horizontal')
def test_add(self):
# attempt to add a child that is not a direct child of the paned window
label = ttk.Label(self.paned)
child = ttk.Label(label)
self.assertRaises(tkinter.TclError, self.paned.add, child)
label.destroy()
child.destroy()
# another attempt
label = ttk.Label(self.root)
child = ttk.Label(label)
self.assertRaises(tkinter.TclError, self.paned.add, child)
child.destroy()
label.destroy()
good_child = ttk.Label(self.root)
self.paned.add(good_child)
# re-adding a child is not accepted
self.assertRaises(tkinter.TclError, self.paned.add, good_child)
other_child = ttk.Label(self.paned)
self.paned.add(other_child)
self.assertEqual(self.paned.pane(0), self.paned.pane(1))
self.assertRaises(tkinter.TclError, self.paned.pane, 2)
good_child.destroy()
other_child.destroy()
self.assertRaises(tkinter.TclError, self.paned.pane, 0)
def test_forget(self):
self.assertRaises(tkinter.TclError, self.paned.forget, None)
self.assertRaises(tkinter.TclError, self.paned.forget, 0)
self.paned.add(ttk.Label(self.root))
self.paned.forget(0)
self.assertRaises(tkinter.TclError, self.paned.forget, 0)
def test_insert(self):
self.assertRaises(tkinter.TclError, self.paned.insert, None, 0)
self.assertRaises(tkinter.TclError, self.paned.insert, 0, None)
self.assertRaises(tkinter.TclError, self.paned.insert, 0, 0)
child = ttk.Label(self.root)
child2 = ttk.Label(self.root)
child3 = ttk.Label(self.root)
self.assertRaises(tkinter.TclError, self.paned.insert, 0, child)
self.paned.insert('end', child2)
self.paned.insert(0, child)
self.assertEqual(self.paned.panes(), (str(child), str(child2)))
self.paned.insert(0, child2)
self.assertEqual(self.paned.panes(), (str(child2), str(child)))
self.paned.insert('end', child3)
self.assertEqual(self.paned.panes(),
(str(child2), str(child), str(child3)))
# reinserting a child should move it to its current position
panes = self.paned.panes()
self.paned.insert('end', child3)
self.assertEqual(panes, self.paned.panes())
# moving child3 to child2 position should result in child2 ending up
# in previous child position and child ending up in previous child3
# position
self.paned.insert(child2, child3)
self.assertEqual(self.paned.panes(),
(str(child3), str(child2), str(child)))
def test_pane(self):
self.assertRaises(tkinter.TclError, self.paned.pane, 0)
child = ttk.Label(self.root)
self.paned.add(child)
self.assertIsInstance(self.paned.pane(0), dict)
self.assertEqual(self.paned.pane(0, weight=None),
0 if self.wantobjects else '0')
# newer form for querying a single option
self.assertEqual(self.paned.pane(0, 'weight'),
0 if self.wantobjects else '0')
self.assertEqual(self.paned.pane(0), self.paned.pane(str(child)))
self.assertRaises(tkinter.TclError, self.paned.pane, 0,
badoption='somevalue')
def test_sashpos(self):
self.assertRaises(tkinter.TclError, self.paned.sashpos, None)
self.assertRaises(tkinter.TclError, self.paned.sashpos, '')
self.assertRaises(tkinter.TclError, self.paned.sashpos, 0)
child = ttk.Label(self.paned, text='a')
self.paned.add(child, weight=1)
self.assertRaises(tkinter.TclError, self.paned.sashpos, 0)
child2 = ttk.Label(self.paned, text='b')
self.paned.add(child2)
self.assertRaises(tkinter.TclError, self.paned.sashpos, 1)
self.paned.pack(expand=True, fill='both')
self.paned.wait_visibility()
curr_pos = self.paned.sashpos(0)
self.paned.sashpos(0, 1000)
self.assertNotEqual(curr_pos, self.paned.sashpos(0))
self.assertIsInstance(self.paned.sashpos(0), int)
@add_standard_options(StandardTtkOptionsTests)
class RadiobuttonTest(AbstractLabelTest, unittest.TestCase):
OPTIONS = (
'class', 'command', 'compound', 'cursor',
'image',
'state', 'style',
'takefocus', 'text', 'textvariable',
'underline', 'value', 'variable', 'width',
)
def create(self, **kwargs):
return ttk.Radiobutton(self.root, **kwargs)
def test_value(self):
widget = self.create()
self.checkParams(widget, 'value', 1, 2.3, '', 'any string')
def test_invoke(self):
success = []
def cb_test():
success.append(1)
return "cb test called"
myvar = tkinter.IntVar(self.root)
cbtn = ttk.Radiobutton(self.root, command=cb_test,
variable=myvar, value=0)
cbtn2 = ttk.Radiobutton(self.root, command=cb_test,
variable=myvar, value=1)
if self.wantobjects:
conv = lambda x: x
else:
conv = int
res = cbtn.invoke()
self.assertEqual(res, "cb test called")
self.assertEqual(conv(cbtn['value']), myvar.get())
self.assertEqual(myvar.get(),
conv(cbtn.tk.globalgetvar(cbtn['variable'])))
self.assertTrue(success)
cbtn2['command'] = ''
res = cbtn2.invoke()
self.assertEqual(str(res), '')
self.assertLessEqual(len(success), 1)
self.assertEqual(conv(cbtn2['value']), myvar.get())
self.assertEqual(myvar.get(),
conv(cbtn.tk.globalgetvar(cbtn['variable'])))
self.assertEqual(str(cbtn['variable']), str(cbtn2['variable']))
class MenubuttonTest(AbstractLabelTest, unittest.TestCase):
OPTIONS = (
'class', 'compound', 'cursor', 'direction',
'image', 'menu', 'state', 'style',
'takefocus', 'text', 'textvariable',
'underline', 'width',
)
def create(self, **kwargs):
return ttk.Menubutton(self.root, **kwargs)
def test_direction(self):
widget = self.create()
self.checkEnumParam(widget, 'direction',
'above', 'below', 'left', 'right', 'flush')
def test_menu(self):
widget = self.create()
menu = tkinter.Menu(widget, name='menu')
self.checkParam(widget, 'menu', menu, conv=str)
menu.destroy()
@add_standard_options(StandardTtkOptionsTests)
class ScaleTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'command', 'cursor', 'from', 'length',
'orient', 'style', 'takefocus', 'to', 'value', 'variable',
)
_conv_pixels = noconv
default_orient = 'horizontal'
def setUp(self):
super().setUp()
self.scale = self.create()
self.scale.pack()
self.scale.update()
def create(self, **kwargs):
return ttk.Scale(self.root, **kwargs)
def test_from(self):
widget = self.create()
self.checkFloatParam(widget, 'from', 100, 14.9, 15.1, conv=False)
def test_length(self):
widget = self.create()
self.checkPixelsParam(widget, 'length', 130, 131.2, 135.6, '5i')
def test_to(self):
widget = self.create()
self.checkFloatParam(widget, 'to', 300, 14.9, 15.1, -10, conv=False)
def test_value(self):
widget = self.create()
self.checkFloatParam(widget, 'value', 300, 14.9, 15.1, -10, conv=False)
def test_custom_event(self):
failure = [1, 1, 1] # will need to be empty
funcid = self.scale.bind('<<RangeChanged>>', lambda evt: failure.pop())
self.scale['from'] = 10
self.scale['from_'] = 10
self.scale['to'] = 3
self.assertFalse(failure)
failure = [1, 1, 1]
self.scale.configure(from_=2, to=5)
self.scale.configure(from_=0, to=-2)
self.scale.configure(to=10)
self.assertFalse(failure)
def test_get(self):
if self.wantobjects:
conv = lambda x: x
else:
conv = float
scale_width = self.scale.winfo_width()
self.assertEqual(self.scale.get(scale_width, 0), self.scale['to'])
self.assertEqual(conv(self.scale.get(0, 0)), conv(self.scale['from']))
self.assertEqual(self.scale.get(), self.scale['value'])
self.scale['value'] = 30
self.assertEqual(self.scale.get(), self.scale['value'])
self.assertRaises(tkinter.TclError, self.scale.get, '', 0)
self.assertRaises(tkinter.TclError, self.scale.get, 0, '')
def test_set(self):
if self.wantobjects:
conv = lambda x: x
else:
conv = float
# set restricts the max/min values according to the current range
max = conv(self.scale['to'])
new_max = max + 10
self.scale.set(new_max)
self.assertEqual(conv(self.scale.get()), max)
min = conv(self.scale['from'])
self.scale.set(min - 1)
self.assertEqual(conv(self.scale.get()), min)
        # changing the variable directly doesn't impose this limitation, though
var = tkinter.DoubleVar(self.root)
self.scale['variable'] = var
var.set(max + 5)
self.assertEqual(conv(self.scale.get()), var.get())
self.assertEqual(conv(self.scale.get()), max + 5)
del var
# the same happens with the value option
self.scale['value'] = max + 10
self.assertEqual(conv(self.scale.get()), max + 10)
self.assertEqual(conv(self.scale.get()), conv(self.scale['value']))
# nevertheless, note that the max/min values we can get specifying
# x, y coords are the ones according to the current range
self.assertEqual(conv(self.scale.get(0, 0)), min)
self.assertEqual(conv(self.scale.get(self.scale.winfo_width(), 0)), max)
self.assertRaises(tkinter.TclError, self.scale.set, None)
@add_standard_options(StandardTtkOptionsTests)
class ProgressbarTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'cursor', 'orient', 'length',
'mode', 'maximum', 'phase',
'style', 'takefocus', 'value', 'variable',
)
_conv_pixels = noconv
default_orient = 'horizontal'
def create(self, **kwargs):
return ttk.Progressbar(self.root, **kwargs)
def test_length(self):
widget = self.create()
self.checkPixelsParam(widget, 'length', 100.1, 56.7, '2i')
def test_maximum(self):
widget = self.create()
self.checkFloatParam(widget, 'maximum', 150.2, 77.7, 0, -10, conv=False)
def test_mode(self):
widget = self.create()
self.checkEnumParam(widget, 'mode', 'determinate', 'indeterminate')
def test_phase(self):
# XXX
pass
def test_value(self):
widget = self.create()
self.checkFloatParam(widget, 'value', 150.2, 77.7, 0, -10,
conv=False)
@unittest.skipIf(sys.platform == 'darwin',
'ttk.Scrollbar is special on MacOSX')
@add_standard_options(StandardTtkOptionsTests)
class ScrollbarTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'command', 'cursor', 'orient', 'style', 'takefocus',
)
default_orient = 'vertical'
def create(self, **kwargs):
return ttk.Scrollbar(self.root, **kwargs)
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class NotebookTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'cursor', 'height', 'padding', 'style', 'takefocus',
)
def setUp(self):
super().setUp()
self.nb = self.create(padding=0)
self.child1 = ttk.Label(self.root)
self.child2 = ttk.Label(self.root)
self.nb.add(self.child1, text='a')
self.nb.add(self.child2, text='b')
def create(self, **kwargs):
return ttk.Notebook(self.root, **kwargs)
def test_tab_identifiers(self):
self.nb.forget(0)
self.nb.hide(self.child2)
self.assertRaises(tkinter.TclError, self.nb.tab, self.child1)
self.assertEqual(self.nb.index('end'), 1)
self.nb.add(self.child2)
self.assertEqual(self.nb.index('end'), 1)
self.nb.select(self.child2)
self.assertTrue(self.nb.tab('current'))
self.nb.add(self.child1, text='a')
self.nb.pack()
self.nb.wait_visibility()
if sys.platform == 'darwin':
tb_idx = "@20,5"
else:
tb_idx = "@5,5"
self.assertEqual(self.nb.tab(tb_idx), self.nb.tab('current'))
for i in range(5, 100, 5):
try:
if self.nb.tab('@%d, 5' % i, text=None) == 'a':
break
except tkinter.TclError:
pass
else:
self.fail("Tab with text 'a' not found")
def test_add_and_hidden(self):
self.assertRaises(tkinter.TclError, self.nb.hide, -1)
self.assertRaises(tkinter.TclError, self.nb.hide, 'hi')
self.assertRaises(tkinter.TclError, self.nb.hide, None)
self.assertRaises(tkinter.TclError, self.nb.add, None)
self.assertRaises(tkinter.TclError, self.nb.add, ttk.Label(self.root),
unknown='option')
tabs = self.nb.tabs()
self.nb.hide(self.child1)
self.nb.add(self.child1)
self.assertEqual(self.nb.tabs(), tabs)
child = ttk.Label(self.root)
self.nb.add(child, text='c')
tabs = self.nb.tabs()
curr = self.nb.index('current')
        # verify that the tab gets re-added at its previous position
child2_index = self.nb.index(self.child2)
self.nb.hide(self.child2)
self.nb.add(self.child2)
self.assertEqual(self.nb.tabs(), tabs)
self.assertEqual(self.nb.index(self.child2), child2_index)
self.assertEqual(str(self.child2), self.nb.tabs()[child2_index])
# but the tab next to it (not hidden) is the one selected now
self.assertEqual(self.nb.index('current'), curr + 1)
def test_forget(self):
self.assertRaises(tkinter.TclError, self.nb.forget, -1)
self.assertRaises(tkinter.TclError, self.nb.forget, 'hi')
self.assertRaises(tkinter.TclError, self.nb.forget, None)
tabs = self.nb.tabs()
child1_index = self.nb.index(self.child1)
self.nb.forget(self.child1)
self.assertNotIn(str(self.child1), self.nb.tabs())
self.assertEqual(len(tabs) - 1, len(self.nb.tabs()))
self.nb.add(self.child1)
self.assertEqual(self.nb.index(self.child1), 1)
self.assertNotEqual(child1_index, self.nb.index(self.child1))
def test_index(self):
self.assertRaises(tkinter.TclError, self.nb.index, -1)
self.assertRaises(tkinter.TclError, self.nb.index, None)
self.assertIsInstance(self.nb.index('end'), int)
self.assertEqual(self.nb.index(self.child1), 0)
self.assertEqual(self.nb.index(self.child2), 1)
self.assertEqual(self.nb.index('end'), 2)
def test_insert(self):
# moving tabs
tabs = self.nb.tabs()
self.nb.insert(1, tabs[0])
self.assertEqual(self.nb.tabs(), (tabs[1], tabs[0]))
self.nb.insert(self.child1, self.child2)
self.assertEqual(self.nb.tabs(), tabs)
self.nb.insert('end', self.child1)
self.assertEqual(self.nb.tabs(), (tabs[1], tabs[0]))
self.nb.insert('end', 0)
self.assertEqual(self.nb.tabs(), tabs)
# bad moves
self.assertRaises(tkinter.TclError, self.nb.insert, 2, tabs[0])
self.assertRaises(tkinter.TclError, self.nb.insert, -1, tabs[0])
# new tab
child3 = ttk.Label(self.root)
self.nb.insert(1, child3)
self.assertEqual(self.nb.tabs(), (tabs[0], str(child3), tabs[1]))
self.nb.forget(child3)
self.assertEqual(self.nb.tabs(), tabs)
self.nb.insert(self.child1, child3)
self.assertEqual(self.nb.tabs(), (str(child3), ) + tabs)
self.nb.forget(child3)
self.assertRaises(tkinter.TclError, self.nb.insert, 2, child3)
self.assertRaises(tkinter.TclError, self.nb.insert, -1, child3)
# bad inserts
self.assertRaises(tkinter.TclError, self.nb.insert, 'end', None)
self.assertRaises(tkinter.TclError, self.nb.insert, None, 0)
self.assertRaises(tkinter.TclError, self.nb.insert, None, None)
def test_select(self):
self.nb.pack()
self.nb.wait_visibility()
success = []
tab_changed = []
self.child1.bind('<Unmap>', lambda evt: success.append(True))
self.nb.bind('<<NotebookTabChanged>>',
lambda evt: tab_changed.append(True))
self.assertEqual(self.nb.select(), str(self.child1))
self.nb.select(self.child2)
self.assertTrue(success)
self.assertEqual(self.nb.select(), str(self.child2))
self.nb.update()
self.assertTrue(tab_changed)
def test_tab(self):
self.assertRaises(tkinter.TclError, self.nb.tab, -1)
self.assertRaises(tkinter.TclError, self.nb.tab, 'notab')
self.assertRaises(tkinter.TclError, self.nb.tab, None)
self.assertIsInstance(self.nb.tab(self.child1), dict)
self.assertEqual(self.nb.tab(self.child1, text=None), 'a')
# newer form for querying a single option
self.assertEqual(self.nb.tab(self.child1, 'text'), 'a')
self.nb.tab(self.child1, text='abc')
self.assertEqual(self.nb.tab(self.child1, text=None), 'abc')
self.assertEqual(self.nb.tab(self.child1, 'text'), 'abc')
def test_tabs(self):
self.assertEqual(len(self.nb.tabs()), 2)
self.nb.forget(self.child1)
self.nb.forget(self.child2)
self.assertEqual(self.nb.tabs(), ())
def test_traversal(self):
self.nb.pack()
self.nb.wait_visibility()
self.nb.select(0)
simulate_mouse_click(self.nb, 5, 5)
self.nb.focus_force()
self.nb.event_generate('<Control-Tab>')
self.assertEqual(self.nb.select(), str(self.child2))
self.nb.focus_force()
self.nb.event_generate('<Shift-Control-Tab>')
self.assertEqual(self.nb.select(), str(self.child1))
self.nb.focus_force()
self.nb.event_generate('<Shift-Control-Tab>')
self.assertEqual(self.nb.select(), str(self.child2))
self.nb.tab(self.child1, text='a', underline=0)
self.nb.enable_traversal()
self.nb.focus_force()
simulate_mouse_click(self.nb, 5, 5)
if sys.platform == 'darwin':
self.nb.event_generate('<Option-a>')
else:
self.nb.event_generate('<Alt-a>')
self.assertEqual(self.nb.select(), str(self.child1))
@add_standard_options(StandardTtkOptionsTests)
class TreeviewTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'columns', 'cursor', 'displaycolumns',
'height', 'padding', 'selectmode', 'show',
'style', 'takefocus', 'xscrollcommand', 'yscrollcommand',
)
def setUp(self):
super().setUp()
self.tv = self.create(padding=0)
def create(self, **kwargs):
return ttk.Treeview(self.root, **kwargs)
def test_columns(self):
widget = self.create()
self.checkParam(widget, 'columns', 'a b c',
expected=('a', 'b', 'c'))
self.checkParam(widget, 'columns', ('a', 'b', 'c'))
self.checkParam(widget, 'columns', ())
def test_displaycolumns(self):
widget = self.create()
widget['columns'] = ('a', 'b', 'c')
self.checkParam(widget, 'displaycolumns', 'b a c',
expected=('b', 'a', 'c'))
self.checkParam(widget, 'displaycolumns', ('b', 'a', 'c'))
self.checkParam(widget, 'displaycolumns', '#all',
expected=('#all',))
self.checkParam(widget, 'displaycolumns', (2, 1, 0))
self.checkInvalidParam(widget, 'displaycolumns', ('a', 'b', 'd'),
errmsg='Invalid column index d')
self.checkInvalidParam(widget, 'displaycolumns', (1, 2, 3),
errmsg='Column index 3 out of bounds')
self.checkInvalidParam(widget, 'displaycolumns', (1, -2),
errmsg='Column index -2 out of bounds')
def test_height(self):
widget = self.create()
self.checkPixelsParam(widget, 'height', 100, -100, 0, '3c', conv=False)
self.checkPixelsParam(widget, 'height', 101.2, 102.6, conv=noconv)
def test_selectmode(self):
widget = self.create()
self.checkEnumParam(widget, 'selectmode',
'none', 'browse', 'extended')
def test_show(self):
widget = self.create()
self.checkParam(widget, 'show', 'tree headings',
expected=('tree', 'headings'))
self.checkParam(widget, 'show', ('tree', 'headings'))
self.checkParam(widget, 'show', ('headings', 'tree'))
self.checkParam(widget, 'show', 'tree', expected=('tree',))
self.checkParam(widget, 'show', 'headings', expected=('headings',))
def test_bbox(self):
self.tv.pack()
self.assertEqual(self.tv.bbox(''), '')
self.tv.wait_visibility()
self.tv.update()
item_id = self.tv.insert('', 'end')
children = self.tv.get_children()
self.assertTrue(children)
bbox = self.tv.bbox(children[0])
self.assertIsBoundingBox(bbox)
# compare width in bboxes
self.tv['columns'] = ['test']
self.tv.column('test', width=50)
bbox_column0 = self.tv.bbox(children[0], 0)
root_width = self.tv.column('#0', width=None)
if not self.wantobjects:
root_width = int(root_width)
self.assertEqual(bbox_column0[0], bbox[0] + root_width)
# verify that bbox of a closed item is the empty string
child1 = self.tv.insert(item_id, 'end')
self.assertEqual(self.tv.bbox(child1), '')
def test_children(self):
# no children yet, should get an empty tuple
self.assertEqual(self.tv.get_children(), ())
item_id = self.tv.insert('', 'end')
self.assertIsInstance(self.tv.get_children(), tuple)
self.assertEqual(self.tv.get_children()[0], item_id)
# add item_id and child3 as children of child2
child2 = self.tv.insert('', 'end')
child3 = self.tv.insert('', 'end')
self.tv.set_children(child2, item_id, child3)
self.assertEqual(self.tv.get_children(child2), (item_id, child3))
# child3 has child2 as parent, thus trying to set child2 as a children
# of child3 should result in an error
self.assertRaises(tkinter.TclError,
self.tv.set_children, child3, child2)
# remove child2 children
self.tv.set_children(child2)
self.assertEqual(self.tv.get_children(child2), ())
# remove root's children
self.tv.set_children('')
self.assertEqual(self.tv.get_children(), ())
def test_column(self):
# return a dict with all options/values
self.assertIsInstance(self.tv.column('#0'), dict)
# return a single value of the given option
if self.wantobjects:
self.assertIsInstance(self.tv.column('#0', width=None), int)
# set a new value for an option
self.tv.column('#0', width=10)
# testing new way to get option value
self.assertEqual(self.tv.column('#0', 'width'),
10 if self.wantobjects else '10')
self.assertEqual(self.tv.column('#0', width=None),
10 if self.wantobjects else '10')
# check read-only option
self.assertRaises(tkinter.TclError, self.tv.column, '#0', id='X')
self.assertRaises(tkinter.TclError, self.tv.column, 'invalid')
invalid_kws = [
{'unknown_option': 'some value'}, {'stretch': 'wrong'},
{'anchor': 'wrong'}, {'width': 'wrong'}, {'minwidth': 'wrong'}
]
for kw in invalid_kws:
self.assertRaises(tkinter.TclError, self.tv.column, '#0',
**kw)
def test_delete(self):
self.assertRaises(tkinter.TclError, self.tv.delete, '#0')
item_id = self.tv.insert('', 'end')
item2 = self.tv.insert(item_id, 'end')
self.assertEqual(self.tv.get_children(), (item_id, ))
self.assertEqual(self.tv.get_children(item_id), (item2, ))
self.tv.delete(item_id)
self.assertFalse(self.tv.get_children())
# reattach should fail
self.assertRaises(tkinter.TclError,
self.tv.reattach, item_id, '', 'end')
# test multiple item delete
item1 = self.tv.insert('', 'end')
item2 = self.tv.insert('', 'end')
self.assertEqual(self.tv.get_children(), (item1, item2))
self.tv.delete(item1, item2)
self.assertFalse(self.tv.get_children())
def test_detach_reattach(self):
item_id = self.tv.insert('', 'end')
item2 = self.tv.insert(item_id, 'end')
# calling detach without items is valid, although it does nothing
prev = self.tv.get_children()
self.tv.detach() # this should do nothing
self.assertEqual(prev, self.tv.get_children())
self.assertEqual(self.tv.get_children(), (item_id, ))
self.assertEqual(self.tv.get_children(item_id), (item2, ))
# detach item with children
self.tv.detach(item_id)
self.assertFalse(self.tv.get_children())
# reattach item with children
self.tv.reattach(item_id, '', 'end')
self.assertEqual(self.tv.get_children(), (item_id, ))
self.assertEqual(self.tv.get_children(item_id), (item2, ))
# move a children to the root
self.tv.move(item2, '', 'end')
self.assertEqual(self.tv.get_children(), (item_id, item2))
self.assertEqual(self.tv.get_children(item_id), ())
# bad values
self.assertRaises(tkinter.TclError,
self.tv.reattach, 'nonexistent', '', 'end')
self.assertRaises(tkinter.TclError,
self.tv.detach, 'nonexistent')
self.assertRaises(tkinter.TclError,
self.tv.reattach, item2, 'otherparent', 'end')
self.assertRaises(tkinter.TclError,
self.tv.reattach, item2, '', 'invalid')
# multiple detach
self.tv.detach(item_id, item2)
self.assertEqual(self.tv.get_children(), ())
self.assertEqual(self.tv.get_children(item_id), ())
def test_exists(self):
self.assertEqual(self.tv.exists('something'), False)
self.assertEqual(self.tv.exists(''), True)
self.assertEqual(self.tv.exists({}), False)
# the following will make a tk.call equivalent to
# tk.call(treeview, "exists") which should result in an error
# in the tcl interpreter since tk requires an item.
self.assertRaises(tkinter.TclError, self.tv.exists, None)
def test_focus(self):
# nothing is focused right now
self.assertEqual(self.tv.focus(), '')
item1 = self.tv.insert('', 'end')
self.tv.focus(item1)
self.assertEqual(self.tv.focus(), item1)
self.tv.delete(item1)
self.assertEqual(self.tv.focus(), '')
# try focusing inexistent item
self.assertRaises(tkinter.TclError, self.tv.focus, 'hi')
def test_heading(self):
# check a dict is returned
self.assertIsInstance(self.tv.heading('#0'), dict)
# check a value is returned
self.tv.heading('#0', text='hi')
self.assertEqual(self.tv.heading('#0', 'text'), 'hi')
self.assertEqual(self.tv.heading('#0', text=None), 'hi')
# invalid option
self.assertRaises(tkinter.TclError, self.tv.heading, '#0',
background=None)
# invalid value
self.assertRaises(tkinter.TclError, self.tv.heading, '#0',
anchor=1)
def test_heading_callback(self):
def simulate_heading_click(x, y):
simulate_mouse_click(self.tv, x, y)
self.tv.update()
success = [] # no success for now
self.tv.pack()
self.tv.wait_visibility()
self.tv.heading('#0', command=lambda: success.append(True))
self.tv.column('#0', width=100)
self.tv.update()
# assuming that the coords (5, 5) fall into heading #0
simulate_heading_click(5, 5)
if not success:
self.fail("The command associated to the treeview heading wasn't "
"invoked.")
success = []
commands = self.tv.master._tclCommands
self.tv.heading('#0', command=str(self.tv.heading('#0', command=None)))
self.assertEqual(commands, self.tv.master._tclCommands)
simulate_heading_click(5, 5)
if not success:
self.fail("The command associated to the treeview heading wasn't "
"invoked.")
# XXX The following raises an error in a tcl interpreter, but not in
# Python
#self.tv.heading('#0', command='I dont exist')
#simulate_heading_click(5, 5)
def test_index(self):
# item 'what' doesn't exist
self.assertRaises(tkinter.TclError, self.tv.index, 'what')
self.assertEqual(self.tv.index(''), 0)
item1 = self.tv.insert('', 'end')
item2 = self.tv.insert('', 'end')
c1 = self.tv.insert(item1, 'end')
c2 = self.tv.insert(item1, 'end')
self.assertEqual(self.tv.index(item1), 0)
self.assertEqual(self.tv.index(c1), 0)
self.assertEqual(self.tv.index(c2), 1)
self.assertEqual(self.tv.index(item2), 1)
self.tv.move(item2, '', 0)
self.assertEqual(self.tv.index(item2), 0)
self.assertEqual(self.tv.index(item1), 1)
# check that index still works even after its parent and siblings
# have been detached
self.tv.detach(item1)
self.assertEqual(self.tv.index(c2), 1)
self.tv.detach(c1)
self.assertEqual(self.tv.index(c2), 0)
# but it fails after item has been deleted
self.tv.delete(item1)
self.assertRaises(tkinter.TclError, self.tv.index, c2)
def test_insert_item(self):
# parent 'none' doesn't exist
self.assertRaises(tkinter.TclError, self.tv.insert, 'none', 'end')
# open values
self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
open='')
self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
open='please')
self.assertFalse(self.tv.delete(self.tv.insert('', 'end', open=True)))
self.assertFalse(self.tv.delete(self.tv.insert('', 'end', open=False)))
# invalid index
self.assertRaises(tkinter.TclError, self.tv.insert, '', 'middle')
# trying to duplicate item id is invalid
itemid = self.tv.insert('', 'end', 'first-item')
self.assertEqual(itemid, 'first-item')
self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
'first-item')
self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
MockTclObj('first-item'))
# unicode values
value = '\xe1ba'
item = self.tv.insert('', 'end', values=(value, ))
self.assertEqual(self.tv.item(item, 'values'),
(value,) if self.wantobjects else value)
self.assertEqual(self.tv.item(item, values=None),
(value,) if self.wantobjects else value)
self.tv.item(item, values=self.root.splitlist(self.tv.item(item, values=None)))
self.assertEqual(self.tv.item(item, values=None),
(value,) if self.wantobjects else value)
self.assertIsInstance(self.tv.item(item), dict)
# erase item values
self.tv.item(item, values='')
self.assertFalse(self.tv.item(item, values=None))
# item tags
item = self.tv.insert('', 'end', tags=[1, 2, value])
self.assertEqual(self.tv.item(item, tags=None),
('1', '2', value) if self.wantobjects else
'1 2 %s' % value)
self.tv.item(item, tags=[])
self.assertFalse(self.tv.item(item, tags=None))
self.tv.item(item, tags=(1, 2))
self.assertEqual(self.tv.item(item, tags=None),
('1', '2') if self.wantobjects else '1 2')
# values with spaces
item = self.tv.insert('', 'end', values=('a b c',
'%s %s' % (value, value)))
self.assertEqual(self.tv.item(item, values=None),
('a b c', '%s %s' % (value, value)) if self.wantobjects else
'{a b c} {%s %s}' % (value, value))
# text
self.assertEqual(self.tv.item(
self.tv.insert('', 'end', text="Label here"), text=None),
"Label here")
self.assertEqual(self.tv.item(
self.tv.insert('', 'end', text=value), text=None),
value)
def test_set(self):
self.tv['columns'] = ['A', 'B']
item = self.tv.insert('', 'end', values=['a', 'b'])
self.assertEqual(self.tv.set(item), {'A': 'a', 'B': 'b'})
self.tv.set(item, 'B', 'a')
self.assertEqual(self.tv.item(item, values=None),
('a', 'a') if self.wantobjects else 'a a')
self.tv['columns'] = ['B']
self.assertEqual(self.tv.set(item), {'B': 'a'})
self.tv.set(item, 'B', 'b')
self.assertEqual(self.tv.set(item, column='B'), 'b')
self.assertEqual(self.tv.item(item, values=None),
('b', 'a') if self.wantobjects else 'b a')
self.tv.set(item, 'B', 123)
self.assertEqual(self.tv.set(item, 'B'),
123 if self.wantobjects else '123')
self.assertEqual(self.tv.item(item, values=None),
(123, 'a') if self.wantobjects else '123 a')
self.assertEqual(self.tv.set(item),
{'B': 123} if self.wantobjects else {'B': '123'})
# inexistent column
self.assertRaises(tkinter.TclError, self.tv.set, item, 'A')
self.assertRaises(tkinter.TclError, self.tv.set, item, 'A', 'b')
# inexistent item
self.assertRaises(tkinter.TclError, self.tv.set, 'notme')
def test_tag_bind(self):
events = []
item1 = self.tv.insert('', 'end', tags=['call'])
item2 = self.tv.insert('', 'end', tags=['call'])
self.tv.tag_bind('call', '<ButtonPress-1>',
lambda evt: events.append(1))
self.tv.tag_bind('call', '<ButtonRelease-1>',
lambda evt: events.append(2))
self.tv.pack()
self.tv.wait_visibility()
self.tv.update()
pos_y = set()
found = set()
for i in range(0, 100, 10):
if len(found) == 2: # item1 and item2 already found
break
item_id = self.tv.identify_row(i)
if item_id and item_id not in found:
pos_y.add(i)
found.add(item_id)
self.assertEqual(len(pos_y), 2) # item1 and item2 y pos
for y in pos_y:
simulate_mouse_click(self.tv, 0, y)
# by now there should be 4 things in the events list, since each
# item had a bind for two events that were simulated above
self.assertEqual(len(events), 4)
for evt in zip(events[::2], events[1::2]):
self.assertEqual(evt, (1, 2))
def test_tag_configure(self):
# Just testing parameter passing for now
self.assertRaises(TypeError, self.tv.tag_configure)
self.assertRaises(tkinter.TclError, self.tv.tag_configure,
'test', sky='blue')
self.tv.tag_configure('test', foreground='blue')
self.assertEqual(str(self.tv.tag_configure('test', 'foreground')),
'blue')
self.assertEqual(str(self.tv.tag_configure('test', foreground=None)),
'blue')
self.assertIsInstance(self.tv.tag_configure('test'), dict)
@add_standard_options(StandardTtkOptionsTests)
class SeparatorTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'cursor', 'orient', 'style', 'takefocus',
# 'state'?
)
default_orient = 'horizontal'
def create(self, **kwargs):
return ttk.Separator(self.root, **kwargs)
@add_standard_options(StandardTtkOptionsTests)
class SizegripTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'cursor', 'style', 'takefocus',
# 'state'?
)
def create(self, **kwargs):
return ttk.Sizegrip(self.root, **kwargs)
tests_gui = (
ButtonTest, CheckbuttonTest, ComboboxTest, EntryTest,
FrameTest, LabelFrameTest, LabelTest, MenubuttonTest,
NotebookTest, PanedWindowTest, ProgressbarTest,
RadiobuttonTest, ScaleTest, ScrollbarTest, SeparatorTest,
SizegripTest, TreeviewTest, WidgetTest,
)
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 | 5,589,900,279,530,257,000 | 34.561952 | 87 | 0.588953 | false |
berendkleinhaneveld/VTK | ThirdParty/Twisted/twisted/internet/protocol.py | 31 | 26263 | # -*- test-case-name: twisted.test.test_factories,twisted.internet.test.test_protocol -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Standard implementations of Twisted protocol-related interfaces.
Start here if you are looking to write a new protocol implementation for
Twisted. The Protocol class contains some introductory material.
"""
from __future__ import division, absolute_import
import random
from zope.interface import implementer
from twisted.python import log, failure, components
from twisted.internet import interfaces, error, defer
@implementer(interfaces.IProtocolFactory, interfaces.ILoggingContext)
class Factory:
"""
This is a factory which produces protocols.
By default, buildProtocol will create a protocol of the class given in
self.protocol.
"""
# put a subclass of Protocol here:
protocol = None
numPorts = 0
noisy = True
@classmethod
def forProtocol(cls, protocol, *args, **kwargs):
"""
Create a factory for the given protocol.
It sets the C{protocol} attribute and returns the constructed factory
instance.
@param protocol: A L{Protocol} subclass
@param args: Positional arguments for the factory.
@param kwargs: Keyword arguments for the factory.
@return: A L{Factory} instance wired up to C{protocol}.
"""
factory = cls(*args, **kwargs)
factory.protocol = protocol
return factory
def logPrefix(self):
"""
Describe this factory for log messages.
"""
return self.__class__.__name__
def doStart(self):
"""Make sure startFactory is called.
Users should not call this function themselves!
"""
if not self.numPorts:
if self.noisy:
log.msg("Starting factory %r" % self)
self.startFactory()
self.numPorts = self.numPorts + 1
def doStop(self):
"""Make sure stopFactory is called.
Users should not call this function themselves!
"""
if self.numPorts == 0:
# this shouldn't happen, but does sometimes and this is better
# than blowing up in assert as we did previously.
return
self.numPorts = self.numPorts - 1
if not self.numPorts:
if self.noisy:
log.msg("Stopping factory %r" % self)
self.stopFactory()
def startFactory(self):
"""This will be called before I begin listening on a Port or Connector.
It will only be called once, even if the factory is connected
to multiple ports.
This can be used to perform 'unserialization' tasks that
are best put off until things are actually running, such
as connecting to a database, opening files, etcetera.
"""
def stopFactory(self):
"""This will be called before I stop listening on all Ports/Connectors.
This can be overridden to perform 'shutdown' tasks such as disconnecting
database connections, closing files, etc.
It will be called, for example, before an application shuts down,
if it was connected to a port. User code should not call this function
directly.
"""
def buildProtocol(self, addr):
"""Create an instance of a subclass of Protocol.
The returned instance will handle input on an incoming server
connection, and an attribute \"factory\" pointing to the creating
factory.
Override this method to alter how Protocol instances get created.
@param addr: an object implementing L{twisted.internet.interfaces.IAddress}
"""
p = self.protocol()
p.factory = self
return p
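# Illustrative sketch (editor addition, not part of the original module): the
# usual pattern is a Factory subclass whose C{protocol} attribute names the
# application's Protocol subclass; buildProtocol() then makes one instance per
# connection.  C{MyProtocol} below is hypothetical.
#
#     class MyFactory(Factory):
#         protocol = MyProtocol
#
#     factory = MyFactory()          # or: Factory.forProtocol(MyProtocol)
#     # reactor.listenTCP(8000, factory)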
class ClientFactory(Factory):
"""A Protocol factory for clients.
This can be used together with the various connectXXX methods in
reactors.
"""
def startedConnecting(self, connector):
"""Called when a connection has been started.
You can call connector.stopConnecting() to stop the connection attempt.
@param connector: a Connector object.
"""
def clientConnectionFailed(self, connector, reason):
"""Called when a connection has failed to connect.
It may be useful to call connector.connect() - this will reconnect.
@type reason: L{twisted.python.failure.Failure}
"""
def clientConnectionLost(self, connector, reason):
"""Called when an established connection is lost.
It may be useful to call connector.connect() - this will reconnect.
@type reason: L{twisted.python.failure.Failure}
"""
class _InstanceFactory(ClientFactory):
"""
Factory used by ClientCreator.
@ivar deferred: The L{Deferred} which represents this connection attempt and
which will be fired when it succeeds or fails.
@ivar pending: After a connection attempt succeeds or fails, a delayed call
which will fire the L{Deferred} representing this connection attempt.
"""
noisy = False
pending = None
def __init__(self, reactor, instance, deferred):
self.reactor = reactor
self.instance = instance
self.deferred = deferred
def __repr__(self):
return "<ClientCreator factory: %r>" % (self.instance, )
def buildProtocol(self, addr):
"""
Return the pre-constructed protocol instance and arrange to fire the
waiting L{Deferred} to indicate success establishing the connection.
"""
self.pending = self.reactor.callLater(
0, self.fire, self.deferred.callback, self.instance)
self.deferred = None
return self.instance
def clientConnectionFailed(self, connector, reason):
"""
Arrange to fire the waiting L{Deferred} with the given failure to
indicate the connection could not be established.
"""
self.pending = self.reactor.callLater(
0, self.fire, self.deferred.errback, reason)
self.deferred = None
def fire(self, func, value):
"""
Clear C{self.pending} to avoid a reference cycle and then invoke func
with the value.
"""
self.pending = None
func(value)
class ClientCreator:
"""
Client connections that do not require a factory.
The various connect* methods create a protocol instance using the given
protocol class and arguments, and connect it, returning a Deferred of the
resulting protocol instance.
Useful for cases when we don't really need a factory. Mainly this
is when there is no shared state between protocol instances, and no need
to reconnect.
The C{connectTCP}, C{connectUNIX}, and C{connectSSL} methods each return a
L{Deferred} which will fire with an instance of the protocol class passed to
    L{ClientCreator.__init__}. These Deferreds can be cancelled to abort the
connection attempt (in a very unlikely case, cancelling the Deferred may not
prevent the protocol from being instantiated and connected to a transport;
if this happens, it will be disconnected immediately afterwards and the
Deferred will still errback with L{CancelledError}).
"""
def __init__(self, reactor, protocolClass, *args, **kwargs):
self.reactor = reactor
self.protocolClass = protocolClass
self.args = args
self.kwargs = kwargs
def _connect(self, method, *args, **kwargs):
"""
Initiate a connection attempt.
@param method: A callable which will actually start the connection
attempt. For example, C{reactor.connectTCP}.
@param *args: Positional arguments to pass to C{method}, excluding the
factory.
@param **kwargs: Keyword arguments to pass to C{method}.
@return: A L{Deferred} which fires with an instance of the protocol
class passed to this L{ClientCreator}'s initializer or fails if the
connection cannot be set up for some reason.
"""
def cancelConnect(deferred):
connector.disconnect()
if f.pending is not None:
f.pending.cancel()
d = defer.Deferred(cancelConnect)
f = _InstanceFactory(
self.reactor, self.protocolClass(*self.args, **self.kwargs), d)
connector = method(factory=f, *args, **kwargs)
return d
def connectTCP(self, host, port, timeout=30, bindAddress=None):
"""
Connect to a TCP server.
The parameters are all the same as to L{IReactorTCP.connectTCP} except
that the factory parameter is omitted.
@return: A L{Deferred} which fires with an instance of the protocol
class passed to this L{ClientCreator}'s initializer or fails if the
connection cannot be set up for some reason.
"""
return self._connect(
self.reactor.connectTCP, host, port, timeout=timeout,
bindAddress=bindAddress)
def connectUNIX(self, address, timeout=30, checkPID=False):
"""
Connect to a Unix socket.
The parameters are all the same as to L{IReactorUNIX.connectUNIX} except
that the factory parameter is omitted.
@return: A L{Deferred} which fires with an instance of the protocol
class passed to this L{ClientCreator}'s initializer or fails if the
connection cannot be set up for some reason.
"""
return self._connect(
self.reactor.connectUNIX, address, timeout=timeout,
checkPID=checkPID)
def connectSSL(self, host, port, contextFactory, timeout=30, bindAddress=None):
"""
Connect to an SSL server.
The parameters are all the same as to L{IReactorSSL.connectSSL} except
that the factory parameter is omitted.
@return: A L{Deferred} which fires with an instance of the protocol
class passed to this L{ClientCreator}'s initializer or fails if the
connection cannot be set up for some reason.
"""
return self._connect(
self.reactor.connectSSL, host, port,
contextFactory=contextFactory, timeout=timeout,
bindAddress=bindAddress)
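# Illustrative usage sketch (editor addition, not part of the original API).
# The protocol class, host and port below are placeholders; the Deferred fires
# with the connected protocol instance, as described in the docstrings above.
def _exampleClientCreatorUsage(reactor, protocolClass, host="example.com", port=80):
    creator = ClientCreator(reactor, protocolClass)
    d = creator.connectTCP(host, port, timeout=10)
    def connected(proto):
        # Use the protocol, then drop the connection.
        proto.transport.loseConnection()
        return proto
    return d.addCallback(connected)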
class ReconnectingClientFactory(ClientFactory):
"""
Factory which auto-reconnects clients with an exponential back-off.
Note that clients should call my resetDelay method after they have
connected successfully.
@ivar maxDelay: Maximum number of seconds between connection attempts.
@ivar initialDelay: Delay for the first reconnection attempt.
    @ivar factor: A multiplicative factor by which the delay grows
@ivar jitter: Percentage of randomness to introduce into the delay length
to prevent stampeding.
@ivar clock: The clock used to schedule reconnection. It's mainly useful to
be parametrized in tests. If the factory is serialized, this attribute
will not be serialized, and the default value (the reactor) will be
restored when deserialized.
@type clock: L{IReactorTime}
@ivar maxRetries: Maximum number of consecutive unsuccessful connection
attempts, after which no further connection attempts will be made. If
this is not explicitly set, no maximum is applied.
"""
maxDelay = 3600
initialDelay = 1.0
# Note: These highly sensitive factors have been precisely measured by
# the National Institute of Science and Technology. Take extreme care
# in altering them, or you may damage your Internet!
# (Seriously: <http://physics.nist.gov/cuu/Constants/index.html>)
factor = 2.7182818284590451 # (math.e)
# Phi = 1.6180339887498948 # (Phi is acceptable for use as a
# factor if e is too large for your application.)
jitter = 0.11962656472 # molar Planck constant times c, joule meter/mole
delay = initialDelay
retries = 0
maxRetries = None
_callID = None
connector = None
clock = None
continueTrying = 1
def clientConnectionFailed(self, connector, reason):
if self.continueTrying:
self.connector = connector
self.retry()
def clientConnectionLost(self, connector, unused_reason):
if self.continueTrying:
self.connector = connector
self.retry()
def retry(self, connector=None):
"""
Have this connector connect again, after a suitable delay.
"""
if not self.continueTrying:
if self.noisy:
log.msg("Abandoning %s on explicit request" % (connector,))
return
if connector is None:
if self.connector is None:
raise ValueError("no connector to retry")
else:
connector = self.connector
self.retries += 1
if self.maxRetries is not None and (self.retries > self.maxRetries):
if self.noisy:
log.msg("Abandoning %s after %d retries." %
(connector, self.retries))
return
self.delay = min(self.delay * self.factor, self.maxDelay)
if self.jitter:
self.delay = random.normalvariate(self.delay,
self.delay * self.jitter)
if self.noisy:
log.msg("%s will retry in %d seconds" % (connector, self.delay,))
def reconnector():
self._callID = None
connector.connect()
if self.clock is None:
from twisted.internet import reactor
self.clock = reactor
self._callID = self.clock.callLater(self.delay, reconnector)
def stopTrying(self):
"""
Put a stop to any attempt to reconnect in progress.
"""
# ??? Is this function really stopFactory?
if self._callID:
self._callID.cancel()
self._callID = None
self.continueTrying = 0
if self.connector:
try:
self.connector.stopConnecting()
except error.NotConnectingError:
pass
def resetDelay(self):
"""
Call this method after a successful connection: it resets the delay and
the retry counter.
"""
self.delay = self.initialDelay
self.retries = 0
self._callID = None
self.continueTrying = 1
def __getstate__(self):
"""
Remove all of the state which is mutated by connection attempts and
failures, returning just the state which describes how reconnections
should be attempted. This will make the unserialized instance
behave just as this one did when it was first instantiated.
"""
state = self.__dict__.copy()
for key in ['connector', 'retries', 'delay',
'continueTrying', '_callID', 'clock']:
if key in state:
del state[key]
return state
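# Illustrative sketch (editor addition, not part of the original module): a
# typical subclass resets the back-off once a connection has actually been
# built, so the delay only grows across consecutive failures.  A real subclass
# would also set C{protocol}.
class _ExampleReconnectingFactory(ReconnectingClientFactory):
    def buildProtocol(self, addr):
        # Connection succeeded: restart the exponential back-off.
        self.resetDelay()
        return ReconnectingClientFactory.buildProtocol(self, addr)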
class ServerFactory(Factory):
"""Subclass this to indicate that your protocol.Factory is only usable for servers.
"""
class BaseProtocol:
"""
This is the abstract superclass of all protocols.
Some methods have helpful default implementations here so that they can
easily be shared, but otherwise the direct subclasses of this class are more
interesting, L{Protocol} and L{ProcessProtocol}.
"""
connected = 0
transport = None
def makeConnection(self, transport):
"""Make a connection to a transport and a server.
This sets the 'transport' attribute of this Protocol, and calls the
connectionMade() callback.
"""
self.connected = 1
self.transport = transport
self.connectionMade()
def connectionMade(self):
"""Called when a connection is made.
This may be considered the initializer of the protocol, because
it is called when the connection is completed. For clients,
this is called once the connection to the server has been
established; for servers, this is called after an accept() call
stops blocking and a socket has been received. If you need to
send any greeting or initial message, do it here.
"""
connectionDone=failure.Failure(error.ConnectionDone())
connectionDone.cleanFailure()
@implementer(interfaces.IProtocol, interfaces.ILoggingContext)
class Protocol(BaseProtocol):
"""
This is the base class for streaming connection-oriented protocols.
If you are going to write a new connection-oriented protocol for Twisted,
start here. Any protocol implementation, either client or server, should
be a subclass of this class.
The API is quite simple. Implement L{dataReceived} to handle both
event-based and synchronous input; output can be sent through the
'transport' attribute, which is to be an instance that implements
L{twisted.internet.interfaces.ITransport}. Override C{connectionLost} to be
notified when the connection ends.
Some subclasses exist already to help you write common types of protocols:
see the L{twisted.protocols.basic} module for a few of them.
"""
def logPrefix(self):
"""
Return a prefix matching the class name, to identify log messages
related to this protocol instance.
"""
return self.__class__.__name__
def dataReceived(self, data):
"""Called whenever data is received.
Use this method to translate to a higher-level message. Usually, some
callback will be made upon the receipt of each complete protocol
message.
@param data: a string of indeterminate length. Please keep in mind
that you will probably need to buffer some data, as partial
(or multiple) protocol messages may be received! I recommend
that unit tests for protocols call through to this method with
differing chunk sizes, down to one byte at a time.
"""
def connectionLost(self, reason=connectionDone):
"""Called when the connection is shut down.
Clear any circular references here, and any external references
to this Protocol. The connection has been closed.
@type reason: L{twisted.python.failure.Failure}
"""
@implementer(interfaces.IConsumer)
class ProtocolToConsumerAdapter(components.Adapter):
def write(self, data):
self.original.dataReceived(data)
def registerProducer(self, producer, streaming):
pass
def unregisterProducer(self):
pass
components.registerAdapter(ProtocolToConsumerAdapter, interfaces.IProtocol,
interfaces.IConsumer)
@implementer(interfaces.IProtocol)
class ConsumerToProtocolAdapter(components.Adapter):
def dataReceived(self, data):
self.original.write(data)
def connectionLost(self, reason):
pass
def makeConnection(self, transport):
pass
def connectionMade(self):
pass
components.registerAdapter(ConsumerToProtocolAdapter, interfaces.IConsumer,
interfaces.IProtocol)
@implementer(interfaces.IProcessProtocol)
class ProcessProtocol(BaseProtocol):
"""
Base process protocol implementation which does simple dispatching for
stdin, stdout, and stderr file descriptors.
"""
def childDataReceived(self, childFD, data):
if childFD == 1:
self.outReceived(data)
elif childFD == 2:
self.errReceived(data)
def outReceived(self, data):
"""
Some data was received from stdout.
"""
def errReceived(self, data):
"""
Some data was received from stderr.
"""
def childConnectionLost(self, childFD):
if childFD == 0:
self.inConnectionLost()
elif childFD == 1:
self.outConnectionLost()
elif childFD == 2:
self.errConnectionLost()
def inConnectionLost(self):
"""
This will be called when stdin is closed.
"""
def outConnectionLost(self):
"""
This will be called when stdout is closed.
"""
def errConnectionLost(self):
"""
This will be called when stderr is closed.
"""
def processExited(self, reason):
"""
This will be called when the subprocess exits.
@type reason: L{twisted.python.failure.Failure}
"""
def processEnded(self, reason):
"""
Called when the child process exits and all file descriptors
associated with it have been closed.
@type reason: L{twisted.python.failure.Failure}
"""
class AbstractDatagramProtocol:
"""
Abstract protocol for datagram-oriented transports, e.g. IP, ICMP, ARP, UDP.
"""
transport = None
numPorts = 0
noisy = True
def __getstate__(self):
d = self.__dict__.copy()
d['transport'] = None
return d
def doStart(self):
"""Make sure startProtocol is called.
This will be called by makeConnection(), users should not call it.
"""
if not self.numPorts:
if self.noisy:
log.msg("Starting protocol %s" % self)
self.startProtocol()
self.numPorts = self.numPorts + 1
def doStop(self):
"""Make sure stopProtocol is called.
This will be called by the port, users should not call it.
"""
assert self.numPorts > 0
self.numPorts = self.numPorts - 1
self.transport = None
if not self.numPorts:
if self.noisy:
log.msg("Stopping protocol %s" % self)
self.stopProtocol()
def startProtocol(self):
"""Called when a transport is connected to this protocol.
Will only be called once, even if multiple ports are connected.
"""
def stopProtocol(self):
"""Called when the transport is disconnected.
Will only be called once, after all ports are disconnected.
"""
def makeConnection(self, transport):
"""Make a connection to a transport and a server.
This sets the 'transport' attribute of this DatagramProtocol, and calls the
doStart() callback.
"""
assert self.transport == None
self.transport = transport
self.doStart()
def datagramReceived(self, datagram, addr):
"""Called when a datagram is received.
@param datagram: the string received from the transport.
@param addr: tuple of source of datagram.
"""
@implementer(interfaces.ILoggingContext)
class DatagramProtocol(AbstractDatagramProtocol):
"""
Protocol for datagram-oriented transport, e.g. UDP.
@type transport: C{NoneType} or
L{IUDPTransport<twisted.internet.interfaces.IUDPTransport>} provider
@ivar transport: The transport with which this protocol is associated,
if it is associated with one.
"""
def logPrefix(self):
"""
Return a prefix matching the class name, to identify log messages
related to this protocol instance.
"""
return self.__class__.__name__
def connectionRefused(self):
"""Called due to error from write in connected mode.
        Note this is a result of an ICMP message generated by a *previous*
        write.
"""
class ConnectedDatagramProtocol(DatagramProtocol):
"""Protocol for connected datagram-oriented transport.
No longer necessary for UDP.
"""
def datagramReceived(self, datagram):
"""Called when a datagram is received.
@param datagram: the string received from the transport.
"""
def connectionFailed(self, failure):
"""Called if connecting failed.
Usually this will be due to a DNS lookup failure.
"""
@implementer(interfaces.ITransport)
class FileWrapper:
"""A wrapper around a file-like object to make it behave as a Transport.
This doesn't actually stream the file to the attached protocol,
and is thus useful mainly as a utility for debugging protocols.
"""
closed = 0
disconnecting = 0
producer = None
streamingProducer = 0
def __init__(self, file):
self.file = file
def write(self, data):
try:
self.file.write(data)
except:
self.handleException()
# self._checkProducer()
def _checkProducer(self):
# Cheating; this is called at "idle" times to allow producers to be
# found and dealt with
if self.producer:
self.producer.resumeProducing()
def registerProducer(self, producer, streaming):
"""From abstract.FileDescriptor
"""
self.producer = producer
self.streamingProducer = streaming
if not streaming:
producer.resumeProducing()
def unregisterProducer(self):
self.producer = None
def stopConsuming(self):
self.unregisterProducer()
self.loseConnection()
def writeSequence(self, iovec):
self.write("".join(iovec))
def loseConnection(self):
self.closed = 1
try:
self.file.close()
except (IOError, OSError):
self.handleException()
def getPeer(self):
# XXX: According to ITransport, this should return an IAddress!
return 'file', 'file'
def getHost(self):
# XXX: According to ITransport, this should return an IAddress!
return 'file'
def handleException(self):
pass
def resumeProducing(self):
# Never sends data anyways
pass
def pauseProducing(self):
# Never sends data anyways
pass
def stopProducing(self):
self.loseConnection()
__all__ = ["Factory", "ClientFactory", "ReconnectingClientFactory", "connectionDone",
"Protocol", "ProcessProtocol", "FileWrapper", "ServerFactory",
"AbstractDatagramProtocol", "DatagramProtocol", "ConnectedDatagramProtocol",
"ClientCreator"]
| bsd-3-clause | 4,923,557,377,898,244,000 | 29.970519 | 89 | 0.639036 | false |
gnowledge/ncert_nroer | gstudio/templatetags/gstudio_tags.py | 1 | 27697 |
# Copyright (c) 2011, 2012 Free Software Foundation
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Template tags and filters for Gstudio"""
from hashlib import md5
from random import sample
from urllib import urlencode
from datetime import datetime
from django.db.models import Q
from django.db import connection
from django.template import Node as nd
from django.template import Library
from django.template import TemplateSyntaxError
from django.contrib.comments.models import CommentFlag
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import smart_unicode
from django.contrib.comments import get_model as get_comment_model
from unidecode import unidecode
from tagging.models import Tag
from tagging.utils import calculate_cloud
from gstudio.models import Nodetype
from gstudio.models import Author
from gstudio.models import Metatype
from gstudio.gnowql import get_node
from gstudio.managers import tags_published
from gstudio.comparison import VectorBuilder
from gstudio.comparison import pearson_score
from gstudio.templatetags.zcalendar import GstudioCalendar
from gstudio.templatetags.zbreadcrumbs import retrieve_breadcrumbs
from django.http import HttpResponseRedirect
from gstudio.CNL import *
from gstudio.methods import check_release_or_not
import os
from settings import STATIC_URL,ADMIN_MEDIA_PREFIX
import tarfile
from gstudio.methods import *
import contextlib
register = Library()
VECTORS = None
VECTORS_FACTORY = lambda: VectorBuilder(Nodetype.published.all(),
['title', 'excerpt', 'content'])
CACHE_NODETYPES_RELATED = {}
@contextlib.contextmanager
def cd_change(tmp_location):
cd = os.getcwd()
os.chdir(tmp_location)
try:
yield
finally:
os.chdir(cd)
@register.assignment_tag
def get_each_activity(each):
try:
spl=each.split(",")
print spl
if spl[1]:
strn=spl[1].split("!")
else:
return "empty"
print strn
strg=strn[0]
cnt=strg.find("Edit")
print strg,cnt
if cnt > -1:
return "Edit"
cnt=strg.find("Upload")
if cnt > -1:
return "Upload"
cnt=strg.find("Add")
if cnt > -1:
return "Add"
cnt=strg.find("Delete")
if cnt > -1:
return "Delete"
return "empty"
except:
return "empty"
@register.assignment_tag
def get_each_title(each):
try:
spl=each.split("-")
tit=spl[1].split("http")
return tit[0]
except:
return ""
@register.assignment_tag
def get_each_url(each):
try:
spl=each.split("http")
rets="http"+spl[1]
return rets
except:
return ""
@register.assignment_tag
def get_slug_of_video(videoid):
print "videoid",videoid
slug=""
vid=Gbobject.objects.filter(id=videoid)
if vid:
Gbobject.objects.get(id=videoid)
slug=vid.slug
print "videoslug",vid.slug
return slug
@register.assignment_tag
def get_image_object(objectid):
obj=Gbobject.objects.get(id=objectid)
return obj
@register.assignment_tag
def get_related_images(imageid):
try:
gbobject=Gbobject.objects.get(id=imageid)
tag = Tag.objects.get_for_object(gbobject)
otherRelatedimages = []
for each in tag:
print "alliteS",each.items.all()
for each1 in each.items.all():
tagItem = each1.object
print "tagitem",tagItem
check = tagItem.objecttypes.all()
if check.filter(title__contains="Image"):
if not tagItem.id == gbobject.id:
print tagItem,"tagit"
otherRelatedimages.append(tagItem)
except:
pass
return otherRelatedimages
@register.assignment_tag
def get_related_docus(docid):
try:
gbobject=Gbobject.objects.get(id=docid)
tag = Tag.objects.get_for_object(gbobject)
otherRelateddocs = []
for each in tag:
print "alliteS",each.items.all()
for each1 in each.items.all():
tagItem = each1.object
print "tagitem",tagItem
check = tagItem.objecttypes.all()
if check.filter(title__contains="Document"):
if not tagItem.id == gbobject.id:
print tagItem,"tagit"
otherRelateddocs.append(tagItem)
except:
pass
return otherRelateddocs
@register.assignment_tag
def get_first_object(imgcolln):
col=imgcolln[0]
return col
@register.assignment_tag
def split_images(imglst):
split=[]
lnimg=len(imglst)
j=0
while j < lnimg:
i=0
ins=[]
while i < 3 and j < lnimg :
ins.append(imglst[j])
i=i+1
j=j+1
split.append(ins)
return split
@register.assignment_tag
def get_doc_download(docid):
try:
sys=System.objects.get(id=docid)
filn="static/img/" + sys.title + ".tar.gz"
os.system("rm -rf /tmp/nroer/docdownload/")
os.system("mkdir /tmp/nroer/docdownload/")
strn="rm "+filn
print "delest",strn
os.system(strn)
tar=tarfile.open(filn,"w:gz")
mems=get_gbobjects(docid)
print "mems",mems
for each in mems:
fna="img/"+str(each.altnames)
fname=os.path.join("static/",fna)
strn="cp "+ fname +" /tmp/nroer/docdownload/"
print strn,"cpystr"
os.system(strn)
with cd_change("/tmp/nroer/docdownload/"):
for files in os.listdir('.'):
tar.add(files)
print "adding"
tar.close()
print "filname",filn
except:
pass
return filn
@register.inclusion_tag('gstudio/editdoccollns.html')
def show_edit_doc_collection(doccolid,user):
template='gstudio/editdoccollns.html'
print template,"t"
listcolls={}
syst=Objecttype.objects.get(title='Document')
a=syst.get_nbh['contains_members']
for each in a:
listcolls[each.id]=each.title
sys=System.objects.get(id=doccolid)
testlst=get_gbobjects(doccolid)
return {'template':template,'test':testlst,'user':user,'test1':listcolls,'doc':sys}
@register.inclusion_tag('gstudio/editcollection.html')
def show_edit_collection(imgcolid,user):
template='gstudio/editcollection.html'
listcolls={}
syst=Objecttype.objects.get(title='Image')
a=syst.get_nbh['contains_members']
for each in a:
listcolls[each.id]=each.title
sys=System.objects.get(id=imgcolid)
testlst=get_gbobjects(imgcolid)
print "editlist",testlst
return {'template':template,'test':testlst,'user':user,'test1':listcolls,'image':sys}
@register.assignment_tag
def check_if_collection(sysid):
a=Systemtype.objects.get(title='Imagecollection')
b=Systemtype.objects.get(title='Documentcollection')
fl=0
for each in a.member_systems.all():
if each.id == sysid:
fl=1
for each1 in b.member_systems.all():
if each1.id == sysid:
fl=1
return fl
@register.assignment_tag
def show_image_collections(imgcolid):
listcol=get_gbobjects(imgcolid)
return listcol
@register.assignment_tag
def show_doc_collections(doccolid):
listcol=get_gbobjects(doccolid)
return listcol
@register.assignment_tag
def get_document_collections():
print "inside getdoccoll"
listcolls={}
syst=Systemtype.objects.get(title='Documentcollection')
a=syst.member_systems.all()
for each in a:
listcolls[each.id]=each.title
return listcolls
@register.assignment_tag
def get_image_collections():
listcolls={}
syst=Systemtype.objects.get(title='Imagecollection')
a=syst.member_systems.all()
for each in a:
listcolls[each.id]=each.title
return listcolls
@register.inclusion_tag('gstudio/tags/dummy.html')
def get_metatypes(template='gstudio/tags/metatypes.html'):
"""Return the metatypes"""
return {'template': template,
'metatypes': Metatype.tree.all()}
#@register.inclusion_tag('gstudio/tags/dummy.html')
#def get_subtypes(template='gstudio/tags/nodetypes.html'):
# """Return the subtypes"""
# return {'template': template,
# 'subtypes': Nodetype.tree.all()}
@register.inclusion_tag('gstudio/tags/dummy.html')
def get_authors(number=5, template='gstudio/tags/authors.html'):
"""Return the published authors"""
return {'template': template,
'authors': Author.published.all()[:number]}
@register.inclusion_tag('gstudio/tags/dummy.html')
def get_recent_nodetypes(number=5, template='gstudio/tags/recent_nodetypes.html'):
"""Return the most recent nodetypes"""
return {'template': template,
'nodetypes': Nodetype.published.all()[:number]}
@register.inclusion_tag('gstudio/tags/dummy.html')
def get_featured_nodetypes(number=5,
template='gstudio/tags/featured_nodetypes.html'):
"""Return the featured nodetypes"""
return {'template': template,
'nodetypes': Nodetype.published.filter(featured=True)[:number]}
@register.inclusion_tag('gstudio/tags/dummy.html')
def get_random_nodetypes(number=5, template='gstudio/tags/random_nodetypes.html'):
"""Return random nodetypes"""
nodetypes = Nodetype.published.all()
if number > len(nodetypes):
number = len(nodetypes)
return {'template': template,
'nodetypes': sample(nodetypes, number)}
@register.inclusion_tag('gstudio/tags/dummy.html')
def get_popular_nodetypes(number=5, template='gstudio/tags/popular_nodetypes.html'):
"""Return popular nodetypes"""
ctype = ContentType.objects.get_for_model(Nodetype)
query = """SELECT object_pk, COUNT(*) AS score
FROM %s
WHERE content_type_id = %%s
AND is_public = '1'
GROUP BY object_pk
ORDER BY score DESC""" % get_comment_model()._meta.db_table
cursor = connection.cursor()
cursor.execute(query, [ctype.id])
object_ids = [int(row[0]) for row in cursor.fetchall()]
# Use ``in_bulk`` here instead of an ``id__in`` filter, because ``id__in``
# would clobber the ordering.
object_dict = Nodetype.published.in_bulk(object_ids)
return {'template': template,
'nodetypes': [object_dict[object_id]
for object_id in object_ids
if object_id in object_dict][:number]}
@register.inclusion_tag('gstudio/tags/dummy.html', takes_context=True)
def get_similar_nodetypes(context, number=5,
template='gstudio/tags/similar_nodetypes.html',
flush=False):
"""Return similar nodetypes"""
global VECTORS
global CACHE_NODETYPES_RELATED
if VECTORS is None or flush:
VECTORS = VECTORS_FACTORY()
CACHE_NODETYPES_RELATED = {}
def compute_related(object_id, dataset):
"""Compute related nodetypes to a nodetype with a dataset"""
object_vector = None
for nodetype, e_vector in dataset.items():
if nodetype.pk == object_id:
object_vector = e_vector
if not object_vector:
return []
nodetype_related = {}
for nodetype, e_vector in dataset.items():
if nodetype.pk != object_id:
score = pearson_score(object_vector, e_vector)
if score:
nodetype_related[nodetype] = score
related = sorted(nodetype_related.items(), key=lambda(k, v): (v, k))
return [rel[0] for rel in related]
object_id = context['object'].pk
columns, dataset = VECTORS()
key = '%s-%s' % (object_id, VECTORS.key)
if not key in CACHE_NODETYPES_RELATED.keys():
CACHE_NODETYPES_RELATED[key] = compute_related(object_id, dataset)
nodetypes = CACHE_NODETYPES_RELATED[key][:number]
return {'template': template,
'nodetypes': nodetypes}
@register.inclusion_tag('gstudio/tags/dummy.html')
def get_archives_nodetypes(template='gstudio/tags/archives_nodetypes.html'):
"""Return archives nodetypes"""
return {'template': template,
'archives': Nodetype.published.dates('creation_date', 'month',
order='DESC')}
@register.inclusion_tag('gstudio/tags/dummy.html')
def get_archives_nodetypes_tree(
template='gstudio/tags/archives_nodetypes_tree.html'):
"""Return archives nodetypes as a Tree"""
return {'template': template,
'archives': Nodetype.published.dates('creation_date', 'day',
order='ASC')}
@register.inclusion_tag('gstudio/tags/dummy.html', takes_context=True)
def get_calendar_nodetypes(context, year=None, month=None,
template='gstudio/tags/calendar.html'):
"""Return an HTML calendar of nodetypes"""
if not year or not month:
date_month = context.get('month') or context.get('day') or \
getattr(context.get('object'), 'creation_date', None) or \
datetime.today()
year, month = date_month.timetuple()[:2]
calendar = GstudioCalendar()
current_month = datetime(year, month, 1)
dates = list(Nodetype.published.dates('creation_date', 'month'))
if not current_month in dates:
dates.append(current_month)
dates.sort()
index = dates.index(current_month)
previous_month = index > 0 and dates[index - 1] or None
next_month = index != len(dates) - 1 and dates[index + 1] or None
return {'template': template,
'next_month': next_month,
'previous_month': previous_month,
'calendar': calendar.formatmonth(year, month)}
@register.inclusion_tag('gstudio/tags/dummy.html')
def get_recent_comments(number=5, template='gstudio/tags/recent_comments.html'):
"""Return the most recent comments"""
# Using map(smart_unicode... fix bug related to issue #8554
#Modified comments to include CNL
nodetype_published_pks = map(smart_unicode,
Nodetype.published.values_list('id', flat=True))
content_type = ContentType.objects.get_for_model(Nodetype)
comments = get_comment_model().objects.filter(
Q(flags=None) | Q(flags__flag=CommentFlag.MODERATOR_APPROVAL),
content_type=content_type, object_pk__in=nodetype_published_pks,
is_public=True).order_by('-submit_date')[:number]
return {'template': template,
'comments': comments}
@register.inclusion_tag('gstudio/tags/dummy.html')
def get_recent_oldcomments(number=5, template='gstudio/tags/recent_comments.html'):
"""Return the most recent comments"""
# Using map(smart_unicode... fix bug related to issue #8554
nodetype_published_pks = map(smart_unicode,
Nodetype.published.values_list('id', flat=True))
content_type = ContentType.objects.get_for_model(Nodetype)
comments = get_comment_model().objects.filter(
Q(flags=None) | Q(flags__flag=CommentFlag.MODERATOR_APPROVAL),
content_type=content_type, object_pk__in=nodetype_published_pks,
is_public=True).order_by('-submit_date')[:number]
return {'template': template,
'comments': comments}
@register.inclusion_tag('gstudio/tags/dummy.html')
def get_recent_linkbacks(number=5,
template='gstudio/tags/recent_linkbacks.html'):
"""Return the most recent linkbacks"""
nodetype_published_pks = map(smart_unicode,
Nodetype.published.values_list('id', flat=True))
content_type = ContentType.objects.get_for_model(Nodetype)
linkbacks = get_comment_model().objects.filter(
content_type=content_type,
object_pk__in=nodetype_published_pks,
flags__flag__in=['pingback', 'trackback'],
is_public=True).order_by(
'-submit_date')[:number]
return {'template': template,
'linkbacks': linkbacks}
@register.inclusion_tag('gstudio/tags/dummy.html', takes_context=True)
def gstudio_pagination(context, page, begin_pages=3, end_pages=3,
before_pages=2, after_pages=2,
template='gstudio/tags/pagination.html'):
"""Return a Digg-like pagination, by splitting long list of page
into 3 blocks of pages"""
GET_string = ''
for key, value in context['request'].GET.items():
if key != 'page':
GET_string += '&%s=%s' % (key, value)
begin = page.paginator.page_range[:begin_pages]
end = page.paginator.page_range[-end_pages:]
middle = page.paginator.page_range[max(page.number - before_pages - 1, 0):
page.number + after_pages]
if set(begin) & set(end): # [1, 2, 3], [...], [2, 3, 4]
begin = sorted(set(begin + end)) # [1, 2, 3, 4]
middle, end = [], []
elif begin[-1] + 1 == end[0]: # [1, 2, 3], [...], [4, 5, 6]
begin += end # [1, 2, 3, 4, 5, 6]
middle, end = [], []
elif set(begin) & set(middle): # [1, 2, 3], [2, 3, 4], [...]
begin = sorted(set(begin + middle)) # [1, 2, 3, 4]
middle = []
elif begin[-1] + 1 == middle[0]: # [1, 2, 3], [4, 5, 6], [...]
begin += middle # [1, 2, 3, 4, 5, 6]
middle = []
elif middle[-1] + 1 == end[0]: # [...], [15, 16, 17], [18, 19, 20]
end = middle + end # [15, 16, 17, 18, 19, 20]
middle = []
elif set(middle) & set(end): # [...], [17, 18, 19], [18, 19, 20]
end = sorted(set(middle + end)) # [17, 18, 19, 20]
middle = []
return {'template': template, 'page': page, 'GET_string': GET_string,
'begin': begin, 'middle': middle, 'end': end}
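# Illustrative template-side usage (editor addition), assuming the view placed
# a paginator page in the context as ``page_obj``:
#     {% load gstudio_tags %}
#     {% gstudio_pagination page_obj %}
# The tag renders the begin/middle/end page blocks computed above.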
@register.inclusion_tag('gstudio/tags/dummy.html', takes_context=True)
def gstudio_breadcrumbs(context, separator='>>', root_name='Home',
template='gstudio/tags/breadcrumbs.html',):
"""Return a breadcrumb for the application"""
path = context['request'].path
page_object = context.get('object') or context.get('metatype') or \
context.get('tag') or context.get('author') or context.get('image') or context.get('video')or context.get('doc') or context.get('meet_ob')
breadcrumbs = retrieve_breadcrumbs(path, page_object, root_name)
print breadcrumbs,"brcrbs",path,page_object,root_name
return {'template': template,
'separator': separator,
'breadcrumbs': breadcrumbs}
@register.simple_tag
def get_gravatar(email, size=80, rating='g', default=None):
"""Return url for a Gravatar"""
url = 'http://www.gravatar.com/avatar/%s.jpg' % \
md5(email.strip().lower()).hexdigest()
options = {'s': size, 'r': rating}
if default:
options['d'] = default
url = '%s?%s' % (url, urlencode(options))
    return url.replace('&', '&amp;')
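# Illustrative template-side usage (editor addition): as a simple_tag this can
# be rendered directly, e.g. {% get_gravatar comment.email 60 'g' %}; the
# query-string ampersands are escaped above so the URL can sit in HTML markup.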
@register.simple_tag
def get_type(name):
"""Return the type of node"""
return get_node(name)
class TagsNode(nd):
def __init__(self, context_var):
self.context_var = context_var
def render(self, context):
context[self.context_var] = tags_published()
return ''
#define get_CNL function
@register.assignment_tag
def get_CNL(no, takes_context = True):
l = get_CNL_list(no)
return l
@register.tag
def get_tags(parser, token):
"""{% get_tags as var %}"""
bits = token.split_contents()
if len(bits) != 3:
raise TemplateSyntaxError(
'get_tags tag takes exactly two arguments')
if bits[1] != 'as':
raise TemplateSyntaxError(
"first argument to get_tags tag must be 'as'")
return TagsNode(bits[2])
@register.simple_tag
def redirect(username):
link = "/"
return HttpResponseRedirect(link)
@register.inclusion_tag('gstudio/tags/dummy.html')
def get_tag_cloud(steps=6, template='gstudio/tags/tag_cloud.html'):
"""Return a cloud of published tags"""
tags = Tag.objects.usage_for_queryset(
Nodetype.published.all(), counts=True)
return {'template': template,
'tags': calculate_cloud(tags, steps)}
@register.inclusion_tag('gstudio/tags/comment.html')
def show_comment(comment,idusr,flag,admin_id,attob):
return {'comment':comment , 'idusr' : idusr, "flag" : flag, "admin_id" : admin_id , "attribute" : attob}
@register.inclusion_tag('gstudio/tags/commentpage.html')
def show_commentpage(comment,idusr,flag,admin_id,attob):
return {'comment':comment , 'idusr' : idusr, "flag" : flag, "admin_id" : admin_id , "attribute" : attob}
@register.simple_tag
def show_nodesystem(object_id):
search=object_id
nbh=""
url=""
for each in System.objects.all():
sysid=each.id
for eachsys in each.systemtypes.all():
if eachsys.title=="Meeting":
url="group/gnowsys-grp/"
objecttitle = "TWIST"
elif eachsys.title=="Wikipage":
url="page/gnowsys-page/"
objecttitle = "WIKIPAGE"
for eachob in each.system_set.all():
if eachob.gbobject_set.all():
for eachgbob in eachob.gbobject_set.all():
if search==eachgbob.id:
nbh=url+str(sysid)
if search==sysid:
nbh=url+str(sysid)
return nbh
@register.assignment_tag
def check_release(meeting):
var = check_release_or_not(meeting)
return var
@register.assignment_tag
def check_subscribe(meeting,user):
var = check_subscribe_or_not(meeting,user)
return var
@register.assignment_tag
def check_user_admin(userid):
var=check_usr_admin(userid)
return var
@register.assignment_tag
def get_static_url():
var = os.path.join(os.path.dirname(__file__),STATIC_URL)
return var
@register.assignment_tag
def get_factory_looms():
fs = []
fs = get_factory_loom_OTs()
return fs
@register.inclusion_tag('gstudio/puttagsearch.html')
def put_tag_search():
template='gstudio/puttagsearch.html'
return {'template':template}
@register.assignment_tag
def put_home_content():
var = get_home_content()
return var
@register.assignment_tag
def put_more_content():
var = get_more_content()
return var
@register.assignment_tag
def put_home_title():
var = get_home_title()
return var
@register.inclusion_tag('gstudio/addreln.html')
def add_res_relation(meetingob,user):
template='gstudio/addreln.html'
return {'template':template,'meetingob':meetingob,'user':user}
@register.simple_tag
def get_available_level():
listlev=[]
lev=System.objects.get(id=19021)
s=unidecode(lev.title)
listlev.append(str(s))
lev=System.objects.get(id=18968)
s=unidecode(lev.title)
listlev.append(str(s))
lev=System.objects.get(id=39965)
s=unidecode(lev.title)
listlev.append(str(s))
return listlev
@register.simple_tag
def get_available_subjs():
listsubjs=[]
wikis=Systemtype.objects.get(title='Collection')
wiki=wikis.member_systems.all()
for each in wiki:
#unicodedata.normalize('NFKD', a.title).encode('ascii','ignore')
# s=unicodedata.normalize('NFKD', each.title).encode('ascii','ignore')+" - with inverse - "+unicodedata.normalize('NFKD',each.inverse).encode('ascii','ignore')
s=unidecode(each.title)
listsubjs.append(str(s))
return listsubjs
@register.simple_tag
def get_available_rts():
listrts={}
for each in Relationtype.objects.all():
#unicodedata.normalize('NFKD', a.title).encode('ascii','ignore')
# s=unicodedata.normalize('NFKD', each.title).encode('ascii','ignore')+" - with inverse - "+unicodedata.normalize('NFKD',each.inverse).encode('ascii','ignore')
s=unidecode(each.title)+" - with inverse - "
listrts[str(s)]=each.id
return listrts
@register.simple_tag
def get_available_objects():
listsubjs={}
# obtype=""
# vid=Nodetype.objects.get(title='Video')
# img=Nodetype.objects.get(title='Image')
# doc=Nodetype.objects.get(title='Document')
# col=Systemtype.objects.get(title='Collection')
# wiki=Systemtype.objects.get(title='Wikipage')
# meet=Systemtype.objects.get(title='Meeting')
for each in Gbobject.objects.all():
obtypes=each.objecttypes.all()
if not ('page box of' in each.title or 'message box of' in each.title):
# if vid in obtypes:
# obtype="is a video"
# if img in obtypes:
# obtype="is an image"
# if doc in obtypes:
# obtype="is a document"
# checksys=System.objects.filter(id=each.id).count()
# if checksys > 0:
# sysob=System.objects.get(id=each.id)
# systype=sysob.systemtypes.all()
# if col in systype:
# obtype="is a collection"
# elif wiki in systype:
# obtype="is a text document"
# elif meet in systype:
# obtype="is a Thread"
s=each.id
listsubjs[each.id]=s
return str(listsubjs)
@register.inclusion_tag('gstudio/edittitle.html')
def edit_title(objectid,objecttitle):
gbobject = Gbobject.objects.get(id=objectid)
    template='gstudio/edittitle.html'
return {'template':template,'objectid':objectid,'objecttitle':objecttitle,'gbobject':gbobject}
@register.simple_tag
def get_add_tag():
listtag = []
tag = Tag.objects.all()
for each in tag:
listtag.append(each.__str__())
return str(listtag)
@register.simple_tag
def get_page_drawer():
pagedrawer = []
#wikiset = Systemtype.objects.all()
drawerset = Systemtype.objects.get(title="Wikipage")
drawer= drawerset.member_systems.all()
for each in drawer:
pagedrawer.append(each.__str__())
return str(pagedrawer)
@register.inclusion_tag('gstudio/priorpost.html')
def addpriorpost(objectid,user):
template='gstudio/priorpost.html'
gbobject = Gbobject.objects.get(id=objectid)
priorgbobject = gbobject.prior_nodes.all()
posteriorgbobject = gbobject.posterior_nodes.all()
return {'template':template,'objectid':objectid,'priorgbobject':priorgbobject,'posteriorgbobject':posteriorgbobject,'user':user}
@register.inclusion_tag('gstudio/addingtag.html')
def addtag(viewtag,objectid,user):
gbobject = Gbobject.objects.get(id=objectid)
template='gstudio/addingtag.html'
return {'viewtag':viewtag,'objectid':objectid,'user':user,'gbobject':gbobject}
@register.simple_tag
def get_pri_post_page():
listobject = []
gbobject = Gbobject.objects.all()
for each in gbobject:
if not ('page box of' in each.title or 'message box of' in each.title):
listobject.append(each.__str__())
return str(listobject)
@register.inclusion_tag('gstudio/publicprivate.html')
def public_private(objectid,status):
template = 'gstudio/publicprivate.html'
return {'objectid':objectid,'status':status}
| agpl-3.0 | 4,942,955,280,426,499,000 | 32.410133 | 166 | 0.634112 | false |
gemfire/py-gemfire-rest | tests/PerformanceTests.py | 2 | 2432 | '''
Copyright (c) 2014 Pivotal Software, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing, software distributed under the License
is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
'''
from GemfireClient import *
import time
from Customer import *
class PerformanceTests:
def __init__(self):
hostname = raw_input("Enter hostname: ")
port = raw_input("Enter port: ")
self.client = GemfireClient(hostname,port)
self.myRegion = self.client.get_region(raw_input("Enter Region Name: "))
def warmup(self):
for x in range(0,10):
self.myRegion.put(x,{"random":"random"})
self.myRegion.get(x)
self.myRegion.clear()
def put(self, num):
self.warmup()
start = time.clock()
for x in range(0,num):
self.myRegion.put(x,Customer("John Doe", 42))
end = time.clock()
return (end-start)
def put_all(self, num):
self.warmup()
item = {}
for x in range(0,num):
item[x] = Customer("New Person", 1)
start = time.clock()
self.myRegion.put_all(item)
end = time.clock()
return (end-start)
def get(self, num):
self.warmup()
for x in range(0,num):
self.myRegion.put(x,Customer("John Doe", 42))
start = time.clock()
for x in range(0,num):
self.myRegion.get(x)
end = time.clock()
return (end-start)
def run_test(self,testname):
filename = raw_input("Enter filename to store run data: ")
file = open(filename, "w")
op_num = input("Number of operations per run: ")
runs = input("Number of runs: ")
name = getattr(PerformanceTests,testname)
total = 0
for x in range(0,runs):
y=name(self,op_num)
file.write(str(y)+"\n")
total+=y
file.close()
print "The average run time for " + str(op_num) + " " + testname + "s was " + str(total/runs) + " seconds"
| apache-2.0 | 81,333,886,949,984,320 | 32.777778 | 114 | 0.599918 | false |
bdh1011/wau | venv/lib/python2.7/site-packages/nbformat/v4/tests/test_validate.py | 3 | 2506 | """Tests for nbformat validation"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import io
import os
import nose.tools as nt
from nbformat.validator import validate, ValidationError
from ..nbjson import reads
from ..nbbase import (
nbformat,
new_code_cell, new_markdown_cell, new_notebook,
new_output, new_raw_cell,
)
def validate4(obj, ref=None):
return validate(obj, ref, version=nbformat)
def test_valid_code_cell():
cell = new_code_cell()
validate4(cell, 'code_cell')
def test_invalid_code_cell():
cell = new_code_cell()
cell['source'] = 5
with nt.assert_raises(ValidationError):
validate4(cell, 'code_cell')
cell = new_code_cell()
del cell['metadata']
with nt.assert_raises(ValidationError):
validate4(cell, 'code_cell')
cell = new_code_cell()
del cell['source']
with nt.assert_raises(ValidationError):
validate4(cell, 'code_cell')
cell = new_code_cell()
del cell['cell_type']
with nt.assert_raises(ValidationError):
validate4(cell, 'code_cell')
def test_invalid_markdown_cell():
cell = new_markdown_cell()
cell['source'] = 5
with nt.assert_raises(ValidationError):
validate4(cell, 'markdown_cell')
cell = new_markdown_cell()
del cell['metadata']
with nt.assert_raises(ValidationError):
validate4(cell, 'markdown_cell')
cell = new_markdown_cell()
del cell['source']
with nt.assert_raises(ValidationError):
validate4(cell, 'markdown_cell')
cell = new_markdown_cell()
del cell['cell_type']
with nt.assert_raises(ValidationError):
validate4(cell, 'markdown_cell')
def test_invalid_raw_cell():
cell = new_raw_cell()
cell['source'] = 5
with nt.assert_raises(ValidationError):
validate4(cell, 'raw_cell')
cell = new_raw_cell()
del cell['metadata']
with nt.assert_raises(ValidationError):
validate4(cell, 'raw_cell')
cell = new_raw_cell()
del cell['source']
with nt.assert_raises(ValidationError):
validate4(cell, 'raw_cell')
cell = new_raw_cell()
del cell['cell_type']
with nt.assert_raises(ValidationError):
validate4(cell, 'raw_cell')
def test_sample_notebook():
here = os.path.dirname(__file__)
with io.open(os.path.join(here, os.pardir, os.pardir, 'tests', "test4.ipynb"), encoding='utf-8') as f:
nb = reads(f.read())
validate4(nb)
| mit | 4,945,232,103,774,989,000 | 22.866667 | 106 | 0.647247 | false |
bstadie/cgt | examples/demo_char_rnn.py | 13 | 12470 | """
A nearly direct translation of Andrej's code
https://github.com/karpathy/char-rnn
"""
from __future__ import division
import cgt
from cgt import nn, utils, profiler
import numpy as np, numpy.random as nr
import os.path as osp
import argparse
from time import time
from StringIO import StringIO
from param_collection import ParamCollection
# via https://github.com/karpathy/char-rnn/blob/master/model/GRU.lua
# via http://arxiv.org/pdf/1412.3555v1.pdf
def make_deep_gru(size_input, size_mem, n_layers, size_output, size_batch):
inputs = [cgt.matrix() for i_layer in xrange(n_layers+1)]
outputs = []
for i_layer in xrange(n_layers):
prev_h = inputs[i_layer+1] # note that inputs[0] is the external input, so we add 1
x = inputs[0] if i_layer==0 else outputs[i_layer-1]
size_x = size_input if i_layer==0 else size_mem
update_gate = cgt.sigmoid(
nn.Affine(size_x, size_mem,name="i2u")(x)
+ nn.Affine(size_mem, size_mem, name="h2u")(prev_h))
reset_gate = cgt.sigmoid(
nn.Affine(size_x, size_mem,name="i2r")(x)
+ nn.Affine(size_mem, size_mem, name="h2r")(prev_h))
gated_hidden = reset_gate * prev_h
p2 = nn.Affine(size_mem, size_mem)(gated_hidden)
p1 = nn.Affine(size_x, size_mem)(x)
hidden_target = cgt.tanh(p1+p2)
next_h = (1.0-update_gate)*prev_h + update_gate*hidden_target
outputs.append(next_h)
category_activations = nn.Affine(size_mem, size_output,name="pred")(outputs[-1])
logprobs = nn.logsoftmax(category_activations)
outputs.append(logprobs)
return nn.Module(inputs, outputs)
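# Editor note: per layer, the block above computes the GRU equations from the
# paper cited in the header (bias terms live inside the nn.Affine layers):
#   u_t = sigmoid(W_u x_t + U_u h_{t-1})          # update gate
#   r_t = sigmoid(W_r x_t + U_r h_{t-1})          # reset gate
#   htilde_t = tanh(W x_t + U (r_t * h_{t-1}))    # candidate state
#   h_t = (1 - u_t) * h_{t-1} + u_t * htilde_t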
def make_deep_lstm(size_input, size_mem, n_layers, size_output, size_batch):
inputs = [cgt.matrix(fixed_shape=(size_batch, size_input))]
for _ in xrange(2*n_layers):
inputs.append(cgt.matrix(fixed_shape=(size_batch, size_mem)))
outputs = []
for i_layer in xrange(n_layers):
prev_h = inputs[i_layer*2]
prev_c = inputs[i_layer*2+1]
if i_layer==0:
x = inputs[0]
size_x = size_input
else:
x = outputs[(i_layer-1)*2]
size_x = size_mem
input_sums = nn.Affine(size_x, 4*size_mem)(x) + nn.Affine(size_x, 4*size_mem)(prev_h)
sigmoid_chunk = cgt.sigmoid(input_sums[:,0:3*size_mem])
in_gate = sigmoid_chunk[:,0:size_mem]
forget_gate = sigmoid_chunk[:,size_mem:2*size_mem]
out_gate = sigmoid_chunk[:,2*size_mem:3*size_mem]
in_transform = cgt.tanh(input_sums[:,3*size_mem:4*size_mem])
next_c = forget_gate*prev_c + in_gate * in_transform
next_h = out_gate*cgt.tanh(next_c)
outputs.append(next_c)
outputs.append(next_h)
category_activations = nn.Affine(size_mem, size_output)(outputs[-1])
logprobs = nn.logsoftmax(category_activations)
outputs.append(logprobs)
return nn.Module(inputs, outputs)
def flatcat(xs):
return cgt.concatenate([x.flatten() for x in xs])
def cat_sample(ps):
"""
sample from categorical distribution
ps is a 2D array whose rows are vectors of probabilities
"""
r = nr.rand(len(ps))
out = np.zeros(len(ps),dtype='i4')
cumsums = np.cumsum(ps, axis=1)
for (irow,csrow) in enumerate(cumsums):
for (icol, csel) in enumerate(csrow):
if csel > r[irow]:
out[irow] = icol
break
return out
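# Example (editor addition): with ps = np.array([[0.1, 0.9], [0.8, 0.2]]),
# cat_sample(ps) draws one column index per row, so the result is usually
# np.array([1, 0]).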
def rmsprop_update(grad, state):
state.sqgrad[:] *= state.decay_rate
state.count *= state.decay_rate
np.square(grad, out=state.scratch) # scratch=g^2
state.sqgrad += state.scratch
state.count += 1
np.sqrt(state.sqgrad, out=state.scratch) # scratch = sum of squares
np.divide(state.scratch, np.sqrt(state.count), out=state.scratch) # scratch = rms
np.divide(grad, state.scratch, out=state.scratch) # scratch = grad/rms
np.multiply(state.scratch, state.step_size, out=state.scratch)
state.theta[:] -= state.scratch
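# Editor note: with decay rate d and step size a, the update above is
#   n_t = d * n_{t-1} + g_t**2        # leaky sum of squared gradients
#   c_t = d * c_{t-1} + 1             # leaky count, so n_t / c_t is an average
#   theta_t = theta_{t-1} - a * g_t / sqrt(n_t / c_t)
# i.e. the usual RMSProp rule, with the squared-gradient average kept as an
# explicit sum/count pair.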
def make_loss_and_grad_and_step(arch, size_input, size_output, size_mem, size_batch, n_layers, n_unroll):
# symbolic variables
x_tnk = cgt.tensor3()
targ_tnk = cgt.tensor3()
make_network = make_deep_lstm if arch=="lstm" else make_deep_gru
network = make_network(size_input, size_mem, n_layers, size_output, size_batch)
init_hiddens = [cgt.matrix() for _ in xrange(get_num_hiddens(arch, n_layers))]
# TODO fixed sizes
cur_hiddens = init_hiddens
loss = 0
for t in xrange(n_unroll):
outputs = network([x_tnk[t]] + cur_hiddens)
cur_hiddens, prediction_logprobs = outputs[:-1], outputs[-1]
# loss = loss + nn.categorical_negloglik(prediction_probs, targ_tnk[t]).sum()
loss = loss - (prediction_logprobs*targ_tnk[t]).sum()
cur_hiddens = outputs[:-1]
final_hiddens = cur_hiddens
loss = loss / (n_unroll * size_batch)
params = network.get_parameters()
gradloss = cgt.grad(loss, params)
flatgrad = flatcat(gradloss)
with utils.Message("compiling loss+grad"):
f_loss_and_grad = cgt.function([x_tnk, targ_tnk] + init_hiddens, [loss, flatgrad] + final_hiddens)
f_loss = cgt.function([x_tnk, targ_tnk] + init_hiddens, loss)
assert len(init_hiddens) == len(final_hiddens)
x_nk = cgt.matrix('x')
outputs = network([x_nk] + init_hiddens)
f_step = cgt.function([x_nk]+init_hiddens, outputs)
# print "node count", cgt.count_nodes(flatgrad)
return network, f_loss, f_loss_and_grad, f_step
class Table(dict):
"dictionary-like object that exposes its keys as attributes"
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
self.__dict__ = self
def make_rmsprop_state(theta, step_size, decay_rate):
return Table(theta=theta, sqgrad=np.zeros_like(theta)+1e-6, scratch=np.empty_like(theta),
step_size=step_size, decay_rate=decay_rate, count=0)
class Loader(object):
def __init__(self, data_dir, size_batch, n_unroll, split_fractions):
input_file = osp.join(data_dir,"input.txt")
preproc_file = osp.join(data_dir, "preproc.npz")
run_preproc = not osp.exists(preproc_file) or osp.getmtime(input_file) > osp.getmtime(preproc_file)
if run_preproc:
text_to_tensor(input_file, preproc_file)
data_file = np.load(preproc_file)
self.char2ind = {char:ind for (ind,char) in enumerate(data_file["chars"])}
data = data_file["inds"]
data = data[:data.shape[0] - (data.shape[0] % size_batch)].reshape(size_batch, -1).T # inds_tn
n_batches = (data.shape[0]-1) // n_unroll
data = data[:n_batches*n_unroll+1] # now t-1 is divisble by batch size
self.n_unroll = n_unroll
self.data = data
self.n_train_batches = int(n_batches*split_fractions[0])
self.n_test_batches = int(n_batches*split_fractions[1])
self.n_val_batches = n_batches - self.n_train_batches - self.n_test_batches
print "%i train batches, %i test batches, %i val batches"%(self.n_train_batches, self.n_test_batches, self.n_val_batches)
@property
def size_vocab(self):
return len(self.char2ind)
def train_batches_iter(self):
for i in xrange(self.n_train_batches):
start = i*self.n_unroll
stop = (i+1)*self.n_unroll
yield ind2onehot(self.data[start:stop], self.size_vocab), ind2onehot(self.data[start+1:stop+1], self.size_vocab) # XXX
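# Shape note (editor addition): with batch size B, unroll length T and vocab
# size V, each yielded pair is a (T, B, V) one-hot input array plus the same
# data shifted one character ahead as the target.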
# XXX move elsewhere
def ind2onehot(inds, n_cls):
inds = np.asarray(inds)
out = np.zeros(inds.shape+(n_cls,),cgt.floatX)
out.flat[np.arange(inds.size)*n_cls + inds.ravel()] = 1
return out
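# Example (editor addition): ind2onehot([1, 0], 3) returns
#   [[0., 1., 0.],
#    [1., 0., 0.]]
# one row per index, with a single 1 in the index'th column.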
def text_to_tensor(text_file, preproc_file):
with open(text_file,"r") as fh:
text = fh.read()
char2ind = {}
inds = []
for char in text:
ind = char2ind.get(char, -1)
if ind == -1:
ind = len(char2ind)
char2ind[char] = ind
inds.append(ind)
np.savez(preproc_file, inds = inds, chars = sorted(char2ind, key = lambda char : char2ind[char]))
def get_num_hiddens(arch, n_layers):
return {"lstm" : 2 * n_layers, "gru" : n_layers}[arch]
def sample(f_step, init_hiddens, char2ind, n_steps, temperature, seed_text = ""):
vocab_size = len(char2ind)
ind2char = {ind:char for (char,ind) in char2ind.iteritems()}
cur_hiddens = init_hiddens
t = StringIO()
t.write(seed_text)
for char in seed_text:
x_1k = ind2onehot([char2ind[char]], vocab_size)
        net_outputs = f_step(x_1k, *cur_hiddens)
cur_hiddens, logprobs_1k = net_outputs[:-1], net_outputs[-1]
if len(seed_text)==0:
logprobs_1k = np.zeros((1,vocab_size))
for _ in xrange(n_steps):
logprobs_1k /= temperature
        probs_1k = np.exp(logprobs_1k*2) # the factor of 2 squares the temperature-scaled probabilities, sharpening the distribution further
probs_1k /= probs_1k.sum()
index = cat_sample(probs_1k)[0]
char = ind2char[index]
x_1k = ind2onehot([index], vocab_size)
net_outputs = f_step(x_1k, *cur_hiddens)
cur_hiddens, logprobs_1k = net_outputs[:-1], net_outputs[-1]
t.write(char)
cgt.utils.colorprint(cgt.utils.Color.YELLOW, t.getvalue() + "\n")
def main():
nr.seed(0)
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default="alice")
parser.add_argument("--size_mem", type=int,default=64)
parser.add_argument("--size_batch", type=int,default=64)
parser.add_argument("--n_layers",type=int,default=2)
parser.add_argument("--n_unroll",type=int,default=16)
parser.add_argument("--step_size",type=float,default=.01)
parser.add_argument("--decay_rate",type=float,default=0.95)
parser.add_argument("--n_epochs",type=int,default=20)
parser.add_argument("--arch",choices=["lstm","gru"],default="lstm")
parser.add_argument("--grad_check",action="store_true")
parser.add_argument("--profile",action="store_true")
parser.add_argument("--unittest",action="store_true")
parser.add_argument("--temperature",type=float,default=1)
args = parser.parse_args()
cgt.set_precision("quad" if args.grad_check else "single")
assert args.n_unroll > 1
loader = Loader(args.data_dir,args.size_batch, args.n_unroll, (1.0,0,0))
network, f_loss, f_loss_and_grad, f_step = make_loss_and_grad_and_step(args.arch, loader.size_vocab,
loader.size_vocab, args.size_mem, args.size_batch, args.n_layers, args.n_unroll)
if args.profile: profiler.start()
params = network.get_parameters()
pc = ParamCollection(params)
pc.set_value_flat(nr.uniform(-.1, .1, size=(pc.get_total_size(),)))
def initialize_hiddens(n):
return [np.zeros((n, args.size_mem), cgt.floatX) for _ in xrange(get_num_hiddens(args.arch, args.n_layers))]
if args.grad_check:
x,y = loader.train_batches_iter().next()
prev_hiddens = initialize_hiddens(args.size_batch)
def f(thnew):
thold = pc.get_value_flat()
pc.set_value_flat(thnew)
loss = f_loss(x,y, *prev_hiddens)
pc.set_value_flat(thold)
return loss
from cgt.numeric_diff import numeric_grad
g_num = numeric_grad(f, pc.get_value_flat(),eps=1e-10)
result = f_loss_and_grad(x,y,*prev_hiddens)
g_anal = result[1]
assert np.allclose(g_num, g_anal, atol=1e-4)
print "Gradient check succeeded!"
return
optim_state = make_rmsprop_state(theta=pc.get_value_flat(), step_size = args.step_size,
decay_rate = args.decay_rate)
for iepoch in xrange(args.n_epochs):
losses = []
tstart = time()
print "starting epoch",iepoch
cur_hiddens = initialize_hiddens(args.size_batch)
for (x,y) in loader.train_batches_iter():
out = f_loss_and_grad(x,y, *cur_hiddens)
loss = out[0]
grad = out[1]
cur_hiddens = out[2:]
rmsprop_update(grad, optim_state)
pc.set_value_flat(optim_state.theta)
losses.append(loss)
if args.unittest: return
print "%.3f s/batch. avg loss = %.3f"%((time()-tstart)/len(losses), np.mean(losses))
optim_state.step_size *= .98 #pylint: disable=E1101
sample(f_step, initialize_hiddens(1), char2ind=loader.char2ind, n_steps=1000, temperature=args.temperature, seed_text = "")
if args.profile: profiler.print_stats()
if __name__ == "__main__":
main()
| mit | -4,721,882,045,012,063,000 | 37.018293 | 131 | 0.624379 | false |
jacshfr/mozilla-bedrock | vendor-local/packages/chardet/chardet/mbcsgroupprober.py | 236 | 1889 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from charsetgroupprober import CharSetGroupProber
from utf8prober import UTF8Prober
from sjisprober import SJISProber
from eucjpprober import EUCJPProber
from gb2312prober import GB2312Prober
from euckrprober import EUCKRProber
from big5prober import Big5Prober
from euctwprober import EUCTWProber
class MBCSGroupProber(CharSetGroupProber):
def __init__(self):
CharSetGroupProber.__init__(self)
self._mProbers = [ \
UTF8Prober(),
SJISProber(),
EUCJPProber(),
GB2312Prober(),
EUCKRProber(),
Big5Prober(),
EUCTWProber()]
self.reset()
| mpl-2.0 | 2,186,669,350,203,373,800 | 36.78 | 69 | 0.692959 | false |
ToonTownInfiniteRepo/ToontownInfinite | Panda3D-1.9.0/python/Lib/site-packages/setuptools/tests/test_upload_docs.py | 522 | 2139 | """build_ext tests
"""
import sys, os, shutil, tempfile, unittest, site, zipfile
from setuptools.command.upload_docs import upload_docs
from setuptools.dist import Distribution
SETUP_PY = """\
from setuptools import setup
setup(name='foo')
"""
class TestUploadDocsTest(unittest.TestCase):
def setUp(self):
self.dir = tempfile.mkdtemp()
setup = os.path.join(self.dir, 'setup.py')
f = open(setup, 'w')
f.write(SETUP_PY)
f.close()
self.old_cwd = os.getcwd()
os.chdir(self.dir)
self.upload_dir = os.path.join(self.dir, 'build')
os.mkdir(self.upload_dir)
# A test document.
f = open(os.path.join(self.upload_dir, 'index.html'), 'w')
f.write("Hello world.")
f.close()
# An empty folder.
os.mkdir(os.path.join(self.upload_dir, 'empty'))
if sys.version >= "2.6":
self.old_base = site.USER_BASE
site.USER_BASE = upload_docs.USER_BASE = tempfile.mkdtemp()
self.old_site = site.USER_SITE
site.USER_SITE = upload_docs.USER_SITE = tempfile.mkdtemp()
def tearDown(self):
os.chdir(self.old_cwd)
shutil.rmtree(self.dir)
if sys.version >= "2.6":
shutil.rmtree(site.USER_BASE)
shutil.rmtree(site.USER_SITE)
site.USER_BASE = self.old_base
site.USER_SITE = self.old_site
def test_create_zipfile(self):
# Test to make sure zipfile creation handles common cases.
# This explicitly includes a folder containing an empty folder.
dist = Distribution()
cmd = upload_docs(dist)
cmd.upload_dir = self.upload_dir
cmd.target_dir = self.upload_dir
tmp_dir = tempfile.mkdtemp()
tmp_file = os.path.join(tmp_dir, 'foo.zip')
try:
zip_file = cmd.create_zipfile(tmp_file)
assert zipfile.is_zipfile(tmp_file)
zip_file = zipfile.ZipFile(tmp_file) # woh...
assert zip_file.namelist() == ['index.html']
zip_file.close()
finally:
shutil.rmtree(tmp_dir)
| mit | -4,058,993,172,534,987,300 | 28.708333 | 71 | 0.583918 | false |
mrquim/repository.mrquim | script.module.pycryptodome/lib/Crypto/SelfTest/Cipher/test_CFB.py | 5 | 16124 | # ===================================================================
#
# Copyright (c) 2014, Legrandin <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
import unittest
from Crypto.SelfTest.loader import load_tests
from Crypto.SelfTest.st_common import list_test_cases
from Crypto.Util.py3compat import tobytes, b, unhexlify
from Crypto.Cipher import AES, DES3, DES
from Crypto.Hash import SHAKE128
def get_tag_random(tag, length):
return SHAKE128.new(data=tobytes(tag)).read(length)
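# get_tag_random produces reproducible pseudo-random test data: the same tag always
# yields the same bytes, since SHAKE128 is used as a deterministic stream.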
from Crypto.SelfTest.Cipher.test_CBC import BlockChainingTests
class CfbTests(BlockChainingTests):
aes_mode = AES.MODE_CFB
des3_mode = DES3.MODE_CFB
# Redefine test_unaligned_data_128/64
def test_unaligned_data_128(self):
plaintexts = [ b("7777777") ] * 100
cipher = AES.new(self.key_128, AES.MODE_CFB, self.iv_128, segment_size=8)
ciphertexts = [ cipher.encrypt(x) for x in plaintexts ]
cipher = AES.new(self.key_128, AES.MODE_CFB, self.iv_128, segment_size=8)
self.assertEqual(b("").join(ciphertexts), cipher.encrypt(b("").join(plaintexts)))
cipher = AES.new(self.key_128, AES.MODE_CFB, self.iv_128, segment_size=128)
ciphertexts = [ cipher.encrypt(x) for x in plaintexts ]
cipher = AES.new(self.key_128, AES.MODE_CFB, self.iv_128, segment_size=128)
self.assertEqual(b("").join(ciphertexts), cipher.encrypt(b("").join(plaintexts)))
def test_unaligned_data_64(self):
plaintexts = [ b("7777777") ] * 100
cipher = DES3.new(self.key_192, DES3.MODE_CFB, self.iv_64, segment_size=8)
ciphertexts = [ cipher.encrypt(x) for x in plaintexts ]
cipher = DES3.new(self.key_192, DES3.MODE_CFB, self.iv_64, segment_size=8)
self.assertEqual(b("").join(ciphertexts), cipher.encrypt(b("").join(plaintexts)))
cipher = DES3.new(self.key_192, DES3.MODE_CFB, self.iv_64, segment_size=64)
ciphertexts = [ cipher.encrypt(x) for x in plaintexts ]
cipher = DES3.new(self.key_192, DES3.MODE_CFB, self.iv_64, segment_size=64)
self.assertEqual(b("").join(ciphertexts), cipher.encrypt(b("").join(plaintexts)))
# Extra
def test_segment_size_128(self):
for bits in xrange(8, 129, 8):
cipher = AES.new(self.key_128, AES.MODE_CFB, self.iv_128,
segment_size=bits)
for bits in 0, 7, 9, 127, 129:
self.assertRaises(ValueError, AES.new, self.key_128, AES.MODE_CFB,
self.iv_128,
segment_size=bits)
def test_segment_size_64(self):
for bits in xrange(8, 65, 8):
cipher = DES3.new(self.key_192, DES3.MODE_CFB, self.iv_64,
segment_size=bits)
for bits in 0, 7, 9, 63, 65:
self.assertRaises(ValueError, DES3.new, self.key_192, AES.MODE_CFB,
self.iv_64,
segment_size=bits)
class NistCfbVectors(unittest.TestCase):
def _do_kat_aes_test(self, file_name, segment_size):
test_vectors = load_tests(("Crypto", "SelfTest", "Cipher", "test_vectors", "AES"),
file_name,
"AES CFB%d KAT" % segment_size,
{ "count" : lambda x: int(x) } )
assert(test_vectors)
direction = None
for tv in test_vectors:
# The test vector file contains some directive lines
if isinstance(tv, basestring):
direction = tv
continue
self.description = tv.desc
cipher = AES.new(tv.key, AES.MODE_CFB, tv.iv,
segment_size=segment_size)
if direction == "[ENCRYPT]":
self.assertEqual(cipher.encrypt(tv.plaintext), tv.ciphertext)
elif direction == "[DECRYPT]":
self.assertEqual(cipher.decrypt(tv.ciphertext), tv.plaintext)
else:
assert False
# See Section 6.4.5 in AESAVS
def _do_mct_aes_test(self, file_name, segment_size):
test_vectors = load_tests(("Crypto", "SelfTest", "Cipher", "test_vectors", "AES"),
file_name,
"AES CFB%d Montecarlo" % segment_size,
{ "count" : lambda x: int(x) } )
assert(test_vectors)
assert(segment_size in (8, 128))
direction = None
for tv in test_vectors:
# The test vector file contains some directive lines
if isinstance(tv, basestring):
direction = tv
continue
self.description = tv.desc
cipher = AES.new(tv.key, AES.MODE_CFB, tv.iv,
segment_size=segment_size)
def get_input(input_text, output_seq, j):
# CFB128
if segment_size == 128:
if j >= 2:
return output_seq[-2]
return [input_text, tv.iv][j]
# CFB8
if j == 0:
return input_text
elif j <= 16:
return tv.iv[j - 1:j]
return output_seq[j - 17]
if direction == '[ENCRYPT]':
cts = []
for j in xrange(1000):
plaintext = get_input(tv.plaintext, cts, j)
cts.append(cipher.encrypt(plaintext))
self.assertEqual(cts[-1], tv.ciphertext)
elif direction == '[DECRYPT]':
pts = []
for j in xrange(1000):
ciphertext = get_input(tv.ciphertext, pts, j)
pts.append(cipher.decrypt(ciphertext))
self.assertEqual(pts[-1], tv.plaintext)
else:
assert False
def _do_tdes_test(self, file_name, segment_size):
test_vectors = load_tests(("Crypto", "SelfTest", "Cipher", "test_vectors", "TDES"),
file_name,
"AES CFB%d KAT" % segment_size,
{ "count" : lambda x: int(x) } )
assert(test_vectors)
direction = None
for tv in test_vectors:
# The test vector file contains some directive lines
if isinstance(tv, basestring):
direction = tv
continue
self.description = tv.desc
if hasattr(tv, "keys"):
cipher = DES.new(tv.keys, DES.MODE_CFB, tv.iv,
segment_size=segment_size)
else:
if tv.key1 != tv.key3:
key = tv.key1 + tv.key2 + tv.key3 # Option 3
else:
key = tv.key1 + tv.key2 # Option 2
cipher = DES3.new(key, DES3.MODE_CFB, tv.iv,
segment_size=segment_size)
if direction == "[ENCRYPT]":
self.assertEqual(cipher.encrypt(tv.plaintext), tv.ciphertext)
elif direction == "[DECRYPT]":
self.assertEqual(cipher.decrypt(tv.ciphertext), tv.plaintext)
else:
assert False
# Create one test method per file
nist_aes_kat_mmt_files = (
# KAT
"CFB?GFSbox128.rsp",
"CFB?GFSbox192.rsp",
"CFB?GFSbox256.rsp",
"CFB?KeySbox128.rsp",
"CFB?KeySbox192.rsp",
"CFB?KeySbox256.rsp",
"CFB?VarKey128.rsp",
"CFB?VarKey192.rsp",
"CFB?VarKey256.rsp",
"CFB?VarTxt128.rsp",
"CFB?VarTxt192.rsp",
"CFB?VarTxt256.rsp",
# MMT
"CFB?MMT128.rsp",
"CFB?MMT192.rsp",
"CFB?MMT256.rsp",
)
nist_aes_mct_files = (
"CFB?MCT128.rsp",
"CFB?MCT192.rsp",
"CFB?MCT256.rsp",
)
for file_gen_name in nist_aes_kat_mmt_files:
for bits in "8", "128":
file_name = file_gen_name.replace("?", bits)
def new_func(self, file_name=file_name, bits=bits):
self._do_kat_aes_test(file_name, int(bits))
setattr(NistCfbVectors, "test_AES_" + file_name, new_func)
for file_gen_name in nist_aes_mct_files:
for bits in "8", "128":
file_name = file_gen_name.replace("?", bits)
def new_func(self, file_name=file_name, bits=bits):
self._do_mct_aes_test(file_name, int(bits))
setattr(NistCfbVectors, "test_AES_" + file_name, new_func)
del file_name, new_func
nist_tdes_files = (
"TCFB?MMT2.rsp", # 2TDES
"TCFB?MMT3.rsp", # 3TDES
"TCFB?invperm.rsp", # Single DES
"TCFB?permop.rsp",
"TCFB?subtab.rsp",
"TCFB?varkey.rsp",
"TCFB?vartext.rsp",
)
for file_gen_name in nist_tdes_files:
for bits in "8", "64":
file_name = file_gen_name.replace("?", bits)
def new_func(self, file_name=file_name, bits=bits):
self._do_tdes_test(file_name, int(bits))
setattr(NistCfbVectors, "test_TDES_" + file_name, new_func)
# END OF NIST CFB TEST VECTORS
class SP800TestVectors(unittest.TestCase):
"""Class exercising the CFB test vectors found in Section F.3
    of NIST SP 800-38A"""
def test_aes_128_cfb8(self):
plaintext = '6bc1bee22e409f96e93d7e117393172aae2d'
ciphertext = '3b79424c9c0dd436bace9e0ed4586a4f32b9'
key = '2b7e151628aed2a6abf7158809cf4f3c'
iv = '000102030405060708090a0b0c0d0e0f'
key = unhexlify(key)
iv = unhexlify(iv)
plaintext = unhexlify(plaintext)
ciphertext = unhexlify(ciphertext)
cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=8)
self.assertEqual(cipher.encrypt(plaintext), ciphertext)
cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=8)
self.assertEqual(cipher.decrypt(ciphertext), plaintext)
def test_aes_192_cfb8(self):
plaintext = '6bc1bee22e409f96e93d7e117393172aae2d'
ciphertext = 'cda2521ef0a905ca44cd057cbf0d47a0678a'
key = '8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b'
iv = '000102030405060708090a0b0c0d0e0f'
key = unhexlify(key)
iv = unhexlify(iv)
plaintext = unhexlify(plaintext)
ciphertext = unhexlify(ciphertext)
cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=8)
self.assertEqual(cipher.encrypt(plaintext), ciphertext)
cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=8)
self.assertEqual(cipher.decrypt(ciphertext), plaintext)
def test_aes_256_cfb8(self):
plaintext = '6bc1bee22e409f96e93d7e117393172aae2d'
ciphertext = 'dc1f1a8520a64db55fcc8ac554844e889700'
key = '603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4'
iv = '000102030405060708090a0b0c0d0e0f'
key = unhexlify(key)
iv = unhexlify(iv)
plaintext = unhexlify(plaintext)
ciphertext = unhexlify(ciphertext)
cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=8)
self.assertEqual(cipher.encrypt(plaintext), ciphertext)
cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=8)
self.assertEqual(cipher.decrypt(ciphertext), plaintext)
def test_aes_128_cfb128(self):
plaintext = '6bc1bee22e409f96e93d7e117393172a' +\
'ae2d8a571e03ac9c9eb76fac45af8e51' +\
'30c81c46a35ce411e5fbc1191a0a52ef' +\
'f69f2445df4f9b17ad2b417be66c3710'
ciphertext = '3b3fd92eb72dad20333449f8e83cfb4a' +\
'c8a64537a0b3a93fcde3cdad9f1ce58b' +\
'26751f67a3cbb140b1808cf187a4f4df' +\
'c04b05357c5d1c0eeac4c66f9ff7f2e6'
key = '2b7e151628aed2a6abf7158809cf4f3c'
iv = '000102030405060708090a0b0c0d0e0f'
key = unhexlify(key)
iv = unhexlify(iv)
plaintext = unhexlify(plaintext)
ciphertext = unhexlify(ciphertext)
cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=128)
self.assertEqual(cipher.encrypt(plaintext), ciphertext)
cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=128)
self.assertEqual(cipher.decrypt(ciphertext), plaintext)
def test_aes_192_cfb128(self):
plaintext = '6bc1bee22e409f96e93d7e117393172a' +\
'ae2d8a571e03ac9c9eb76fac45af8e51' +\
'30c81c46a35ce411e5fbc1191a0a52ef' +\
'f69f2445df4f9b17ad2b417be66c3710'
ciphertext = 'cdc80d6fddf18cab34c25909c99a4174' +\
'67ce7f7f81173621961a2b70171d3d7a' +\
'2e1e8a1dd59b88b1c8e60fed1efac4c9' +\
'c05f9f9ca9834fa042ae8fba584b09ff'
key = '8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b'
iv = '000102030405060708090a0b0c0d0e0f'
key = unhexlify(key)
iv = unhexlify(iv)
plaintext = unhexlify(plaintext)
ciphertext = unhexlify(ciphertext)
cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=128)
self.assertEqual(cipher.encrypt(plaintext), ciphertext)
cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=128)
self.assertEqual(cipher.decrypt(ciphertext), plaintext)
def test_aes_256_cfb128(self):
plaintext = '6bc1bee22e409f96e93d7e117393172a' +\
'ae2d8a571e03ac9c9eb76fac45af8e51' +\
'30c81c46a35ce411e5fbc1191a0a52ef' +\
'f69f2445df4f9b17ad2b417be66c3710'
ciphertext = 'dc7e84bfda79164b7ecd8486985d3860' +\
'39ffed143b28b1c832113c6331e5407b' +\
'df10132415e54b92a13ed0a8267ae2f9' +\
'75a385741ab9cef82031623d55b1e471'
key = '603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4'
iv = '000102030405060708090a0b0c0d0e0f'
key = unhexlify(key)
iv = unhexlify(iv)
plaintext = unhexlify(plaintext)
ciphertext = unhexlify(ciphertext)
cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=128)
self.assertEqual(cipher.encrypt(plaintext), ciphertext)
cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=128)
self.assertEqual(cipher.decrypt(ciphertext), plaintext)
def get_tests(config={}):
tests = []
tests += list_test_cases(CfbTests)
tests += list_test_cases(NistCfbVectors)
tests += list_test_cases(SP800TestVectors)
return tests
if __name__ == '__main__':
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
| gpl-2.0 | -3,972,363,009,042,638,000 | 39.209476 | 91 | 0.585463 | false |
lukleh/TwistedBot | twistedbot/plugins/base.py | 1 | 2342 |
import os
import pkgutil
import abc
from functools import wraps
class PluginMeta(abc.ABCMeta):
def __new__(meta, name, bases, dct):
cls = super(PluginMeta, meta).__new__(meta, name, bases, dct)
cls.handlers = []
for name, obj in cls.__dict__.iteritems():
if hasattr(obj, "__call__") and name.startswith("on_"):
cls.handlers.append(name)
return cls
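# Any callable attribute whose name starts with "on_" defined on a plugin subclass is
# collected into that class's handlers list by the metaclass above (e.g. defining
# on_chat_message adds "on_chat_message" to handlers).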
class PluginBase(object):
__metaclass__ = PluginMeta
def __init__(self, world):
self.world = world
class PluginChatBase(PluginBase):
def send_chat_message(self, msg):
self.world.chat.send_chat_message(msg)
@abc.abstractproperty
def command_verb(self):
pass
@property
def aliases(self):
return []
@abc.abstractproperty
def help(self):
pass
@abc.abstractmethod
def command(self, sender, command, args):
pass
class PluginEventHandlerBase(PluginBase):
pass
class PluginPlannerBase(object):
pass
def load(log, call_file, group):
plugs = []
path = [os.path.dirname(os.path.realpath(call_file))]
for loader, name, _ in list(pkgutil.iter_modules(path=path)):
try:
mpath = ".".join([__package__, group, name])
module = loader.find_module(mpath).load_module(mpath)
if not getattr(module, "plugin", False):
log.msg("module %s does not include plugin" % module.__name__)
continue
plugin_class = module.plugin
plugin_path = "%s.%s" % (module.__name__, plugin_class.__name__)
if issubclass(plugin_class, PluginChatBase):
log.msg("loaded chat plugin %s" % plugin_path)
plugs.append(plugin_class)
elif issubclass(plugin_class, PluginEventHandlerBase):
log.msg("loaded event plugin %s" % plugin_path)
plugs.append(plugin_class)
elif issubclass(plugin_class, PluginPlannerBase):
log.msg("loaded planner plugin %s" % plugin_path)
plugs.append(plugin_class)
else:
log.msg("class %s is not plugin" % plugin_path)
except Exception as e:
log.err(_stuff=e, _why="could not load plugin %s.py" % os.path.join(path[0], name))
return plugs
| mit | -5,671,564,216,552,540,000 | 27.91358 | 95 | 0.583689 | false |
Amechi101/concepteur-market-app | venv/lib/python2.7/site-packages/gunicorn/arbiter.py | 24 | 17047 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
from __future__ import with_statement
import errno
import os
import random
import select
import signal
import sys
import time
import traceback
from gunicorn.errors import HaltServer, AppImportError
from gunicorn.pidfile import Pidfile
from gunicorn.sock import create_sockets
from gunicorn import util
from gunicorn import __version__, SERVER_SOFTWARE
class Arbiter(object):
"""
    The Arbiter keeps the worker processes alive. It launches or
    kills them as needed. It also manages application reloading
    via SIGHUP/USR2.
"""
    # A flag indicating if a worker failed to boot.
    # If a worker process exits with this error code,
    # the arbiter will terminate.
WORKER_BOOT_ERROR = 3
# A flag indicating if an application failed to be loaded
APP_LOAD_ERROR = 4
START_CTX = {}
LISTENERS = []
WORKERS = {}
PIPE = []
# I love dynamic languages
SIG_QUEUE = []
SIGNALS = [getattr(signal, "SIG%s" % x) \
for x in "HUP QUIT INT TERM TTIN TTOU USR1 USR2 WINCH".split()]
SIG_NAMES = dict(
(getattr(signal, name), name[3:].lower()) for name in dir(signal)
if name[:3] == "SIG" and name[3] != "_"
)
def __init__(self, app):
os.environ["SERVER_SOFTWARE"] = SERVER_SOFTWARE
self._num_workers = None
self.setup(app)
self.pidfile = None
self.worker_age = 0
self.reexec_pid = 0
self.master_name = "Master"
cwd = util.getcwd()
args = sys.argv[:]
args.insert(0, sys.executable)
# init start context
self.START_CTX = {
"args": args,
"cwd": cwd,
0: sys.executable
}
def _get_num_workers(self):
return self._num_workers
def _set_num_workers(self, value):
old_value = self._num_workers
self._num_workers = value
self.cfg.nworkers_changed(self, value, old_value)
num_workers = property(_get_num_workers, _set_num_workers)
def setup(self, app):
self.app = app
self.cfg = app.cfg
self.log = self.cfg.logger_class(app.cfg)
# reopen files
if 'GUNICORN_FD' in os.environ:
self.log.reopen_files()
self.address = self.cfg.address
self.num_workers = self.cfg.workers
self.debug = self.cfg.debug
self.timeout = self.cfg.timeout
self.proc_name = self.cfg.proc_name
self.worker_class = self.cfg.worker_class
if self.cfg.debug:
self.log.debug("Current configuration:")
for config, value in sorted(self.cfg.settings.items(),
key=lambda setting: setting[1]):
self.log.debug(" %s: %s", config, value.value)
if self.cfg.preload_app:
if not self.cfg.debug:
self.app.wsgi()
else:
self.log.warning("debug mode: app isn't preloaded.")
def start(self):
"""\
Initialize the arbiter. Start listening and set pidfile if needed.
"""
self.log.info("Starting gunicorn %s", __version__)
self.pid = os.getpid()
if self.cfg.pidfile is not None:
self.pidfile = Pidfile(self.cfg.pidfile)
self.pidfile.create(self.pid)
self.cfg.on_starting(self)
        # set environment variables
if self.cfg.env:
for k, v in self.cfg.env.items():
os.environ[k] = v
self.init_signals()
if not self.LISTENERS:
self.LISTENERS = create_sockets(self.cfg, self.log)
listeners_str = ",".join([str(l) for l in self.LISTENERS])
self.log.debug("Arbiter booted")
self.log.info("Listening at: %s (%s)", listeners_str, self.pid)
self.log.info("Using worker: %s",
self.cfg.settings['worker_class'].get())
self.cfg.when_ready(self)
def init_signals(self):
"""\
Initialize master signal handling. Most of the signals
are queued. Child signals only wake up the master.
"""
# close old PIPE
if self.PIPE:
[os.close(p) for p in self.PIPE]
# initialize the pipe
self.PIPE = pair = os.pipe()
for p in pair:
util.set_non_blocking(p)
util.close_on_exec(p)
self.log.close_on_exec()
# initialize all signals
[signal.signal(s, self.signal) for s in self.SIGNALS]
signal.signal(signal.SIGCHLD, self.handle_chld)
def signal(self, sig, frame):
if len(self.SIG_QUEUE) < 5:
self.SIG_QUEUE.append(sig)
self.wakeup()
def run(self):
"Main master loop."
self.start()
util._setproctitle("master [%s]" % self.proc_name)
self.manage_workers()
while True:
try:
self.reap_workers()
sig = self.SIG_QUEUE.pop(0) if len(self.SIG_QUEUE) else None
if sig is None:
self.sleep()
self.murder_workers()
self.manage_workers()
continue
if sig not in self.SIG_NAMES:
self.log.info("Ignoring unknown signal: %s", sig)
continue
signame = self.SIG_NAMES.get(sig)
handler = getattr(self, "handle_%s" % signame, None)
if not handler:
self.log.error("Unhandled signal: %s", signame)
continue
self.log.info("Handling signal: %s", signame)
handler()
self.wakeup()
except StopIteration:
self.halt()
except KeyboardInterrupt:
self.halt()
except HaltServer as inst:
self.halt(reason=inst.reason, exit_status=inst.exit_status)
except SystemExit:
raise
except Exception:
self.log.info("Unhandled exception in main loop:\n%s",
traceback.format_exc())
self.stop(False)
if self.pidfile is not None:
self.pidfile.unlink()
sys.exit(-1)
def handle_chld(self, sig, frame):
"SIGCHLD handling"
self.wakeup()
def handle_hup(self):
"""\
HUP handling.
- Reload configuration
- Start the new worker processes with a new configuration
- Gracefully shutdown the old worker processes
"""
self.log.info("Hang up: %s", self.master_name)
self.reload()
def handle_quit(self):
"SIGQUIT handling"
raise StopIteration
def handle_int(self):
"SIGINT handling"
self.stop(False)
raise StopIteration
def handle_term(self):
"SIGTERM handling"
self.stop(False)
raise StopIteration
def handle_ttin(self):
"""\
SIGTTIN handling.
Increases the number of workers by one.
"""
self.num_workers += 1
self.manage_workers()
def handle_ttou(self):
"""\
SIGTTOU handling.
Decreases the number of workers by one.
"""
if self.num_workers <= 1:
return
self.num_workers -= 1
self.manage_workers()
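    # These two handlers are typically driven externally, e.g. "kill -TTIN <master_pid>"
    # to add a worker and "kill -TTOU <master_pid>" to remove one.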
def handle_usr1(self):
"""\
SIGUSR1 handling.
Kill all workers by sending them a SIGUSR1
"""
self.kill_workers(signal.SIGUSR1)
self.log.reopen_files()
def handle_usr2(self):
"""\
SIGUSR2 handling.
Creates a new master/worker set as a slave of the current
master without affecting old workers. Use this to do live
        deployment with the ability to back out a change.
"""
self.reexec()
def handle_winch(self):
"SIGWINCH handling"
if self.cfg.daemon:
self.log.info("graceful stop of workers")
self.num_workers = 0
self.kill_workers(signal.SIGQUIT)
else:
self.log.info("SIGWINCH ignored. Not daemonized")
def wakeup(self):
"""\
Wake up the arbiter by writing to the PIPE
"""
try:
os.write(self.PIPE[1], b'.')
except IOError as e:
if e.errno not in [errno.EAGAIN, errno.EINTR]:
raise
def halt(self, reason=None, exit_status=0):
""" halt arbiter """
self.stop()
self.log.info("Shutting down: %s", self.master_name)
if reason is not None:
self.log.info("Reason: %s", reason)
if self.pidfile is not None:
self.pidfile.unlink()
sys.exit(exit_status)
def sleep(self):
"""\
Sleep until PIPE is readable or we timeout.
A readable PIPE means a signal occurred.
"""
try:
ready = select.select([self.PIPE[0]], [], [], 1.0)
if not ready[0]:
return
while os.read(self.PIPE[0], 1):
pass
except select.error as e:
if e.args[0] not in [errno.EAGAIN, errno.EINTR]:
raise
except OSError as e:
if e.errno not in [errno.EAGAIN, errno.EINTR]:
raise
except KeyboardInterrupt:
sys.exit()
def stop(self, graceful=True):
"""\
Stop workers
        :attr graceful: boolean, if True (the default) workers will be
        killed gracefully (i.e. trying to wait for the current connection)
"""
self.LISTENERS = []
sig = signal.SIGQUIT
if not graceful:
sig = signal.SIGTERM
limit = time.time() + self.cfg.graceful_timeout
while self.WORKERS and time.time() < limit:
self.kill_workers(sig)
time.sleep(0.1)
self.reap_workers()
self.kill_workers(signal.SIGKILL)
def reexec(self):
"""\
Relaunch the master and workers.
"""
if self.pidfile is not None:
self.pidfile.rename("%s.oldbin" % self.pidfile.fname)
self.reexec_pid = os.fork()
if self.reexec_pid != 0:
self.master_name = "Old Master"
return
environ = self.cfg.env_orig.copy()
fds = [l.fileno() for l in self.LISTENERS]
environ['GUNICORN_FD'] = ",".join([str(fd) for fd in fds])
os.chdir(self.START_CTX['cwd'])
self.cfg.pre_exec(self)
        # exec the process using the original environment
os.execvpe(self.START_CTX[0], self.START_CTX['args'], environ)
def reload(self):
old_address = self.cfg.address
        # reset old environment
for k in self.cfg.env:
if k in self.cfg.env_orig:
# reset the key to the value it had before
# we launched gunicorn
os.environ[k] = self.cfg.env_orig[k]
else:
# delete the value set by gunicorn
try:
del os.environ[k]
except KeyError:
pass
# reload conf
self.app.reload()
self.setup(self.app)
# reopen log files
self.log.reopen_files()
# do we need to change listener ?
if old_address != self.cfg.address:
# close all listeners
[l.close() for l in self.LISTENERS]
# init new listeners
self.LISTENERS = create_sockets(self.cfg, self.log)
self.log.info("Listening at: %s", ",".join(str(self.LISTENERS)))
# do some actions on reload
self.cfg.on_reload(self)
# unlink pidfile
if self.pidfile is not None:
self.pidfile.unlink()
# create new pidfile
if self.cfg.pidfile is not None:
self.pidfile = Pidfile(self.cfg.pidfile)
self.pidfile.create(self.pid)
# set new proc_name
util._setproctitle("master [%s]" % self.proc_name)
# spawn new workers
for i in range(self.cfg.workers):
self.spawn_worker()
# manage workers
self.manage_workers()
def murder_workers(self):
"""\
Kill unused/idle workers
"""
if not self.timeout:
return
for (pid, worker) in self.WORKERS.items():
try:
if time.time() - worker.tmp.last_update() <= self.timeout:
continue
except ValueError:
continue
self.log.critical("WORKER TIMEOUT (pid:%s)", pid)
self.kill_worker(pid, signal.SIGKILL)
def reap_workers(self):
"""\
Reap workers to avoid zombie processes
"""
try:
while True:
wpid, status = os.waitpid(-1, os.WNOHANG)
if not wpid:
break
if self.reexec_pid == wpid:
self.reexec_pid = 0
else:
# A worker said it cannot boot. We'll shutdown
# to avoid infinite start/stop cycles.
exitcode = status >> 8
if exitcode == self.WORKER_BOOT_ERROR:
reason = "Worker failed to boot."
raise HaltServer(reason, self.WORKER_BOOT_ERROR)
if exitcode == self.APP_LOAD_ERROR:
reason = "App failed to load."
raise HaltServer(reason, self.APP_LOAD_ERROR)
worker = self.WORKERS.pop(wpid, None)
if not worker:
continue
worker.tmp.close()
except OSError as e:
if e.errno == errno.ECHILD:
pass
def manage_workers(self):
"""\
Maintain the number of workers by spawning or killing
as required.
"""
if len(self.WORKERS.keys()) < self.num_workers:
self.spawn_workers()
workers = self.WORKERS.items()
workers = sorted(workers, key=lambda w: w[1].age)
while len(workers) > self.num_workers:
(pid, _) = workers.pop(0)
self.kill_worker(pid, signal.SIGQUIT)
def spawn_worker(self):
self.worker_age += 1
worker = self.worker_class(self.worker_age, self.pid, self.LISTENERS,
self.app, self.timeout / 2.0,
self.cfg, self.log)
self.cfg.pre_fork(self, worker)
pid = os.fork()
if pid != 0:
self.WORKERS[pid] = worker
return pid
# Process Child
worker_pid = os.getpid()
try:
util._setproctitle("worker [%s]" % self.proc_name)
self.log.info("Booting worker with pid: %s", worker_pid)
self.cfg.post_fork(self, worker)
worker.init_process()
sys.exit(0)
except SystemExit:
raise
except AppImportError as e:
self.log.debug("Exception while loading the application: \n%s",
traceback.format_exc())
sys.stderr.write("%s\n" % e)
sys.stderr.flush()
sys.exit(self.APP_LOAD_ERROR)
except:
self.log.exception("Exception in worker process:\n%s",
traceback.format_exc())
if not worker.booted:
sys.exit(self.WORKER_BOOT_ERROR)
sys.exit(-1)
finally:
self.log.info("Worker exiting (pid: %s)", worker_pid)
try:
worker.tmp.close()
self.cfg.worker_exit(self, worker)
except:
pass
def spawn_workers(self):
"""\
Spawn new workers as needed.
This is where a worker process leaves the main loop
of the master process.
"""
for i in range(self.num_workers - len(self.WORKERS.keys())):
self.spawn_worker()
time.sleep(0.1 * random.random())
def kill_workers(self, sig):
"""\
Kill all workers with the signal `sig`
:attr sig: `signal.SIG*` value
"""
for pid in self.WORKERS.keys():
self.kill_worker(pid, sig)
def kill_worker(self, pid, sig):
"""\
Kill a worker
:attr pid: int, worker pid
:attr sig: `signal.SIG*` value
"""
try:
os.kill(pid, sig)
except OSError as e:
if e.errno == errno.ESRCH:
try:
worker = self.WORKERS.pop(pid)
worker.tmp.close()
self.cfg.worker_exit(self, worker)
return
except (KeyError, OSError):
return
raise
| mit | 1,040,810,405,387,226,100 | 29.550179 | 77 | 0.526486 | false |
nikesh-mahalka/cinder | cinder/api/views/cgsnapshots.py | 23 | 2446 | # Copyright (C) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from cinder.api import common
LOG = logging.getLogger(__name__)
class ViewBuilder(common.ViewBuilder):
"""Model cgsnapshot API responses as a python dictionary."""
_collection_name = "cgsnapshots"
def __init__(self):
"""Initialize view builder."""
super(ViewBuilder, self).__init__()
def summary_list(self, request, cgsnapshots):
"""Show a list of cgsnapshots without many details."""
return self._list_view(self.summary, request, cgsnapshots)
def detail_list(self, request, cgsnapshots):
"""Detailed view of a list of cgsnapshots ."""
return self._list_view(self.detail, request, cgsnapshots)
def summary(self, request, cgsnapshot):
"""Generic, non-detailed view of a cgsnapshot."""
return {
'cgsnapshot': {
'id': cgsnapshot['id'],
'name': cgsnapshot['name']
}
}
def detail(self, request, cgsnapshot):
"""Detailed view of a single cgsnapshot."""
return {
'cgsnapshot': {
'id': cgsnapshot.get('id'),
'consistencygroup_id': cgsnapshot.get('consistencygroup_id'),
'status': cgsnapshot.get('status'),
'created_at': cgsnapshot.get('created_at'),
'name': cgsnapshot.get('name'),
'description': cgsnapshot.get('description')
}
}
def _list_view(self, func, request, cgsnapshots):
"""Provide a view for a list of cgsnapshots."""
cgsnapshots_list = [func(request, cgsnapshot)['cgsnapshot']
for cgsnapshot in cgsnapshots]
cgsnapshots_dict = dict(cgsnapshots=cgsnapshots_list)
return cgsnapshots_dict
| apache-2.0 | -1,952,304,181,464,326,000 | 34.449275 | 78 | 0.619379 | false |
vivianli32/TravelConnect | flask/lib/python3.4/site-packages/openid/extension.py | 13 | 1703 | import warnings
from openid import message as message_module
class Extension(object):
"""An interface for OpenID extensions.
@ivar ns_uri: The namespace to which to add the arguments for this
extension
"""
ns_uri = None
ns_alias = None
def getExtensionArgs(self):
"""Get the string arguments that should be added to an OpenID
message for this extension.
@returns: A dictionary of completely non-namespaced arguments
to be added. For example, if the extension's alias is
'uncle', and this method returns {'meat':'Hot Rats'}, the
final message will contain {'openid.uncle.meat':'Hot Rats'}
"""
raise NotImplementedError()
def toMessage(self, message=None):
"""Add the arguments from this extension to the provided
message, or create a new message containing only those
arguments.
@returns: The message with the extension arguments added
"""
if message is None:
warnings.warn(
'Passing None to Extension.toMessage is deprecated. '
'Creating a message assuming you want OpenID 2.',
DeprecationWarning, stacklevel=2)
message = message_module.Message(message_module.OPENID2_NS)
implicit = message.isOpenID1()
try:
message.namespaces.addAlias(self.ns_uri, self.ns_alias,
implicit=implicit)
except KeyError:
if message.namespaces.getAlias(self.ns_uri) != self.ns_alias:
raise
message.updateArgs(self.ns_uri, self.getExtensionArgs())
return message
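# Concrete extensions (for example the simple registration request/response classes in
# openid.extensions.sreg) build on this base by supplying ns_uri, ns_alias and
# getExtensionArgs().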
| mit | -2,377,073,209,210,731,500 | 33.06 | 73 | 0.616559 | false |
the76thHunter/tmdbsimple | tmdbsimple/search.py | 8 | 6919 | # -*- coding: utf-8 -*-
"""
tmdbsimple.search
~~~~~~~~~~~~~~~~~
This module implements the Search functionality of tmdbsimple.
Created by Celia Oakley on 2013-10-31.
:copyright: (c) 2013-2014 by Celia Oakley
:license: GPLv3, see LICENSE for more details
"""
from .base import TMDB
class Search(TMDB):
"""
Search functionality
See: http://docs.themoviedb.apiary.io/#search
"""
BASE_PATH = 'search'
URLS = {
'movie': '/movie',
'collection': '/collection',
'tv': '/tv',
'person': '/person',
'list': '/list',
'company': '/company',
'keyword': '/keyword',
'multi': '/multi'
}
def movie(self, **kwargs):
"""
Search for movies by title.
Args:
            query: CGI escaped string.
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
include_adult: (optional) Toggle the inclusion of adult titles.
Expected value is True or False.
year: (optional) Filter the results release dates to matches that
include this value.
primary_release_year: (optional) Filter the results so that only
the primary release dates have this value.
search_type: (optional) By default, the search type is 'phrase'.
This is almost guaranteed the option you will want.
It's a great all purpose search type and by far the
most tuned for every day querying. For those wanting
more of an "autocomplete" type search, set this
option to 'ngram'.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('movie')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
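    # Illustrative use (assumes the module-level API key has been configured):
    #   search = Search()
    #   response = search.movie(query='The Matrix')
    #   titles = [m['title'] for m in search.results]
    # _set_attrs_to_values also exposes the response keys (results, page, ...) as
    # attributes on the Search instance.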
def collection(self, **kwargs):
"""
Search for collections by name.
Args:
            query: CGI escaped string.
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('collection')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def tv(self, **kwargs):
"""
Search for TV shows by title.
Args:
            query: CGI escaped string.
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
first_air_date_year: (optional) Filter the results to only match
                shows that have an air date with this value.
search_type: (optional) By default, the search type is 'phrase'.
This is almost guaranteed the option you will want.
It's a great all purpose search type and by far the
most tuned for every day querying. For those wanting
more of an "autocomplete" type search, set this
option to 'ngram'.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('tv')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def person(self, **kwargs):
"""
Search for people by name.
Args:
            query: CGI escaped string.
page: (optional) Minimum value of 1. Expected value is an integer.
include_adult: (optional) Toggle the inclusion of adult titles.
Expected value is True or False.
search_type: (optional) By default, the search type is 'phrase'.
This is almost guaranteed the option you will want.
It's a great all purpose search type and by far the
most tuned for every day querying. For those wanting
more of an "autocomplete" type search, set this
option to 'ngram'.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('person')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def list(self, **kwargs):
"""
Search for lists by name and description.
Args:
            query: CGI escaped string.
page: (optional) Minimum value of 1. Expected value is an integer.
include_adult: (optional) Toggle the inclusion of adult titles.
Expected value is True or False.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('list')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def company(self, **kwargs):
"""
Search for companies by name.
Args:
            query: CGI escaped string.
page: (optional) Minimum value of 1. Expected value is an integer.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('company')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def keyword(self, **kwargs):
"""
Search for keywords by name.
Args:
            query: CGI escaped string.
page: (optional) Minimum value of 1. Expected value is an integer.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('keyword')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def multi(self, **kwargs):
"""
Search the movie, tv show and person collections with a single query.
Args:
            query: CGI escaped string.
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
include_adult: (optional) Toggle the inclusion of adult titles.
Expected value is True or False.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('multi')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response | gpl-3.0 | -1,946,622,739,095,508,700 | 32.921569 | 78 | 0.552247 | false |
MQQiang/kbengine | kbe/res/scripts/common/Lib/site-packages/setuptools/command/easy_install.py | 206 | 72706 | #!/usr/bin/env python
"""
Easy Install
------------
A tool for doing automatic download/extract/build of distutils-based Python
packages. For detailed documentation, see the accompanying EasyInstall.txt
file, or visit the `EasyInstall home page`__.
__ https://pythonhosted.org/setuptools/easy_install.html
"""
import sys
import os
import zipimport
import shutil
import tempfile
import zipfile
import re
import stat
import random
import platform
import textwrap
import warnings
import site
import struct
from glob import glob
from distutils import log, dir_util
import pkg_resources
from setuptools import Command, _dont_write_bytecode
from setuptools.sandbox import run_setup
from setuptools.py31compat import get_path, get_config_vars
from distutils.util import get_platform
from distutils.util import convert_path, subst_vars
from distutils.errors import DistutilsArgError, DistutilsOptionError, \
DistutilsError, DistutilsPlatformError
from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
from setuptools.command import setopt
from setuptools.archive_util import unpack_archive
from setuptools.package_index import PackageIndex
from setuptools.package_index import URL_SCHEME
from setuptools.command import bdist_egg, egg_info
from setuptools.compat import (iteritems, maxsize, basestring, unicode,
reraise)
from pkg_resources import (
yield_lines, normalize_path, resource_string, ensure_directory,
get_distribution, find_distributions, Environment, Requirement,
Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
VersionConflict, DEVELOP_DIST,
)
sys_executable = os.environ.get('__VENV_LAUNCHER__',
os.path.normpath(sys.executable))
__all__ = [
'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
'main', 'get_exe_prefixes',
]
def is_64bit():
return struct.calcsize("P") == 8
def samefile(p1, p2):
both_exist = os.path.exists(p1) and os.path.exists(p2)
use_samefile = hasattr(os.path, 'samefile') and both_exist
if use_samefile:
return os.path.samefile(p1, p2)
norm_p1 = os.path.normpath(os.path.normcase(p1))
norm_p2 = os.path.normpath(os.path.normcase(p2))
return norm_p1 == norm_p2
if sys.version_info <= (3,):
def _to_ascii(s):
return s
def isascii(s):
try:
unicode(s, 'ascii')
return True
except UnicodeError:
return False
else:
def _to_ascii(s):
return s.encode('ascii')
def isascii(s):
try:
s.encode('ascii')
return True
except UnicodeError:
return False
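# _to_ascii / isascii paper over the bytes-vs-text differences between Python 2 and 3
# for the ASCII handling used elsewhere in this module.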
class easy_install(Command):
"""Manage a download/build/install process"""
description = "Find/get/install Python packages"
command_consumes_arguments = True
user_options = [
('prefix=', None, "installation prefix"),
("zip-ok", "z", "install package as a zipfile"),
("multi-version", "m", "make apps have to require() a version"),
("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
("install-dir=", "d", "install package to DIR"),
("script-dir=", "s", "install scripts to DIR"),
("exclude-scripts", "x", "Don't install scripts"),
("always-copy", "a", "Copy all needed packages to install dir"),
("index-url=", "i", "base URL of Python Package Index"),
("find-links=", "f", "additional URL(s) to search for packages"),
("build-directory=", "b",
"download/extract/build in DIR; keep the results"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
('record=', None,
"filename in which to record list of installed files"),
('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
('site-dirs=','S',"list of directories where .pth files work"),
('editable', 'e', "Install specified packages in editable form"),
('no-deps', 'N', "don't install dependencies"),
('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
('local-snapshots-ok', 'l',
"allow building eggs from local checkouts"),
('version', None, "print version information and exit"),
('no-find-links', None,
"Don't load find-links defined in packages being installed")
]
boolean_options = [
'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
'editable',
'no-deps', 'local-snapshots-ok', 'version'
]
if site.ENABLE_USER_SITE:
help_msg = "install in user site-package '%s'" % site.USER_SITE
user_options.append(('user', None, help_msg))
boolean_options.append('user')
negative_opt = {'always-unzip': 'zip-ok'}
create_index = PackageIndex
def initialize_options(self):
if site.ENABLE_USER_SITE:
whereami = os.path.abspath(__file__)
self.user = whereami.startswith(site.USER_SITE)
else:
self.user = 0
self.zip_ok = self.local_snapshots_ok = None
self.install_dir = self.script_dir = self.exclude_scripts = None
self.index_url = None
self.find_links = None
self.build_directory = None
self.args = None
self.optimize = self.record = None
self.upgrade = self.always_copy = self.multi_version = None
self.editable = self.no_deps = self.allow_hosts = None
self.root = self.prefix = self.no_report = None
self.version = None
self.install_purelib = None # for pure module distributions
self.install_platlib = None # non-pure (dists w/ extensions)
self.install_headers = None # for C/C++ headers
self.install_lib = None # set to either purelib or platlib
self.install_scripts = None
self.install_data = None
self.install_base = None
self.install_platbase = None
if site.ENABLE_USER_SITE:
self.install_userbase = site.USER_BASE
self.install_usersite = site.USER_SITE
else:
self.install_userbase = None
self.install_usersite = None
self.no_find_links = None
# Options not specifiable via command line
self.package_index = None
self.pth_file = self.always_copy_from = None
self.site_dirs = None
self.installed_projects = {}
self.sitepy_installed = False
# Always read easy_install options, even if we are subclassed, or have
# an independent instance created. This ensures that defaults will
# always come from the standard configuration file(s)' "easy_install"
# section, even if this is a "develop" or "install" command, or some
# other embedding.
self._dry_run = None
self.verbose = self.distribution.verbose
self.distribution._set_command_options(
self, self.distribution.get_option_dict('easy_install')
)
def delete_blockers(self, blockers):
for filename in blockers:
if os.path.exists(filename) or os.path.islink(filename):
log.info("Deleting %s", filename)
if not self.dry_run:
if os.path.isdir(filename) and not os.path.islink(filename):
rmtree(filename)
else:
os.unlink(filename)
def finalize_options(self):
if self.version:
print('setuptools %s' % get_distribution('setuptools').version)
sys.exit()
py_version = sys.version.split()[0]
prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')
self.config_vars = {
'dist_name': self.distribution.get_name(),
'dist_version': self.distribution.get_version(),
'dist_fullname': self.distribution.get_fullname(),
'py_version': py_version,
'py_version_short': py_version[0:3],
'py_version_nodot': py_version[0] + py_version[2],
'sys_prefix': prefix,
'prefix': prefix,
'sys_exec_prefix': exec_prefix,
'exec_prefix': exec_prefix,
# Only python 3.2+ has abiflags
'abiflags': getattr(sys, 'abiflags', ''),
}
if site.ENABLE_USER_SITE:
self.config_vars['userbase'] = self.install_userbase
self.config_vars['usersite'] = self.install_usersite
# fix the install_dir if "--user" was used
#XXX: duplicate of the code in the setup command
if self.user and site.ENABLE_USER_SITE:
self.create_home_path()
if self.install_userbase is None:
raise DistutilsPlatformError(
"User base directory is not specified")
self.install_base = self.install_platbase = self.install_userbase
if os.name == 'posix':
self.select_scheme("unix_user")
else:
self.select_scheme(os.name + "_user")
self.expand_basedirs()
self.expand_dirs()
self._expand('install_dir','script_dir','build_directory','site_dirs')
# If a non-default installation directory was specified, default the
# script directory to match it.
if self.script_dir is None:
self.script_dir = self.install_dir
if self.no_find_links is None:
self.no_find_links = False
# Let install_dir get set by install_lib command, which in turn
# gets its info from the install command, and takes into account
# --prefix and --home and all that other crud.
self.set_undefined_options('install_lib',
('install_dir','install_dir')
)
# Likewise, set default script_dir from 'install_scripts.install_dir'
self.set_undefined_options('install_scripts',
('install_dir', 'script_dir')
)
if self.user and self.install_purelib:
self.install_dir = self.install_purelib
self.script_dir = self.install_scripts
# default --record from the install command
self.set_undefined_options('install', ('record', 'record'))
# Should this be moved to the if statement below? It's not used
# elsewhere
normpath = map(normalize_path, sys.path)
self.all_site_dirs = get_site_dirs()
if self.site_dirs is not None:
site_dirs = [
os.path.expanduser(s.strip()) for s in self.site_dirs.split(',')
]
for d in site_dirs:
if not os.path.isdir(d):
log.warn("%s (in --site-dirs) does not exist", d)
elif normalize_path(d) not in normpath:
raise DistutilsOptionError(
d+" (in --site-dirs) is not on sys.path"
)
else:
self.all_site_dirs.append(normalize_path(d))
if not self.editable: self.check_site_dir()
self.index_url = self.index_url or "https://pypi.python.org/simple"
self.shadow_path = self.all_site_dirs[:]
for path_item in self.install_dir, normalize_path(self.script_dir):
if path_item not in self.shadow_path:
self.shadow_path.insert(0, path_item)
if self.allow_hosts is not None:
hosts = [s.strip() for s in self.allow_hosts.split(',')]
else:
hosts = ['*']
if self.package_index is None:
self.package_index = self.create_index(
self.index_url, search_path = self.shadow_path, hosts=hosts,
)
self.local_index = Environment(self.shadow_path+sys.path)
if self.find_links is not None:
if isinstance(self.find_links, basestring):
self.find_links = self.find_links.split()
else:
self.find_links = []
if self.local_snapshots_ok:
self.package_index.scan_egg_links(self.shadow_path+sys.path)
if not self.no_find_links:
self.package_index.add_find_links(self.find_links)
self.set_undefined_options('install_lib', ('optimize','optimize'))
if not isinstance(self.optimize,int):
try:
self.optimize = int(self.optimize)
if not (0 <= self.optimize <= 2): raise ValueError
except ValueError:
raise DistutilsOptionError("--optimize must be 0, 1, or 2")
if self.editable and not self.build_directory:
raise DistutilsArgError(
"Must specify a build directory (-b) when using --editable"
)
if not self.args:
raise DistutilsArgError(
"No urls, filenames, or requirements specified (see --help)")
self.outputs = []
def _expand_attrs(self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix' or os.name == 'nt':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs(self):
"""Calls `os.path.expanduser` on install_base, install_platbase and
root."""
self._expand_attrs(['install_base', 'install_platbase', 'root'])
def expand_dirs(self):
"""Calls `os.path.expanduser` on install dirs."""
self._expand_attrs(['install_purelib', 'install_platlib',
'install_lib', 'install_headers',
'install_scripts', 'install_data',])
def run(self):
if self.verbose != self.distribution.verbose:
log.set_verbosity(self.verbose)
try:
for spec in self.args:
self.easy_install(spec, not self.no_deps)
if self.record:
outputs = self.outputs
if self.root: # strip any package prefix
root_len = len(self.root)
for counter in range(len(outputs)):
outputs[counter] = outputs[counter][root_len:]
from distutils import file_util
self.execute(
file_util.write_file, (self.record, outputs),
"writing list of installed files to '%s'" %
self.record
)
self.warn_deprecated_options()
finally:
log.set_verbosity(self.distribution.verbose)
def pseudo_tempname(self):
"""Return a pseudo-tempname base in the install directory.
This code is intentionally naive; if a malicious party can write to
the target directory you're already in deep doodoo.
"""
try:
pid = os.getpid()
except:
pid = random.randint(0, maxsize)
return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
def warn_deprecated_options(self):
pass
def check_site_dir(self):
"""Verify that self.install_dir is .pth-capable dir, if needed"""
instdir = normalize_path(self.install_dir)
pth_file = os.path.join(instdir,'easy-install.pth')
# Is it a configured, PYTHONPATH, implicit, or explicit site dir?
is_site_dir = instdir in self.all_site_dirs
if not is_site_dir and not self.multi_version:
# No? Then directly test whether it does .pth file processing
is_site_dir = self.check_pth_processing()
else:
# make sure we can write to target dir
testfile = self.pseudo_tempname()+'.write-test'
test_exists = os.path.exists(testfile)
try:
if test_exists: os.unlink(testfile)
open(testfile,'w').close()
os.unlink(testfile)
except (OSError,IOError):
self.cant_write_to_target()
if not is_site_dir and not self.multi_version:
# Can't install non-multi to non-site dir
raise DistutilsError(self.no_default_version_msg())
if is_site_dir:
if self.pth_file is None:
self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
else:
self.pth_file = None
PYTHONPATH = os.environ.get('PYTHONPATH','').split(os.pathsep)
if instdir not in map(normalize_path, [_f for _f in PYTHONPATH if _f]):
# only PYTHONPATH dirs need a site.py, so pretend it's there
self.sitepy_installed = True
elif self.multi_version and not os.path.exists(pth_file):
self.sitepy_installed = True # don't need site.py in this case
self.pth_file = None # and don't create a .pth file
self.install_dir = instdir
def cant_write_to_target(self):
template = """can't create or remove files in install directory
The following error occurred while trying to add or remove files in the
installation directory:
%s
The installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
"""
msg = template % (sys.exc_info()[1], self.install_dir,)
if not os.path.exists(self.install_dir):
msg += """
This directory does not currently exist. Please create it and try again, or
choose a different installation directory (using the -d or --install-dir
option).
"""
else:
msg += """
Perhaps your account does not have write access to this directory? If the
installation directory is a system-owned directory, you may need to sign in
as the administrator or "root" account. If you do not have administrative
access to this machine, you may wish to choose a different installation
directory, preferably one that is listed in your PYTHONPATH environment
variable.
For information on other options, you may wish to consult the
documentation at:
https://pythonhosted.org/setuptools/easy_install.html
Please make the appropriate changes for your system and try again.
"""
raise DistutilsError(msg)
def check_pth_processing(self):
"""Empirically verify whether .pth files are supported in inst. dir"""
instdir = self.install_dir
log.info("Checking .pth file support in %s", instdir)
pth_file = self.pseudo_tempname()+".pth"
ok_file = pth_file+'.ok'
ok_exists = os.path.exists(ok_file)
try:
if ok_exists: os.unlink(ok_file)
dirname = os.path.dirname(ok_file)
if not os.path.exists(dirname):
os.makedirs(dirname)
f = open(pth_file,'w')
except (OSError,IOError):
self.cant_write_to_target()
else:
try:
f.write("import os; f = open(%r, 'w'); f.write('OK'); f.close()\n" % (ok_file,))
f.close()
f=None
executable = sys.executable
if os.name=='nt':
dirname,basename = os.path.split(executable)
alt = os.path.join(dirname,'pythonw.exe')
if basename.lower()=='python.exe' and os.path.exists(alt):
# use pythonw.exe to avoid opening a console window
executable = alt
from distutils.spawn import spawn
spawn([executable,'-E','-c','pass'],0)
if os.path.exists(ok_file):
log.info(
"TEST PASSED: %s appears to support .pth files",
instdir
)
return True
finally:
if f:
f.close()
if os.path.exists(ok_file):
os.unlink(ok_file)
if os.path.exists(pth_file):
os.unlink(pth_file)
if not self.multi_version:
log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
return False
def install_egg_scripts(self, dist):
"""Write all the scripts for `dist`, unless scripts are excluded"""
if not self.exclude_scripts and dist.metadata_isdir('scripts'):
for script_name in dist.metadata_listdir('scripts'):
if dist.metadata_isdir('scripts/' + script_name):
# The "script" is a directory, likely a Python 3
# __pycache__ directory, so skip it.
continue
self.install_script(
dist, script_name,
dist.get_metadata('scripts/'+script_name)
)
self.install_wrapper_scripts(dist)
def add_output(self, path):
if os.path.isdir(path):
for base, dirs, files in os.walk(path):
for filename in files:
self.outputs.append(os.path.join(base,filename))
else:
self.outputs.append(path)
def not_editable(self, spec):
if self.editable:
raise DistutilsArgError(
"Invalid argument %r: you can't use filenames or URLs "
"with --editable (except via the --find-links option)."
% (spec,)
)
def check_editable(self,spec):
if not self.editable:
return
if os.path.exists(os.path.join(self.build_directory, spec.key)):
raise DistutilsArgError(
"%r already exists in %s; can't do a checkout there" %
(spec.key, self.build_directory)
)
def easy_install(self, spec, deps=False):
tmpdir = tempfile.mkdtemp(prefix="easy_install-")
download = None
if not self.editable: self.install_site_py()
try:
if not isinstance(spec,Requirement):
if URL_SCHEME(spec):
# It's a url, download it to tmpdir and process
self.not_editable(spec)
download = self.package_index.download(spec, tmpdir)
return self.install_item(None, download, tmpdir, deps, True)
elif os.path.exists(spec):
# Existing file or directory, just process it directly
self.not_editable(spec)
return self.install_item(None, spec, tmpdir, deps, True)
else:
spec = parse_requirement_arg(spec)
self.check_editable(spec)
dist = self.package_index.fetch_distribution(
spec, tmpdir, self.upgrade, self.editable, not self.always_copy,
self.local_index
)
if dist is None:
msg = "Could not find suitable distribution for %r" % spec
if self.always_copy:
msg+=" (--always-copy skips system and development eggs)"
raise DistutilsError(msg)
elif dist.precedence==DEVELOP_DIST:
# .egg-info dists don't need installing, just process deps
self.process_distribution(spec, dist, deps, "Using")
return dist
else:
return self.install_item(spec, dist.location, tmpdir, deps)
finally:
if os.path.exists(tmpdir):
rmtree(tmpdir)
def install_item(self, spec, download, tmpdir, deps, install_needed=False):
# Installation is also needed if the file is in tmpdir or is not an egg
install_needed = install_needed or self.always_copy
install_needed = install_needed or os.path.dirname(download) == tmpdir
install_needed = install_needed or not download.endswith('.egg')
install_needed = install_needed or (
self.always_copy_from is not None and
os.path.dirname(normalize_path(download)) ==
normalize_path(self.always_copy_from)
)
if spec and not install_needed:
# at this point, we know it's a local .egg, we just don't know if
# it's already installed.
for dist in self.local_index[spec.project_name]:
if dist.location==download:
break
else:
install_needed = True # it's not in the local index
log.info("Processing %s", os.path.basename(download))
if install_needed:
dists = self.install_eggs(spec, download, tmpdir)
for dist in dists:
self.process_distribution(spec, dist, deps)
else:
dists = [self.egg_distribution(download)]
self.process_distribution(spec, dists[0], deps, "Using")
if spec is not None:
for dist in dists:
if dist in spec:
return dist
def select_scheme(self, name):
"""Sets the install directories by applying the install schemes."""
# it's the caller's problem if they supply a bad name!
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key])
def process_distribution(self, requirement, dist, deps=True, *info):
self.update_pth(dist)
self.package_index.add(dist)
self.local_index.add(dist)
self.install_egg_scripts(dist)
self.installed_projects[dist.key] = dist
log.info(self.installation_report(requirement, dist, *info))
if (dist.has_metadata('dependency_links.txt') and
not self.no_find_links):
self.package_index.add_find_links(
dist.get_metadata_lines('dependency_links.txt')
)
if not deps and not self.always_copy:
return
elif requirement is not None and dist.key != requirement.key:
log.warn("Skipping dependencies for %s", dist)
return # XXX this is not the distribution we were looking for
elif requirement is None or dist not in requirement:
# if we wound up with a different version, resolve what we've got
distreq = dist.as_requirement()
requirement = requirement or distreq
requirement = Requirement(
distreq.project_name, distreq.specs, requirement.extras
)
log.info("Processing dependencies for %s", requirement)
try:
distros = WorkingSet([]).resolve(
[requirement], self.local_index, self.easy_install
)
except DistributionNotFound:
e = sys.exc_info()[1]
raise DistutilsError(
"Could not find required distribution %s" % e.args
)
except VersionConflict:
e = sys.exc_info()[1]
raise DistutilsError(
"Installed distribution %s conflicts with requirement %s"
% e.args
)
if self.always_copy or self.always_copy_from:
# Force all the relevant distros to be copied or activated
for dist in distros:
if dist.key not in self.installed_projects:
self.easy_install(dist.as_requirement())
log.info("Finished processing dependencies for %s", requirement)
def should_unzip(self, dist):
if self.zip_ok is not None:
return not self.zip_ok
if dist.has_metadata('not-zip-safe'):
return True
if not dist.has_metadata('zip-safe'):
return True
return False
def maybe_move(self, spec, dist_filename, setup_base):
dst = os.path.join(self.build_directory, spec.key)
if os.path.exists(dst):
msg = "%r already exists in %s; build directory %s will not be kept"
log.warn(msg, spec.key, self.build_directory, setup_base)
return setup_base
if os.path.isdir(dist_filename):
setup_base = dist_filename
else:
if os.path.dirname(dist_filename)==setup_base:
os.unlink(dist_filename) # get it out of the tmp dir
contents = os.listdir(setup_base)
if len(contents)==1:
dist_filename = os.path.join(setup_base,contents[0])
if os.path.isdir(dist_filename):
# if the only thing there is a directory, move it instead
setup_base = dist_filename
ensure_directory(dst)
shutil.move(setup_base, dst)
return dst
def install_wrapper_scripts(self, dist):
if not self.exclude_scripts:
for args in get_script_args(dist):
self.write_script(*args)
def install_script(self, dist, script_name, script_text, dev_path=None):
"""Generate a legacy script wrapper and install it"""
spec = str(dist.as_requirement())
is_script = is_python_script(script_text, script_name)
def get_template(filename):
"""
There are a couple of template scripts in the package. This
function loads one of them and prepares it for use.
These templates use triple-quotes to escape variable
substitutions so the scripts get the 2to3 treatment when built
on Python 3. The templates cannot use triple-quotes naturally.
"""
raw_bytes = resource_string('setuptools', template_name)
template_str = raw_bytes.decode('utf-8')
clean_template = template_str.replace('"""', '')
return clean_template
if is_script:
template_name = 'script template.py'
if dev_path:
template_name = template_name.replace('.py', ' (dev).py')
script_text = (get_script_header(script_text) +
get_template(template_name) % locals())
self.write_script(script_name, _to_ascii(script_text), 'b')
def write_script(self, script_name, contents, mode="t", blockers=()):
"""Write an executable file to the scripts directory"""
self.delete_blockers( # clean up old .py/.pyw w/o a script
[os.path.join(self.script_dir,x) for x in blockers])
log.info("Installing %s script to %s", script_name, self.script_dir)
target = os.path.join(self.script_dir, script_name)
self.add_output(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
if os.path.exists(target):
os.unlink(target)
f = open(target,"w"+mode)
f.write(contents)
f.close()
chmod(target, 0x1FF-mask) # 0777
def install_eggs(self, spec, dist_filename, tmpdir):
# .egg dirs or files are already built, so just return them
if dist_filename.lower().endswith('.egg'):
return [self.install_egg(dist_filename, tmpdir)]
elif dist_filename.lower().endswith('.exe'):
return [self.install_exe(dist_filename, tmpdir)]
# Anything else, try to extract and build
setup_base = tmpdir
if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
unpack_archive(dist_filename, tmpdir, self.unpack_progress)
elif os.path.isdir(dist_filename):
setup_base = os.path.abspath(dist_filename)
if (setup_base.startswith(tmpdir) # something we downloaded
and self.build_directory and spec is not None):
setup_base = self.maybe_move(spec, dist_filename, setup_base)
# Find the setup.py file
setup_script = os.path.join(setup_base, 'setup.py')
if not os.path.exists(setup_script):
setups = glob(os.path.join(setup_base, '*', 'setup.py'))
if not setups:
raise DistutilsError(
"Couldn't find a setup script in %s" % os.path.abspath(dist_filename)
)
if len(setups)>1:
raise DistutilsError(
"Multiple setup scripts in %s" % os.path.abspath(dist_filename)
)
setup_script = setups[0]
# Now run it, and return the result
if self.editable:
log.info(self.report_editable(spec, setup_script))
return []
else:
return self.build_and_install(setup_script, setup_base)
def egg_distribution(self, egg_path):
if os.path.isdir(egg_path):
metadata = PathMetadata(egg_path,os.path.join(egg_path,'EGG-INFO'))
else:
metadata = EggMetadata(zipimport.zipimporter(egg_path))
return Distribution.from_filename(egg_path,metadata=metadata)
def install_egg(self, egg_path, tmpdir):
destination = os.path.join(self.install_dir,os.path.basename(egg_path))
destination = os.path.abspath(destination)
if not self.dry_run:
ensure_directory(destination)
dist = self.egg_distribution(egg_path)
if not samefile(egg_path, destination):
if os.path.isdir(destination) and not os.path.islink(destination):
dir_util.remove_tree(destination, dry_run=self.dry_run)
elif os.path.exists(destination):
self.execute(os.unlink,(destination,),"Removing "+destination)
uncache_zipdir(destination)
if os.path.isdir(egg_path):
if egg_path.startswith(tmpdir):
f,m = shutil.move, "Moving"
else:
f,m = shutil.copytree, "Copying"
elif self.should_unzip(dist):
self.mkpath(destination)
f,m = self.unpack_and_compile, "Extracting"
elif egg_path.startswith(tmpdir):
f,m = shutil.move, "Moving"
else:
f,m = shutil.copy2, "Copying"
self.execute(f, (egg_path, destination),
(m+" %s to %s") %
(os.path.basename(egg_path),os.path.dirname(destination)))
self.add_output(destination)
return self.egg_distribution(destination)
def install_exe(self, dist_filename, tmpdir):
# See if it's valid, get data
cfg = extract_wininst_cfg(dist_filename)
if cfg is None:
raise DistutilsError(
"%s is not a valid distutils Windows .exe" % dist_filename
)
# Create a dummy distribution object until we build the real distro
dist = Distribution(
None,
project_name=cfg.get('metadata','name'),
version=cfg.get('metadata','version'), platform=get_platform(),
)
# Convert the .exe to an unpacked egg
egg_path = dist.location = os.path.join(tmpdir, dist.egg_name()+'.egg')
egg_tmp = egg_path + '.tmp'
_egg_info = os.path.join(egg_tmp, 'EGG-INFO')
pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
ensure_directory(pkg_inf) # make sure EGG-INFO dir exists
dist._provider = PathMetadata(egg_tmp, _egg_info) # XXX
self.exe_to_egg(dist_filename, egg_tmp)
# Write EGG-INFO/PKG-INFO
if not os.path.exists(pkg_inf):
f = open(pkg_inf,'w')
f.write('Metadata-Version: 1.0\n')
for k,v in cfg.items('metadata'):
if k != 'target_version':
f.write('%s: %s\n' % (k.replace('_','-').title(), v))
f.close()
script_dir = os.path.join(_egg_info,'scripts')
self.delete_blockers( # delete entry-point scripts to avoid duping
[os.path.join(script_dir,args[0]) for args in get_script_args(dist)]
)
# Build .egg file from tmpdir
bdist_egg.make_zipfile(
egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run
)
# install the .egg
return self.install_egg(egg_path, tmpdir)
def exe_to_egg(self, dist_filename, egg_tmp):
"""Extract a bdist_wininst to the directories an egg would use"""
# Check for .pth file and set up prefix translations
prefixes = get_exe_prefixes(dist_filename)
to_compile = []
native_libs = []
top_level = {}
def process(src,dst):
s = src.lower()
for old,new in prefixes:
if s.startswith(old):
src = new+src[len(old):]
parts = src.split('/')
dst = os.path.join(egg_tmp, *parts)
dl = dst.lower()
if dl.endswith('.pyd') or dl.endswith('.dll'):
parts[-1] = bdist_egg.strip_module(parts[-1])
top_level[os.path.splitext(parts[0])[0]] = 1
native_libs.append(src)
elif dl.endswith('.py') and old!='SCRIPTS/':
top_level[os.path.splitext(parts[0])[0]] = 1
to_compile.append(dst)
return dst
if not src.endswith('.pth'):
log.warn("WARNING: can't process %s", src)
return None
# extract, tracking .pyd/.dll->native_libs and .py -> to_compile
unpack_archive(dist_filename, egg_tmp, process)
stubs = []
for res in native_libs:
if res.lower().endswith('.pyd'): # create stubs for .pyd's
parts = res.split('/')
resource = parts[-1]
parts[-1] = bdist_egg.strip_module(parts[-1])+'.py'
pyfile = os.path.join(egg_tmp, *parts)
to_compile.append(pyfile)
stubs.append(pyfile)
bdist_egg.write_stub(resource, pyfile)
self.byte_compile(to_compile) # compile .py's
bdist_egg.write_safety_flag(os.path.join(egg_tmp,'EGG-INFO'),
bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag
for name in 'top_level','native_libs':
if locals()[name]:
txt = os.path.join(egg_tmp, 'EGG-INFO', name+'.txt')
if not os.path.exists(txt):
f = open(txt,'w')
f.write('\n'.join(locals()[name])+'\n')
f.close()
def installation_report(self, req, dist, what="Installed"):
"""Helpful installation message for display to package users"""
msg = "\n%(what)s %(eggloc)s%(extras)s"
if self.multi_version and not self.no_report:
msg += """
Because this distribution was installed --multi-version, before you can
import modules from this package in an application, you will need to
'import pkg_resources' and then use a 'require()' call similar to one of
these examples, in order to select the desired version:
pkg_resources.require("%(name)s") # latest installed version
pkg_resources.require("%(name)s==%(version)s") # this exact version
pkg_resources.require("%(name)s>=%(version)s") # this version or higher
"""
if self.install_dir not in map(normalize_path,sys.path):
msg += """
Note also that the installation directory must be on sys.path at runtime for
this to work. (e.g. by being the application's script directory, by being on
PYTHONPATH, or by being added to sys.path by your code.)
"""
eggloc = dist.location
name = dist.project_name
version = dist.version
extras = '' # TODO: self.report_extras(req, dist)
return msg % locals()
def report_editable(self, spec, setup_script):
dirname = os.path.dirname(setup_script)
python = sys.executable
return """\nExtracted editable version of %(spec)s to %(dirname)s
If it uses setuptools in its setup script, you can activate it in
"development" mode by going to that directory and running::
%(python)s setup.py develop
See the setuptools documentation for the "develop" command for more info.
""" % locals()
def run_setup(self, setup_script, setup_base, args):
sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
sys.modules.setdefault('distutils.command.egg_info', egg_info)
args = list(args)
if self.verbose>2:
v = 'v' * (self.verbose - 1)
args.insert(0,'-'+v)
elif self.verbose<2:
args.insert(0,'-q')
if self.dry_run:
args.insert(0,'-n')
log.info(
"Running %s %s", setup_script[len(setup_base)+1:], ' '.join(args)
)
try:
run_setup(setup_script, args)
except SystemExit:
v = sys.exc_info()[1]
raise DistutilsError("Setup script exited with %s" % (v.args[0],))
def build_and_install(self, setup_script, setup_base):
args = ['bdist_egg', '--dist-dir']
dist_dir = tempfile.mkdtemp(
prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
)
try:
self._set_fetcher_options(os.path.dirname(setup_script))
args.append(dist_dir)
self.run_setup(setup_script, setup_base, args)
all_eggs = Environment([dist_dir])
eggs = []
for key in all_eggs:
for dist in all_eggs[key]:
eggs.append(self.install_egg(dist.location, setup_base))
if not eggs and not self.dry_run:
log.warn("No eggs found in %s (setup script problem?)",
dist_dir)
return eggs
finally:
rmtree(dist_dir)
log.set_verbosity(self.verbose) # restore our log verbosity
def _set_fetcher_options(self, base):
"""
When easy_install is about to run bdist_egg on a source dist, that
source dist might have 'setup_requires' directives, requiring
additional fetching. Ensure the fetcher options given to easy_install
are available to that command as well.
"""
# find the fetch options from easy_install and write them out
# to the setup.cfg file.
ei_opts = self.distribution.get_option_dict('easy_install').copy()
fetch_directives = (
'find_links', 'site_dirs', 'index_url', 'optimize',
'allow_hosts',
)
fetch_options = {}
for key, val in ei_opts.items():
if key not in fetch_directives: continue
fetch_options[key.replace('_', '-')] = val[1]
# create a settings dictionary suitable for `edit_config`
settings = dict(easy_install=fetch_options)
cfg_filename = os.path.join(base, 'setup.cfg')
setopt.edit_config(cfg_filename, settings)
def update_pth(self, dist):
if self.pth_file is None:
return
for d in self.pth_file[dist.key]: # drop old entries
if self.multi_version or d.location != dist.location:
log.info("Removing %s from easy-install.pth file", d)
self.pth_file.remove(d)
if d.location in self.shadow_path:
self.shadow_path.remove(d.location)
if not self.multi_version:
if dist.location in self.pth_file.paths:
log.info(
"%s is already the active version in easy-install.pth",
dist
)
else:
log.info("Adding %s to easy-install.pth file", dist)
self.pth_file.add(dist) # add new entry
if dist.location not in self.shadow_path:
self.shadow_path.append(dist.location)
if not self.dry_run:
self.pth_file.save()
if dist.key=='setuptools':
# Ensure that setuptools itself never becomes unavailable!
# XXX should this check for latest version?
filename = os.path.join(self.install_dir,'setuptools.pth')
if os.path.islink(filename): os.unlink(filename)
f = open(filename, 'wt')
f.write(self.pth_file.make_relative(dist.location)+'\n')
f.close()
def unpack_progress(self, src, dst):
# Progress filter for unpacking
log.debug("Unpacking %s to %s", src, dst)
return dst # only unpack-and-compile skips files for dry run
def unpack_and_compile(self, egg_path, destination):
to_compile = []
to_chmod = []
def pf(src, dst):
if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
to_compile.append(dst)
elif dst.endswith('.dll') or dst.endswith('.so'):
to_chmod.append(dst)
self.unpack_progress(src,dst)
return not self.dry_run and dst or None
unpack_archive(egg_path, destination, pf)
self.byte_compile(to_compile)
if not self.dry_run:
for f in to_chmod:
mode = ((os.stat(f)[stat.ST_MODE]) | 0x16D) & 0xFED # 0555, 07755
chmod(f, mode)
def byte_compile(self, to_compile):
if _dont_write_bytecode:
self.warn('byte-compiling is disabled, skipping.')
return
from distutils.util import byte_compile
try:
# try to make the byte compile messages quieter
log.set_verbosity(self.verbose - 1)
byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
if self.optimize:
byte_compile(
to_compile, optimize=self.optimize, force=1,
dry_run=self.dry_run
)
finally:
log.set_verbosity(self.verbose) # restore original verbosity
def no_default_version_msg(self):
template = """bad install directory or PYTHONPATH
You are attempting to install a package to a directory that is not
on PYTHONPATH and which Python does not read ".pth" files from. The
installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
and your PYTHONPATH environment variable currently contains:
%r
Here are some of your options for correcting the problem:
* You can choose a different installation directory, i.e., one that is
on PYTHONPATH or supports .pth files
* You can add the installation directory to the PYTHONPATH environment
variable. (It must then also be on PYTHONPATH whenever you run
Python and want to use the package(s) you are installing.)
* You can set up the installation directory to support ".pth" files by
using one of the approaches described here:
https://pythonhosted.org/setuptools/easy_install.html#custom-installation-locations
Please make the appropriate changes for your system and try again."""
return template % (self.install_dir, os.environ.get('PYTHONPATH',''))
def install_site_py(self):
"""Make sure there's a site.py in the target dir, if needed"""
if self.sitepy_installed:
return # already did it, or don't need to
sitepy = os.path.join(self.install_dir, "site.py")
source = resource_string("setuptools", "site-patch.py")
current = ""
if os.path.exists(sitepy):
log.debug("Checking existing site.py in %s", self.install_dir)
f = open(sitepy,'rb')
current = f.read()
# we want str, not bytes
if sys.version_info >= (3,):
current = current.decode()
f.close()
if not current.startswith('def __boot():'):
raise DistutilsError(
"%s is not a setuptools-generated site.py; please"
" remove it." % sitepy
)
if current != source:
log.info("Creating %s", sitepy)
if not self.dry_run:
ensure_directory(sitepy)
f = open(sitepy,'wb')
f.write(source)
f.close()
self.byte_compile([sitepy])
self.sitepy_installed = True
def create_home_path(self):
"""Create directories under ~."""
if not self.user:
return
home = convert_path(os.path.expanduser("~"))
for name, path in iteritems(self.config_vars):
if path.startswith(home) and not os.path.isdir(path):
self.debug_print("os.makedirs('%s', 0700)" % path)
os.makedirs(path, 0x1C0) # 0700
INSTALL_SCHEMES = dict(
posix = dict(
install_dir = '$base/lib/python$py_version_short/site-packages',
script_dir = '$base/bin',
),
)
DEFAULT_SCHEME = dict(
install_dir = '$base/Lib/site-packages',
script_dir = '$base/Scripts',
)
def _expand(self, *attrs):
config_vars = self.get_finalized_command('install').config_vars
if self.prefix:
# Set default install_dir/scripts from --prefix
config_vars = config_vars.copy()
config_vars['base'] = self.prefix
scheme = self.INSTALL_SCHEMES.get(os.name,self.DEFAULT_SCHEME)
for attr,val in scheme.items():
if getattr(self,attr,None) is None:
setattr(self,attr,val)
from distutils.util import subst_vars
for attr in attrs:
val = getattr(self, attr)
if val is not None:
val = subst_vars(val, config_vars)
if os.name == 'posix':
val = os.path.expanduser(val)
setattr(self, attr, val)
def get_site_dirs():
# return a list of 'site' dirs
sitedirs = [_f for _f in os.environ.get('PYTHONPATH',
'').split(os.pathsep) if _f]
prefixes = [sys.prefix]
if sys.exec_prefix != sys.prefix:
prefixes.append(sys.exec_prefix)
for prefix in prefixes:
if prefix:
if sys.platform in ('os2emx', 'riscos'):
sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
elif os.sep == '/':
sitedirs.extend([os.path.join(prefix,
"lib",
"python" + sys.version[:3],
"site-packages"),
os.path.join(prefix, "lib", "site-python")])
else:
sitedirs.extend(
[prefix, os.path.join(prefix, "lib", "site-packages")]
)
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
sitedirs.append(
os.path.join(home,
'Library',
'Python',
sys.version[:3],
'site-packages'))
lib_paths = get_path('purelib'), get_path('platlib')
for site_lib in lib_paths:
if site_lib not in sitedirs: sitedirs.append(site_lib)
if site.ENABLE_USER_SITE:
sitedirs.append(site.USER_SITE)
sitedirs = list(map(normalize_path, sitedirs))
return sitedirs
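# Illustrative only (not part of the original source): on a typical POSIX
# CPython the returned list looks roughly like the lines below; the exact
# entries depend on PYTHONPATH, sys.prefix and the site module.
#
# >>> get_site_dirs()   # doctest: +SKIP
# ['/usr/lib/python2.7/site-packages', '/usr/lib/site-python', ...]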
def expand_paths(inputs):
"""Yield sys.path directories that might contain "old-style" packages"""
seen = {}
for dirname in inputs:
dirname = normalize_path(dirname)
if dirname in seen:
continue
seen[dirname] = 1
if not os.path.isdir(dirname):
continue
files = os.listdir(dirname)
yield dirname, files
for name in files:
if not name.endswith('.pth'):
# We only care about the .pth files
continue
if name in ('easy-install.pth','setuptools.pth'):
# Ignore .pth files that we control
continue
# Read the .pth file
f = open(os.path.join(dirname,name))
lines = list(yield_lines(f))
f.close()
# Yield existing non-dupe, non-import directory lines from it
for line in lines:
if not line.startswith("import"):
line = normalize_path(line.rstrip())
if line not in seen:
seen[line] = 1
if not os.path.isdir(line):
continue
yield line, os.listdir(line)
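# Illustrative only (not part of the original source): expand_paths() yields
# each input directory plus any extra directories named in its .pth files.
#
# >>> for dirname, files in expand_paths(['/usr/lib/python2.7/site-packages']):
# ...     print(dirname)   # doctest: +SKIP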
def extract_wininst_cfg(dist_filename):
"""Extract configuration data from a bdist_wininst .exe
Returns a ConfigParser.RawConfigParser, or None
"""
f = open(dist_filename,'rb')
try:
endrec = zipfile._EndRecData(f)
if endrec is None:
return None
prepended = (endrec[9] - endrec[5]) - endrec[6]
if prepended < 12: # no wininst data here
return None
f.seek(prepended-12)
from setuptools.compat import StringIO, ConfigParser
import struct
tag, cfglen, bmlen = struct.unpack("<iii",f.read(12))
if tag not in (0x1234567A, 0x1234567B):
return None # not a valid tag
f.seek(prepended-(12+cfglen))
cfg = ConfigParser.RawConfigParser({'version':'','target_version':''})
try:
part = f.read(cfglen)
# part is in bytes, but we need to read up to the first null
# byte.
if sys.version_info >= (2,6):
null_byte = bytes([0])
else:
null_byte = chr(0)
config = part.split(null_byte, 1)[0]
# Now the config is in bytes, but for RawConfigParser, it should
# be text, so decode it.
config = config.decode(sys.getfilesystemencoding())
cfg.readfp(StringIO(config))
except ConfigParser.Error:
return None
if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
return None
return cfg
finally:
f.close()
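# Illustrative only (not part of the original source); the filename below is a
# made-up placeholder for a real bdist_wininst installer:
#
# >>> cfg = extract_wininst_cfg('Example-1.0.win32-py2.7.exe')        # doctest: +SKIP
# >>> cfg.get('metadata', 'name'), cfg.get('metadata', 'version')     # doctest: +SKIP
# ('Example', '1.0')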
def get_exe_prefixes(exe_filename):
"""Get exe->egg path translations for a given .exe file"""
prefixes = [
('PURELIB/', ''), ('PLATLIB/pywin32_system32', ''),
('PLATLIB/', ''),
('SCRIPTS/', 'EGG-INFO/scripts/'),
('DATA/lib/site-packages', ''),
]
z = zipfile.ZipFile(exe_filename)
try:
for info in z.infolist():
name = info.filename
parts = name.split('/')
if len(parts)==3 and parts[2]=='PKG-INFO':
if parts[1].endswith('.egg-info'):
prefixes.insert(0,('/'.join(parts[:2]), 'EGG-INFO/'))
break
if len(parts) != 2 or not name.endswith('.pth'):
continue
if name.endswith('-nspkg.pth'):
continue
if parts[0].upper() in ('PURELIB','PLATLIB'):
contents = z.read(name)
if sys.version_info >= (3,):
contents = contents.decode()
for pth in yield_lines(contents):
pth = pth.strip().replace('\\','/')
if not pth.startswith('import'):
prefixes.append((('%s/%s/' % (parts[0],pth)), ''))
finally:
z.close()
prefixes = [(x.lower(),y) for x, y in prefixes]
prefixes.sort()
prefixes.reverse()
return prefixes
def parse_requirement_arg(spec):
try:
return Requirement.parse(spec)
except ValueError:
raise DistutilsError(
"Not a URL, existing file, or requirement spec: %r" % (spec,)
)
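# Illustrative only (not part of the original source):
#
# >>> parse_requirement_arg('SQLObject>=0.12')   # doctest: +SKIP
# Requirement.parse('SQLObject>=0.12')
#
# Anything that is not a URL, an existing file, or a requirement spec raises
# DistutilsError instead of the bare ValueError from Requirement.parse().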
class PthDistributions(Environment):
"""A .pth file with Distribution paths in it"""
dirty = False
def __init__(self, filename, sitedirs=()):
self.filename = filename
self.sitedirs = list(map(normalize_path, sitedirs))
self.basedir = normalize_path(os.path.dirname(self.filename))
self._load()
Environment.__init__(self, [], None, None)
for path in yield_lines(self.paths):
list(map(self.add, find_distributions(path, True)))
def _load(self):
self.paths = []
saw_import = False
seen = dict.fromkeys(self.sitedirs)
if os.path.isfile(self.filename):
f = open(self.filename,'rt')
for line in f:
if line.startswith('import'):
saw_import = True
continue
path = line.rstrip()
self.paths.append(path)
if not path.strip() or path.strip().startswith('#'):
continue
# skip non-existent paths, in case somebody deleted a package
# manually, and duplicate paths as well
path = self.paths[-1] = normalize_path(
os.path.join(self.basedir,path)
)
if not os.path.exists(path) or path in seen:
self.paths.pop() # skip it
self.dirty = True # we cleaned up, so we're dirty now :)
continue
seen[path] = 1
f.close()
if self.paths and not saw_import:
self.dirty = True # ensure anything we touch has import wrappers
while self.paths and not self.paths[-1].strip():
self.paths.pop()
def save(self):
"""Write changed .pth file back to disk"""
if not self.dirty:
return
data = '\n'.join(map(self.make_relative,self.paths))
if data:
log.debug("Saving %s", self.filename)
data = (
"import sys; sys.__plen = len(sys.path)\n"
"%s\n"
"import sys; new=sys.path[sys.__plen:];"
" del sys.path[sys.__plen:];"
" p=getattr(sys,'__egginsert',0); sys.path[p:p]=new;"
" sys.__egginsert = p+len(new)\n"
) % data
if os.path.islink(self.filename):
os.unlink(self.filename)
f = open(self.filename,'wt')
f.write(data)
f.close()
elif os.path.exists(self.filename):
log.debug("Deleting empty %s", self.filename)
os.unlink(self.filename)
self.dirty = False
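# Illustrative only (not part of the original source): a non-empty
# easy-install.pth written by save() looks roughly like this (the egg names
# are made-up examples, and the trailing bookkeeping line is abbreviated):
#
#   import sys; sys.__plen = len(sys.path)
#   ./foo-1.0-py2.7.egg
#   ./bar-2.1-py2.7.egg
#   import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; ...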
def add(self, dist):
"""Add `dist` to the distribution map"""
if (dist.location not in self.paths and (
dist.location not in self.sitedirs or
dist.location == os.getcwd() # account for '.' being in PYTHONPATH
)):
self.paths.append(dist.location)
self.dirty = True
Environment.add(self, dist)
def remove(self, dist):
"""Remove `dist` from the distribution map"""
while dist.location in self.paths:
self.paths.remove(dist.location)
self.dirty = True
Environment.remove(self, dist)
def make_relative(self,path):
npath, last = os.path.split(normalize_path(path))
baselen = len(self.basedir)
parts = [last]
sep = os.altsep=='/' and '/' or os.sep
while len(npath)>=baselen:
if npath==self.basedir:
parts.append(os.curdir)
parts.reverse()
return sep.join(parts)
npath, last = os.path.split(npath)
parts.append(last)
else:
return path
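# Illustrative only (not part of the original source); assuming the .pth file
# lives in /usr/lib/python2.7/site-packages:
#
# >>> pth = PthDistributions('/usr/lib/python2.7/site-packages/easy-install.pth')
# >>> pth.make_relative('/usr/lib/python2.7/site-packages/foo-1.0-py2.7.egg')   # doctest: +SKIP
# './foo-1.0-py2.7.egg'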
def get_script_header(script_text, executable=sys_executable, wininst=False):
"""Create a #! line, getting options (if any) from script_text"""
from distutils.command.build_scripts import first_line_re
# first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
if not isinstance(first_line_re.pattern, str):
first_line_re = re.compile(first_line_re.pattern.decode())
first = (script_text+'\n').splitlines()[0]
match = first_line_re.match(first)
options = ''
if match:
options = match.group(1) or ''
if options: options = ' '+options
if wininst:
executable = "python.exe"
else:
executable = nt_quote_arg(executable)
hdr = "#!%(executable)s%(options)s\n" % locals()
if not isascii(hdr):
# Non-ascii path to sys.executable, use -x to prevent warnings
if options:
if options.strip().startswith('-'):
options = ' -x'+options.strip()[1:]
# else: punt, we can't do it, let the warning happen anyway
else:
options = ' -x'
executable = fix_jython_executable(executable, options)
hdr = "#!%(executable)s%(options)s\n" % locals()
return hdr
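# Illustrative only (not part of the original source); the interpreter path in
# the result depends on sys.executable, so the output is only representative:
#
# >>> get_script_header('print("hi")\n')   # doctest: +SKIP
# '#!/usr/bin/python2.7\n'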
def auto_chmod(func, arg, exc):
if func is os.remove and os.name=='nt':
chmod(arg, stat.S_IWRITE)
return func(arg)
et, ev, _ = sys.exc_info()
reraise(et, (ev[0], ev[1] + (" %s %s" % (func,arg))))
def uncache_zipdir(path):
"""Ensure that the importer caches dont have stale info for `path`"""
from zipimport import _zip_directory_cache as zdc
_uncache(path, zdc)
_uncache(path, sys.path_importer_cache)
def _uncache(path, cache):
if path in cache:
del cache[path]
else:
path = normalize_path(path)
for p in cache:
if normalize_path(p)==path:
del cache[p]
return
def is_python(text, filename='<string>'):
"Is this string a valid Python script?"
try:
compile(text, filename, 'exec')
except (SyntaxError, TypeError):
return False
else:
return True
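# Illustrative only (not part of the original source):
#
# >>> is_python('print("hello")\n')
# True
# >>> is_python('rem this is a batch file, not Python')
# False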
def is_sh(executable):
"""Determine if the specified executable is a .sh (contains a #! line)"""
try:
fp = open(executable)
magic = fp.read(2)
fp.close()
except (OSError,IOError): return executable
return magic == '#!'
def nt_quote_arg(arg):
"""Quote a command line argument according to Windows parsing rules"""
result = []
needquote = False
nb = 0
needquote = (" " in arg) or ("\t" in arg)
if needquote:
result.append('"')
for c in arg:
if c == '\\':
nb += 1
elif c == '"':
# double preceding backslashes, then add a \"
result.append('\\' * (nb*2) + '\\"')
nb = 0
else:
if nb:
result.append('\\' * nb)
nb = 0
result.append(c)
if nb:
result.append('\\' * nb)
if needquote:
result.append('\\' * nb) # double the trailing backslashes
result.append('"')
return ''.join(result)
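# Illustrative only (not part of the original source):
#
# >>> nt_quote_arg('C:\\Program Files\\Python27\\python.exe')
# '"C:\\Program Files\\Python27\\python.exe"'
# >>> nt_quote_arg('python.exe')   # nothing to quote
# 'python.exe'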
def is_python_script(script_text, filename):
"""Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.
"""
if filename.endswith('.py') or filename.endswith('.pyw'):
return True # extension says it's Python
if is_python(script_text, filename):
return True # it's syntactically valid Python
if script_text.startswith('#!'):
# It begins with a '#!' line, so check if 'python' is in it somewhere
return 'python' in script_text.splitlines()[0].lower()
return False # Not any Python I can recognize
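# Illustrative only (not part of the original source):
#
# >>> is_python_script('#!/usr/bin/env python\nprint("hi")\n', 'foo')
# True
# >>> is_python_script('@echo off\nrem a Windows batch file\n', 'foo.bat')
# False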
try:
from os import chmod as _chmod
except ImportError:
# Jython compatibility
def _chmod(*args): pass
def chmod(path, mode):
log.debug("changing mode of %s to %o", path, mode)
try:
_chmod(path, mode)
except os.error:
e = sys.exc_info()[1]
log.debug("chmod failed: %s", e)
def fix_jython_executable(executable, options):
if sys.platform.startswith('java') and is_sh(executable):
# The Jython workaround is not needed on Linux systems.
import java
if java.lang.System.getProperty("os.name") == "Linux":
return executable
# Workaround Jython's sys.executable being a .sh (an invalid
# shebang line interpreter)
if options:
# Can't apply the workaround, leave it broken
log.warn(
"WARNING: Unable to adapt shebang line for Jython,"
" the following script is NOT executable\n"
" see http://bugs.jython.org/issue1112 for"
" more information.")
else:
return '/usr/bin/env %s' % executable
return executable
class ScriptWriter(object):
"""
Encapsulates behavior around writing entry point scripts for console and
gui apps.
"""
template = textwrap.dedent("""
# EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
__requires__ = %(spec)r
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point(%(spec)r, %(group)r, %(name)r)()
)
""").lstrip()
@classmethod
def get_script_args(cls, dist, executable=sys_executable, wininst=False):
"""
Yield write_script() argument tuples for a distribution's entrypoints
"""
gen_class = cls.get_writer(wininst)
spec = str(dist.as_requirement())
header = get_script_header("", executable, wininst)
for type_ in 'console', 'gui':
group = type_ + '_scripts'
for name, ep in dist.get_entry_map(group).items():
script_text = gen_class.template % locals()
for res in gen_class._get_script_args(type_, name, header,
script_text):
yield res
@classmethod
def get_writer(cls, force_windows):
if force_windows or sys.platform=='win32':
return WindowsScriptWriter.get_writer()
return cls
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
# Simply write the stub with no extension.
yield (name, header+script_text)
class WindowsScriptWriter(ScriptWriter):
@classmethod
def get_writer(cls):
"""
Get a script writer suitable for Windows
"""
writer_lookup = dict(
executable=WindowsExecutableLauncherWriter,
natural=cls,
)
# for compatibility, use the executable launcher by default
launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
return writer_lookup[launcher]
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
"For Windows, add a .py extension"
ext = dict(console='.pya', gui='.pyw')[type_]
if ext not in os.environ['PATHEXT'].lower().split(';'):
warnings.warn("%s not listed in PATHEXT; scripts will not be "
"recognized as executables." % ext, UserWarning)
old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
old.remove(ext)
header = cls._adjust_header(type_, header)
blockers = [name+x for x in old]
yield name+ext, header+script_text, 't', blockers
@staticmethod
def _adjust_header(type_, orig_header):
"""
Make sure 'pythonw' is used for gui and 'python' is used for
console (regardless of what sys.executable is).
"""
pattern = 'pythonw.exe'
repl = 'python.exe'
if type_ == 'gui':
pattern, repl = repl, pattern
pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
new_header = pattern_ob.sub(string=orig_header, repl=repl)
clean_header = new_header[2:-1].strip('"')
if sys.platform == 'win32' and not os.path.exists(clean_header):
# the adjusted version doesn't exist, so return the original
return orig_header
return new_header
class WindowsExecutableLauncherWriter(WindowsScriptWriter):
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
"""
For Windows, add a .py extension and an .exe launcher
"""
if type_=='gui':
launcher_type = 'gui'
ext = '-script.pyw'
old = ['.pyw']
else:
launcher_type = 'cli'
ext = '-script.py'
old = ['.py','.pyc','.pyo']
hdr = cls._adjust_header(type_, header)
blockers = [name+x for x in old]
yield (name+ext, hdr+script_text, 't', blockers)
yield (
name+'.exe', get_win_launcher(launcher_type),
'b' # write in binary mode
)
if not is_64bit():
# install a manifest for the launcher to prevent Windows
# from detecting it as an installer (which it will for
# launchers like easy_install.exe). Consider only
# adding a manifest for launchers detected as installers.
# See Distribute #143 for details.
m_name = name + '.exe.manifest'
yield (m_name, load_launcher_manifest(name), 't')
# for backward-compatibility
get_script_args = ScriptWriter.get_script_args
def get_win_launcher(type):
"""
Load the Windows launcher (executable) suitable for launching a script.
`type` should be either 'cli' or 'gui'
Returns the executable as a byte string.
"""
launcher_fn = '%s.exe' % type
if platform.machine().lower()=='arm':
launcher_fn = launcher_fn.replace(".", "-arm.")
if is_64bit():
launcher_fn = launcher_fn.replace(".", "-64.")
else:
launcher_fn = launcher_fn.replace(".", "-32.")
return resource_string('setuptools', launcher_fn)
def load_launcher_manifest(name):
manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
if sys.version_info[0] < 3:
return manifest % vars()
else:
return manifest.decode('utf-8') % vars()
def rmtree(path, ignore_errors=False, onerror=auto_chmod):
"""Recursively delete a directory tree.
This code is taken from the Python 2.4 version of 'shutil', because
the 2.3 version doesn't really work right.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
names = []
try:
names = os.listdir(path)
except os.error:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except os.error:
mode = 0
if stat.S_ISDIR(mode):
rmtree(fullname, ignore_errors, onerror)
else:
try:
os.remove(fullname)
except os.error:
onerror(os.remove, fullname, sys.exc_info())
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info())
def current_umask():
tmp = os.umask(0x12) # 022
os.umask(tmp)
return tmp
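# Illustrative only (not part of the original source): os.umask() can only be
# read by setting it, so the umask is briefly set to 022 and then restored.
#
# >>> oct(current_umask())   # doctest: +SKIP
# '022'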
def bootstrap():
# This function is called when setuptools*.egg is run using /bin/sh
import setuptools
argv0 = os.path.dirname(setuptools.__path__[0])
sys.argv[0] = argv0
sys.argv.append(argv0)
main()
def main(argv=None, **kw):
from setuptools import setup
from setuptools.dist import Distribution
import distutils.core
USAGE = """\
usage: %(script)s [options] requirement_or_url ...
or: %(script)s --help
"""
def gen_usage(script_name):
return USAGE % dict(
script=os.path.basename(script_name),
)
def with_ei_usage(f):
old_gen_usage = distutils.core.gen_usage
try:
distutils.core.gen_usage = gen_usage
return f()
finally:
distutils.core.gen_usage = old_gen_usage
class DistributionWithoutHelpCommands(Distribution):
common_usage = ""
def _show_help(self,*args,**kw):
with_ei_usage(lambda: Distribution._show_help(self,*args,**kw))
if argv is None:
argv = sys.argv[1:]
with_ei_usage(lambda:
setup(
script_args = ['-q','easy_install', '-v']+argv,
script_name = sys.argv[0] or 'easy_install',
distclass=DistributionWithoutHelpCommands, **kw
)
)
| lgpl-3.0 | -5,834,135,800,087,649,000 | 36.96658 | 96 | 0.564438 | false |
jquacinella/IS602_Project | web/gluon/rocket.py | 19 | 58903 | # -*- coding: utf-8 -*-
# This file is part of the Rocket Web Server
# Copyright (c) 2011 Timothy Farrell
# Modified by Massimo Di Pierro
# Import System Modules
import sys
import errno
import socket
import logging
import platform
# Define Constants
VERSION = '1.2.6'
SERVER_NAME = socket.gethostname()
SERVER_SOFTWARE = 'Rocket %s' % VERSION
HTTP_SERVER_SOFTWARE = '%s Python/%s' % (
SERVER_SOFTWARE, sys.version.split(' ')[0])
BUF_SIZE = 16384
SOCKET_TIMEOUT = 10 # in secs
THREAD_STOP_CHECK_INTERVAL = 1 # in secs; how often threads should check for a server stop message
IS_JYTHON = platform.system() == 'Java' # Handle special cases for Jython
IGNORE_ERRORS_ON_CLOSE = set([errno.ECONNABORTED, errno.ECONNRESET])
DEFAULT_LISTEN_QUEUE_SIZE = 5
DEFAULT_MIN_THREADS = 10
DEFAULT_MAX_THREADS = 0
DEFAULTS = dict(LISTEN_QUEUE_SIZE=DEFAULT_LISTEN_QUEUE_SIZE,
MIN_THREADS=DEFAULT_MIN_THREADS,
MAX_THREADS=DEFAULT_MAX_THREADS)
PY3K = sys.version_info[0] > 2
class NullHandler(logging.Handler):
"A Logging handler to prevent library errors."
def emit(self, record):
pass
if PY3K:
def b(val):
""" Convert string/unicode/bytes literals into bytes. This allows for
the same code to run on Python 2.x and 3.x. """
if isinstance(val, str):
return val.encode()
else:
return val
def u(val, encoding="us-ascii"):
""" Convert bytes into string/unicode. This allows for the
same code to run on Python 2.x and 3.x. """
if isinstance(val, bytes):
return val.decode(encoding)
else:
return val
else:
def b(val):
""" Convert string/unicode/bytes literals into bytes. This allows for
the same code to run on Python 2.x and 3.x. """
if isinstance(val, unicode):
return val.encode()
else:
return val
def u(val, encoding="us-ascii"):
""" Convert bytes into string/unicode. This allows for the
same code to run on Python 2.x and 3.x. """
if isinstance(val, str):
return val.decode(encoding)
else:
return val
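# Illustrative only (not part of the original source): the same literal works
# on both Python lines; the repr differs slightly between 2.x and 3.x.
#
# >>> u(b('HTTP/1.1 200 OK'))   # doctest: +SKIP
# 'HTTP/1.1 200 OK'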
# Import Package Modules
# package imports removed in monolithic build
__all__ = ['VERSION', 'SERVER_SOFTWARE', 'HTTP_SERVER_SOFTWARE', 'BUF_SIZE',
'IS_JYTHON', 'IGNORE_ERRORS_ON_CLOSE', 'DEFAULTS', 'PY3K', 'b', 'u',
'Rocket', 'CherryPyWSGIServer', 'SERVER_NAME', 'NullHandler']
# Monolithic build...end of module: rocket/__init__.py
# Monolithic build...start of module: rocket/connection.py
# Import System Modules
import sys
import time
import socket
try:
import ssl
has_ssl = True
except ImportError:
has_ssl = False
# Import Package Modules
# package imports removed in monolithic build
# TODO - This part is still very experimental.
#from .filelike import FileLikeSocket
class Connection(object):
__slots__ = [
'setblocking',
'sendall',
'shutdown',
'makefile',
'fileno',
'client_addr',
'client_port',
'server_port',
'socket',
'start_time',
'ssl',
'secure',
'recv',
'send',
'read',
'write'
]
def __init__(self, sock_tuple, port, secure=False):
self.client_addr, self.client_port = sock_tuple[1][:2]
self.server_port = port
self.socket = sock_tuple[0]
self.start_time = time.time()
self.ssl = has_ssl and isinstance(self.socket, ssl.SSLSocket)
self.secure = secure
if IS_JYTHON:
# In Jython we must set TCP_NODELAY here since it does not
# inherit from the listening socket.
# See: http://bugs.jython.org/issue1309
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.socket.settimeout(SOCKET_TIMEOUT)
self.shutdown = self.socket.shutdown
self.fileno = self.socket.fileno
self.setblocking = self.socket.setblocking
self.recv = self.socket.recv
self.send = self.socket.send
self.makefile = self.socket.makefile
if sys.platform == 'darwin':
self.sendall = self._sendall_darwin
else:
self.sendall = self.socket.sendall
def _sendall_darwin(self, buf):
pending = len(buf)
offset = 0
while pending:
try:
sent = self.socket.send(buf[offset:])
pending -= sent
offset += sent
except socket.error:
import errno
info = sys.exc_info()
if info[1].args[0] != errno.EAGAIN:
raise
return offset
# FIXME - this is not ready for prime-time yet.
# def makefile(self, buf_size=BUF_SIZE):
# return FileLikeSocket(self, buf_size)
def close(self):
if hasattr(self.socket, '_sock'):
try:
self.socket._sock.close()
except socket.error:
info = sys.exc_info()
if info[1].args[0] != socket.EBADF:
raise info[1]
else:
pass
self.socket.close()
# Monolithic build...end of module: rocket/connection.py
# Monolithic build...start of module: rocket/filelike.py
# Import System Modules
import socket
try:
from io import StringIO
except ImportError:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Import Package Modules
# package imports removed in monolithic build
class FileLikeSocket(object):
def __init__(self, conn, buf_size=BUF_SIZE):
self.conn = conn
self.buf_size = buf_size
self.buffer = StringIO()
self.content_length = None
if self.conn.socket.gettimeout() == 0.0:
self.read = self.non_blocking_read
else:
self.read = self.blocking_read
def __iter__(self):
return self
def recv(self, size):
while True:
try:
return self.conn.recv(size)
except socket.error:
exc = sys.exc_info()
e = exc[1]
# FIXME - Don't raise socket_errors_nonblocking or socket_error_eintr
if (e.args[0] not in set()):
raise
def next(self):
data = self.readline()
if data == '':
raise StopIteration
return data
def non_blocking_read(self, size=None):
# Shamelessly adapted from Cherrypy!
bufr = self.buffer
bufr.seek(0, 2)
if size is None:
while True:
data = self.recv(self.buf_size)
if not data:
break
bufr.write(data)
self.buffer = StringIO()
return bufr.getvalue()
else:
buf_len = self.buffer.tell()
if buf_len >= size:
bufr.seek(0)
data = bufr.read(size)
self.buffer = StringIO(bufr.read())
return data
self.buffer = StringIO()
while True:
remaining = size - buf_len
data = self.recv(remaining)
if not data:
break
n = len(data)
if n == size and not buf_len:
return data
if n == remaining:
bufr.write(data)
del data
break
bufr.write(data)
buf_len += n
del data
return bufr.getvalue()
def blocking_read(self, length=None):
if length is None:
if self.content_length is not None:
length = self.content_length
else:
length = 1
try:
data = self.conn.recv(length)
except:
data = b('')
return data
def readline(self):
data = b("")
char = self.read(1)
while char != b('\n') and char is not b(''):
line = repr(char)
data += char
char = self.read(1)
data += char
return data
def readlines(self, hint="ignored"):
return list(self)
def close(self):
self.conn = None
self.content_length = None
# Monolithic build...end of module: rocket/filelike.py
# Monolithic build...start of module: rocket/futures.py
# Import System Modules
import time
try:
from concurrent.futures import Future, ThreadPoolExecutor
from concurrent.futures.thread import _WorkItem
has_futures = True
except ImportError:
has_futures = False
class Future:
pass
class ThreadPoolExecutor:
pass
class _WorkItem:
pass
class WSGIFuture(Future):
def __init__(self, f_dict, *args, **kwargs):
Future.__init__(self, *args, **kwargs)
self.timeout = None
self._mem_dict = f_dict
self._lifespan = 30
self._name = None
self._start_time = time.time()
def set_running_or_notify_cancel(self):
if time.time() - self._start_time >= self._lifespan:
self.cancel()
else:
return super(WSGIFuture, self).set_running_or_notify_cancel()
def remember(self, name, lifespan=None):
self._lifespan = lifespan or self._lifespan
if name in self._mem_dict:
raise NameError('Cannot remember future by name "%s". ' % name +
'A future already exists with that name.')
self._name = name
self._mem_dict[name] = self
return self
def forget(self):
if self._name in self._mem_dict and self._mem_dict[self._name] is self:
del self._mem_dict[self._name]
self._name = None
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
def run(self):
if not self.future.set_running_or_notify_cancel():
return
try:
result = self.fn(*self.args, **self.kwargs)
except BaseException:
e = sys.exc_info()[1]
self.future.set_exception(e)
else:
self.future.set_result(result)
class WSGIExecutor(ThreadPoolExecutor):
multithread = True
multiprocess = False
def __init__(self, *args, **kwargs):
ThreadPoolExecutor.__init__(self, *args, **kwargs)
self.futures = dict()
def submit(self, fn, *args, **kwargs):
if self._shutdown_lock.acquire():
if self._shutdown:
self._shutdown_lock.release()
raise RuntimeError(
'Cannot schedule new futures after shutdown')
f = WSGIFuture(self.futures)
w = _WorkItem(f, fn, args, kwargs)
self._work_queue.put(w)
self._adjust_thread_count()
self._shutdown_lock.release()
return f
else:
return False
class FuturesMiddleware(object):
"Futures middleware that adds a Futures Executor to the environment"
def __init__(self, app, threads=5):
self.app = app
self.executor = WSGIExecutor(threads)
def __call__(self, environ, start_response):
environ["wsgiorg.executor"] = self.executor
environ["wsgiorg.futures"] = self.executor.futures
return self.app(environ, start_response)
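# Illustrative only (not part of the original source): wrapping a WSGI app so
# request handlers can submit background work; 'my_wsgi_app' and
# 'some_callable' are placeholder names.
#
#   app = FuturesMiddleware(my_wsgi_app, threads=5)
#   # ...inside a request handler:
#   future = environ['wsgiorg.executor'].submit(some_callable)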
# Monolithic build...end of module: rocket/futures.py
# Monolithic build...start of module: rocket/listener.py
# Import System Modules
import os
import socket
import logging
import traceback
from threading import Thread
try:
import ssl
from ssl import SSLError
has_ssl = True
except ImportError:
has_ssl = False
class SSLError(socket.error):
pass
# Import Package Modules
# package imports removed in monolithic build
class Listener(Thread):
"""The Listener class is a class responsible for accepting connections
and queuing them to be processed by a worker thread."""
def __init__(self, interface, queue_size, active_queue, *args, **kwargs):
Thread.__init__(self, *args, **kwargs)
# Instance variables
self.active_queue = active_queue
self.interface = interface
self.addr = interface[0]
self.port = interface[1]
self.secure = len(interface) >= 4
self.clientcert_req = (len(interface) == 5 and interface[4])
self.thread = None
self.ready = False
# Error Log
self.err_log = logging.getLogger('Rocket.Errors.Port%i' % self.port)
self.err_log.addHandler(NullHandler())
# Build the socket
if ':' in self.addr:
listener = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if not listener:
self.err_log.error("Failed to get socket.")
return
if self.secure:
if not has_ssl:
self.err_log.error("ssl module required to serve HTTPS.")
return
elif not os.path.exists(interface[2]):
data = (interface[2], interface[0], interface[1])
self.err_log.error("Cannot find key file "
"'%s'. Cannot bind to %s:%s" % data)
return
elif not os.path.exists(interface[3]):
data = (interface[3], interface[0], interface[1])
self.err_log.error("Cannot find certificate file "
"'%s'. Cannot bind to %s:%s" % data)
return
if self.clientcert_req and not os.path.exists(interface[4]):
data = (interface[4], interface[0], interface[1])
self.err_log.error("Cannot find root ca certificate file "
"'%s'. Cannot bind to %s:%s" % data)
return
# Set socket options
try:
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except:
msg = "Cannot share socket. Using %s:%i exclusively."
self.err_log.warning(msg % (self.addr, self.port))
try:
if not IS_JYTHON:
listener.setsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY,
1)
except:
msg = "Cannot set TCP_NODELAY, things might run a little slower"
self.err_log.warning(msg)
try:
listener.bind((self.addr, self.port))
except:
msg = "Socket %s:%i in use by other process and it won't share."
self.err_log.error(msg % (self.addr, self.port))
else:
# We want socket operations to timeout periodically so we can
# check if the server is shutting down
listener.settimeout(THREAD_STOP_CHECK_INTERVAL)
# Listen for new connections allowing queue_size number of
# connections to wait before rejecting a connection.
listener.listen(queue_size)
self.listener = listener
self.ready = True
def wrap_socket(self, sock):
try:
if self.clientcert_req:
ca_certs = self.interface[4]
cert_reqs = ssl.CERT_OPTIONAL
sock = ssl.wrap_socket(sock,
keyfile=self.interface[2],
certfile=self.interface[3],
server_side=True,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
ssl_version=ssl.PROTOCOL_SSLv23)
else:
sock = ssl.wrap_socket(sock,
keyfile=self.interface[2],
certfile=self.interface[3],
server_side=True,
ssl_version=ssl.PROTOCOL_SSLv23)
except SSLError:
# Generally this happens when an HTTP request is received on a
# secure socket. We don't do anything because it will be detected
# by Worker and dealt with appropriately.
pass
return sock
def start(self):
if not self.ready:
self.err_log.warning('Listener started when not ready.')
return
if self.thread is not None and self.thread.isAlive():
self.err_log.warning('Listener already running.')
return
self.thread = Thread(target=self.listen, name="Port" + str(self.port))
self.thread.start()
def isAlive(self):
if self.thread is None:
return False
return self.thread.isAlive()
def join(self):
if self.thread is None:
return
self.ready = False
self.thread.join()
del self.thread
self.thread = None
self.ready = True
def listen(self):
if __debug__:
self.err_log.debug('Entering main loop.')
while True:
try:
sock, addr = self.listener.accept()
if self.secure:
sock = self.wrap_socket(sock)
self.active_queue.put(((sock, addr),
self.interface[1],
self.secure))
except socket.timeout:
# socket.timeout will be raised every
# THREAD_STOP_CHECK_INTERVAL seconds. When that happens,
# we check if it's time to die.
if not self.ready:
if __debug__:
self.err_log.debug('Listener exiting.')
return
else:
continue
except:
self.err_log.error(traceback.format_exc())
# Monolithic build...end of module: rocket/listener.py
# Monolithic build...start of module: rocket/main.py
# Import System Modules
import sys
import time
import socket
import logging
import traceback
from threading import Lock
try:
from queue import Queue
except ImportError:
from Queue import Queue
# Import Package Modules
# package imports removed in monolithic build
# Setup Logging
log = logging.getLogger('Rocket')
log.addHandler(NullHandler())
class Rocket(object):
"""The Rocket class is responsible for handling threads and accepting and
dispatching connections."""
def __init__(self,
interfaces=('127.0.0.1', 8000),
method='wsgi',
app_info=None,
min_threads=None,
max_threads=None,
queue_size=None,
timeout=600,
handle_signals=True):
self.handle_signals = handle_signals
self.startstop_lock = Lock()
self.timeout = timeout
if not isinstance(interfaces, list):
self.interfaces = [interfaces]
else:
self.interfaces = interfaces
if min_threads is None:
min_threads = DEFAULTS['MIN_THREADS']
if max_threads is None:
max_threads = DEFAULTS['MAX_THREADS']
if not queue_size:
if hasattr(socket, 'SOMAXCONN'):
queue_size = socket.SOMAXCONN
else:
queue_size = DEFAULTS['LISTEN_QUEUE_SIZE']
if max_threads and queue_size > max_threads:
queue_size = max_threads
if isinstance(app_info, dict):
app_info['server_software'] = SERVER_SOFTWARE
self.monitor_queue = Queue()
self.active_queue = Queue()
self._threadpool = ThreadPool(get_method(method),
app_info=app_info,
active_queue=self.active_queue,
monitor_queue=self.monitor_queue,
min_threads=min_threads,
max_threads=max_threads)
# Build our socket listeners
self.listeners = [Listener(
i, queue_size, self.active_queue) for i in self.interfaces]
for ndx in range(len(self.listeners) - 1, 0, -1):
if not self.listeners[ndx].ready:
del self.listeners[ndx]
if not self.listeners:
log.critical("No interfaces to listen on...closing.")
sys.exit(1)
def _sigterm(self, signum, frame):
log.info('Received SIGTERM')
self.stop()
def _sighup(self, signum, frame):
log.info('Received SIGHUP')
self.restart()
def start(self, background=False):
log.info('Starting %s' % SERVER_SOFTWARE)
self.startstop_lock.acquire()
try:
# Set up our shutdown signals
if self.handle_signals:
try:
import signal
signal.signal(signal.SIGTERM, self._sigterm)
signal.signal(signal.SIGUSR1, self._sighup)
except:
log.debug('This platform does not support signals.')
# Start our worker threads
self._threadpool.start()
# Start our monitor thread
self._monitor = Monitor(self.monitor_queue,
self.active_queue,
self.timeout,
self._threadpool)
self._monitor.setDaemon(True)
self._monitor.start()
# I know that EXPR and A or B is bad but I'm keeping it for Py2.4
# compatibility.
str_extract = lambda l: (l.addr, l.port, l.secure and '*' or '')
msg = 'Listening on sockets: '
msg += ', '.join(
['%s:%i%s' % str_extract(l) for l in self.listeners])
log.info(msg)
for l in self.listeners:
l.start()
finally:
self.startstop_lock.release()
if background:
return
while self._monitor.isAlive():
try:
time.sleep(THREAD_STOP_CHECK_INTERVAL)
except KeyboardInterrupt:
# Capture a keyboard interrupt when running from a console
break
except:
if self._monitor.isAlive():
log.error(traceback.format_exc())
continue
return self.stop()
def stop(self, stoplogging=False):
log.info('Stopping %s' % SERVER_SOFTWARE)
self.startstop_lock.acquire()
try:
# Stop listeners
for l in self.listeners:
l.ready = False
# Encourage a context switch
time.sleep(0.01)
for l in self.listeners:
if l.isAlive():
l.join()
# Stop Monitor
self._monitor.stop()
if self._monitor.isAlive():
self._monitor.join()
# Stop Worker threads
self._threadpool.stop()
if stoplogging:
logging.shutdown()
msg = "Calling logging.shutdown() is now the responsibility of \
the application developer. Please update your \
applications to no longer call rocket.stop(True)"
try:
import warnings
raise warnings.DeprecationWarning(msg)
except ImportError:
raise RuntimeError(msg)
finally:
self.startstop_lock.release()
def restart(self):
self.stop()
self.start()
def CherryPyWSGIServer(bind_addr,
wsgi_app,
numthreads=10,
server_name=None,
max=-1,
request_queue_size=5,
timeout=10,
shutdown_timeout=5):
""" A Cherrypy wsgiserver-compatible wrapper. """
max_threads = max
if max_threads < 0:
max_threads = 0
return Rocket(bind_addr, 'wsgi', {'wsgi_app': wsgi_app},
min_threads=numthreads,
max_threads=max_threads,
queue_size=request_queue_size,
timeout=timeout)
# Monolithic build...end of module: rocket/main.py
# Monolithic build...start of module: rocket/monitor.py
# Import System Modules
import time
import logging
import select
from threading import Thread
# Import Package Modules
# package imports removed in monolithic build
class Monitor(Thread):
# Monitor worker class.
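# The monitor receives idle keep-alive connections handed over by the
# worker threads (via monitor_queue), select()s on them until they become
# readable again -- then puts them back on active_queue for a worker --
# or until they exceed `timeout`, in which case they are closed.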
def __init__(self,
monitor_queue,
active_queue,
timeout,
threadpool,
*args,
**kwargs):
Thread.__init__(self, *args, **kwargs)
self._threadpool = threadpool
# Instance Variables
self.monitor_queue = monitor_queue
self.active_queue = active_queue
self.timeout = timeout
self.log = logging.getLogger('Rocket.Monitor')
self.log.addHandler(NullHandler())
self.connections = set()
self.active = False
def run(self):
self.active = True
conn_list = list()
list_changed = False
# We need to make sure the queue is empty before we start
while not self.monitor_queue.empty():
self.monitor_queue.get()
if __debug__:
self.log.debug('Entering monitor loop.')
# Enter thread main loop
while self.active:
# Move the queued connections to the selection pool
while not self.monitor_queue.empty():
if __debug__:
self.log.debug('In "receive timed-out connections" loop.')
c = self.monitor_queue.get()
if c is None:
# A non-client is a signal to die
if __debug__:
self.log.debug('Received a death threat.')
self.stop()
break
self.log.debug('Received a timed out connection.')
if __debug__:
assert(c not in self.connections)
if IS_JYTHON:
# Jython requires a socket to be in Non-blocking mode in
# order to select on it.
c.setblocking(False)
if __debug__:
self.log.debug('Adding connection to monitor list.')
self.connections.add(c)
list_changed = True
# Wait on those connections
if list_changed:
conn_list = list(self.connections)
list_changed = False
try:
if len(conn_list):
readable = select.select(conn_list,
[],
[],
THREAD_STOP_CHECK_INTERVAL)[0]
else:
time.sleep(THREAD_STOP_CHECK_INTERVAL)
readable = []
if not self.active:
break
# If we have any readable connections, put them back
for r in readable:
if __debug__:
self.log.debug('Restoring readable connection')
if IS_JYTHON:
# Jython requires a socket to be in Non-blocking mode in
# order to select on it, but the rest of the code requires
# that it be in blocking mode.
r.setblocking(True)
r.start_time = time.time()
self.active_queue.put(r)
self.connections.remove(r)
list_changed = True
except:
if self.active:
raise
else:
break
# If we have any stale connections, kill them off.
if self.timeout:
now = time.time()
stale = set()
for c in self.connections:
if (now - c.start_time) >= self.timeout:
stale.add(c)
for c in stale:
if __debug__:
# "EXPR and A or B" kept for Py2.4 compatibility
data = (
c.client_addr, c.server_port, c.ssl and '*' or '')
self.log.debug(
'Flushing stale connection: %s:%i%s' % data)
self.connections.remove(c)
list_changed = True
try:
c.close()
finally:
del c
# Dynamically resize the threadpool to adapt to our changing needs.
self._threadpool.dynamic_resize()
def stop(self):
self.active = False
if __debug__:
self.log.debug('Flushing waiting connections')
while self.connections:
c = self.connections.pop()
try:
c.close()
finally:
del c
if __debug__:
self.log.debug('Flushing queued connections')
while not self.monitor_queue.empty():
c = self.monitor_queue.get()
if c is None:
continue
try:
c.close()
finally:
del c
# Place a None sentry value to cause the monitor to die.
self.monitor_queue.put(None)
# Monolithic build...end of module: rocket/monitor.py
# Monolithic build...start of module: rocket/threadpool.py
# Import System Modules
import logging
# Import Package Modules
# package imports removed in monolithic build
# Setup Logging
log = logging.getLogger('Rocket.Errors.ThreadPool')
log.addHandler(NullHandler())
class ThreadPool:
"""The ThreadPool class is a container class for all the worker threads. It
manages the number of actively running threads."""
def __init__(self,
method,
app_info,
active_queue,
monitor_queue,
min_threads=DEFAULTS['MIN_THREADS'],
max_threads=DEFAULTS['MAX_THREADS'],
):
if __debug__:
log.debug("Initializing ThreadPool.")
self.check_for_dead_threads = 0
self.active_queue = active_queue
self.worker_class = method
self.min_threads = min_threads
self.max_threads = max_threads
self.monitor_queue = monitor_queue
self.stop_server = False
self.alive = False
# TODO - Optimize this based on some real-world usage data
self.grow_threshold = int(max_threads / 10) + 2
if not isinstance(app_info, dict):
app_info = dict()
if has_futures and app_info.get('futures'):
app_info['executor'] = WSGIExecutor(max([DEFAULTS['MIN_THREADS'],
2]))
app_info.update(max_threads=max_threads,
min_threads=min_threads)
self.min_threads = min_threads
self.app_info = app_info
self.threads = set()
def start(self):
self.stop_server = False
if __debug__:
log.debug("Starting threads.")
self.grow(self.min_threads)
self.alive = True
def stop(self):
self.alive = False
if __debug__:
log.debug("Stopping threads.")
self.stop_server = True
# Prompt the threads to die
self.shrink(len(self.threads))
# Stop futures initially
if has_futures and self.app_info.get('futures'):
if __debug__:
log.debug("Future executor is present. Python will not "
"exit until all jobs have finished.")
self.app_info['executor'].shutdown(wait=False)
# Give them the gun
#active_threads = [t for t in self.threads if t.isAlive()]
#while active_threads:
# t = active_threads.pop()
# t.kill()
# Wait until they pull the trigger
for t in self.threads:
if t.isAlive():
t.join()
# Clean up the mess
self.bring_out_your_dead()
def bring_out_your_dead(self):
# Remove dead threads from the pool
dead_threads = [t for t in self.threads if not t.isAlive()]
for t in dead_threads:
if __debug__:
log.debug("Removing dead thread: %s." % t.getName())
try:
# Py2.4 complains here so we put it in a try block
self.threads.remove(t)
except:
pass
self.check_for_dead_threads -= len(dead_threads)
def grow(self, amount=None):
if self.stop_server:
return
if not amount:
amount = self.max_threads
if self.alive:
amount = min([amount, self.max_threads - len(self.threads)])
if __debug__:
log.debug("Growing by %i." % amount)
for x in range(amount):
worker = self.worker_class(self.app_info,
self.active_queue,
self.monitor_queue)
worker.setDaemon(True)
self.threads.add(worker)
worker.start()
def shrink(self, amount=1):
if __debug__:
log.debug("Shrinking by %i." % amount)
self.check_for_dead_threads += amount
for x in range(amount):
self.active_queue.put(None)
def dynamic_resize(self):
if (self.max_threads > self.min_threads or self.max_threads == 0):
if self.check_for_dead_threads > 0:
self.bring_out_your_dead()
queueSize = self.active_queue.qsize()
threadCount = len(self.threads)
if __debug__:
log.debug("Examining ThreadPool. %i threads and %i Q'd conxions"
% (threadCount, queueSize))
if queueSize == 0 and threadCount > self.min_threads:
self.shrink()
elif queueSize > self.grow_threshold:
self.grow(queueSize)
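# Illustrative arithmetic (not from the original source): with, say,
# max_threads=10 the constructor above sets grow_threshold = 10 // 10 + 2 = 3,
# so an empty active_queue with more than min_threads workers shrinks the
# pool by one, while more than 3 queued connections grows it by the queue
# size (grow() itself caps the increase at max_threads - current threads).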
# Monolithic build...end of module: rocket/threadpool.py
# Monolithic build...start of module: rocket/worker.py
# Import System Modules
import re
import sys
import socket
import logging
import traceback
from wsgiref.headers import Headers
from threading import Thread
from datetime import datetime
try:
from urllib import unquote
except ImportError:
from urllib.parse import unquote
try:
from io import StringIO
except ImportError:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
from ssl import SSLError
except ImportError:
class SSLError(socket.error):
pass
# Import Package Modules
# package imports removed in monolithic build
# Define Constants
re_SLASH = re.compile('%2F', re.IGNORECASE)
re_REQUEST_LINE = re.compile(r"""^
(?P<method>OPTIONS|GET|HEAD|POST|PUT|DELETE|TRACE|CONNECT) # Request Method
\ # (single space)
(
(?P<scheme>[^:/]+) # Scheme
(://) #
(?P<host>[^/]+) # Host
)? #
(?P<path>(\*|/[^ \?]*)) # Path
(\? (?P<query_string>[^ ]*))? # Query String
\ # (single space)
(?P<protocol>HTTPS?/1\.[01]) # Protocol
$
""", re.X)
LOG_LINE = '%(client_ip)s - "%(request_line)s" - %(status)s %(size)s'
RESPONSE = '''\
%s %s
Content-Length: %i
Content-Type: %s
%s
'''
if IS_JYTHON:
HTTP_METHODS = set(['OPTIONS', 'GET', 'HEAD', 'POST', 'PUT',
'DELETE', 'TRACE', 'CONNECT'])
class Worker(Thread):
"""The Worker class is a base class responsible for receiving connections
and (a subclass) will run an application to process the the connection """
def __init__(self,
app_info,
active_queue,
monitor_queue,
*args,
**kwargs):
Thread.__init__(self, *args, **kwargs)
# Instance Variables
self.app_info = app_info
self.active_queue = active_queue
self.monitor_queue = monitor_queue
self.size = 0
self.status = "200 OK"
self.closeConnection = True
self.request_line = ""
self.protocol = 'HTTP/1.1'
# Request Log
self.req_log = logging.getLogger('Rocket.Requests')
self.req_log.addHandler(NullHandler())
# Error Log
self.err_log = logging.getLogger('Rocket.Errors.' + self.getName())
self.err_log.addHandler(NullHandler())
def _handleError(self, typ, val, tb):
if typ == SSLError:
if 'timed out' in str(val.args[0]):
typ = SocketTimeout
if typ == SocketTimeout:
if __debug__:
self.err_log.debug('Socket timed out')
self.monitor_queue.put(self.conn)
return True
if typ == SocketClosed:
self.closeConnection = True
if __debug__:
self.err_log.debug('Client closed socket')
return False
if typ == BadRequest:
self.closeConnection = True
if __debug__:
self.err_log.debug('Client sent a bad request')
return True
if typ == socket.error:
self.closeConnection = True
if val.args[0] in IGNORE_ERRORS_ON_CLOSE:
if __debug__:
self.err_log.debug('Ignorable socket Error received...'
'closing connection.')
return False
else:
self.status = "999 Utter Server Failure"
tb_fmt = traceback.format_exception(typ, val, tb)
self.err_log.error('Unhandled Error when serving '
'connection:\n' + '\n'.join(tb_fmt))
return False
self.closeConnection = True
tb_fmt = traceback.format_exception(typ, val, tb)
self.err_log.error('\n'.join(tb_fmt))
self.send_response('500 Server Error')
return False
def run(self):
if __debug__:
self.err_log.debug('Entering main loop.')
# Enter thread main loop
while True:
conn = self.active_queue.get()
if not conn:
# A non-client is a signal to die
if __debug__:
self.err_log.debug('Received a death threat.')
return conn
if isinstance(conn, tuple):
conn = Connection(*conn)
self.conn = conn
if conn.ssl != conn.secure:
self.err_log.info('Received HTTP connection on HTTPS port.')
self.send_response('400 Bad Request')
self.closeConnection = True
conn.close()
continue
else:
if __debug__:
self.err_log.debug('Received a connection.')
self.closeConnection = False
# Enter connection serve loop
while True:
if __debug__:
self.err_log.debug('Serving a request')
try:
self.run_app(conn)
except:
exc = sys.exc_info()
handled = self._handleError(*exc)
if handled:
break
finally:
if self.request_line:
log_info = dict(client_ip=conn.client_addr,
time=datetime.now().strftime('%c'),
status=self.status.split(' ')[0],
size=self.size,
request_line=self.request_line)
self.req_log.info(LOG_LINE % log_info)
if self.closeConnection:
try:
conn.close()
except:
self.err_log.error(str(traceback.format_exc()))
break
def run_app(self, conn):
# Must be overridden with a method that reads the request from the socket
# and sends a response.
self.closeConnection = True
raise NotImplementedError('Overload this method!')
def send_response(self, status):
stat_msg = status.split(' ', 1)[1]
msg = RESPONSE % (self.protocol,
status,
len(stat_msg),
'text/plain',
stat_msg)
try:
self.conn.sendall(b(msg))
except socket.timeout:
self.closeConnection = True
msg = 'Tried to send "%s" to client but received timeout error'
self.err_log.error(msg % status)
except socket.error:
self.closeConnection = True
msg = 'Tried to send "%s" to client but received socket error'
self.err_log.error(msg % status)
def read_request_line(self, sock_file):
self.request_line = ''
try:
# Grab the request line
d = sock_file.readline()
if PY3K:
d = d.decode('ISO-8859-1')
if d == '\r\n':
# Allow an extra NEWLINE at the beginning per HTTP 1.1 spec
if __debug__:
self.err_log.debug('Client sent newline')
d = sock_file.readline()
if PY3K:
d = d.decode('ISO-8859-1')
except socket.timeout:
raise SocketTimeout('Socket timed out before request.')
except TypeError:
raise SocketClosed(
'SSL bug caused closure of socket. See '
'"https://groups.google.com/d/topic/web2py/P_Gw0JxWzCs".')
d = d.strip()
if not d:
if __debug__:
self.err_log.debug(
'Client did not send a recognizable request.')
raise SocketClosed('Client closed socket.')
self.request_line = d
# NOTE: I've replaced the traditional method of procedurally breaking
# apart the request line with a (rather unsightly) regular expression.
# However, Java's regexp support sucks so bad that it actually takes
# longer in Jython to process the regexp than procedurally. So I've
# left the old code here for Jython's sake...for now.
if IS_JYTHON:
return self._read_request_line_jython(d)
match = re_REQUEST_LINE.match(d)
if not match:
self.send_response('400 Bad Request')
raise BadRequest
req = match.groupdict()
for k, v in req.items():
if not v:
req[k] = ""
if k == 'path':
req['path'] = r'%2F'.join(
[unquote(x) for x in re_SLASH.split(v)])
self.protocol = req['protocol']
return req
def _read_request_line_jython(self, d):
d = d.strip()
try:
method, uri, proto = d.split(' ')
if not proto.startswith('HTTP') or \
proto[-3:] not in ('1.0', '1.1') or \
method not in HTTP_METHODS:
self.send_response('400 Bad Request')
raise BadRequest
except ValueError:
self.send_response('400 Bad Request')
raise BadRequest
req = dict(method=method, protocol=proto)
scheme = ''
host = ''
if uri == '*' or uri.startswith('/'):
path = uri
elif '://' in uri:
scheme, rest = uri.split('://')
host, path = rest.split('/', 1)
path = '/' + path
else:
self.send_response('400 Bad Request')
raise BadRequest
query_string = ''
if '?' in path:
path, query_string = path.split('?', 1)
path = r'%2F'.join([unquote(x) for x in re_SLASH.split(path)])
req.update(path=path,
query_string=query_string,
scheme=scheme.lower(),
host=host)
return req
def read_headers(self, sock_file):
try:
headers = dict()
lname = None
lval = None
while True:
l = sock_file.readline()
if PY3K:
try:
l = str(l, 'ISO-8859-1')
except UnicodeDecodeError:
self.err_log.warning(
'Client sent invalid header: ' + repr(l))
if l.strip().replace('\0', '') == '':
break
if l[0] in ' \t' and lname:
# Some headers take more than one line
lval += ' ' + l.strip()
else:
# HTTP header values are latin-1 encoded
l = l.split(':', 1)
# HTTP header names are us-ascii encoded
lname = l[0].strip().upper().replace('-', '_')
lval = l[-1].strip()
headers[str(lname)] = str(lval)
except socket.timeout:
raise SocketTimeout("Socket timed out before request.")
return headers
class SocketTimeout(Exception):
"Exception for when a socket times out between requests."
pass
class BadRequest(Exception):
"Exception for when a client sends an incomprehensible request."
pass
class SocketClosed(Exception):
"Exception for when a socket is closed by the client."
pass
class ChunkedReader(object):
def __init__(self, sock_file):
self.stream = sock_file
self.chunk_size = 0
def _read_header(self):
chunk_len = ""
try:
while "" == chunk_len:
chunk_len = self.stream.readline().strip()
return int(chunk_len, 16)
except ValueError:
return 0
def read(self, size):
data = b('')
chunk_size = self.chunk_size
while size:
if not chunk_size:
chunk_size = self._read_header()
if size < chunk_size:
data += self.stream.read(size)
chunk_size -= size
break
else:
if not chunk_size:
break
data += self.stream.read(chunk_size)
size -= chunk_size
chunk_size = 0
self.chunk_size = chunk_size
return data
def readline(self):
data = b('')
c = self.read(1)
while c and c != b('\n'):
data += c
c = self.read(1)
data += c
return data
def readlines(self):
yield self.readline()
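# Illustrative sketch of how ChunkedReader decodes a chunked body
# (hypothetical input, not from the original source; assumes Python 2,
# where b() leaves str untouched):
# >>> reader = ChunkedReader(StringIO("4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n"))
# >>> reader.read(9)
# 'Wikipedia'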
def get_method(method):
methods = dict(wsgi=WSGIWorker)
return methods[method.lower()]
# Monolithic build...end of module: rocket/worker.py
# Monolithic build...start of module: rocket/methods/__init__.py
# Monolithic build...end of module: rocket/methods/__init__.py
# Monolithic build...start of module: rocket/methods/wsgi.py
# Import System Modules
import sys
import socket
from wsgiref.headers import Headers
from wsgiref.util import FileWrapper
# Import Package Modules
# package imports removed in monolithic build
if PY3K:
from email.utils import formatdate
else:
# Caps Utils for Py2.4 compatibility
from email.Utils import formatdate
# Define Constants
NEWLINE = b('\r\n')
HEADER_RESPONSE = '''HTTP/1.1 %s\r\n%s'''
BASE_ENV = {'SERVER_NAME': SERVER_NAME,
'SCRIPT_NAME': '', # Direct call WSGI does not need a name
'wsgi.errors': sys.stderr,
'wsgi.version': (1, 0),
'wsgi.multiprocess': False,
'wsgi.run_once': False,
'wsgi.file_wrapper': FileWrapper
}
class WSGIWorker(Worker):
def __init__(self, *args, **kwargs):
"""Builds some instance variables that will last the life of the
thread."""
Worker.__init__(self, *args, **kwargs)
if isinstance(self.app_info, dict):
multithreaded = self.app_info.get('max_threads') != 1
else:
multithreaded = False
self.base_environ = dict(
{'SERVER_SOFTWARE': self.app_info['server_software'],
'wsgi.multithread': multithreaded,
})
self.base_environ.update(BASE_ENV)
# Grab our application
self.app = self.app_info.get('wsgi_app')
if not hasattr(self.app, "__call__"):
raise TypeError("The wsgi_app specified (%s) is not a valid WSGI application." % repr(self.app))
# Enable futures
if has_futures and self.app_info.get('futures'):
executor = self.app_info['executor']
self.base_environ.update({"wsgiorg.executor": executor,
"wsgiorg.futures": executor.futures})
def build_environ(self, sock_file, conn):
""" Build the execution environment. """
# Grab the request line
request = self.read_request_line(sock_file)
# Copy the Base Environment
environ = self.base_environ.copy()
# Grab the headers
for k, v in self.read_headers(sock_file).items():
environ[str('HTTP_' + k)] = v
# Add CGI Variables
environ['REQUEST_METHOD'] = request['method']
environ['PATH_INFO'] = request['path']
environ['SERVER_PROTOCOL'] = request['protocol']
environ['SERVER_PORT'] = str(conn.server_port)
environ['REMOTE_PORT'] = str(conn.client_port)
environ['REMOTE_ADDR'] = str(conn.client_addr)
environ['QUERY_STRING'] = request['query_string']
if 'HTTP_CONTENT_LENGTH' in environ:
environ['CONTENT_LENGTH'] = environ['HTTP_CONTENT_LENGTH']
if 'HTTP_CONTENT_TYPE' in environ:
environ['CONTENT_TYPE'] = environ['HTTP_CONTENT_TYPE']
# Save the request method for later
self.request_method = environ['REQUEST_METHOD']
# Add Dynamic WSGI Variables
if conn.ssl:
environ['wsgi.url_scheme'] = 'https'
environ['HTTPS'] = 'on'
try:
peercert = conn.socket.getpeercert(binary_form=True)
environ['SSL_CLIENT_RAW_CERT'] = \
peercert and ssl.DER_cert_to_PEM_cert(peercert)
except Exception:
print(sys.exc_info()[1])
else:
environ['wsgi.url_scheme'] = 'http'
if environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked':
environ['wsgi.input'] = ChunkedReader(sock_file)
else:
environ['wsgi.input'] = sock_file
return environ
def send_headers(self, data, sections):
h_set = self.header_set
# Does the app want us to send output chunked?
self.chunked = h_set.get('Transfer-Encoding', '').lower() == 'chunked'
# Add a Date header if it's not there already
if not 'Date' in h_set:
h_set['Date'] = formatdate(usegmt=True)
# Add a Server header if it's not there already
if not 'Server' in h_set:
h_set['Server'] = HTTP_SERVER_SOFTWARE
if 'Content-Length' in h_set:
self.size = int(h_set['Content-Length'])
else:
s = int(self.status.split(' ')[0])
if (s < 200 or s not in (204, 205, 304)) and not self.chunked:
if sections == 1 or self.protocol != 'HTTP/1.1':
# Add a Content-Length header because it's not there
self.size = len(data)
h_set['Content-Length'] = str(self.size)
else:
# If they sent us more than one section, we blow chunks
h_set['Transfer-Encoding'] = 'Chunked'
self.chunked = True
if __debug__:
self.err_log.debug('Adding header...'
'Transfer-Encoding: Chunked')
if 'Connection' not in h_set:
# If the application did not provide a connection header,
# fill it in
client_conn = self.environ.get('HTTP_CONNECTION', '').lower()
if self.environ['SERVER_PROTOCOL'] == 'HTTP/1.1':
# HTTP/1.1 defaults to keep-alive connections
if client_conn:
h_set['Connection'] = client_conn
else:
h_set['Connection'] = 'keep-alive'
else:
# HTTP < 1.1 supports keep-alive but it's quirky
# so we don't support it
h_set['Connection'] = 'close'
# Close our connection if we need to.
self.closeConnection = h_set.get('Connection', '').lower() == 'close'
# Build our output headers
header_data = HEADER_RESPONSE % (self.status, str(h_set))
# Send the headers
if __debug__:
self.err_log.debug('Sending Headers: %s' % repr(header_data))
self.conn.sendall(b(header_data))
self.headers_sent = True
def write_warning(self, data, sections=None):
self.err_log.warning('WSGI app called write method directly. This is '
'deprecated behavior. Please update your app.')
return self.write(data, sections)
def write(self, data, sections=None):
""" Write the data to the output socket. """
if self.error[0]:
self.status = self.error[0]
data = b(self.error[1])
if not self.headers_sent:
self.send_headers(data, sections)
if self.request_method != 'HEAD':
try:
if self.chunked:
self.conn.sendall(b('%x\r\n%s\r\n' % (len(data), data)))
else:
self.conn.sendall(data)
except socket.timeout:
self.closeConnection = True
except socket.error:
# But some clients will close the connection before that
# resulting in a socket error.
self.closeConnection = True
def start_response(self, status, response_headers, exc_info=None):
""" Store the HTTP status and headers to be sent when self.write is
called. """
if exc_info:
try:
if self.headers_sent:
# Re-raise original exception if headers sent
# because this violates WSGI specification.
raise
finally:
exc_info = None
elif self.header_set:
raise AssertionError("Headers already set!")
if PY3K and not isinstance(status, str):
self.status = str(status, 'ISO-8859-1')
else:
self.status = status
# Make sure headers are bytes objects
try:
self.header_set = Headers(response_headers)
except UnicodeDecodeError:
self.error = ('500 Internal Server Error',
'HTTP Headers should be bytes')
self.err_log.error('Received HTTP Headers from client that contain'
' invalid characters for Latin-1 encoding.')
return self.write_warning
def run_app(self, conn):
self.size = 0
self.header_set = Headers([])
self.headers_sent = False
self.error = (None, None)
self.chunked = False
sections = None
output = None
if __debug__:
self.err_log.debug('Getting sock_file')
# Build our file-like object
if PY3K:
sock_file = conn.makefile(mode='rb', buffering=BUF_SIZE)
else:
sock_file = conn.makefile(BUF_SIZE)
try:
# Read the headers and build our WSGI environment
self.environ = environ = self.build_environ(sock_file, conn)
# Handle 100 Continue
if environ.get('HTTP_EXPECT', '') == '100-continue':
res = environ['SERVER_PROTOCOL'] + ' 100 Continue\r\n\r\n'
conn.sendall(b(res))
# Send it to our WSGI application
output = self.app(environ, self.start_response)
if not hasattr(output, '__len__') and not hasattr(output, '__iter__'):
self.error = ('500 Internal Server Error',
'WSGI applications must return a list or '
'generator type.')
if hasattr(output, '__len__'):
sections = len(output)
for data in output:
# Don't send headers until body appears
if data:
self.write(data, sections)
if self.chunked:
# If chunked, send our final chunk length
self.conn.sendall(b('0\r\n\r\n'))
elif not self.headers_sent:
# Send headers if the body was empty
self.send_headers('', sections)
# Don't capture exceptions here. The Worker class handles
# them appropriately.
finally:
if __debug__:
self.err_log.debug('Finally closing output and sock_file')
if hasattr(output, 'close'):
output.close()
sock_file.close()
# Monolithic build...end of module: rocket/methods/wsgi.py
| gpl-2.0 | 7,902,708,074,817,093,000 | 30.482095 | 108 | 0.519957 | false |
MSylvia/pyNES | pynes/tests/adc_test.py | 27 | 4340 | # -*- coding: utf-8 -*-
'''
ADC, Add with Carry Test
This is an arithmetic instruction of the 6502.
'''
import unittest
from pynes.tests import MetaInstructionCase
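# Each test case below is declarative: it only lists the assembly source
# (asm), the expected lexer tokens (lex), the expected parse nodes (syn)
# and the expected opcodes (code). MetaInstructionCase is presumably what
# turns these attributes into the actual test methods.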
class AdcImmTest(unittest.TestCase):
'''
Test the arithmetic operation ADC between decimal 16
and the content of the accumulator.
'''
__metaclass__ = MetaInstructionCase
asm = 'ADC #$10'
lex = [('T_INSTRUCTION', 'ADC'), ('T_HEX_NUMBER', '#$10')]
syn = ['S_IMMEDIATE']
code = [0x69, 0x10]
class AdcImmWithDecimalTest(unittest.TestCase):
'''
Test the arithmetic operation ADC between decimal 10
and the content of the accumulator.
'''
__metaclass__ = MetaInstructionCase
asm = 'ADC #10'
lex = [('T_INSTRUCTION', 'ADC'), ('T_DECIMAL_NUMBER', '#10')]
syn = ['S_IMMEDIATE']
code = [0x69, 0x0A]
class AdcImmWithBinaryTest(unittest.TestCase):
'''
Test the arithmetic operation ADC between binary %00000100
(Decimal 4) and the content of the accumulator.
'''
__metaclass__ = MetaInstructionCase
asm = 'ADC #%00000100'
lex = [('T_INSTRUCTION', 'ADC'), ('T_BINARY_NUMBER', '#%00000100')]
syn = ['S_IMMEDIATE']
code = [0x69, 0x04]
class AdcZpTest(unittest.TestCase):
'''
Test the arithmetic operation ADC between the content of
the accumulator and the content of the zero page address.
'''
__metaclass__ = MetaInstructionCase
asm = 'ADC $00'
lex = [('T_INSTRUCTION', 'ADC'), ('T_ADDRESS', '$00')]
syn = ['S_ZEROPAGE']
code = [0x65, 0x00]
class AdcZpxTest(unittest.TestCase):
'''
Test the arithmetic operation ADC between the content of the
accumulator and the content of the zero page address
calculated by adding the content of X to $10.
'''
__metaclass__ = MetaInstructionCase
asm = 'ADC $10,X'
lex = [('T_INSTRUCTION', 'ADC'), ('T_ADDRESS', '$10'),
('T_SEPARATOR', ','),('T_REGISTER','X')]
syn = ['S_ZEROPAGE_X']
code = [0x75, 0x10]
class AdcAbsTest(unittest.TestCase):
'''
Test the arithmetic operation ADC between the content of
the accumulator and the content located at address $1234.
'''
__metaclass__ = MetaInstructionCase
asm = 'ADC $1234'
lex = [('T_INSTRUCTION', 'ADC'), ('T_ADDRESS', '$1234')]
syn = ['S_ABSOLUTE']
code = [0x6d, 0x34, 0x12]
class AdcAbsx(unittest.TestCase):
'''
Test the arithmetic operation ADC between the content of the
accumulator and the content located at address $1234
adding the content of X.
'''
__metaclass__ = MetaInstructionCase
asm = 'ADC $1234,X'
lex = [('T_INSTRUCTION', 'ADC'), ('T_ADDRESS', '$1234'),
('T_SEPARATOR', ','), ('T_REGISTER', 'X')]
syn = ['S_ABSOLUTE_X']
code = [0x7d, 0x34, 0x12]
class AdcAbsy(unittest.TestCase):
'''
Test the arithmetic operation ADC between the content of the
accumulator and the content located at address $1234
adding the content of Y.
'''
__metaclass__ = MetaInstructionCase
asm = 'ADC $1234,Y'
lex = [('T_INSTRUCTION', 'ADC'), ('T_ADDRESS', '$1234'),
('T_SEPARATOR', ','), ('T_REGISTER', 'Y')]
syn = ['S_ABSOLUTE_Y']
code = [0x79, 0x34, 0x12]
class AdcIndx(unittest.TestCase):
'''
Test the arithmetic operation ADC between the content of the
accumulator and the content located at the address read from
the zero page location computed by adding the content of X
to $20.
'''
__metaclass__ = MetaInstructionCase
asm = 'ADC ($20,X)'
lex = [('T_INSTRUCTION', 'ADC'), ('T_OPEN', '('),
('T_ADDRESS', '$20'), ('T_SEPARATOR', ','),
('T_REGISTER', 'X'), ('T_CLOSE', ')')]
syn = ['S_INDIRECT_X']
code = [0x61, 0x20]
class AdcIndy(unittest.TestCase):
'''
Test the arithmetic operation ADC between the content of the
accumulator and the content located at the address obtained
by adding the content of Y to the address stored at the zero
page location $20.
'''
__metaclass__ = MetaInstructionCase
asm = 'ADC ($20),Y'
lex = [('T_INSTRUCTION', 'ADC'), ('T_OPEN', '('),
('T_ADDRESS', '$20'), ('T_CLOSE', ')'),
('T_SEPARATOR', ','), ('T_REGISTER', 'Y')]
syn = ['S_INDIRECT_Y']
code = [0x71, 0x20]
| bsd-3-clause | -7,589,190,946,173,644,000 | 29.34965 | 71 | 0.607604 | false |
awalls-cx18/gnuradio | gr-blocks/python/blocks/qa_skiphead.py | 2 | 4748 | #!/usr/bin/env python
#
# Copyright 2007,2010,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, blocks
import pmt
import numpy
def make_tag(key, value, offset, srcid=None):
tag = gr.tag_t()
tag.key = pmt.string_to_symbol(key)
tag.value = pmt.to_pmt(value)
tag.offset = offset
if srcid is not None:
tag.srcid = pmt.to_pmt(srcid)
return tag
class test_skiphead(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
self.src_data = [int(x) for x in range(65536)]
def tearDown(self):
self.tb = None
def test_skip_0(self):
skip_cnt = 0
expected_result = tuple(self.src_data[skip_cnt:])
src1 = blocks.vector_source_i(self.src_data)
op = blocks.skiphead(gr.sizeof_int, skip_cnt)
dst1 = blocks.vector_sink_i()
self.tb.connect(src1, op, dst1)
self.tb.run()
dst_data = dst1.data()
self.assertEqual(expected_result, dst_data)
def test_skip_1(self):
skip_cnt = 1
expected_result = tuple(self.src_data[skip_cnt:])
src1 = blocks.vector_source_i(self.src_data)
op = blocks.skiphead(gr.sizeof_int, skip_cnt)
dst1 = blocks.vector_sink_i()
self.tb.connect(src1, op, dst1)
self.tb.run()
dst_data = dst1.data()
self.assertEqual(expected_result, dst_data)
def test_skip_1023(self):
skip_cnt = 1023
expected_result = tuple(self.src_data[skip_cnt:])
src1 = blocks.vector_source_i(self.src_data)
op = blocks.skiphead(gr.sizeof_int, skip_cnt)
dst1 = blocks.vector_sink_i()
self.tb.connect(src1, op, dst1)
self.tb.run()
dst_data = dst1.data()
self.assertEqual(expected_result, dst_data)
def test_skip_6339(self):
skip_cnt = 6339
expected_result = tuple(self.src_data[skip_cnt:])
src1 = blocks.vector_source_i(self.src_data)
op = blocks.skiphead(gr.sizeof_int, skip_cnt)
dst1 = blocks.vector_sink_i()
self.tb.connect(src1, op, dst1)
self.tb.run()
dst_data = dst1.data()
self.assertEqual(expected_result, dst_data)
def test_skip_12678(self):
skip_cnt = 12678
expected_result = tuple(self.src_data[skip_cnt:])
src1 = blocks.vector_source_i(self.src_data)
op = blocks.skiphead(gr.sizeof_int, skip_cnt)
dst1 = blocks.vector_sink_i()
self.tb.connect(src1, op, dst1)
self.tb.run()
dst_data = dst1.data()
self.assertEqual(expected_result, dst_data)
def test_skip_all(self):
skip_cnt = len(self.src_data)
expected_result = tuple(self.src_data[skip_cnt:])
src1 = blocks.vector_source_i(self.src_data)
op = blocks.skiphead(gr.sizeof_int, skip_cnt)
dst1 = blocks.vector_sink_i()
self.tb.connect(src1, op, dst1)
self.tb.run()
dst_data = dst1.data()
self.assertEqual(expected_result, dst_data)
def test_skip_tags(self):
skip_cnt = 25
expected_result = tuple(self.src_data[skip_cnt:])
src_tags = tuple([make_tag('foo', 'bar', 1, 'src'),
make_tag('baz', 'qux', 50, 'src')])
src1 = blocks.vector_source_i(self.src_data, tags=src_tags)
op = blocks.skiphead(gr.sizeof_int, skip_cnt)
dst1 = blocks.vector_sink_i()
self.tb.connect(src1, op, dst1)
self.tb.run()
dst_data = dst1.data()
self.assertEqual(expected_result, dst_data)
self.assertEqual(dst1.tags()[0].offset, 25, "Tag offset is incorrect")
self.assertEqual(len(dst1.tags()), 1, "Wrong number of tags received")
self.assertEqual(pmt.to_python(
dst1.tags()[0].key), "baz", "Tag key is incorrect")
self.assertEqual(pmt.to_python(
dst1.tags()[0].value), "qux", "Tag value is incorrect")
if __name__ == '__main__':
gr_unittest.run(test_skiphead, "test_skiphead.xml")
| gpl-3.0 | -7,484,946,431,934,732,000 | 34.17037 | 78 | 0.619419 | false |
materialsproject/pymatgen | pymatgen/io/xtb/inputs.py | 1 | 4045 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Classes for writing XTB input files
"""
import logging
import os
from typing import Dict, Optional, Union, List
from monty.json import MSONable
from pymatgen.core import Molecule
__author__ = "Alex Epstein"
__copyright__ = "Copyright 2020, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Alex Epstein"
__email__ = "[email protected]"
__credits__ = "Sam Blau, Evan Spotte-Smith"
logger = logging.getLogger(__name__)
class CRESTInput(MSONable):
"""
An object representing CREST input files.
Because CREST is controlled through command line flags and external
files, the CRESTInput class mainly consists of methods for containing
and writing external files.
"""
def __init__(
self,
molecule: Molecule,
working_dir: str = ".",
coords_filename: Optional[str] = "crest_in.xyz",
constraints: Optional[Dict[str, Union[List[int], float]]] = None,
):
"""
:param molecule (pymatgen Molecule object):
Input molecule, the only required CREST input.
:param working_dir (str):
Location to write input files, defaults to current directory
:param coords_filename (str):
Name of input coordinates file
:param constraints (Dict):
Dictionary of common editable parameters for .constrains file.
{"atoms": [List of 1-indexed atoms to fix], "force_constant":
float]
"""
self.molecule = molecule
self.coords_filename = coords_filename
self.constraints = constraints
self.working_dir = working_dir
def write_input_files(self):
"""
Write input files to working directory
"""
self.molecule.to(filename=os.path.join(self.working_dir, self.coords_filename))
if self.constraints:
constrains_string = self.constrains_template(
molecule=self.molecule,
reference_fnm=self.coords_filename,
constraints=self.constraints,
)
with open(".constrains", "w") as f:
f.write(constrains_string)
@staticmethod
def constrains_template(molecule, reference_fnm, constraints) -> str:
"""
:param molecule (pymatgen Molecule):
Molecule the constraints will be performed on
:param reference_fnm:
Name of file containing reference structure in same directory
:param constraints:
Dictionary of common editable parameters for .constrains file.
{"atoms": [List of 1-indexed atoms to fix], "force_constant":
float]
:return:
String for .constrains file
"""
atoms_to_constrain = constraints["atoms"]
force_constant = constraints["force_constant"]
reference_fnm = reference_fnm
mol = molecule
atoms_for_mtd = [i for i in range(1, len(mol.sites) + 1) if i not in atoms_to_constrain]
# Write as 1-3,5 instead of 1,2,3,5
interval_list = [atoms_for_mtd[0]]
for i, v in enumerate(atoms_for_mtd):
if v + 1 not in atoms_for_mtd:
interval_list.append(v)
if i != len(atoms_for_mtd) - 1:
interval_list.append(atoms_for_mtd[i + 1])
force_constant = force_constant
allowed_mtd_string = ",".join(
["{}-{}".format(interval_list[i], interval_list[i + 1]) for i in range(len(interval_list)) if i % 2 == 0]
)
constrains_file_string = (
"$constrain\n"
+ " atoms: {}\n".format(",".join([str(i) for i in atoms_to_constrain]))
+ " force constant={}\n".format(force_constant)
+ " reference={}\n".format(reference_fnm)
+ "$metadyn\n"
+ " atoms: {}\n".format(allowed_mtd_string)
+ "$end"
)
return constrains_file_string
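# Illustrative output (hypothetical values, not from the original source):
# for a 5-atom molecule with constraints={"atoms": [1, 2],
# "force_constant": 0.5} and reference_fnm="crest_in.xyz",
# constrains_template() returns roughly:
#
#   $constrain
#     atoms: 1,2
#     force constant=0.5
#     reference=crest_in.xyz
#   $metadyn
#     atoms: 3-5
#   $end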
| mit | -6,063,907,934,486,460,000 | 34.79646 | 117 | 0.593078 | false |
shayneholmes/plover | plover/machine/base.py | 7 | 5860 | # Copyright (c) 2010-2011 Joshua Harlan Lifton.
# See LICENSE.txt for details.
# TODO: add tests for all machines
# TODO: add tests for new status callbacks
"""Base classes for machine types. Do not use directly."""
import serial
import threading
from plover.exception import SerialPortException
import collections
STATE_STOPPED = 'closed'
STATE_INITIALIZING = 'initializing'
STATE_RUNNING = 'connected'
STATE_ERROR = 'disconnected'
class StenotypeBase(object):
"""The base class for all Stenotype classes."""
def __init__(self):
self.stroke_subscribers = []
self.state_subscribers = []
self.state = STATE_STOPPED
self.suppress = None
def start_capture(self):
"""Begin listening for output from the stenotype machine."""
pass
def stop_capture(self):
"""Stop listening for output from the stenotype machine."""
pass
def add_stroke_callback(self, callback):
"""Subscribe to output from the stenotype machine.
Argument:
callback -- The function to call whenever there is output from
the stenotype machine and output is being captured.
"""
self.stroke_subscribers.append(callback)
def remove_stroke_callback(self, callback):
"""Unsubscribe from output from the stenotype machine.
Argument:
callback -- A function that was previously subscribed.
"""
self.stroke_subscribers.remove(callback)
def add_state_callback(self, callback):
self.state_subscribers.append(callback)
def remove_state_callback(self, callback):
self.state_subscribers.remove(callback)
def _notify(self, steno_keys):
"""Invoke the callback of each subscriber with the given argument."""
# If the stroke matches a command while the keyboard is not suppressed
# then the stroke needs to be suppressed after the fact. One of the
# handlers will set the suppress function. This function is passed in to
# prevent threading issues with the gui.
self.suppress = None
for callback in self.stroke_subscribers:
callback(steno_keys)
if self.suppress:
self._post_suppress(self.suppress, steno_keys)
def _post_suppress(self, suppress, steno_keys):
"""This is a complicated way for the application to tell the machine to
suppress this stroke after the fact. This only currently has meaning for
the keyboard machine so it can backspace over the last stroke when used
to issue a command when plover is 'off'.
"""
pass
def _set_state(self, state):
self.state = state
for callback in self.state_subscribers:
callback(state)
def _stopped(self):
self._set_state(STATE_STOPPED)
def _initializing(self):
self._set_state(STATE_INITIALIZING)
def _ready(self):
self._set_state(STATE_RUNNING)
def _error(self):
self._set_state(STATE_ERROR)
@staticmethod
def get_option_info():
"""Get the default options for this machine."""
return {}
class ThreadedStenotypeBase(StenotypeBase, threading.Thread):
"""Base class for thread based machines.
Subclasses should override run.
"""
def __init__(self):
threading.Thread.__init__(self)
StenotypeBase.__init__(self)
self.finished = threading.Event()
def run(self):
"""This method should be overridden by a subclass."""
pass
def start_capture(self):
"""Begin listening for output from the stenotype machine."""
self.finished.clear()
self._initializing()
self.start()
def stop_capture(self):
"""Stop listening for output from the stenotype machine."""
self.finished.set()
try:
self.join()
except RuntimeError:
pass
self._stopped()
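# Hypothetical sketch of a minimal machine built on ThreadedStenotypeBase
# (not part of the original module): run() emits a fake stroke once per
# second until stop_capture() sets the finished event.
class _ExampleMachine(ThreadedStenotypeBase):
    def run(self):
        self._ready()
        while not self.finished.is_set():
            self._notify(['S-', 'T-'])   # pretend a stroke arrived
            self.finished.wait(1.0)      # poll interval / stop check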
class SerialStenotypeBase(ThreadedStenotypeBase):
"""For use with stenotype machines that connect via serial port.
This class implements the three methods necessary for a standard
stenotype interface: start_capture, stop_capture, and
add_callback.
"""
def __init__(self, serial_params):
"""Monitor the stenotype over a serial port.
Keyword arguments are the same as the keyword arguments for a
serial.Serial object.
"""
ThreadedStenotypeBase.__init__(self)
self.serial_port = None
self.serial_params = serial_params
def start_capture(self):
if self.serial_port:
self.serial_port.close()
try:
self.serial_port = serial.Serial(**self.serial_params)
except (serial.SerialException, OSError) as e:
print e
self._error()
return
if self.serial_port is None or not self.serial_port.isOpen():
self._error()
return
return ThreadedStenotypeBase.start_capture(self)
def stop_capture(self):
"""Stop listening for output from the stenotype machine."""
ThreadedStenotypeBase.stop_capture(self)
if self.serial_port:
self.serial_port.close()
@staticmethod
def get_option_info():
"""Get the default options for this machine."""
bool_converter = lambda s: s == 'True'
sb = lambda s: int(float(s)) if float(s).is_integer() else float(s)
return {
'port': (None, str), # TODO: make first port default
'baudrate': (9600, int),
'bytesize': (8, int),
'parity': ('N', str),
'stopbits': (1, sb),
'timeout': (2.0, float),
'xonxoff': (False, bool_converter),
'rtscts': (False, bool_converter)
}
| gpl-2.0 | 8,348,072,675,902,209,000 | 30.005291 | 81 | 0.619625 | false |
google-code/android-scripting | python/src/Lib/test/test_curses.py | 55 | 8582 | #
# Test script for the curses module
#
# This script doesn't actually display anything very coherent. but it
# does call every method and function.
#
# Functions not tested: {def,reset}_{shell,prog}_mode, getch(), getstr(),
# init_color()
# Only called, not tested: getmouse(), ungetmouse()
#
import curses, sys, tempfile, os
import curses.panel
# Optionally test curses module. This currently requires that the
# 'curses' resource be given on the regrtest command line using the -u
# option. If not available, nothing after this line will be executed.
from test.test_support import requires, TestSkipped
requires('curses')
# XXX: if newterm was supported we could use it instead of initscr and not exit
term = os.environ.get('TERM')
if not term or term == 'unknown':
raise TestSkipped, "$TERM=%r, calling initscr() may cause exit" % term
if sys.platform == "cygwin":
raise TestSkipped("cygwin's curses mostly just hangs")
def window_funcs(stdscr):
"Test the methods of windows"
win = curses.newwin(10,10)
win = curses.newwin(5,5, 5,5)
win2 = curses.newwin(15,15, 5,5)
for meth in [stdscr.addch, stdscr.addstr]:
for args in [('a'), ('a', curses.A_BOLD),
(4,4, 'a'), (5,5, 'a', curses.A_BOLD)]:
meth(*args)
for meth in [stdscr.box, stdscr.clear, stdscr.clrtobot,
stdscr.clrtoeol, stdscr.cursyncup, stdscr.delch,
stdscr.deleteln, stdscr.erase, stdscr.getbegyx,
stdscr.getbkgd, stdscr.getkey, stdscr.getmaxyx,
stdscr.getparyx, stdscr.getyx, stdscr.inch,
stdscr.insertln, stdscr.instr, stdscr.is_wintouched,
win.noutrefresh, stdscr.redrawwin, stdscr.refresh,
stdscr.standout, stdscr.standend, stdscr.syncdown,
stdscr.syncup, stdscr.touchwin, stdscr.untouchwin]:
meth()
stdscr.addnstr('1234', 3)
stdscr.addnstr('1234', 3, curses.A_BOLD)
stdscr.addnstr(4,4, '1234', 3)
stdscr.addnstr(5,5, '1234', 3, curses.A_BOLD)
stdscr.attron(curses.A_BOLD)
stdscr.attroff(curses.A_BOLD)
stdscr.attrset(curses.A_BOLD)
stdscr.bkgd(' ')
stdscr.bkgd(' ', curses.A_REVERSE)
stdscr.bkgdset(' ')
stdscr.bkgdset(' ', curses.A_REVERSE)
win.border(65, 66, 67, 68,
69, 70, 71, 72)
win.border('|', '!', '-', '_',
'+', '\\', '#', '/')
try:
win.border(65, 66, 67, 68,
69, [], 71, 72)
except TypeError:
pass
else:
raise RuntimeError, "Expected win.border() to raise TypeError"
stdscr.clearok(1)
win4 = stdscr.derwin(2,2)
win4 = stdscr.derwin(1,1, 5,5)
win4.mvderwin(9,9)
stdscr.echochar('a')
stdscr.echochar('a', curses.A_BOLD)
stdscr.hline('-', 5)
stdscr.hline('-', 5, curses.A_BOLD)
stdscr.hline(1,1,'-', 5)
stdscr.hline(1,1,'-', 5, curses.A_BOLD)
stdscr.idcok(1)
stdscr.idlok(1)
stdscr.immedok(1)
stdscr.insch('c')
stdscr.insdelln(1)
stdscr.insnstr('abc', 3)
stdscr.insnstr('abc', 3, curses.A_BOLD)
stdscr.insnstr(5, 5, 'abc', 3)
stdscr.insnstr(5, 5, 'abc', 3, curses.A_BOLD)
stdscr.insstr('def')
stdscr.insstr('def', curses.A_BOLD)
stdscr.insstr(5, 5, 'def')
stdscr.insstr(5, 5, 'def', curses.A_BOLD)
stdscr.is_linetouched(0)
stdscr.keypad(1)
stdscr.leaveok(1)
stdscr.move(3,3)
win.mvwin(2,2)
stdscr.nodelay(1)
stdscr.notimeout(1)
win2.overlay(win)
win2.overwrite(win)
win2.overlay(win, 1, 2, 3, 3, 2, 1)
win2.overwrite(win, 1, 2, 3, 3, 2, 1)
stdscr.redrawln(1,2)
stdscr.scrollok(1)
stdscr.scroll()
stdscr.scroll(2)
stdscr.scroll(-3)
stdscr.move(12, 2)
stdscr.setscrreg(10,15)
win3 = stdscr.subwin(10,10)
win3 = stdscr.subwin(10,10, 5,5)
stdscr.syncok(1)
stdscr.timeout(5)
stdscr.touchline(5,5)
stdscr.touchline(5,5,0)
stdscr.vline('a', 3)
stdscr.vline('a', 3, curses.A_STANDOUT)
stdscr.chgat(5, 2, 3, curses.A_BLINK)
stdscr.chgat(3, curses.A_BOLD)
stdscr.chgat(5, 8, curses.A_UNDERLINE)
stdscr.chgat(curses.A_BLINK)
stdscr.refresh()
stdscr.vline(1,1, 'a', 3)
stdscr.vline(1,1, 'a', 3, curses.A_STANDOUT)
if hasattr(curses, 'resize'):
stdscr.resize()
if hasattr(curses, 'enclose'):
stdscr.enclose()
def module_funcs(stdscr):
"Test module-level functions"
for func in [curses.baudrate, curses.beep, curses.can_change_color,
curses.cbreak, curses.def_prog_mode, curses.doupdate,
curses.filter, curses.flash, curses.flushinp,
curses.has_colors, curses.has_ic, curses.has_il,
curses.isendwin, curses.killchar, curses.longname,
curses.nocbreak, curses.noecho, curses.nonl,
curses.noqiflush, curses.noraw,
curses.reset_prog_mode, curses.termattrs,
curses.termname, curses.erasechar, curses.getsyx]:
func()
# Functions that actually need arguments
if curses.tigetstr("cnorm"):
curses.curs_set(1)
curses.delay_output(1)
curses.echo() ; curses.echo(1)
f = tempfile.TemporaryFile()
stdscr.putwin(f)
f.seek(0)
curses.getwin(f)
f.close()
curses.halfdelay(1)
curses.intrflush(1)
curses.meta(1)
curses.napms(100)
curses.newpad(50,50)
win = curses.newwin(5,5)
win = curses.newwin(5,5, 1,1)
curses.nl() ; curses.nl(1)
curses.putp('abc')
curses.qiflush()
curses.raw() ; curses.raw(1)
curses.setsyx(5,5)
curses.tigetflag('hc')
curses.tigetnum('co')
curses.tigetstr('cr')
curses.tparm('cr')
curses.typeahead(sys.__stdin__.fileno())
curses.unctrl('a')
curses.ungetch('a')
curses.use_env(1)
# Functions only available on a few platforms
if curses.has_colors():
curses.start_color()
curses.init_pair(2, 1,1)
curses.color_content(1)
curses.color_pair(2)
curses.pair_content(curses.COLOR_PAIRS - 1)
curses.pair_number(0)
if hasattr(curses, 'use_default_colors'):
curses.use_default_colors()
if hasattr(curses, 'keyname'):
curses.keyname(13)
if hasattr(curses, 'has_key'):
curses.has_key(13)
if hasattr(curses, 'getmouse'):
(availmask, oldmask) = curses.mousemask(curses.BUTTON1_PRESSED)
# availmask indicates that mouse stuff not available.
if availmask != 0:
curses.mouseinterval(10)
# just verify these don't cause errors
m = curses.getmouse()
curses.ungetmouse(*m)
if hasattr(curses, 'is_term_resized'):
curses.is_term_resized(*stdscr.getmaxyx())
if hasattr(curses, 'resizeterm'):
curses.resizeterm(*stdscr.getmaxyx())
if hasattr(curses, 'resize_term'):
curses.resize_term(*stdscr.getmaxyx())
def unit_tests():
from curses import ascii
for ch, expected in [('a', 'a'), ('A', 'A'),
(';', ';'), (' ', ' '),
('\x7f', '^?'), ('\n', '^J'), ('\0', '^@'),
# Meta-bit characters
('\x8a', '!^J'), ('\xc1', '!A'),
]:
if ascii.unctrl(ch) != expected:
print 'curses.unctrl fails on character', repr(ch)
def test_userptr_without_set(stdscr):
w = curses.newwin(10, 10)
p = curses.panel.new_panel(w)
# try to access userptr() before calling set_userptr() -- segfaults
try:
p.userptr()
raise RuntimeError, 'userptr should fail since not set'
except curses.panel.error:
pass
def test_resize_term(stdscr):
if hasattr(curses, 'resizeterm'):
lines, cols = curses.LINES, curses.COLS
curses.resizeterm(lines - 1, cols + 1)
if curses.LINES != lines - 1 or curses.COLS != cols + 1:
raise RuntimeError, "Expected resizeterm to update LINES and COLS"
def main(stdscr):
curses.savetty()
try:
module_funcs(stdscr)
window_funcs(stdscr)
test_userptr_without_set(stdscr)
test_resize_term(stdscr)
finally:
curses.resetty()
if __name__ == '__main__':
curses.wrapper(main)
unit_tests()
else:
# testing setupterm() inside initscr/endwin
# causes terminal breakage
curses.setupterm(fd=sys.__stdout__.fileno())
try:
stdscr = curses.initscr()
main(stdscr)
finally:
curses.endwin()
unit_tests()
| apache-2.0 | 9,054,400,872,793,077,000 | 29.65 | 79 | 0.595432 | false |
msarana/selenium_python | ENV/Lib/site-packages/pip/utils/ui.py | 25 | 11320 | from __future__ import absolute_import
from __future__ import division
import itertools
import sys
from signal import signal, SIGINT, default_int_handler
import time
import contextlib
import logging
from pip.compat import WINDOWS
from pip.utils import format_size
from pip.utils.logging import get_indentation
from pip._vendor import six
from pip._vendor.progress.bar import Bar, IncrementalBar
from pip._vendor.progress.helpers import (WritelnMixin,
HIDE_CURSOR, SHOW_CURSOR)
from pip._vendor.progress.spinner import Spinner
try:
from pip._vendor import colorama
# Lots of different errors can come from this, including SystemError and
# ImportError.
except Exception:
colorama = None
logger = logging.getLogger(__name__)
def _select_progress_class(preferred, fallback):
encoding = getattr(preferred.file, "encoding", None)
# If we don't know what encoding this file is in, then we'll just assume
# that it doesn't support unicode and use the ASCII bar.
if not encoding:
return fallback
# Collect all of the possible characters we want to use with the preferred
# bar.
characters = [
getattr(preferred, "empty_fill", six.text_type()),
getattr(preferred, "fill", six.text_type()),
]
characters += list(getattr(preferred, "phases", []))
# Try to decode the characters we're using for the bar using the encoding
# of the given file, if this works then we'll assume that we can use the
# fancier bar and if not we'll fall back to the plaintext bar.
try:
six.text_type().join(characters).encode(encoding)
except UnicodeEncodeError:
return fallback
else:
return preferred
_BaseBar = _select_progress_class(IncrementalBar, Bar)
class InterruptibleMixin(object):
"""
Helper to ensure that self.finish() gets called on keyboard interrupt.
This allows downloads to be interrupted without leaving temporary state
(like hidden cursors) behind.
This class is similar to the progress library's existing SigIntMixin
helper, but as of version 1.2, that helper has the following problems:
1. It calls sys.exit().
2. It discards the existing SIGINT handler completely.
3. It leaves its own handler in place even after an uninterrupted finish,
which will have unexpected delayed effects if the user triggers an
unrelated keyboard interrupt some time after a progress-displaying
download has already completed, for example.
"""
def __init__(self, *args, **kwargs):
"""
Save the original SIGINT handler for later.
"""
super(InterruptibleMixin, self).__init__(*args, **kwargs)
self.original_handler = signal(SIGINT, self.handle_sigint)
# If signal() returns None, the previous handler was not installed from
# Python, and we cannot restore it. This probably should not happen,
# but if it does, we must restore something sensible instead, at least.
# The least bad option should be Python's default SIGINT handler, which
# just raises KeyboardInterrupt.
if self.original_handler is None:
self.original_handler = default_int_handler
def finish(self):
"""
Restore the original SIGINT handler after finishing.
This should happen regardless of whether the progress display finishes
normally, or gets interrupted.
"""
super(InterruptibleMixin, self).finish()
signal(SIGINT, self.original_handler)
def handle_sigint(self, signum, frame):
"""
Call self.finish() before delegating to the original SIGINT handler.
This handler should only be in place while the progress display is
active.
"""
self.finish()
self.original_handler(signum, frame)
class DownloadProgressMixin(object):
def __init__(self, *args, **kwargs):
super(DownloadProgressMixin, self).__init__(*args, **kwargs)
self.message = (" " * (get_indentation() + 2)) + self.message
@property
def downloaded(self):
return format_size(self.index)
@property
def download_speed(self):
# Avoid zero division errors...
if self.avg == 0.0:
return "..."
return format_size(1 / self.avg) + "/s"
@property
def pretty_eta(self):
if self.eta:
return "eta %s" % self.eta_td
return ""
def iter(self, it, n=1):
for x in it:
yield x
self.next(n)
self.finish()
class WindowsMixin(object):
def __init__(self, *args, **kwargs):
# The Windows terminal does not support the hide/show cursor ANSI codes
# even with colorama. So we'll ensure that hide_cursor is False on
# Windows.
# This call needs to go before the super() call, so that hide_cursor
# is set in time. The base progress bar class writes the "hide cursor"
# code to the terminal in its init, so if we don't set this soon
# enough, we get a "hide" with no corresponding "show"...
if WINDOWS and self.hide_cursor:
self.hide_cursor = False
super(WindowsMixin, self).__init__(*args, **kwargs)
# Check if we are running on Windows and we have the colorama module,
# if we do then wrap our file with it.
if WINDOWS and colorama:
self.file = colorama.AnsiToWin32(self.file)
# The progress code expects to be able to call self.file.isatty()
# but the colorama.AnsiToWin32() object doesn't have that, so we'll
# add it.
self.file.isatty = lambda: self.file.wrapped.isatty()
# The progress code expects to be able to call self.file.flush()
# but the colorama.AnsiToWin32() object doesn't have that, so we'll
# add it.
self.file.flush = lambda: self.file.wrapped.flush()
class DownloadProgressBar(WindowsMixin, InterruptibleMixin,
DownloadProgressMixin, _BaseBar):
file = sys.stdout
message = "%(percent)d%%"
suffix = "%(downloaded)s %(download_speed)s %(pretty_eta)s"
class DownloadProgressSpinner(WindowsMixin, InterruptibleMixin,
DownloadProgressMixin, WritelnMixin, Spinner):
file = sys.stdout
suffix = "%(downloaded)s %(download_speed)s"
def next_phase(self):
if not hasattr(self, "_phaser"):
self._phaser = itertools.cycle(self.phases)
return next(self._phaser)
def update(self):
message = self.message % self
phase = self.next_phase()
suffix = self.suffix % self
line = ''.join([
message,
" " if message else "",
phase,
" " if suffix else "",
suffix,
])
self.writeln(line)
################################################################
# Generic "something is happening" spinners
#
# We don't even try using progress.spinner.Spinner here because it's actually
# simpler to reimplement from scratch than to coerce their code into doing
# what we need.
################################################################
@contextlib.contextmanager
def hidden_cursor(file):
# The Windows terminal does not support the hide/show cursor ANSI codes,
# even via colorama. So don't even try.
if WINDOWS:
yield
else:
file.write(HIDE_CURSOR)
try:
yield
finally:
file.write(SHOW_CURSOR)
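# A minimal usage sketch (assumption): hidden_cursor() brackets terminal
# drawing so the cursor stays hidden for the duration and is restored even
# if the body raises, e.g.:
#
#   with hidden_cursor(sys.stdout):
#       sys.stdout.write("working...")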
class RateLimiter(object):
def __init__(self, min_update_interval_seconds):
self._min_update_interval_seconds = min_update_interval_seconds
self._last_update = 0
def ready(self):
now = time.time()
delta = now - self._last_update
return delta >= self._min_update_interval_seconds
def reset(self):
self._last_update = time.time()
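# A minimal usage sketch (assumption): RateLimiter throttles how often a
# display is refreshed, e.g.:
#
#   limiter = RateLimiter(0.125)
#   ...
#   if limiter.ready():
#       redraw()          # hypothetical refresh
#       limiter.reset()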
class InteractiveSpinner(object):
def __init__(self, message, file=None, spin_chars="-\\|/",
# Empirically, 8 updates/second looks nice
min_update_interval_seconds=0.125):
self._message = message
if file is None:
file = sys.stdout
self._file = file
self._rate_limiter = RateLimiter(min_update_interval_seconds)
self._finished = False
self._spin_cycle = itertools.cycle(spin_chars)
self._file.write(" " * get_indentation() + self._message + " ... ")
self._width = 0
def _write(self, status):
assert not self._finished
# Erase what we wrote before by backspacing to the beginning, writing
# spaces to overwrite the old text, and then backspacing again
backup = "\b" * self._width
self._file.write(backup + " " * self._width + backup)
# Now we have a blank slate to add our status
self._file.write(status)
self._width = len(status)
self._file.flush()
self._rate_limiter.reset()
def spin(self):
if self._finished:
return
if not self._rate_limiter.ready():
return
self._write(next(self._spin_cycle))
def finish(self, final_status):
if self._finished:
return
self._write(final_status)
self._file.write("\n")
self._file.flush()
self._finished = True
# Used for dumb terminals, non-interactive installs (no tty), etc.
# We still print updates occasionally (once every 60 seconds by default) to
# act as a keep-alive for systems like Travis-CI that take lack-of-output as
# an indication that a task has frozen.
class NonInteractiveSpinner(object):
def __init__(self, message, min_update_interval_seconds=60):
self._message = message
self._finished = False
self._rate_limiter = RateLimiter(min_update_interval_seconds)
self._update("started")
def _update(self, status):
assert not self._finished
self._rate_limiter.reset()
logger.info("%s: %s", self._message, status)
def spin(self):
if self._finished:
return
if not self._rate_limiter.ready():
return
self._update("still running...")
def finish(self, final_status):
if self._finished:
return
self._update("finished with status '%s'" % (final_status,))
self._finished = True
@contextlib.contextmanager
def open_spinner(message):
# Interactive spinner goes directly to sys.stdout rather than being routed
# through the logging system, but it acts like it has level INFO,
# i.e. it's only displayed if we're at level INFO or better.
# Non-interactive spinner goes through the logging system, so it is always
# in sync with logging configuration.
if sys.stdout.isatty() and logger.getEffectiveLevel() <= logging.INFO:
spinner = InteractiveSpinner(message)
else:
spinner = NonInteractiveSpinner(message)
try:
with hidden_cursor(sys.stdout):
yield spinner
except KeyboardInterrupt:
spinner.finish("canceled")
raise
except Exception:
spinner.finish("error")
raise
else:
spinner.finish("done")
| apache-2.0 | 7,285,293,368,798,881,000 | 32.39233 | 79 | 0.62553 | false |
edgedb/edgedb | edb/pgsql/delta.py | 1 | 188387 | # mypy: ignore-errors
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
import collections.abc
import dataclasses
import itertools
import textwrap
from typing import *
from edb import errors
from edb.edgeql import ast as ql_ast
from edb.edgeql import qltypes as ql_ft
from edb.edgeql import compiler as qlcompiler
from edb.schema import annos as s_anno
from edb.schema import casts as s_casts
from edb.schema import scalars as s_scalars
from edb.schema import objtypes as s_objtypes
from edb.schema import constraints as s_constr
from edb.schema import database as s_db
from edb.schema import delta as sd
from edb.schema import expr as s_expr
from edb.schema import expraliases as s_aliases
from edb.schema import extensions as s_exts
from edb.schema import functions as s_funcs
from edb.schema import indexes as s_indexes
from edb.schema import links as s_links
from edb.schema import lproperties as s_props
from edb.schema import migrations as s_migrations
from edb.schema import modules as s_mod
from edb.schema import name as sn
from edb.schema import objects as so
from edb.schema import operators as s_opers
from edb.schema import pointers as s_pointers
from edb.schema import pseudo as s_pseudo
from edb.schema import roles as s_roles
from edb.schema import sources as s_sources
from edb.schema import types as s_types
from edb.schema import version as s_ver
from edb.schema import utils as s_utils
from edb.common import markup
from edb.common import ordered
from edb.common import topological
from edb.common import uuidgen
from edb.ir import pathid as irpathid
from edb.ir import typeutils as irtyputils
from edb.ir import utils as irutils
from edb.pgsql import common
from edb.pgsql import dbops
from edb.server import defines as edbdef
from edb.server import pgcluster
from . import ast as pg_ast
from .common import qname as q
from .common import quote_literal as ql
from .common import quote_ident as qi
from .common import quote_type as qt
from . import compiler
from . import codegen
from . import schemamech
from . import types
if TYPE_CHECKING:
from edb.schema import schema as s_schema
def has_table(obj, schema):
if isinstance(obj, s_objtypes.ObjectType):
return not (
obj.is_compound_type(schema) or
obj.get_is_derived(schema) or
obj.is_view(schema)
)
elif obj.is_pure_computable(schema) or obj.get_is_derived(schema):
return False
elif obj.generic(schema):
return (
not isinstance(obj, s_props.Property)
and str(obj.get_name(schema)) != 'std::link'
)
elif obj.is_link_property(schema):
return not obj.singular(schema)
elif not has_table(obj.get_source(schema), schema):
return False
else:
ptr_stor_info = types.get_pointer_storage_info(
obj, resolve_type=False, schema=schema, link_bias=True)
return (
ptr_stor_info is not None
and ptr_stor_info.table_type == 'link'
)
class CommandMeta(sd.CommandMeta):
pass
class MetaCommand(sd.Command, metaclass=CommandMeta):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.pgops = ordered.OrderedSet()
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
for op in self.before_ops:
if not isinstance(op, sd.AlterObjectProperty):
self.pgops.add(op)
for op in self.ops:
if not isinstance(op, sd.AlterObjectProperty):
self.pgops.add(op)
return schema
def generate(self, block: dbops.PLBlock) -> None:
for op in sorted(
self.pgops, key=lambda i: getattr(i, 'priority', 0),
reverse=True):
op.generate(block)
@classmethod
def as_markup(cls, self, *, ctx):
node = markup.elements.lang.TreeNode(name=str(self))
for dd in self.pgops:
if isinstance(dd, AlterObjectProperty):
diff = markup.elements.doc.ValueDiff(
before=repr(dd.old_value), after=repr(dd.new_value))
if dd.new_inherited:
diff.comment = 'inherited'
elif dd.new_computed:
diff.comment = 'computed'
node.add_child(label=dd.property, node=diff)
else:
node.add_child(node=markup.serialize(dd, ctx=ctx))
return node
def _get_backend_params(
self,
context: sd.CommandContext,
) -> pgcluster.BackendRuntimeParams:
ctx_backend_params = context.backend_runtime_params
if ctx_backend_params is not None:
backend_params = cast(
pgcluster.BackendRuntimeParams, ctx_backend_params)
else:
backend_params = pgcluster.get_default_runtime_params()
return backend_params
def _get_instance_params(
self,
context: sd.CommandContext,
) -> pgcluster.BackendInstanceParams:
return self._get_backend_params(context).instance_params
def _get_tenant_id(self, context: sd.CommandContext) -> str:
return self._get_instance_params(context).tenant_id
class CommandGroupAdapted(MetaCommand, adapts=sd.CommandGroup):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = sd.CommandGroup.apply(self, schema, context)
schema = MetaCommand.apply(self, schema, context)
return schema
class Record:
def __init__(self, items):
self._items = items
def __iter__(self):
return iter(self._items)
def __len__(self):
return len(self._items)
def __repr__(self):
return '<_Record {!r}>'.format(self._items)
class ObjectMetaCommand(MetaCommand, sd.ObjectCommand,
metaclass=CommandMeta):
op_priority = 0
class CreateObject(ObjectMetaCommand):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = self.__class__.get_adaptee().apply(self, schema, context)
return ObjectMetaCommand.apply(self, schema, context)
class RenameObject(ObjectMetaCommand):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = self.__class__.get_adaptee().apply(self, schema, context)
return ObjectMetaCommand.apply(self, schema, context)
class RebaseObject(ObjectMetaCommand):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = self.__class__.get_adaptee().apply(self, schema, context)
return ObjectMetaCommand.apply(self, schema, context)
class AlterObject(ObjectMetaCommand):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = ObjectMetaCommand.apply(self, schema, context)
return self.__class__.get_adaptee().apply(self, schema, context)
class DeleteObject(ObjectMetaCommand):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = self.__class__.get_adaptee().apply(self, schema, context)
return ObjectMetaCommand.apply(self, schema, context)
class Nop(MetaCommand, adapts=sd.Nop):
pass
class Query(MetaCommand, adapts=sd.Query):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super().apply(schema, context)
sql_tree = compiler.compile_ir_to_sql_tree(
self.expr.irast,
output_format=compiler.OutputFormat.NATIVE_INTERNAL,
explicit_top_cast=irtyputils.type_to_typeref(
schema,
schema.get('std::str'),
),
)
sql_text = codegen.generate_source(sql_tree)
# The INTO _dummy_text bit is needed because PL/pgSQL _really_
# wants the result of a returning query to be stored in a variable,
# and the PERFORM hack does not work if the query has DML CTEs.
self.pgops.add(dbops.Query(
text=f'{sql_text} INTO _dummy_text',
))
return schema
class AlterObjectProperty(MetaCommand, adapts=sd.AlterObjectProperty):
pass
class SchemaVersionCommand(ObjectMetaCommand):
pass
class CreateSchemaVersion(
SchemaVersionCommand,
CreateObject,
adapts=s_ver.CreateSchemaVersion,
):
pass
class AlterSchemaVersion(
SchemaVersionCommand,
AlterObject,
adapts=s_ver.AlterSchemaVersion,
):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super().apply(schema, context)
expected_ver = self.get_orig_attribute_value('version')
check = dbops.Query(
f'''
SELECT
edgedb.raise_on_not_null(
(SELECT NULLIF(
(SELECT
version::text
FROM
edgedb."_SchemaSchemaVersion"
FOR UPDATE),
{ql(str(expected_ver))}
)),
'serialization_failure',
msg => (
'Cannot serialize DDL: '
|| (SELECT version::text FROM
edgedb."_SchemaSchemaVersion")
)
)
INTO _dummy_text
'''
)
self.pgops.add(check)
return schema
class GlobalSchemaVersionCommand(ObjectMetaCommand):
pass
class CreateGlobalSchemaVersion(
ObjectMetaCommand,
adapts=s_ver.CreateGlobalSchemaVersion,
):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_ver.CreateGlobalSchemaVersion.apply(self, schema, context)
schema = ObjectMetaCommand.apply(self, schema, context)
ver_id = str(self.scls.id)
ver_name = str(self.scls.get_name(schema))
tenant_id = self._get_tenant_id(context)
self.pgops.add(
dbops.UpdateMetadataSection(
dbops.Database(name=common.get_database_backend_name(
edbdef.EDGEDB_TEMPLATE_DB, tenant_id=tenant_id)),
section='GlobalSchemaVersion',
metadata={
ver_id: {
'id': ver_id,
'name': ver_name,
'version': str(self.scls.get_version(schema)),
'builtin': self.scls.get_builtin(schema),
'internal': self.scls.get_internal(schema),
}
}
)
)
return schema
class AlterGlobalSchemaVersion(
ObjectMetaCommand,
adapts=s_ver.AlterGlobalSchemaVersion,
):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_ver.AlterGlobalSchemaVersion.apply(self, schema, context)
schema = ObjectMetaCommand.apply(self, schema, context)
ver_id = str(self.scls.id)
ver_name = str(self.scls.get_name(schema))
ctx_backend_params = context.backend_runtime_params
if ctx_backend_params is not None:
backend_params = cast(
pgcluster.BackendRuntimeParams, ctx_backend_params)
else:
backend_params = pgcluster.get_default_runtime_params()
instance_params = backend_params.instance_params
capabilities = instance_params.capabilities
tenant_id = instance_params.tenant_id
tpl_db_name = common.get_database_backend_name(
edbdef.EDGEDB_TEMPLATE_DB, tenant_id=tenant_id)
if capabilities & pgcluster.BackendCapabilities.SUPERUSER_ACCESS:
# Only superusers are generally allowed to make an UPDATE
# lock on shared catalogs.
lock = dbops.Query(
f'''
SELECT
description
FROM
pg_catalog.pg_shdescription
WHERE
objoid = (
SELECT oid
FROM pg_database
WHERE datname = {ql(tpl_db_name)}
)
AND classoid = 'pg_database'::regclass::oid
FOR UPDATE
INTO _dummy_text
'''
)
else:
# Without superuser access we have to resort to lock polling.
# This is racy, but is unfortunately the best we can do.
lock = dbops.Query(f'''
SELECT
edgedb.raise_on_not_null(
(
SELECT 'locked'
FROM pg_catalog.pg_locks
WHERE
locktype = 'object'
AND classid = 'pg_database'::regclass::oid
AND objid = (
SELECT oid
FROM pg_database
WHERE
datname = {ql(tpl_db_name)}
)
AND mode = 'ShareUpdateExclusiveLock'
AND granted
AND pid != pg_backend_pid()
),
'serialization_failure',
msg => (
'Cannot serialize global DDL: '
|| (SELECT version::text FROM
edgedb."_SysGlobalSchemaVersion")
)
)
INTO _dummy_text
''')
self.pgops.add(lock)
expected_ver = self.get_orig_attribute_value('version')
check = dbops.Query(
f'''
SELECT
edgedb.raise_on_not_null(
(SELECT NULLIF(
(SELECT
version::text
FROM
edgedb."_SysGlobalSchemaVersion"
),
{ql(str(expected_ver))}
)),
'serialization_failure',
msg => (
'Cannot serialize global DDL: '
|| (SELECT version::text FROM
edgedb."_SysGlobalSchemaVersion")
)
)
INTO _dummy_text
'''
)
self.pgops.add(check)
self.pgops.add(
dbops.UpdateMetadataSection(
dbops.Database(name=tpl_db_name),
section='GlobalSchemaVersion',
metadata={
ver_id: {
'id': ver_id,
'name': ver_name,
'version': str(self.scls.get_version(schema)),
'builtin': self.scls.get_builtin(schema),
'internal': self.scls.get_internal(schema),
}
}
)
)
return schema
class PseudoTypeCommand(ObjectMetaCommand):
pass
class CreatePseudoType(
PseudoTypeCommand,
CreateObject,
adapts=s_pseudo.CreatePseudoType,
):
pass
class TupleCommand(ObjectMetaCommand):
pass
class CreateTuple(TupleCommand, adapts=s_types.CreateTuple):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = self.__class__.get_adaptee().apply(self, schema, context)
schema = TupleCommand.apply(self, schema, context)
if self.scls.is_polymorphic(schema):
return schema
elements = self.scls.get_element_types(schema).items(schema)
ctype = dbops.CompositeType(
name=common.get_backend_name(schema, self.scls, catenate=False),
columns=[
dbops.Column(
name=n,
type=qt(types.pg_type_from_object(
schema, t, persistent_tuples=True)),
)
for n, t in elements
]
)
self.pgops.add(dbops.CreateCompositeType(type=ctype))
return schema
class AlterTuple(TupleCommand, adapts=s_types.AlterTuple):
pass
class RenameTuple(TupleCommand, adapts=s_types.RenameTuple):
pass
class DeleteTuple(TupleCommand, adapts=s_types.DeleteTuple):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
tup = schema.get_global(s_types.Tuple, self.classname)
if not tup.is_polymorphic(schema):
self.pgops.add(dbops.DropCompositeType(
name=common.get_backend_name(schema, tup, catenate=False),
priority=2,
))
schema = self.__class__.get_adaptee().apply(self, schema, context)
schema = TupleCommand.apply(self, schema, context)
return schema
class ExprAliasCommand(ObjectMetaCommand):
pass
class CreateAlias(
ExprAliasCommand,
CreateObject,
adapts=s_aliases.CreateAlias,
):
pass
class RenameAlias(
ExprAliasCommand,
RenameObject,
adapts=s_aliases.RenameAlias,
):
pass
class AlterAlias(
ExprAliasCommand,
AlterObject,
adapts=s_aliases.AlterAlias,
):
pass
class DeleteAlias(
ExprAliasCommand,
DeleteObject,
adapts=s_aliases.DeleteAlias,
):
pass
class TupleExprAliasCommand(ObjectMetaCommand):
pass
class CreateTupleExprAlias(
TupleExprAliasCommand, CreateObject,
adapts=s_types.CreateTupleExprAlias):
pass
class RenameTupleExprAlias(
TupleExprAliasCommand, RenameObject,
adapts=s_types.RenameTupleExprAlias):
pass
class AlterTupleExprAlias(
TupleExprAliasCommand, AlterObject,
adapts=s_types.AlterTupleExprAlias):
pass
class DeleteTupleExprAlias(
TupleExprAliasCommand, DeleteObject,
adapts=s_types.DeleteTupleExprAlias):
pass
class ArrayCommand(ObjectMetaCommand):
pass
class CreateArray(ArrayCommand, adapts=s_types.CreateArray):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = self.__class__.get_adaptee().apply(self, schema, context)
schema = ArrayCommand.apply(self, schema, context)
return schema
class AlterArray(ArrayCommand, adapts=s_types.AlterArray):
pass
class RenameArray(ArrayCommand, adapts=s_types.RenameArray):
pass
class DeleteArray(ArrayCommand, adapts=s_types.DeleteArray):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = self.__class__.get_adaptee().apply(self, schema, context)
schema = ArrayCommand.apply(self, schema, context)
return schema
class ArrayExprAliasCommand(ObjectMetaCommand):
pass
class CreateArrayExprAlias(
ArrayExprAliasCommand, CreateObject,
adapts=s_types.CreateArrayExprAlias):
pass
class RenameArrayExprAlias(
ArrayExprAliasCommand, RenameObject,
adapts=s_types.RenameArrayExprAlias):
pass
class AlterArrayExprAlias(
ArrayExprAliasCommand, AlterObject,
adapts=s_types.AlterArrayExprAlias):
pass
class DeleteArrayExprAlias(
ArrayExprAliasCommand, DeleteObject,
adapts=s_types.DeleteArrayExprAlias):
pass
class ParameterCommand(sd.ObjectCommand,
metaclass=CommandMeta):
pass
class CreateParameter(ParameterCommand, CreateObject,
adapts=s_funcs.CreateParameter):
pass
class DeleteParameter(ParameterCommand, DeleteObject,
adapts=s_funcs.DeleteParameter):
pass
class RenameParameter(ParameterCommand, RenameObject,
adapts=s_funcs.RenameParameter):
pass
class AlterParameter(ParameterCommand, AlterObject,
adapts=s_funcs.AlterParameter):
pass
class FunctionCommand:
def get_pgname(self, func: s_funcs.Function, schema):
return common.get_backend_name(schema, func, catenate=False)
def get_pgtype(self, func: s_funcs.Function, obj, schema):
if obj.is_any(schema):
return ('anyelement',)
try:
return types.pg_type_from_object(
schema, obj, persistent_tuples=True)
except ValueError:
raise errors.QueryError(
f'could not compile parameter type {obj!r} '
f'of function {func.get_shortname(schema)}',
context=self.source_context) from None
def compile_default(self, func: s_funcs.Function,
default: s_expr.Expression, schema):
try:
comp = s_expr.Expression.compiled(
default,
schema=schema,
as_fragment=True,
)
ir = comp.irast
if not irutils.is_const(ir.expr):
raise ValueError('expression not constant')
sql_tree = compiler.compile_ir_to_sql_tree(
ir.expr, singleton_mode=True)
return codegen.SQLSourceGenerator.to_source(sql_tree)
except Exception as ex:
raise errors.QueryError(
f'could not compile default expression {default!r} '
f'of function {func.get_shortname(schema)}: {ex}',
context=self.source_context) from ex
def compile_args(self, func: s_funcs.Function, schema):
func_params = func.get_params(schema)
has_inlined_defaults = func.has_inlined_defaults(schema)
args = []
if has_inlined_defaults:
args.append(('__defaults_mask__', ('bytea',), None))
compile_defaults = not (
has_inlined_defaults or func_params.find_named_only(schema)
)
for param in func_params.get_in_canonical_order(schema):
param_type = param.get_type(schema)
param_default = param.get_default(schema)
pg_at = self.get_pgtype(func, param_type, schema)
default = None
if compile_defaults and param_default is not None:
default = self.compile_default(func, param_default, schema)
args.append((param.get_parameter_name(schema), pg_at, default))
return args
def make_function(self, func: s_funcs.Function, code, schema):
func_return_typemod = func.get_return_typemod(schema)
func_params = func.get_params(schema)
return dbops.Function(
name=self.get_pgname(func, schema),
args=self.compile_args(func, schema),
has_variadic=func_params.find_variadic(schema) is not None,
set_returning=func_return_typemod is ql_ft.TypeModifier.SetOfType,
volatility=func.get_volatility(schema),
returns=self.get_pgtype(
func, func.get_return_type(schema), schema),
text=code)
def compile_sql_function(self, func: s_funcs.Function, schema):
return self.make_function(func, func.get_code(schema), schema)
def fix_return_type(
self, func: s_funcs.Function, nativecode, schema, context):
return_type = self._get_attribute_value(schema, context, 'return_type')
ir = nativecode.irast
if not (
return_type.is_object_type()
or s_types.is_type_compatible(return_type, ir.stype,
schema=nativecode.schema)
):
# Add a cast and recompile it
qlexpr = qlcompiler.astutils.ensure_qlstmt(ql_ast.TypeCast(
type=s_utils.typeref_to_ast(schema, return_type),
expr=nativecode.qlast,
))
nativecode = self.compile_function(
schema, context, type(nativecode).from_ast(qlexpr, schema))
return nativecode
def compile_edgeql_function(self, func: s_funcs.Function, schema, context):
nativecode = func.get_nativecode(schema)
if nativecode.irast is None:
nativecode = self.compile_function(schema, context, nativecode)
nativecode = self.fix_return_type(func, nativecode, schema, context)
sql_text, _ = compiler.compile_ir_to_sql(
nativecode.irast,
ignore_shapes=True,
explicit_top_cast=irtyputils.type_to_typeref( # note: no cache
schema, func.get_return_type(schema)),
output_format=compiler.OutputFormat.NATIVE,
use_named_params=True)
return self.make_function(func, sql_text, schema)
def sql_rval_consistency_check(
self,
cobj: s_funcs.CallableObject,
expr: str,
schema: s_schema.Schema,
) -> dbops.Command:
fname = cobj.get_verbosename(schema)
rtype = types.pg_type_from_object(
schema,
cobj.get_return_type(schema),
persistent_tuples=True,
)
rtype_desc = '.'.join(rtype)
# Determine the actual returned type of the SQL function.
# We can't easily do this by looking in system catalogs because
# of polymorphic dispatch, but, fortunately, there's pg_typeof().
# We only need to be sure to actually NOT call the target function,
# as we can't assume how it'll behave with dummy inputs. Hence, the
# weird-looking query below, where we rely on the Postgres executor to
# skip the call, because no rows satisfy the WHERE condition, but
# we then still generate a NULL row via a LEFT JOIN.
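# Illustrative sketch (assumption): for a function "f" declared to return
# std::str whose SQL body actually yields an integer, the check below
# reduces to roughly:
#
#   PERFORM edgedb.raise_on_not_null(
#       NULLIF(
#           pg_typeof(NULL::text),
#           (SELECT pg_typeof(f.i)
#            FROM (SELECT NULL::text) AS spreader
#            LEFT JOIN (SELECT f() WHERE False) AS f(i) ON (true))),
#       'invalid_function_definition', ...);
#
# i.e. NULLIF yields NULL (no error) only when the declared and actual
# return types match.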
f_test = textwrap.dedent(f'''\
(SELECT
pg_typeof(f.i)
FROM
(SELECT NULL::text) AS spreader
LEFT JOIN (SELECT {expr} WHERE False) AS f(i) ON (true))''')
check = dbops.Query(text=f'''
PERFORM
edgedb.raise_on_not_null(
NULLIF(
pg_typeof(NULL::{qt(rtype)}),
{f_test}
),
'invalid_function_definition',
msg => format(
'%s is declared to return SQL type "%s", but '
|| 'the underlying SQL function returns "%s"',
{ql(fname)},
{ql(rtype_desc)},
{f_test}::text
),
hint => (
'Declare the function with '
|| '`force_return_cast := true`, '
|| 'or add an explicit cast to its body.'
)
);
''')
return check
def get_dummy_func_call(
self,
cobj: s_funcs.CallableObject,
sql_func: str,
schema: s_schema.Schema,
) -> str:
args = []
func_params = cobj.get_params(schema)
for param in func_params.get_in_canonical_order(schema):
param_type = param.get_type(schema)
pg_at = self.get_pgtype(cobj, param_type, schema)
args.append(f'NULL::{qt(pg_at)}')
return f'{sql_func}({", ".join(args)})'
def make_op(
self,
func: s_funcs.Function,
schema: s_schema.Schema,
context: sd.CommandContext,
*,
or_replace: bool=False,
) -> Iterable[dbops.Command]:
if func.get_from_expr(schema):
# Intrinsic function, handled directly by the compiler.
return ()
elif sql_func := func.get_from_function(schema):
func_params = func.get_params(schema)
if (
func.get_force_return_cast(schema)
or func_params.has_polymorphic(schema)
or func.get_sql_func_has_out_params(schema)
):
return ()
else:
# Function backed directly by an SQL function.
# Check the consistency of the return type.
dexpr = self.get_dummy_func_call(func, sql_func, schema)
check = self.sql_rval_consistency_check(func, dexpr, schema)
return (check,)
else:
func_language = func.get_language(schema)
if func_language is ql_ast.Language.SQL:
dbf = self.compile_sql_function(func, schema)
elif func_language is ql_ast.Language.EdgeQL:
dbf = self.compile_edgeql_function(func, schema, context)
else:
raise errors.QueryError(
f'cannot compile function {func.get_shortname(schema)}: '
f'unsupported language {func_language}',
context=self.source_context)
op = dbops.CreateFunction(dbf, or_replace=or_replace)
return (op,)
class CreateFunction(FunctionCommand, CreateObject,
adapts=s_funcs.CreateFunction):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super().apply(schema, context)
self.pgops.update(self.make_op(self.scls, schema, context))
return schema
class RenameFunction(
FunctionCommand, RenameObject, adapts=s_funcs.RenameFunction):
pass
class AlterFunction(
FunctionCommand, AlterObject, adapts=s_funcs.AlterFunction):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super().apply(schema, context)
if self.metadata_only:
return schema
if (
self.get_attribute_value('volatility') is not None or
self.get_attribute_value('nativecode') is not None
):
self.pgops.update(
self.make_op(self.scls, schema, context, or_replace=True))
return schema
class DeleteFunction(
FunctionCommand, DeleteObject, adapts=s_funcs.DeleteFunction):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
orig_schema = schema
schema = super().apply(schema, context)
func = self.scls
if func.get_code(orig_schema) or func.get_nativecode(orig_schema):
# An EdgeQL or a SQL function
# (not just an alias to a SQL function).
variadic = func.get_params(orig_schema).find_variadic(orig_schema)
self.pgops.add(
dbops.DropFunction(
name=self.get_pgname(func, orig_schema),
args=self.compile_args(func, orig_schema),
has_variadic=variadic is not None,
)
)
return schema
class OperatorCommand(FunctionCommand):
def oper_name_to_pg_name(
self,
schema,
name: sn.QualName,
) -> Tuple[str, str]:
return common.get_operator_backend_name(
name, catenate=False)
def get_pg_operands(self, schema, oper: s_opers.Operator):
left_type = None
right_type = None
oper_params = list(oper.get_params(schema).objects(schema))
oper_kind = oper.get_operator_kind(schema)
if oper_kind is ql_ft.OperatorKind.Infix:
left_type = types.pg_type_from_object(
schema, oper_params[0].get_type(schema))
right_type = types.pg_type_from_object(
schema, oper_params[1].get_type(schema))
elif oper_kind is ql_ft.OperatorKind.Prefix:
right_type = types.pg_type_from_object(
schema, oper_params[0].get_type(schema))
elif oper_kind is ql_ft.OperatorKind.Postfix:
left_type = types.pg_type_from_object(
schema, oper_params[0].get_type(schema))
else:
raise RuntimeError(
f'unexpected operator type: {oper.get_type(schema)!r}')
return left_type, right_type
def compile_args(self, oper: s_opers.Operator, schema):
args = []
oper_params = oper.get_params(schema)
for param in oper_params.get_in_canonical_order(schema):
pg_at = self.get_pgtype(oper, param.get_type(schema), schema)
args.append((param.get_parameter_name(schema), pg_at))
return args
def make_operator_function(self, oper: s_opers.Operator, schema):
return dbops.Function(
name=common.get_backend_name(
schema, oper, catenate=False, aspect='function'),
args=self.compile_args(oper, schema),
volatility=oper.get_volatility(schema),
returns=self.get_pgtype(
oper, oper.get_return_type(schema), schema),
text=oper.get_code(schema))
def get_dummy_operator_call(
self,
oper: s_opers.Operator,
pgop: str,
from_args: Tuple[Tuple[str, ...], ...],
schema: s_schema.Schema,
) -> str:
# Construct a dummy operator invocation with NULL-cast operands.
oper_kind = oper.get_operator_kind(schema)
if oper_kind is ql_ft.OperatorKind.Infix:
op = f'NULL::{qt(from_args[0])} {pgop} NULL::{qt(from_args[1])}'
elif oper_kind is ql_ft.OperatorKind.Postfix:
op = f'NULL::{qt(from_args[0])} {pgop}'
elif oper_kind is ql_ft.OperatorKind.Prefix:
op = f'{pgop} NULL::{qt(from_args[1])}'
else:
raise RuntimeError(f'unexpected operator kind: {oper_kind!r}')
return op
class CreateOperator(OperatorCommand, CreateObject,
adapts=s_opers.CreateOperator):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super().apply(schema, context)
oper = self.scls
if oper.get_abstract(schema):
return schema
oper_language = oper.get_language(schema)
oper_fromop = oper.get_from_operator(schema)
oper_fromfunc = oper.get_from_function(schema)
oper_code = oper.get_code(schema)
oper_comm = oper.get_commutator(schema)
if oper_comm:
commutator = self.oper_name_to_pg_name(schema, oper_comm)
else:
commutator = None
oper_neg = oper.get_negator(schema)
if oper_neg:
negator = self.oper_name_to_pg_name(schema, oper_neg)
else:
negator = None
if oper_language is ql_ast.Language.SQL and oper_fromop:
pg_oper_name = oper_fromop[0]
args = self.get_pg_operands(schema, oper)
if len(oper_fromop) > 1:
# Explicit operand types given in FROM SQL OPERATOR.
from_args = oper_fromop[1:]
else:
from_args = args
if oper_code:
oper_func = self.make_operator_function(oper, schema)
self.pgops.add(dbops.CreateFunction(oper_func))
oper_func_name = common.qname(*oper_func.name)
elif oper_fromfunc:
oper_func_name = oper_fromfunc
elif from_args != args:
# Need a proxy function with casts
oper_kind = oper.get_operator_kind(schema)
if oper_kind is ql_ft.OperatorKind.Infix:
op = (f'$1::{from_args[0]} {pg_oper_name} '
f'$2::{from_args[1]}')
elif oper_kind is ql_ft.OperatorKind.Postfix:
op = f'$1::{from_args[0]} {pg_oper_name}'
elif oper_kind is ql_ft.OperatorKind.Prefix:
op = f'{pg_oper_name} $1::{from_args[1]}'
else:
raise RuntimeError(
f'unexpected operator kind: {oper_kind!r}')
rtype = self.get_pgtype(
oper, oper.get_return_type(schema), schema)
oper_func = dbops.Function(
name=common.get_backend_name(
schema, oper, catenate=False, aspect='function'),
args=[(None, a) for a in args if a],
volatility=oper.get_volatility(schema),
returns=rtype,
text=f'SELECT ({op})::{qt(rtype)}',
)
self.pgops.add(dbops.CreateFunction(oper_func))
oper_func_name = common.qname(*oper_func.name)
else:
oper_func_name = None
params = oper.get_params(schema)
if (not params.has_polymorphic(schema) or
all(p.get_type(schema).is_array()
for p in params.objects(schema))):
self.pgops.add(dbops.CreateOperatorAlias(
name=common.get_backend_name(schema, oper, catenate=False),
args=args,
procedure=oper_func_name,
base_operator=('pg_catalog', pg_oper_name),
operator_args=from_args,
commutator=commutator,
negator=negator,
))
if oper_func_name is not None:
cexpr = self.get_dummy_func_call(
oper, oper_func_name, schema)
else:
cexpr = self.get_dummy_operator_call(
oper, pg_oper_name, from_args, schema)
check = self.sql_rval_consistency_check(oper, cexpr, schema)
self.pgops.add(check)
elif oper_language is ql_ast.Language.SQL and oper_code:
args = self.get_pg_operands(schema, oper)
oper_func = self.make_operator_function(oper, schema)
self.pgops.add(dbops.CreateFunction(oper_func))
oper_func_name = common.qname(*oper_func.name)
self.pgops.add(dbops.CreateOperator(
name=common.get_backend_name(schema, oper, catenate=False),
args=args,
procedure=oper_func_name,
))
cexpr = self.get_dummy_func_call(
oper, q(*oper_func.name), schema)
check = self.sql_rval_consistency_check(oper, cexpr, schema)
self.pgops.add(check)
elif oper.get_from_expr(schema):
# This operator is handled by the compiler and does not
# need explicit representation in the backend.
pass
else:
raise errors.QueryError(
f'cannot create operator {oper.get_shortname(schema)}: '
f'only "FROM SQL" and "FROM SQL OPERATOR" operators '
f'are currently supported',
context=self.source_context)
return schema
class RenameOperator(
OperatorCommand, RenameObject, adapts=s_opers.RenameOperator):
pass
class AlterOperator(
OperatorCommand, AlterObject, adapts=s_opers.AlterOperator):
pass
class DeleteOperator(
OperatorCommand, DeleteObject, adapts=s_opers.DeleteOperator):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
orig_schema = schema
oper = schema.get(self.classname)
if oper.get_abstract(schema):
return super().apply(schema, context)
name = common.get_backend_name(schema, oper, catenate=False)
args = self.get_pg_operands(schema, oper)
schema = super().apply(schema, context)
if not oper.get_from_expr(orig_schema):
self.pgops.add(dbops.DropOperator(name=name, args=args))
return schema
class CastCommand:
def make_cast_function(self, cast: s_casts.Cast, schema):
name = common.get_backend_name(
schema, cast, catenate=False, aspect='function')
args = [(
'val',
types.pg_type_from_object(schema, cast.get_from_type(schema))
)]
returns = types.pg_type_from_object(schema, cast.get_to_type(schema))
return dbops.Function(
name=name,
args=args,
returns=returns,
text=cast.get_code(schema),
)
class CreateCast(CastCommand, CreateObject,
adapts=s_casts.CreateCast):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super().apply(schema, context)
cast = self.scls
cast_language = cast.get_language(schema)
cast_code = cast.get_code(schema)
from_cast = cast.get_from_cast(schema)
from_expr = cast.get_from_expr(schema)
if cast_language is ql_ast.Language.SQL and cast_code:
cast_func = self.make_cast_function(cast, schema)
self.pgops.add(dbops.CreateFunction(cast_func))
elif from_cast is not None or from_expr is not None:
# This operator is handled by the compiler and does not
# need explicit representation in the backend.
pass
else:
raise errors.QueryError(
f'cannot create cast: '
f'only "FROM SQL" and "FROM SQL FUNCTION" casts '
f'are currently supported',
context=self.source_context)
return schema
class RenameCast(
CastCommand, RenameObject, adapts=s_casts.RenameCast):
pass
class AlterCast(
CastCommand, AlterObject, adapts=s_casts.AlterCast):
pass
class DeleteCast(
CastCommand, DeleteObject, adapts=s_casts.DeleteCast):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
cast = schema.get(self.classname)
cast_language = cast.get_language(schema)
cast_code = cast.get_code(schema)
schema = super().apply(schema, context)
if cast_language is ql_ast.Language.SQL and cast_code:
cast_func = self.make_cast_function(cast, schema)
self.pgops.add(dbops.DropFunction(
cast_func.name, cast_func.args))
return schema
class AnnotationCommand:
pass
class CreateAnnotation(
AnnotationCommand, CreateObject,
adapts=s_anno.CreateAnnotation):
op_priority = 1
class RenameAnnotation(
AnnotationCommand, RenameObject,
adapts=s_anno.RenameAnnotation):
pass
class AlterAnnotation(
AnnotationCommand, AlterObject, adapts=s_anno.AlterAnnotation):
pass
class DeleteAnnotation(
AnnotationCommand, DeleteObject,
adapts=s_anno.DeleteAnnotation):
pass
class AnnotationValueCommand(sd.ObjectCommand,
metaclass=CommandMeta):
op_priority = 4
class CreateAnnotationValue(
AnnotationValueCommand, CreateObject,
adapts=s_anno.CreateAnnotationValue):
pass
class AlterAnnotationValue(
AnnotationValueCommand, AlterObject,
adapts=s_anno.AlterAnnotationValue):
pass
class AlterAnnotationValueOwned(
AnnotationValueCommand, AlterObject,
adapts=s_anno.AlterAnnotationValueOwned):
pass
class RenameAnnotationValue(
AnnotationValueCommand, RenameObject,
adapts=s_anno.RenameAnnotationValue):
pass
class RebaseAnnotationValue(
AnnotationValueCommand,
RebaseObject,
adapts=s_anno.RebaseAnnotationValue,
):
pass
class DeleteAnnotationValue(
AnnotationValueCommand, DeleteObject,
adapts=s_anno.DeleteAnnotationValue):
pass
class ConstraintCommand(sd.ObjectCommand,
metaclass=CommandMeta):
op_priority = 3
@classmethod
def constraint_is_effective(cls, schema, constraint):
subject = constraint.get_subject(schema)
if subject is None:
return False
ancestors = [
a for a in constraint.get_ancestors(schema).objects(schema)
if not a.generic(schema)
]
if (
constraint.get_delegated(schema)
and all(ancestor.get_delegated(schema) for ancestor in ancestors)
):
return False
elif isinstance(subject, s_pointers.Pointer):
if subject.generic(schema):
return True
else:
return has_table(subject.get_source(schema), schema)
elif isinstance(subject, s_objtypes.ObjectType):
return has_table(subject, schema)
else:
return True
@classmethod
def create_constraint(
cls, constraint, schema, context, source_context=None):
if cls.constraint_is_effective(schema, constraint):
subject = constraint.get_subject(schema)
if subject is not None:
schemac_to_backendc = \
schemamech.ConstraintMech.\
schema_constraint_to_backend_constraint
bconstr = schemac_to_backendc(
subject, constraint, schema, context,
source_context)
return bconstr.create_ops()
else:
return dbops.CommandGroup()
@classmethod
def delete_constraint(
cls, constraint, schema, context, source_context=None):
op = dbops.CommandGroup()
if cls.constraint_is_effective(schema, constraint):
subject = constraint.get_subject(schema)
if subject is not None:
schemac_to_backendc = \
schemamech.ConstraintMech.\
schema_constraint_to_backend_constraint
bconstr = schemac_to_backendc(
subject, constraint, schema, context,
source_context)
op.add_command(bconstr.delete_ops())
return op
class CreateConstraint(
ConstraintCommand, CreateObject,
adapts=s_constr.CreateConstraint):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super().apply(schema, context)
constraint = self.scls
op = self.create_constraint(
constraint, schema, context, self.source_context)
self.pgops.add(op)
return schema
class RenameConstraint(
ConstraintCommand, RenameObject,
adapts=s_constr.RenameConstraint):
pass
class AlterConstraintOwned(
ConstraintCommand,
AlterObject,
adapts=s_constr.AlterConstraintOwned,
):
pass
class AlterConstraint(
ConstraintCommand, AlterObject,
adapts=s_constr.AlterConstraint):
def apply(self, schema, context):
orig_schema = schema
schema = super().apply(schema, context)
constraint = self.scls
if self.metadata_only:
return schema
if (
not self.constraint_is_effective(schema, constraint)
and not self.constraint_is_effective(orig_schema, constraint)
):
return schema
subject = constraint.get_subject(schema)
subcommands = list(self.get_subcommands())
if (not subcommands or
isinstance(subcommands[0], s_constr.RenameConstraint)):
# This is a pure rename, so everything has already been handled by
# RenameConstraint above.
return schema
if subject is not None:
schemac_to_backendc = \
schemamech.ConstraintMech.\
schema_constraint_to_backend_constraint
bconstr = schemac_to_backendc(
subject, constraint, schema, context, self.source_context)
orig_bconstr = schemac_to_backendc(
constraint.get_subject(orig_schema),
constraint,
orig_schema,
context,
self.source_context,
)
op = dbops.CommandGroup(priority=1)
if not self.constraint_is_effective(orig_schema, constraint):
op.add_command(bconstr.create_ops())
for child in constraint.children(schema):
orig_cbconstr = schemac_to_backendc(
child.get_subject(orig_schema),
child,
orig_schema,
context,
self.source_context,
)
cbconstr = schemac_to_backendc(
child.get_subject(schema),
child,
schema,
context,
self.source_context,
)
op.add_command(cbconstr.alter_ops(orig_cbconstr))
elif not self.constraint_is_effective(schema, constraint):
op.add_command(bconstr.alter_ops(orig_bconstr))
for child in constraint.children(schema):
orig_cbconstr = schemac_to_backendc(
child.get_subject(orig_schema),
child,
orig_schema,
context,
self.source_context,
)
cbconstr = schemac_to_backendc(
child.get_subject(schema),
child,
schema,
context,
self.source_context,
)
op.add_command(cbconstr.alter_ops(orig_cbconstr))
else:
op.add_command(bconstr.alter_ops(orig_bconstr))
self.pgops.add(op)
return schema
class DeleteConstraint(
ConstraintCommand, DeleteObject,
adapts=s_constr.DeleteConstraint):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
delta_root_ctx = context.top()
orig_schema = delta_root_ctx.original_schema
constraint = schema.get(self.classname)
op = self.delete_constraint(
constraint, orig_schema, context, self.source_context)
self.pgops.add(op)
schema = super().apply(schema, context)
return schema
class RebaseConstraint(
ConstraintCommand, RebaseObject,
adapts=s_constr.RebaseConstraint):
pass
class AliasCapableObjectMetaCommand(ObjectMetaCommand):
pass
class ScalarTypeMetaCommand(AliasCapableObjectMetaCommand):
def is_sequence(self, schema, scalar):
seq = schema.get('std::sequence', default=None)
return seq is not None and scalar.issubclass(schema, seq)
class CreateScalarType(ScalarTypeMetaCommand,
adapts=s_scalars.CreateScalarType):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_scalars.CreateScalarType.apply(self, schema, context)
schema = ScalarTypeMetaCommand.apply(self, schema, context)
return schema
def _create_begin(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super()._create_begin(schema, context)
scalar = self.scls
if scalar.get_abstract(schema):
return schema
new_domain_name = types.pg_type_from_scalar(schema, scalar)
if types.is_builtin_scalar(schema, scalar):
return schema
enum_values = scalar.get_enum_values(schema)
if enum_values:
new_enum_name = common.get_backend_name(
schema, scalar, catenate=False)
self.pgops.add(dbops.CreateEnum(
dbops.Enum(name=new_enum_name, values=enum_values)))
base = q(*new_enum_name)
else:
base = types.get_scalar_base(schema, scalar)
if self.is_sequence(schema, scalar):
seq_name = common.get_backend_name(
schema, scalar, catenate=False, aspect='sequence')
self.pgops.add(dbops.CreateSequence(name=seq_name))
domain = dbops.Domain(name=new_domain_name, base=base)
self.pgops.add(dbops.CreateDomain(domain=domain))
default = self.get_resolved_attribute_value(
'default',
schema=schema,
context=context,
)
if (default is not None
and not isinstance(default, s_expr.Expression)):
# We only care to support literal defaults here. Supporting
# defaults based on queries makes no sense at the database
# level, since the database forbids queries in DEFAULT, and
# pre-calculating the value does not make sense either,
# since the whole point of query defaults is for them to be
# dynamic.
self.pgops.add(
dbops.AlterDomainAlterDefault(
name=new_domain_name, default=default))
return schema
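# Illustrative sketch (assumption): a user-defined scalar such as
#
#   scalar type ticket_no extending std::int64;
#
# is backed by roughly CREATE DOMAIN <name> AS int8; an enum scalar becomes
# CREATE TYPE <name> AS ENUM (...); and std::sequence subtypes additionally
# get a CREATE SEQUENCE for the backing sequence object.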
class RenameScalarType(ScalarTypeMetaCommand, RenameObject,
adapts=s_scalars.RenameScalarType):
pass
class RebaseScalarType(ScalarTypeMetaCommand,
adapts=s_scalars.RebaseScalarType):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
# Actual rebase is taken care of in AlterScalarType
schema = ScalarTypeMetaCommand.apply(self, schema, context)
return s_scalars.RebaseScalarType.apply(self, schema, context)
class AlterScalarType(ScalarTypeMetaCommand, adapts=s_scalars.AlterScalarType):
def _get_problematic_refs(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
*,
composite_only: bool,
) -> Optional[Tuple[
Tuple[so.Object, ...],
List[Tuple[s_props.Property, s_types.TypeShell]],
]]:
"""Find problematic references to this scalar type that need handled.
This is used to work around two irritating limitations of Postgres:
1. That elements of enum types may not be removed or reordered
2. That a constraint may not be added to a domain type if that
domain type appears in a *composite* type that is used in a
column somewhere.
We don't want to have these limitations, and we need to do a decent
amount of work to work around them.
1. Find all of the affected properties. For case 2, this is any
property whose type is a container type that contains this
scalar. (Possibly transitively.) For case 1, the container type
restriction is dropped.
2. Change the type of all offending properties to an equivalent type
that does not reference this scalar. This may require creating
new types. (See _undo_everything.)
3. Add the constraint.
4. Restore the type of all offending properties. If existing data
violates the new constraint, we will fail here. Delete any
temporarily created types. (See _redo_everything.)
Somewhat hackily, _undo_everything and _redo_everything
operate by creating new schema delta command objects, and
adapting and applying them. This is the most straightforward
way to perform the high-level operations needed here.
I've kept this code in pgsql/delta instead of trying to put it in
schema/delta because it is pretty aggressively an irritating
pgsql implementation detail and because I didn't want it to
have to interact with ordering ever.
This function finds all of the relevant properties and returns
a list of them along with the appropriate replacement type.
In case 1, it also finds other referencing objects which need
to be deleted and then recreated.
"""
seen_props = set()
seen_other = set()
typ = self.scls
# Do a worklist driven search for properties that refer to this scalar
# through a collection type. We search backwards starting from
# referring collection types or from all refs, depending on
# composite_only.
scls_type = s_types.Collection if composite_only else None
wl = list(schema.get_referrers(typ, scls_type=scls_type))
while wl:
obj = wl.pop()
if isinstance(obj, s_props.Property):
seen_props.add(obj)
elif isinstance(obj, s_scalars.ScalarType):
pass
elif isinstance(obj, s_types.Collection):
wl.extend(schema.get_referrers(obj))
elif isinstance(obj, s_funcs.Parameter) and not composite_only:
wl.extend(schema.get_referrers(obj))
elif isinstance(obj, s_funcs.Function) and not composite_only:
wl.extend(schema.get_referrers(obj))
seen_other.add(obj)
elif isinstance(obj, s_constr.Constraint) and not composite_only:
seen_other.add(obj)
elif isinstance(obj, s_indexes.Index) and not composite_only:
seen_other.add(obj)
if not seen_props and not seen_other:
return None
props = []
if seen_props:
# Find a concrete ancestor to substitute in.
if typ.is_enum(schema):
ancestor = schema.get(sn.QualName('std', 'str'))
else:
for ancestor in typ.get_ancestors(schema).objects(schema):
if not ancestor.get_abstract(schema):
break
else:
raise AssertionError("can't find concrete base for scalar")
replacement_shell = ancestor.as_shell(schema)
props = [
(
prop,
s_utils.type_shell_substitute(
typ.get_name(schema),
replacement_shell,
prop.get_target(schema).as_shell(schema))
)
for prop in seen_props
]
other = sd.sort_by_cross_refs(schema, seen_other)
return other, props
def _undo_everything(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
other: Tuple[so.Object, ...],
props: List[Tuple[s_props.Property, s_types.TypeShell]],
) -> s_schema.Schema:
"""Rewrite the type of everything that uses this scalar dangerously.
See _get_problematic_refs above for details.
"""
# First we need to strip out any default value that might reference
# one of the functions we are going to delete.
cmd = sd.CommandGroup()
for prop, _ in props:
if prop.get_default(schema):
delta_alter, cmd_alter, alter_context = prop.init_delta_branch(
schema, context, cmdtype=sd.AlterObject)
cmd_alter.set_attribute_value('default', None)
cmd.add(delta_alter)
acmd = CommandMeta.adapt(cmd)
schema = acmd.apply(schema, context)
self.pgops.update(acmd.get_subcommands())
for obj in other:
if isinstance(obj, s_funcs.Function):
# Force function deletions at the SQL level without ever
# bothering to remove them from our schema.
fc = FunctionCommand()
variadic = obj.get_params(schema).find_variadic(schema)
self.pgops.add(
dbops.DropFunction(
name=fc.get_pgname(obj, schema),
args=fc.compile_args(obj, schema),
has_variadic=variadic is not None,
)
)
elif isinstance(obj, s_constr.Constraint):
self.pgops.add(
ConstraintCommand.delete_constraint(obj, schema, context))
elif isinstance(obj, s_indexes.Index):
self.pgops.add(
DeleteIndex.delete_index(
obj, schema, context, priority=0))
cmd = sd.DeltaRoot()
for prop, new_typ in props:
try:
cmd.add(new_typ.as_create_delta(schema))
except NotImplementedError:
pass
delta_alter, cmd_alter, alter_context = prop.init_delta_branch(
schema, context, cmdtype=sd.AlterObject)
cmd_alter.set_attribute_value('target', new_typ)
cmd_alter.set_attribute_value('default', None)
cmd.add(delta_alter)
cmd.apply(schema, context)
for sub in cmd.get_subcommands():
acmd = CommandMeta.adapt(sub)
schema = acmd.apply(schema, context)
self.pgops.add(acmd)
return schema
def _redo_everything(
self,
schema: s_schema.Schema,
orig_schema: s_schema.Schema,
context: sd.CommandContext,
other: Tuple[so.Object, ...],
props: List[Tuple[s_props.Property, s_types.TypeShell]],
) -> s_schema.Schema:
"""Restore the type of everything that uses this scalar dangerously.
See _get_problematic_refs above for details.
"""
for obj in reversed(other):
if isinstance(obj, s_funcs.Function):
# Super hackily recreate the functions
fc = CreateFunction(classname=obj.get_name(schema))
for f in ('language', 'params', 'return_type'):
fc.set_attribute_value(f, obj.get_field_value(schema, f))
self.pgops.update(fc.make_op(obj, schema, context))
elif isinstance(obj, s_constr.Constraint):
self.pgops.add(
ConstraintCommand.create_constraint(obj, schema, context))
elif isinstance(obj, s_indexes.Index):
self.pgops.add(
CreateIndex.create_index(obj, orig_schema, context))
cmd = sd.DeltaRoot()
for prop, new_typ in props:
delta_alter, cmd_alter, alter_context = prop.init_delta_branch(
schema, context, cmdtype=sd.AlterObject)
cmd_alter.set_attribute_value(
'target', prop.get_target(orig_schema))
cmd_alter.set_attribute_value(
'default', prop.get_default(orig_schema))
cmd.add_prerequisite(delta_alter)
rnew_typ = new_typ.resolve(schema)
if delete := rnew_typ.as_type_delete_if_dead(schema):
cmd.add(delete)
# do an apply of the schema-level command to force it to canonicalize,
# which prunes out duplicate deletions
cmd.apply(schema, context)
for sub in cmd.get_subcommands():
acmd = CommandMeta.adapt(sub)
schema = acmd.apply(schema, context)
self.pgops.add(acmd)
return schema
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
orig_schema = schema
schema = s_scalars.AlterScalarType.apply(self, schema, context)
new_scalar = self.scls
old_enum_values = new_scalar.get_enum_values(orig_schema)
new_enum_values = new_scalar.get_enum_values(schema)
# If values were deleted or reordered, we need to drop the enum
# and recreate it.
needs_recreate = (
old_enum_values != new_enum_values
and old_enum_values != new_enum_values[:len(old_enum_values)])
has_create_constraint = bool(
list(self.get_subcommands(type=s_constr.CreateConstraint)))
problematic_refs = None
if needs_recreate or has_create_constraint:
problematic_refs = self._get_problematic_refs(
schema, context, composite_only=not needs_recreate)
if problematic_refs:
other, props = problematic_refs
schema = self._undo_everything(schema, context, other, props)
schema = ScalarTypeMetaCommand.apply(self, schema, context)
if new_enum_values:
type_name = common.get_backend_name(
schema, new_scalar, catenate=False)
if needs_recreate:
self.pgops.add(
dbops.DropEnum(name=type_name))
self.pgops.add(dbops.CreateEnum(
dbops.Enum(name=type_name, values=new_enum_values)))
elif old_enum_values != new_enum_values:
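# Walk the old and new value lists in lockstep and emit
# ALTER TYPE ... ADD VALUE for each value missing from the old list,
# anchored with BEFORE when it is not simply appended at the end.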
old_idx = 0
old_enum_values = list(old_enum_values)
for v in new_enum_values:
if old_idx >= len(old_enum_values):
self.pgops.add(
dbops.AlterEnumAddValue(
type_name, v,
)
)
elif v != old_enum_values[old_idx]:
self.pgops.add(
dbops.AlterEnumAddValue(
type_name, v, before=old_enum_values[old_idx],
)
)
old_enum_values.insert(old_idx, v)
else:
old_idx += 1
if problematic_refs:
other, props = problematic_refs
schema = self._redo_everything(
schema, orig_schema, context, other, props)
default_delta = self.get_resolved_attribute_value(
'default',
schema=schema,
context=context,
)
if default_delta:
if (default_delta is None or
isinstance(default_delta, s_expr.Expression)):
new_default = None
else:
new_default = default_delta
domain_name = common.get_backend_name(
schema, new_scalar, catenate=False)
adad = dbops.AlterDomainAlterDefault(
name=domain_name, default=new_default)
self.pgops.add(adad)
return schema
class DeleteScalarType(ScalarTypeMetaCommand,
adapts=s_scalars.DeleteScalarType):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
orig_schema = schema
schema = s_scalars.DeleteScalarType.apply(self, schema, context)
scalar = self.scls
schema = ScalarTypeMetaCommand.apply(self, schema, context)
link = None
if context:
link = context.get(s_links.LinkCommandContext)
ops = link.op.pgops if link else self.pgops
old_domain_name = common.get_backend_name(
orig_schema, scalar, catenate=False)
# Domain dropping gets low priority since other things may
# depend on it.
if scalar.is_enum(orig_schema):
old_enum_name = common.get_backend_name(
orig_schema, scalar, catenate=False)
cond = dbops.EnumExists(old_enum_name)
ops.add(
dbops.DropEnum(
name=old_enum_name, conditions=[cond], priority=3))
else:
cond = dbops.DomainExists(old_domain_name)
ops.add(
dbops.DropDomain(
name=old_domain_name, conditions=[cond], priority=3))
if self.is_sequence(orig_schema, scalar):
seq_name = common.get_backend_name(
orig_schema, scalar, catenate=False, aspect='sequence')
self.pgops.add(dbops.DropSequence(name=seq_name))
return schema
class CompositeObjectMetaCommand(ObjectMetaCommand):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.table_name = None
self._multicommands = {}
self.update_search_indexes = None
def _get_multicommand(
self, context, cmdtype, object_name, *, priority=0,
force_new=False, manual=False, cmdkwargs=None):
if cmdkwargs is None:
cmdkwargs = {}
key = (object_name, priority, frozenset(cmdkwargs.items()))
try:
typecommands = self._multicommands[cmdtype]
except KeyError:
typecommands = self._multicommands[cmdtype] = {}
commands = typecommands.get(key)
if commands is None or force_new or manual:
command = cmdtype(object_name, priority=priority, **cmdkwargs)
if not manual:
try:
commands = typecommands[key]
except KeyError:
commands = typecommands[key] = []
commands.append(command)
else:
command = commands[-1]
return command
def _attach_multicommand(self, context, cmdtype):
try:
typecommands = self._multicommands[cmdtype]
except KeyError:
return
else:
commands = list(
itertools.chain.from_iterable(typecommands.values()))
if commands:
commands = sorted(commands, key=lambda i: i.priority)
self.pgops.update(commands)
def get_alter_table(
self, schema, context, priority=0, force_new=False,
contained=False, manual=False, table_name=None):
tabname = table_name if table_name else self.table_name
if not tabname:
ctx = context.get(self.__class__)
assert ctx
tabname = common.get_backend_name(schema, ctx.scls, catenate=False)
if table_name is None:
self.table_name = tabname
return self._get_multicommand(
context, dbops.AlterTable, tabname, priority=priority,
force_new=force_new, manual=manual,
cmdkwargs={'contained': contained})
def attach_alter_table(self, context):
self._attach_multicommand(context, dbops.AlterTable)
@classmethod
def get_source_and_pointer_ctx(cls, schema, context):
if context:
objtype = context.get(s_objtypes.ObjectTypeCommandContext)
link = context.get(s_links.LinkCommandContext)
else:
objtype = link = None
if objtype:
source, pointer = objtype, link
elif link:
property = context.get(s_props.PropertyCommandContext)
source, pointer = link, property
else:
source = pointer = None
return source, pointer
def schedule_inhviews_update(
self,
schema,
context,
obj,
*,
update_ancestors: Optional[bool]=None,
update_descendants: Optional[bool]=None,
):
self.pgops.add(
self.drop_inhview(
schema, context, obj, drop_ancestors=update_ancestors)
)
root = context.get(sd.DeltaRootContext).op
updates = root.update_inhviews.view_updates
update = updates.get(obj)
if update is None:
update = updates[obj] = InheritanceViewUpdate()
if update_ancestors is not None:
update.update_ancestors = update_ancestors
if update_descendants is not None:
update.update_descendants = update_descendants
def schedule_inhview_deletion(
self,
schema,
context,
obj,
):
root = context.get(sd.DeltaRootContext).op
updates = root.update_inhviews.view_updates
updates.pop(obj, None)
deletions = root.update_inhviews.view_deletions
deletions[obj] = schema
def update_base_inhviews(self, schema, context, obj):
for base in obj.get_bases(schema).objects(schema):
if not context.is_deleting(base):
self.schedule_inhviews_update(
schema, context, base, update_ancestors=True)
def update_lineage_inhviews(self, schema, context, obj):
self.schedule_inhviews_update(
schema, context, obj, update_ancestors=True)
def update_base_inhviews_on_rebase(
self,
schema,
orig_schema,
context,
obj,
):
bases = set(obj.get_bases(schema).objects(schema))
orig_bases = set(obj.get_bases(orig_schema).objects(orig_schema))
for new_base in bases - orig_bases:
self.schedule_inhviews_update(
schema, context, new_base, update_ancestors=True)
for old_base in orig_bases - bases:
self.schedule_inhviews_update(
schema, context, old_base, update_ancestors=True)
def drop_inhview(
self,
schema,
context,
obj,
*,
drop_ancestors=False,
) -> dbops.CommandGroup:
cmd = dbops.CommandGroup()
objs = [obj]
if drop_ancestors:
objs.extend(obj.get_ancestors(schema).objects(schema))
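        # Only objects that are backed by an actual table have inheritance
        # views; each DROP VIEW is guarded by a ViewExists condition so the
        # command group is safe to run even if the view is already gone.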
for obj in objs:
if not has_table(obj, schema):
continue
inhview_name = common.get_backend_name(
schema, obj, catenate=False, aspect='inhview')
cmd.add_command(
dbops.DropView(
inhview_name,
conditions=[dbops.ViewExists(inhview_name)],
),
)
return cmd
class IndexCommand(sd.ObjectCommand, metaclass=CommandMeta):
pass
class CreateIndex(IndexCommand, CreateObject, adapts=s_indexes.CreateIndex):
@classmethod
def create_index(cls, index, schema, context):
subject = index.get_subject(schema)
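        # Indexes defined directly on an object type are compiled with the
        # type as a singleton subject anchor; indexes on pointers get no
        # path prefix anchor.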
if not isinstance(subject, s_pointers.Pointer):
singletons = [subject]
path_prefix_anchor = ql_ast.Subject().name
else:
singletons = []
path_prefix_anchor = None
index_expr = index.get_expr(schema)
ir = index_expr.irast
if ir is None:
index_expr = type(index_expr).compiled(
index_expr,
schema=schema,
options=qlcompiler.CompilerOptions(
modaliases=context.modaliases,
schema_object_context=cls.get_schema_metaclass(),
anchors={ql_ast.Subject().name: subject},
path_prefix_anchor=path_prefix_anchor,
singletons=singletons,
apply_query_rewrites=not context.stdmode,
),
)
ir = index_expr.irast
table_name = common.get_backend_name(
schema, subject, catenate=False)
sql_tree = compiler.compile_ir_to_sql_tree(
ir.expr, singleton_mode=True)
sql_expr = codegen.SQLSourceGenerator.to_source(sql_tree)
if isinstance(sql_tree, pg_ast.ImplicitRowExpr):
            # Trim the parentheses to avoid PostgreSQL choking on double
            # parentheses, since it expects only a single set around the
            # column list.
sql_expr = sql_expr[1:-1]
module_name = index.get_name(schema).module
index_name = common.get_index_backend_name(
index.id, module_name, catenate=False)
pg_index = dbops.Index(
name=index_name[1], table_name=table_name, expr=sql_expr,
unique=False, inherit=True,
metadata={'schemaname': str(index.get_name(schema))})
return dbops.CreateIndex(pg_index, priority=3)
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = CreateObject.apply(self, schema, context)
index = self.scls
self.pgops.add(self.create_index(index, schema, context))
return schema
class RenameIndex(IndexCommand, RenameObject, adapts=s_indexes.RenameIndex):
pass
class AlterIndexOwned(
IndexCommand,
AlterObject,
adapts=s_indexes.AlterIndexOwned,
):
pass
class AlterIndex(IndexCommand, AlterObject, adapts=s_indexes.AlterIndex):
pass
class DeleteIndex(IndexCommand, DeleteObject, adapts=s_indexes.DeleteIndex):
@classmethod
def delete_index(cls, index, schema, context, priority=3):
subject = index.get_subject(schema)
table_name = common.get_backend_name(
schema, subject, catenate=False)
module_name = index.get_name(schema).module
orig_idx_name = common.get_index_backend_name(
index.id, module_name, catenate=False)
index = dbops.Index(
name=orig_idx_name[1], table_name=table_name, inherit=True)
index_exists = dbops.IndexExists(
(table_name[0], index.name_in_catalog))
conditions = (index_exists, ) if priority else ()
return dbops.DropIndex(
index, priority=priority, conditions=conditions)
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
orig_schema = schema
schema = DeleteObject.apply(self, schema, context)
index = self.scls
source = context.get(s_links.LinkCommandContext)
if not source:
source = context.get(s_objtypes.ObjectTypeCommandContext)
if not isinstance(source.op, sd.DeleteObject):
# We should not drop indexes when the host is being dropped since
# the indexes are dropped automatically in this case.
#
self.pgops.add(self.delete_index(index, orig_schema, context))
return schema
class RebaseIndex(
IndexCommand, RebaseObject,
adapts=s_indexes.RebaseIndex):
pass
class CreateUnionType(
MetaCommand,
adapts=s_types.CreateUnionType,
metaclass=CommandMeta,
):
def apply(self, schema, context):
schema = self.__class__.get_adaptee().apply(self, schema, context)
schema = ObjectMetaCommand.apply(self, schema, context)
return schema
class ObjectTypeMetaCommand(AliasCapableObjectMetaCommand,
CompositeObjectMetaCommand):
def schedule_endpoint_delete_action_update(self, obj, schema, context):
endpoint_delete_actions = context.get(
sd.DeltaRootContext).op.update_endpoint_delete_actions
changed_targets = endpoint_delete_actions.changed_targets
changed_targets.add((self, obj))
class CreateObjectType(ObjectTypeMetaCommand,
adapts=s_objtypes.CreateObjectType):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_objtypes.CreateObjectType.apply(self, schema, context)
schema = ObjectTypeMetaCommand.apply(self, schema, context)
objtype = self.scls
if objtype.is_compound_type(schema) or objtype.get_is_derived(schema):
return schema
self.update_lineage_inhviews(schema, context, objtype)
self.attach_alter_table(context)
if self.update_search_indexes:
schema = self.update_search_indexes.apply(schema, context)
self.pgops.add(self.update_search_indexes)
self.schedule_endpoint_delete_action_update(self.scls, schema, context)
return schema
def _create_begin(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super()._create_begin(schema, context)
objtype = self.scls
if objtype.is_compound_type(schema) or objtype.get_is_derived(schema):
return schema
new_table_name = common.get_backend_name(
schema, self.scls, catenate=False)
self.table_name = new_table_name
columns = []
token_col = dbops.Column(
name='__edb_token', type='uuid', required=False)
columns.append(token_col)
objtype_table = dbops.Table(name=new_table_name, columns=columns)
self.pgops.add(dbops.CreateTable(table=objtype_table))
self.pgops.add(dbops.Comment(
object=objtype_table,
text=str(objtype.get_verbosename(schema)),
))
return schema
class RenameObjectType(ObjectTypeMetaCommand, RenameObject,
adapts=s_objtypes.RenameObjectType):
pass
class RebaseObjectType(ObjectTypeMetaCommand,
adapts=s_objtypes.RebaseObjectType):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
orig_schema = schema
schema = s_objtypes.RebaseObjectType.apply(self, schema, context)
result = self.scls
schema = ObjectTypeMetaCommand.apply(self, schema, context)
if has_table(result, schema):
self.update_base_inhviews_on_rebase(
schema, orig_schema, context, self.scls)
self.schedule_endpoint_delete_action_update(self.scls, schema, context)
return schema
class AlterObjectType(ObjectTypeMetaCommand,
adapts=s_objtypes.AlterObjectType):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_objtypes.AlterObjectType.apply(
self, schema, context=context)
objtype = self.scls
self.table_name = common.get_backend_name(
schema, objtype, catenate=False)
schema = ObjectTypeMetaCommand.apply(self, schema, context)
if has_table(objtype, schema):
self.attach_alter_table(context)
if self.update_search_indexes:
schema = self.update_search_indexes.apply(schema, context)
self.pgops.add(self.update_search_indexes)
return schema
class DeleteObjectType(ObjectTypeMetaCommand,
adapts=s_objtypes.DeleteObjectType):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
self.scls = objtype = schema.get(self.classname)
old_table_name = common.get_backend_name(
schema, objtype, catenate=False)
orig_schema = schema
schema = ObjectTypeMetaCommand.apply(self, schema, context)
schema = s_objtypes.DeleteObjectType.apply(self, schema, context)
if has_table(objtype, orig_schema):
self.attach_alter_table(context)
self.pgops.add(dbops.DropTable(name=old_table_name, priority=3))
self.update_base_inhviews(orig_schema, context, objtype)
self.schedule_inhview_deletion(orig_schema, context, objtype)
return schema
class SchedulePointerCardinalityUpdate(MetaCommand):
pass
class CancelPointerCardinalityUpdate(MetaCommand):
pass
class PointerMetaCommand(MetaCommand, sd.ObjectCommand,
metaclass=CommandMeta):
def get_host(self, schema, context):
if context:
link = context.get(s_links.LinkCommandContext)
if link and isinstance(self, s_props.PropertyCommand):
return link
objtype = context.get(s_objtypes.ObjectTypeCommandContext)
if objtype:
return objtype
def alter_host_table_column(self, ptr, schema, orig_schema, context):
old_target = ptr.get_target(orig_schema)
new_target = ptr.get_target(schema)
alter_table = context.get(
s_objtypes.ObjectTypeCommandContext).op.get_alter_table(
schema, context, priority=1)
ptr_stor_info = types.get_pointer_storage_info(ptr, schema=schema)
if isinstance(new_target, s_scalars.ScalarType):
target_type = types.pg_type_from_object(schema, new_target)
if isinstance(old_target, s_scalars.ScalarType):
alter_type = dbops.AlterTableAlterColumnType(
ptr_stor_info.column_name, common.qname(*target_type))
alter_table.add_operation(alter_type)
else:
cols = self.get_columns(ptr, schema)
ops = [dbops.AlterTableAddColumn(col) for col in cols]
for op in ops:
alter_table.add_operation(op)
else:
col = dbops.Column(
name=ptr_stor_info.column_name,
type=ptr_stor_info.column_type)
alter_table.add_operation(dbops.AlterTableDropColumn(col))
def get_pointer_default(self, ptr, schema, context):
if ptr.is_pure_computable(schema):
return None
default = self.get_resolved_attribute_value(
'default',
schema=schema,
context=context,
)
default_value = None
if default is not None:
if isinstance(default, s_expr.Expression):
default_value = schemamech.ptr_default_to_col_default(
schema, ptr, default)
else:
default_value = common.quote_literal(
str(default))
elif (tgt := ptr.get_target(schema)) and tgt.issubclass(
schema, schema.get('std::sequence')):
# TODO: replace this with a generic scalar type default
# using std::nextval().
seq_name = common.quote_literal(
common.get_backend_name(
schema, ptr.get_target(schema), aspect='sequence'))
default_value = f'nextval({seq_name}::regclass)'
return default_value
def alter_pointer_default(self, pointer, orig_schema, schema, context):
default_value = self.get_pointer_default(pointer, schema, context)
if default_value is None and not (
not orig_schema
or pointer.get_default(orig_schema)
or (tgt := pointer.get_target(orig_schema)) and tgt.issubclass(
orig_schema, schema.get('std::sequence'))
):
return
source_ctx = context.get_ancestor(
s_sources.SourceCommandContext, self)
alter_table = source_ctx.op.get_alter_table(
schema, context, contained=True, priority=0)
ptr_stor_info = types.get_pointer_storage_info(
pointer, schema=schema)
alter_table.add_operation(
dbops.AlterTableAlterColumnDefault(
column_name=ptr_stor_info.column_name,
default=default_value))
@classmethod
def get_columns(cls, pointer, schema, default=None, sets_required=False):
ptr_stor_info = types.get_pointer_storage_info(pointer, schema=schema)
col_type = list(ptr_stor_info.column_type)
if col_type[-1].endswith('[]'):
# Array
col_type[-1] = col_type[-1][:-2]
col_type = common.qname(*col_type) + '[]'
else:
col_type = common.qname(*col_type)
return [
dbops.Column(
name=ptr_stor_info.column_name,
type=col_type,
required=(
(
pointer.get_required(schema)
and not pointer.is_pure_computable(schema)
and not sets_required
) or (
ptr_stor_info.table_type == 'link'
and not pointer.is_link_property(schema)
)
),
default=default,
comment=str(pointer.get_shortname(schema)),
),
]
def create_table(self, ptr, schema, context, conditional=False):
c = self._create_table(ptr, schema, context, conditional=conditional)
self.pgops.add(c)
def provide_table(self, ptr, schema, context):
if has_table(ptr, schema):
self.create_table(ptr, schema, context, conditional=True)
self.update_lineage_inhviews(schema, context, ptr)
return True
else:
return False
def _alter_pointer_cardinality(
self,
schema: s_schema.Schema,
orig_schema: s_schema.Schema,
context: sd.CommandContext,
) -> None:
ptr = self.scls
ptr_stor_info = types.get_pointer_storage_info(ptr, schema=schema)
old_ptr_stor_info = types.get_pointer_storage_info(
ptr, schema=orig_schema)
ptr_table = ptr_stor_info.table_type == 'link'
is_lprop = ptr.is_link_property(schema)
is_multi = ptr_table and not is_lprop
is_required = ptr.get_required(schema)
ref_op = self.get_referrer_context_or_die(context).op
if is_multi:
if isinstance(self, sd.AlterObjectFragment):
source_op = self.get_parent_op(context)
else:
source_op = self
else:
source_op = ref_op
# Ignore cardinality changes resulting from the creation of
# an overloaded pointer as there is no data yet.
if isinstance(source_op, sd.CreateObject):
return
if self.conv_expr is not None:
_, conv_sql_expr, orig_rel_alias, _ = (
self._compile_conversion_expr(
pointer=ptr,
conv_expr=self.conv_expr,
schema=schema,
orig_schema=orig_schema,
context=context,
orig_rel_is_always_source=True,
target_as_singleton=False,
)
)
if is_lprop:
obj_id_ref = f'{qi(orig_rel_alias)}.source'
else:
obj_id_ref = f'{qi(orig_rel_alias)}.id'
if is_required and not is_multi:
conv_sql_expr = textwrap.dedent(f'''\
edgedb.raise_on_null(
({conv_sql_expr}),
'not_null_violation',
msg => 'missing value for required property',
detail => '{{"object_id": "' || {obj_id_ref} || '"}}',
"column" => {ql(str(ptr.id))}
)
''')
else:
orig_rel_alias = f'alias_{uuidgen.uuid1mc()}'
if not is_multi:
raise AssertionError(
'explicit conversion expression was expected'
' for multi->single transition'
)
else:
# single -> multi
conv_sql_expr = (
f'SELECT '
f'{qi(orig_rel_alias)}.{qi(old_ptr_stor_info.column_name)}'
)
tab = q(*ptr_stor_info.table_name)
target_col = ptr_stor_info.column_name
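        # A cardinality change moves existing data between the two storage
        # layouts: multi -> single pulls values from the pointer table into a
        # column on the source table, while single -> multi copies the column
        # into rows of the pointer table.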
if not is_multi:
# Moving from pointer table to source table.
cols = self.get_columns(ptr, schema)
alter_table = source_op.get_alter_table(
schema, context, manual=True)
for col in cols:
cond = dbops.ColumnExists(
ptr_stor_info.table_name,
column_name=col.name,
)
op = (dbops.AlterTableAddColumn(col), None, (cond, ))
alter_table.add_operation(op)
self.pgops.add(alter_table)
update_qry = textwrap.dedent(f'''\
UPDATE {tab} AS {qi(orig_rel_alias)}
SET {qi(target_col)} = ({conv_sql_expr})
''')
self.pgops.add(dbops.Query(update_qry))
if not has_table(ptr, schema):
self.pgops.add(
self.drop_inhview(
orig_schema,
context,
source_op.scls,
drop_ancestors=True,
),
)
self.pgops.add(
self.drop_inhview(
orig_schema,
context,
ptr,
drop_ancestors=True
),
)
otabname = common.get_backend_name(
orig_schema, ptr, catenate=False)
condition = dbops.TableExists(name=otabname)
dt = dbops.DropTable(name=otabname, conditions=[condition])
self.pgops.add(dt)
self.schedule_inhviews_update(
schema,
context,
source_op.scls,
update_descendants=True,
)
else:
# Moving from source table to pointer table.
self.provide_table(ptr, schema, context)
source = ptr.get_source(orig_schema)
src_tab = q(*common.get_backend_name(
orig_schema,
source,
catenate=False,
))
update_qry = textwrap.dedent(f'''\
INSERT INTO {tab} (source, target)
(
SELECT
{qi(orig_rel_alias)}.id,
q.val
FROM
{src_tab} AS {qi(orig_rel_alias)},
LATERAL (
{conv_sql_expr}
) AS q(val)
WHERE
q.val IS NOT NULL
)
ON CONFLICT (source, target) DO NOTHING
''')
self.pgops.add(dbops.Query(update_qry))
check_qry = textwrap.dedent(f'''\
SELECT
edgedb.raise(
NULL::text,
'not_null_violation',
msg => 'missing value for required property',
detail => '{{"object_id": "' || id || '"}}',
"column" => {ql(str(ptr.id))}
)
FROM {src_tab}
WHERE id != ALL (SELECT source FROM {tab})
LIMIT 1
INTO _dummy_text;
''')
self.pgops.add(dbops.Query(check_qry))
self.pgops.add(
self.drop_inhview(
orig_schema,
context,
ref_op.scls,
drop_ancestors=True,
),
)
ref_op = self.get_referrer_context_or_die(context).op
alter_table = ref_op.get_alter_table(
schema, context, manual=True)
col = dbops.Column(
name=old_ptr_stor_info.column_name,
type=common.qname(*old_ptr_stor_info.column_type),
)
alter_table.add_operation(dbops.AlterTableDropColumn(col))
self.pgops.add(alter_table)
self.schedule_inhviews_update(
schema,
context,
ref_op.scls,
update_descendants=True,
update_ancestors=True,
)
def _alter_pointer_optionality(
self,
schema: s_schema.Schema,
orig_schema: s_schema.Schema,
context: sd.CommandContext,
*,
fill_expr: Optional[s_expr.Expression],
) -> None:
new_required = self.scls.get_required(schema)
ptr = self.scls
ptr_stor_info = types.get_pointer_storage_info(ptr, schema=schema)
ptr_table = ptr_stor_info.table_type == 'link'
is_lprop = ptr.is_link_property(schema)
is_multi = ptr_table and not is_lprop
is_required = ptr.get_required(schema)
source_ctx = self.get_referrer_context_or_die(context)
source_op = source_ctx.op
# Ignore optionality changes resulting from the creation of
# an overloaded pointer as there is no data yet.
if isinstance(source_op, sd.CreateObject):
return
ops = dbops.CommandGroup(priority=1)
# For multi pointers, if there is no fill expression, we
# synthesize a bogus one so that an error will trip if there
# are any objects with empty values.
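        # The synthesized expression is effectively an empty set cast to the
        # target type (e.g. <str>{}), which inserts nothing and thus lets the
        # required-violation check below fire for sources with no rows.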
if fill_expr is None and is_multi:
if (
ptr.get_cardinality(schema).is_multi()
and fill_expr is None
and (target := ptr.get_target(schema))
):
fill_ast = ql_ast.TypeCast(
expr=ql_ast.Set(elements=[]),
type=s_utils.typeref_to_ast(schema, target),
)
fill_expr = s_expr.Expression.from_ast(
qltree=fill_ast, schema=schema
)
if fill_expr is not None:
_, fill_sql_expr, orig_rel_alias, _ = (
self._compile_conversion_expr(
pointer=ptr,
conv_expr=fill_expr,
schema=schema,
orig_schema=orig_schema,
context=context,
orig_rel_is_always_source=True,
)
)
if is_lprop:
obj_id_ref = f'{qi(orig_rel_alias)}.source'
else:
obj_id_ref = f'{qi(orig_rel_alias)}.id'
if is_required and not is_multi:
fill_sql_expr = textwrap.dedent(f'''\
edgedb.raise_on_null(
({fill_sql_expr}),
'not_null_violation',
msg => 'missing value for required property',
detail => '{{"object_id": "' || {obj_id_ref} || '"}}',
"column" => {ql(str(ptr.id))}
)
''')
tab = q(*ptr_stor_info.table_name)
target_col = ptr_stor_info.column_name
if not is_multi:
# For singleton pointers we simply update the
# requisite column of the host source in every
# row where it is NULL.
update_qry = textwrap.dedent(f'''\
UPDATE {tab} AS {qi(orig_rel_alias)}
SET {qi(target_col)} = ({fill_sql_expr})
WHERE {qi(target_col)} IS NULL
''')
ops.add_command(dbops.Query(update_qry))
else:
# For multi pointers we have to INSERT the
# result of USING into the link table for
# every source object that has _no entries_
# in said link table.
source = ptr.get_source(orig_schema)
src_tab = q(*common.get_backend_name(
orig_schema,
source,
catenate=False,
))
update_qry = textwrap.dedent(f'''\
INSERT INTO {tab} (source, target)
(
SELECT
{qi(orig_rel_alias)}.id,
q.val
FROM
(
SELECT *
FROM {src_tab}
WHERE id != ALL (
SELECT source FROM {tab}
)
) AS {qi(orig_rel_alias)},
LATERAL (
{fill_sql_expr}
) AS q(val)
WHERE
q.val IS NOT NULL
)
''')
ops.add_command(dbops.Query(update_qry))
check_qry = textwrap.dedent(f'''\
SELECT
edgedb.raise(
NULL::text,
'not_null_violation',
msg => 'missing value for required property',
detail => '{{"object_id": "' || id || '"}}',
"column" => {ql(str(ptr.id))}
)
FROM {src_tab}
WHERE id != ALL (SELECT source FROM {tab})
LIMIT 1
INTO _dummy_text;
''')
ops.add_command(dbops.Query(check_qry))
if not ptr_table or is_lprop:
alter_table = source_op.get_alter_table(
schema,
context,
manual=True,
)
alter_table.add_operation(
dbops.AlterTableAlterColumnNull(
column_name=ptr_stor_info.column_name,
null=not new_required,
)
)
ops.add_command(alter_table)
self.pgops.add(ops)
def _drop_constraints(self, pointer, schema, context):
# We need to be able to drop all the constraints referencing a
# pointer before modifying its type, and then recreate them
# once the change is done.
# We look at all referrers to the pointer (and not just the
# constraints directly on the pointer) because we want to
# pick up object constraints that reference it as well.
for cnstr in schema.get_referrers(
pointer, scls_type=s_constr.Constraint):
self.pgops.add(
ConstraintCommand.delete_constraint(cnstr, schema, context))
def _recreate_constraints(self, pointer, schema, context):
for cnstr in schema.get_referrers(
pointer, scls_type=s_constr.Constraint):
self.pgops.add(
ConstraintCommand.create_constraint(cnstr, schema, context))
def _alter_pointer_type(self, pointer, schema, orig_schema, context):
old_ptr_stor_info = types.get_pointer_storage_info(
pointer, schema=orig_schema)
new_target = pointer.get_target(schema)
ptr_table = old_ptr_stor_info.table_type == 'link'
is_link = isinstance(pointer, s_links.Link)
is_lprop = pointer.is_link_property(schema)
is_multi = ptr_table and not is_lprop
is_required = pointer.get_required(schema)
changing_col_type = not is_link
source_ctx = self.get_referrer_context_or_die(context)
if is_multi:
if isinstance(self, sd.AlterObjectFragment):
source_op = self.get_parent_op(context)
else:
source_op = self
else:
source_op = source_ctx.op
        # Ignore type narrowing resulting from the creation of a subtype
        # as there isn't any data in the link yet.
if is_link and isinstance(source_ctx.op, sd.CreateObject):
return
new_target = pointer.get_target(schema)
orig_target = pointer.get_target(orig_schema)
new_type = types.pg_type_from_object(
schema, new_target, persistent_tuples=True)
source = source_op.scls
using_eql_expr = self.cast_expr
# For links, when the new type is a supertype of the old, no
# SQL-level changes are necessary, unless an explicit conversion
# expression was specified.
if (
is_link
and using_eql_expr is None
and orig_target.issubclass(orig_schema, new_target)
):
return
# We actually have work to do, so drop any constraints we have
self._drop_constraints(pointer, schema, context)
if using_eql_expr is None and not is_link:
# A lack of an explicit EdgeQL conversion expression means
# that the new type is assignment-castable from the old type
# in the EdgeDB schema. BUT, it would not necessarily be
# assignment-castable in Postgres, especially if the types are
# compound. Thus, generate an explicit cast expression.
pname = pointer.get_shortname(schema).name
using_eql_expr = s_expr.Expression.from_ast(
ql_ast.TypeCast(
expr=ql_ast.Path(
partial=True,
steps=[
ql_ast.Ptr(
ptr=ql_ast.ObjectRef(name=pname),
type='property' if is_lprop else None,
),
],
),
type=s_utils.typeref_to_ast(schema, new_target),
),
schema=orig_schema,
)
        # There are two major possibilities about the USING clause:
# 1) trivial case, where the USING clause refers only to the
# columns of the source table, in which case we simply compile that
# into an equivalent SQL USING clause, and 2) complex case, which
# supports arbitrary queries, but requires a temporary column,
# which is populated with the transition query and then used as the
# source for the SQL USING clause.
using_eql_expr, using_sql_expr, orig_rel_alias, sql_expr_is_trivial = (
self._compile_conversion_expr(
pointer=pointer,
conv_expr=using_eql_expr,
schema=schema,
orig_schema=orig_schema,
context=context,
)
)
expr_is_nullable = using_eql_expr.cardinality.can_be_zero()
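        # A temporary column is needed whenever the converted values cannot
        # be produced inline by ALTER COLUMN ... USING: either the conversion
        # is a non-trivial query, or NULLs may appear in a multi pointer and
        # have to be cleaned out before the type is switched.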
need_temp_col = (
(is_multi and expr_is_nullable)
or (changing_col_type and not sql_expr_is_trivial)
)
if changing_col_type:
self.pgops.add(source_op.drop_inhview(
schema,
context,
source,
drop_ancestors=True,
))
tab = q(*old_ptr_stor_info.table_name)
target_col = old_ptr_stor_info.column_name
aux_ptr_table = None
aux_ptr_col = None
if is_link:
old_lb_ptr_stor_info = types.get_pointer_storage_info(
pointer, link_bias=True, schema=orig_schema)
if (
old_lb_ptr_stor_info is not None
and old_lb_ptr_stor_info.table_type == 'link'
):
aux_ptr_table = old_lb_ptr_stor_info.table_name
aux_ptr_col = old_lb_ptr_stor_info.column_name
if not sql_expr_is_trivial:
if need_temp_col:
alter_table = source_op.get_alter_table(
schema, context, priority=0, force_new=True, manual=True)
temp_column = dbops.Column(
name=f'??{pointer.id}_{common.get_unique_random_name()}',
type=qt(new_type),
)
alter_table.add_operation(
dbops.AlterTableAddColumn(temp_column))
self.pgops.add(alter_table)
target_col = temp_column.name
if is_multi:
obj_id_ref = f'{qi(orig_rel_alias)}.source'
else:
obj_id_ref = f'{qi(orig_rel_alias)}.id'
if is_required and not is_multi:
using_sql_expr = textwrap.dedent(f'''\
edgedb.raise_on_null(
({using_sql_expr}),
'not_null_violation',
msg => 'missing value for required property',
detail => '{{"object_id": "' || {obj_id_ref} || '"}}',
"column" => {ql(str(pointer.id))}
)
''')
update_qry = textwrap.dedent(f'''\
UPDATE {tab} AS {qi(orig_rel_alias)}
SET {qi(target_col)} = ({using_sql_expr})
''')
self.pgops.add(dbops.Query(update_qry))
actual_using_expr = qi(target_col)
else:
actual_using_expr = using_sql_expr
if changing_col_type or need_temp_col:
alter_table = source_op.get_alter_table(
schema, context, priority=0, force_new=True, manual=True)
if is_multi:
# Remove all rows where the conversion expression produced NULLs.
col = qi(target_col)
if pointer.get_required(schema):
clean_nulls = dbops.Query(textwrap.dedent(f'''\
WITH d AS (
DELETE FROM {tab} WHERE {col} IS NULL RETURNING source
)
SELECT
edgedb.raise(
NULL::text,
'not_null_violation',
msg => 'missing value for required property',
detail => '{{"object_id": "' || l.source || '"}}',
"column" => {ql(str(pointer.id))}
)
FROM
{tab} AS l
WHERE
l.source IN (SELECT source FROM d)
AND True = ALL (
SELECT {col} IS NULL
FROM {tab} AS l2
WHERE l2.source = l.source
)
LIMIT
1
INTO _dummy_text;
'''))
else:
clean_nulls = dbops.Query(textwrap.dedent(f'''\
DELETE FROM {tab} WHERE {col} IS NULL
'''))
self.pgops.add(clean_nulls)
elif aux_ptr_table is not None:
# SINGLE links with link properties are represented in
# _two_ tables (the host type table and a link table with
# properties), and we must update both.
actual_col = qi(old_ptr_stor_info.column_name)
if expr_is_nullable and not is_required:
cleanup_qry = textwrap.dedent(f'''\
DELETE FROM {q(*aux_ptr_table)} AS aux
USING {tab} AS main
WHERE
main.id = aux.source
AND {actual_col} IS NULL
''')
self.pgops.add(dbops.Query(cleanup_qry))
update_qry = textwrap.dedent(f'''\
UPDATE {q(*aux_ptr_table)} AS aux
SET {qi(aux_ptr_col)} = main.{actual_col}
FROM {tab} AS main
WHERE
main.id = aux.source
''')
self.pgops.add(dbops.Query(update_qry))
if changing_col_type:
alter_type = dbops.AlterTableAlterColumnType(
old_ptr_stor_info.column_name,
common.quote_type(new_type),
using_expr=actual_using_expr,
)
alter_table.add_operation(alter_type)
elif need_temp_col:
move_data = dbops.Query(textwrap.dedent(f'''\
UPDATE
{q(*old_ptr_stor_info.table_name)} AS {qi(orig_rel_alias)}
SET
{qi(old_ptr_stor_info.column_name)} = ({qi(target_col)})
'''))
self.pgops.add(move_data)
if need_temp_col:
alter_table.add_operation(dbops.AlterTableDropColumn(temp_column))
if changing_col_type or need_temp_col:
self.pgops.add(alter_table)
self._recreate_constraints(pointer, schema, context)
if changing_col_type:
self.schedule_inhviews_update(
schema,
context,
source,
update_descendants=True,
update_ancestors=True,
)
def _compile_conversion_expr(
self,
*,
pointer: s_pointers.Pointer,
conv_expr: s_expr.Expression,
schema: s_schema.Schema,
orig_schema: s_schema.Schema,
context: sd.CommandContext,
orig_rel_is_always_source: bool = False,
target_as_singleton: bool = True,
) -> Tuple[
s_expr.Expression, # Possibly-amended EdgeQL conversion expression
str, # SQL text
str, # original relation alias
bool, # whether SQL expression is trivial
]:
old_ptr_stor_info = types.get_pointer_storage_info(
pointer, schema=orig_schema)
ptr_table = old_ptr_stor_info.table_type == 'link'
is_link = isinstance(pointer, s_links.Link)
is_lprop = pointer.is_link_property(schema)
is_multi = ptr_table and not is_lprop
is_required = pointer.get_required(schema)
new_target = pointer.get_target(schema)
expr_is_trivial = False
if conv_expr.irast is not None:
ir = conv_expr.irast
else:
conv_expr = self._compile_expr(
orig_schema,
context,
conv_expr,
target_as_singleton=target_as_singleton,
)
ir = conv_expr.irast
assert ir is not None
if ir.stype != new_target and not is_link:
# The result of an EdgeQL USING clause does not match
# the target type exactly, but is castable. Like in the
# case of an empty USING clause, we still have to make
            # an explicit EdgeQL cast rather than rely on Postgres
# casting.
conv_expr = self._compile_expr(
orig_schema,
context,
s_expr.Expression.from_ast(
ql_ast.TypeCast(
expr=conv_expr.qlast,
type=s_utils.typeref_to_ast(schema, new_target),
),
schema=orig_schema,
),
target_as_singleton=target_as_singleton,
)
ir = conv_expr.irast
expr_is_nullable = conv_expr.cardinality.can_be_zero()
refs = irutils.get_longest_paths(ir.expr)
ref_tables = schemamech.get_ref_storage_info(ir.schema, refs)
local_table_only = all(
t == old_ptr_stor_info.table_name
for t in ref_tables
)
# TODO: implement IR complexity inference
can_translate_to_sql_value_expr = False
expr_is_trivial = (
# Only allow trivial USING if we can compile the
# EdgeQL expression into a trivial SQL value expression.
can_translate_to_sql_value_expr
# No link expr is trivially translatable into
# a USING SQL clause.
and not is_link
# SQL SET TYPE cannot contain references
# outside of the local table.
and local_table_only
# Changes to a multi-pointer might involve contraction of
            # the overall cardinality, i.e. the deletion of some rows.
and not is_multi
# If the property is required, and the USING expression
# was not proven by the compiler to not return ZERO, we
# must inject an explicit NULL guard, as the SQL null
# violation error is very nondescript in the context of
# a table rewrite, making it hard to pinpoint the failing
# object.
and (not is_required or not expr_is_nullable)
)
alias = f'alias_{uuidgen.uuid1mc()}'
if not expr_is_trivial:
# Non-trivial conversion expression means that we
# are compiling a full-blown EdgeQL statement as
# opposed to compiling a scalar fragment in trivial
# expression mode.
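            # Map the IR paths of the pointer and its source onto the
            # relation referenced by `alias`, so that the compiled statement
            # reads existing data from the original relation supplied by the
            # enclosing UPDATE/INSERT rather than resolving them itself.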
external_rvars = {}
if is_lprop:
tgt_path_id = irpathid.PathId.from_pointer(
orig_schema,
pointer,
).src_path()
else:
tgt_path_id = irpathid.PathId.from_pointer(
orig_schema,
pointer,
)
ptr_path_id = tgt_path_id.ptr_path()
src_path_id = ptr_path_id.src_path()
if ptr_table and not orig_rel_is_always_source:
rvar = compiler.new_external_rvar(
rel_name=(alias,),
path_id=ptr_path_id,
outputs={
(src_path_id, ('identity',)): 'source',
},
)
external_rvars[ptr_path_id, 'source'] = rvar
external_rvars[ptr_path_id, 'value'] = rvar
external_rvars[src_path_id, 'identity'] = rvar
if local_table_only and not is_lprop:
external_rvars[src_path_id, 'source'] = rvar
external_rvars[src_path_id, 'value'] = rvar
elif is_lprop:
external_rvars[tgt_path_id, 'identity'] = rvar
external_rvars[tgt_path_id, 'value'] = rvar
else:
src_rvar = compiler.new_external_rvar(
rel_name=(alias,),
path_id=src_path_id,
outputs={},
)
external_rvars[src_path_id, 'identity'] = src_rvar
external_rvars[src_path_id, 'value'] = src_rvar
external_rvars[src_path_id, 'source'] = src_rvar
else:
external_rvars = None
sql_tree = compiler.compile_ir_to_sql_tree(
ir,
output_format=compiler.OutputFormat.NATIVE_INTERNAL,
singleton_mode=expr_is_trivial,
external_rvars=external_rvars,
)
sql_text = codegen.generate_source(sql_tree)
return (conv_expr, sql_text, alias, expr_is_trivial)
class LinkMetaCommand(CompositeObjectMetaCommand, PointerMetaCommand):
@classmethod
def _create_table(
cls, link, schema, context, conditional=False, create_bases=True,
create_children=True):
new_table_name = common.get_backend_name(schema, link, catenate=False)
create_c = dbops.CommandGroup()
constraints = []
columns = []
src_col = 'source'
tgt_col = 'target'
columns.append(
dbops.Column(
name=src_col, type='uuid', required=True))
columns.append(
dbops.Column(
name=tgt_col, type='uuid', required=True))
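        # Every link row records a (source, target) pair of object ids, and a
        # given pair may appear at most once.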
constraints.append(
dbops.UniqueConstraint(
table_name=new_table_name,
columns=[src_col, tgt_col]))
if not link.generic(schema) and link.scalar():
tgt_prop = link.getptr(schema, 'target')
tgt_ptr = types.get_pointer_storage_info(
tgt_prop, schema=schema)
columns.append(
dbops.Column(
name=tgt_ptr.column_name,
type=common.qname(*tgt_ptr.column_type)))
table = dbops.Table(name=new_table_name)
table.add_columns(columns)
table.constraints = constraints
ct = dbops.CreateTable(table=table)
index_name = common.edgedb_name_to_pg_name(
str(link.get_name(schema)) + 'target_id_default_idx')
index = dbops.Index(index_name, new_table_name, unique=False)
index.add_columns([tgt_col])
ci = dbops.CreateIndex(index)
if conditional:
c = dbops.CommandGroup(
neg_conditions=[dbops.TableExists(new_table_name)])
else:
c = dbops.CommandGroup()
c.add_command(ct)
c.add_command(ci)
c.add_command(dbops.Comment(table, str(link.get_name(schema))))
create_c.add_command(c)
if create_children:
for l_descendant in link.descendants(schema):
if has_table(l_descendant, schema):
lc = LinkMetaCommand._create_table(
l_descendant, schema, context, conditional=True,
create_bases=False, create_children=False)
create_c.add_command(lc)
return create_c
def schedule_endpoint_delete_action_update(
self, link, orig_schema, schema, context):
endpoint_delete_actions = context.get(
sd.DeltaRootContext).op.update_endpoint_delete_actions
link_ops = endpoint_delete_actions.link_ops
if isinstance(self, sd.DeleteObject):
for i, (_, ex_link, _) in enumerate(link_ops):
if ex_link == link:
link_ops.pop(i)
break
link_ops.append((self, link, orig_schema))
class CreateLink(LinkMetaCommand, adapts=s_links.CreateLink):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
# Need to do this early, since potential table alters triggered by
# sub-commands need this.
orig_schema = schema
schema = s_links.CreateLink.apply(self, schema, context)
link = self.scls
self.table_name = common.get_backend_name(schema, link, catenate=False)
schema = LinkMetaCommand.apply(self, schema, context)
self.provide_table(link, schema, context)
objtype = context.get(s_objtypes.ObjectTypeCommandContext)
extra_ops = []
source = link.get_source(schema)
if source is not None:
source_is_view = (
source.is_view(schema)
or source.is_compound_type(schema)
or source.get_is_derived(schema)
)
else:
source_is_view = None
if source is not None and not source_is_view:
ptr_stor_info = types.get_pointer_storage_info(
link, resolve_type=False, schema=schema)
sets_required = bool(
self.get_subcommands(
type=s_pointers.AlterPointerLowerCardinality))
if ptr_stor_info.table_type == 'ObjectType':
default_value = self.get_pointer_default(link, schema, context)
cols = self.get_columns(
link, schema, default_value, sets_required)
table_name = common.get_backend_name(
schema, objtype.scls, catenate=False)
objtype_alter_table = objtype.op.get_alter_table(
schema, context)
for col in cols:
cmd = dbops.AlterTableAddColumn(col)
objtype_alter_table.add_operation(cmd)
if col.name == '__type__':
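                        # The __type__ column is pinned to this object
                        # type's id with a CHECK constraint on the host
                        # table.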
constr_name = common.edgedb_name_to_pg_name(
str(objtype.op.classname) + '.class_check')
constr_expr = dbops.Query(textwrap.dedent(f"""\
SELECT
'"__type__" = ' ||
quote_literal({ql(str(objtype.scls.id))})
"""), type='text')
cid_constraint = dbops.CheckConstraint(
self.table_name,
constr_name,
constr_expr,
inherit=False,
)
objtype_alter_table.add_operation(
dbops.AlterTableAddConstraint(cid_constraint),
)
if default_value is not None:
self.alter_pointer_default(link, None, schema, context)
index_name = common.get_backend_name(
schema, link, catenate=False, aspect='index'
)[1]
pg_index = dbops.Index(
name=index_name, table_name=table_name,
unique=False, columns=[c.name for c in cols],
inherit=True)
ci = dbops.CreateIndex(pg_index, priority=3)
extra_ops.append(ci)
self.update_lineage_inhviews(schema, context, link)
self.schedule_inhviews_update(
schema,
context,
source,
update_descendants=True,
)
# If we're creating a required multi pointer without a SET
# REQUIRED USING inside, run the alter_pointer_optionality
# path to produce an error if there is existing data.
if (
link.get_cardinality(schema).is_multi()
and link.get_required(schema)
and not sets_required
):
self._alter_pointer_optionality(
schema, schema, context, fill_expr=None)
objtype = context.get(s_objtypes.ObjectTypeCommandContext)
self.attach_alter_table(context)
self.pgops.update(extra_ops)
if (source is not None and not source_is_view
and not link.is_pure_computable(schema)):
self.schedule_endpoint_delete_action_update(
link, orig_schema, schema, context)
return schema
class RenameLink(LinkMetaCommand, RenameObject, adapts=s_links.RenameLink):
pass
class RebaseLink(LinkMetaCommand, adapts=s_links.RebaseLink):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
orig_schema = schema
schema = s_links.RebaseLink.apply(self, schema, context)
schema = LinkMetaCommand.apply(self, schema, context)
link_ctx = context.get(s_links.LinkCommandContext)
source = link_ctx.scls
if has_table(source, schema):
self.update_base_inhviews_on_rebase(
schema, orig_schema, context, source)
if not source.is_pure_computable(schema):
self.schedule_endpoint_delete_action_update(
source, orig_schema, schema, context)
return schema
class SetLinkType(LinkMetaCommand, adapts=s_links.SetLinkType):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
orig_schema = schema
schema = s_links.SetLinkType.apply(self, schema, context)
schema = LinkMetaCommand.apply(self, schema, context)
pop = self.get_parent_op(context)
orig_type = self.scls.get_target(orig_schema)
new_type = self.scls.get_target(schema)
if (
not pop.maybe_get_object_aux_data('from_alias')
and (orig_type != new_type or self.cast_expr is not None)
):
self._alter_pointer_type(self.scls, schema, orig_schema, context)
return schema
class AlterLinkUpperCardinality(
LinkMetaCommand,
adapts=s_links.AlterLinkUpperCardinality,
):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
pop = self.get_parent_op(context)
orig_schema = schema
schema = s_links.AlterLinkUpperCardinality.apply(self, schema, context)
schema = LinkMetaCommand.apply(self, schema, context)
if (
not self.scls.generic(schema)
and not self.scls.is_pure_computable(schema)
and not pop.maybe_get_object_aux_data('from_alias')
):
orig_card = self.scls.get_cardinality(orig_schema)
new_card = self.scls.get_cardinality(schema)
if orig_card != new_card:
self._alter_pointer_cardinality(schema, orig_schema, context)
return schema
class AlterLinkLowerCardinality(
LinkMetaCommand,
adapts=s_links.AlterLinkLowerCardinality,
):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
pop = self.get_parent_op(context)
orig_schema = schema
schema = s_links.AlterLinkLowerCardinality.apply(
self, schema, context)
schema = LinkMetaCommand.apply(self, schema, context)
if not self.scls.generic(schema):
orig_required = self.scls.get_required(orig_schema)
new_required = self.scls.get_required(schema)
if (
not pop.maybe_get_object_aux_data('from_alias')
and not self.scls.is_endpoint_pointer(schema)
and orig_required != new_required
):
self._alter_pointer_optionality(
schema, orig_schema, context, fill_expr=self.fill_expr)
return schema
class AlterLinkOwned(
LinkMetaCommand,
AlterObject,
adapts=s_links.AlterLinkOwned,
):
pass
class AlterLink(LinkMetaCommand, adapts=s_links.AlterLink):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
orig_schema = schema
schema = s_links.AlterLink.apply(self, schema, context)
link = self.scls
schema = LinkMetaCommand.apply(self, schema, context)
with context(s_links.LinkCommandContext(schema, self, link)) as ctx:
ctx.original_schema = orig_schema
self.provide_table(link, schema, context)
self.attach_alter_table(context)
otd = self.get_resolved_attribute_value(
'on_target_delete',
schema=schema,
context=context,
)
card = self.get_resolved_attribute_value(
'cardinality',
schema=schema,
context=context,
)
if (otd or card) and not link.is_pure_computable(schema):
self.schedule_endpoint_delete_action_update(
link, orig_schema, schema, context)
return schema
class DeleteLink(LinkMetaCommand, adapts=s_links.DeleteLink):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
delta_root_ctx = context.top()
orig_schema = delta_root_ctx.original_schema
link = schema.get(self.classname)
old_table_name = common.get_backend_name(
schema, link, catenate=False)
schema = LinkMetaCommand.apply(self, schema, context)
schema = s_links.DeleteLink.apply(self, schema, context)
if (
not link.generic(orig_schema)
and has_table(link.get_source(orig_schema), orig_schema)
):
link_name = link.get_shortname(orig_schema).name
ptr_stor_info = types.get_pointer_storage_info(
link, schema=orig_schema)
objtype = context.get(s_objtypes.ObjectTypeCommandContext)
if (not isinstance(objtype.op, s_objtypes.DeleteObjectType)
and ptr_stor_info.table_type == 'ObjectType'
and objtype.scls.maybe_get_ptr(schema, link_name) is None):
# Only drop the column if the parent is not being dropped
# and the link was not reinherited in the same delta.
if objtype.scls.maybe_get_ptr(schema, link_name) is None:
                    # This must be a separate command so that objects
                    # depending on this column can be dropped correctly.
alter_table = objtype.op.get_alter_table(
schema, context, manual=True, priority=2)
col = dbops.Column(
name=ptr_stor_info.column_name,
type=common.qname(*ptr_stor_info.column_type))
col = dbops.AlterTableDropColumn(col)
alter_table.add_operation(col)
self.pgops.add(alter_table)
self.schedule_inhviews_update(
schema,
context,
objtype.scls,
update_descendants=True,
)
self.schedule_endpoint_delete_action_update(
link, orig_schema, schema, context)
self.attach_alter_table(context)
self.pgops.add(
self.drop_inhview(orig_schema, context, link, drop_ancestors=True)
)
self.pgops.add(
dbops.DropTable(
name=old_table_name,
priority=1,
conditions=[dbops.TableExists(old_table_name)],
)
)
self.update_base_inhviews(orig_schema, context, link)
self.schedule_inhview_deletion(orig_schema, context, link)
return schema
class PropertyMetaCommand(CompositeObjectMetaCommand, PointerMetaCommand):
@classmethod
def _create_table(
cls, prop, schema, context, conditional=False, create_bases=True,
create_children=True):
new_table_name = common.get_backend_name(schema, prop, catenate=False)
create_c = dbops.CommandGroup()
constraints = []
columns = []
src_col = common.edgedb_name_to_pg_name('source')
columns.append(
dbops.Column(
name=src_col, type='uuid', required=True))
id = sn.QualName(
module=prop.get_name(schema).module, name=str(prop.id))
index_name = common.convert_name(id, 'idx0', catenate=True)
pg_index = dbops.Index(
name=index_name, table_name=new_table_name,
unique=False, columns=[src_col])
ci = dbops.CreateIndex(pg_index)
if not prop.generic(schema):
tgt_cols = cls.get_columns(prop, schema, None)
columns.extend(tgt_cols)
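            # Multi property rows are unique per (source, value), mirroring
            # the set semantics of multi properties.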
constraints.append(
dbops.UniqueConstraint(
table_name=new_table_name,
columns=[src_col] + [tgt_col.name for tgt_col in tgt_cols]
)
)
table = dbops.Table(name=new_table_name)
table.add_columns(columns)
table.constraints = constraints
ct = dbops.CreateTable(table=table)
if conditional:
c = dbops.CommandGroup(
neg_conditions=[dbops.TableExists(new_table_name)])
else:
c = dbops.CommandGroup()
c.add_command(ct)
c.add_command(ci)
c.add_command(dbops.Comment(table, str(prop.get_name(schema))))
create_c.add_command(c)
if create_children:
for p_descendant in prop.descendants(schema):
if has_table(p_descendant, schema):
pc = PropertyMetaCommand._create_table(
p_descendant, schema, context, conditional=True,
create_bases=False, create_children=False)
create_c.add_command(pc)
return create_c
class CreateProperty(PropertyMetaCommand, adapts=s_props.CreateProperty):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_props.CreateProperty.apply(self, schema, context)
prop = self.scls
propname = prop.get_shortname(schema).name
schema = PropertyMetaCommand.apply(self, schema, context)
src = context.get(s_sources.SourceCommandContext)
self.provide_table(prop, schema, context)
if src and has_table(src.scls, schema):
if isinstance(src.scls, s_links.Link):
src.op.provide_table(src.scls, schema, context)
ptr_stor_info = types.get_pointer_storage_info(
prop, resolve_type=False, schema=schema)
sets_required = bool(
self.get_subcommands(
type=s_pointers.AlterPointerLowerCardinality))
if (
(
not isinstance(src.scls, s_objtypes.ObjectType)
or ptr_stor_info.table_type == 'ObjectType'
)
and (
not isinstance(src.scls, s_links.Link)
or propname not in {'source', 'target'}
)
):
alter_table = src.op.get_alter_table(
schema,
context,
force_new=True,
manual=True,
)
default_value = self.get_pointer_default(prop, schema, context)
cols = self.get_columns(
prop, schema, default_value, sets_required)
for col in cols:
cmd = dbops.AlterTableAddColumn(col)
alter_table.add_operation(cmd)
if col.name == 'id':
constraint = dbops.PrimaryKey(
table_name=alter_table.name,
columns=[col.name],
)
alter_table.add_operation(
dbops.AlterTableAddConstraint(constraint),
)
self.pgops.add(alter_table)
self.update_lineage_inhviews(schema, context, prop)
if has_table(src.op.scls, schema):
self.schedule_inhviews_update(
schema,
context,
src.op.scls,
update_descendants=True,
)
# If we're creating a required multi pointer without a SET
# REQUIRED USING inside, run the alter_pointer_optionality
# path to produce an error if there is existing data.
if (
prop.get_cardinality(schema).is_multi()
and prop.get_required(schema)
and not sets_required
):
self._alter_pointer_optionality(
schema, schema, context, fill_expr=None)
return schema
class RenameProperty(
PropertyMetaCommand, RenameObject, adapts=s_props.RenameProperty):
pass
class RebaseProperty(
PropertyMetaCommand, adapts=s_props.RebaseProperty):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
orig_schema = schema
schema = s_props.RebaseProperty.apply(self, schema, context)
schema = PropertyMetaCommand.apply(self, schema, context)
prop_ctx = context.get(s_props.PropertyCommandContext)
source = prop_ctx.scls
if has_table(source, schema):
self.update_base_inhviews_on_rebase(
schema, orig_schema, context, source)
return schema
class SetPropertyType(
PropertyMetaCommand, adapts=s_props.SetPropertyType):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
pop = self.get_parent_op(context)
orig_schema = schema
schema = s_props.SetPropertyType.apply(self, schema, context)
schema = PropertyMetaCommand.apply(self, schema, context)
orig_type = self.scls.get_target(orig_schema)
new_type = self.scls.get_target(schema)
if (
not pop.maybe_get_object_aux_data('from_alias')
and not self.scls.is_endpoint_pointer(schema)
and (orig_type != new_type or self.cast_expr is not None)
):
self._alter_pointer_type(self.scls, schema, orig_schema, context)
return schema
class AlterPropertyUpperCardinality(
PropertyMetaCommand,
adapts=s_props.AlterPropertyUpperCardinality,
):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
pop = self.get_parent_op(context)
orig_schema = schema
schema = s_props.AlterPropertyUpperCardinality.apply(
self, schema, context)
schema = PropertyMetaCommand.apply(self, schema, context)
if (
not self.scls.generic(schema)
and not self.scls.is_pure_computable(schema)
and not self.scls.is_endpoint_pointer(schema)
and not pop.maybe_get_object_aux_data('from_alias')
):
orig_card = self.scls.get_cardinality(orig_schema)
new_card = self.scls.get_cardinality(schema)
if orig_card != new_card:
self._alter_pointer_cardinality(schema, orig_schema, context)
return schema
class AlterPropertyLowerCardinality(
PropertyMetaCommand,
adapts=s_props.AlterPropertyLowerCardinality,
):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
pop = self.get_parent_op(context)
orig_schema = schema
schema = s_props.AlterPropertyLowerCardinality.apply(
self, schema, context)
schema = PropertyMetaCommand.apply(self, schema, context)
if not self.scls.generic(schema):
orig_required = self.scls.get_required(orig_schema)
new_required = self.scls.get_required(schema)
if (
not pop.maybe_get_object_aux_data('from_alias')
and not self.scls.is_endpoint_pointer(schema)
and orig_required != new_required
):
self._alter_pointer_optionality(
schema, orig_schema, context, fill_expr=self.fill_expr)
return schema
class AlterPropertyOwned(
PropertyMetaCommand,
AlterObject,
adapts=s_props.AlterPropertyOwned,
):
pass
class AlterProperty(
PropertyMetaCommand, adapts=s_props.AlterProperty):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
orig_schema = schema
schema = s_props.AlterProperty.apply(self, schema, context)
prop = self.scls
schema = PropertyMetaCommand.apply(self, schema, context)
if self.metadata_only:
return schema
if prop.is_pure_computable(orig_schema):
return schema
with context(
s_props.PropertyCommandContext(schema, self, prop)) as ctx:
ctx.original_schema = orig_schema
self.provide_table(prop, schema, context)
self.alter_pointer_default(prop, orig_schema, schema, context)
return schema
class DeleteProperty(
PropertyMetaCommand, adapts=s_props.DeleteProperty):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
orig_schema = schema
prop = schema.get(self.classname)
schema = s_props.DeleteProperty.apply(self, schema, context)
schema = PropertyMetaCommand.apply(self, schema, context)
source_ctx = context.get(s_sources.SourceCommandContext)
if source_ctx is not None:
source = source_ctx.scls
source_op = source_ctx.op
else:
source = source_op = None
if (source
and not source.maybe_get_ptr(
schema, prop.get_shortname(orig_schema).name)
and has_table(source, schema)):
self.pgops.add(
self.drop_inhview(schema, context, source, drop_ancestors=True)
)
alter_table = source_op.get_alter_table(
schema, context, force_new=True)
ptr_stor_info = types.get_pointer_storage_info(
prop,
schema=orig_schema,
link_bias=prop.is_link_property(orig_schema),
)
if ptr_stor_info.table_type == 'ObjectType':
col = dbops.AlterTableDropColumn(
dbops.Column(name=ptr_stor_info.column_name,
type=ptr_stor_info.column_type))
alter_table.add_operation(col)
if has_table(prop, orig_schema):
self.pgops.add(
self.drop_inhview(
orig_schema, context, prop, drop_ancestors=True)
)
old_table_name = common.get_backend_name(
orig_schema, prop, catenate=False)
self.pgops.add(dbops.DropTable(name=old_table_name, priority=1))
self.update_base_inhviews(orig_schema, context, prop)
self.schedule_inhview_deletion(orig_schema, context, prop)
if (
source is not None
and not context.is_deleting(source)
):
self.schedule_inhviews_update(
schema,
context,
source,
update_descendants=True,
)
return schema
class UpdateEndpointDeleteActions(MetaCommand):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.link_ops = []
self.changed_targets = set()
def _get_link_table_union(self, schema, links, include_children) -> str:
selects = []
aspect = 'inhview' if include_children else None
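        # Build a UNION ALL over the given links' tables yielding
        # (__sobj_id__, source, target) rows, roughly:
        #   (SELECT '<link id>'::uuid AS __sobj_id__, source, target
        #    FROM <link table>)
        #   UNION ALL ...
        # With include_children the 'inhview' aspect is used, so descendant
        # links' rows are visible as well.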
for link in links:
selects.append(textwrap.dedent('''\
(SELECT
{id}::uuid AS __sobj_id__,
{src} as source,
{tgt} as target
FROM {table})
''').format(
id=ql(str(link.id)),
src=common.quote_ident('source'),
tgt=common.quote_ident('target'),
table=common.get_backend_name(
schema,
link,
aspect=aspect,
),
))
return '(' + '\nUNION ALL\n '.join(selects) + ') as q'
def _get_inline_link_table_union(
self, schema, links, include_children) -> str:
selects = []
aspect = 'inhview' if include_children else None
for link in links:
link_psi = types.get_pointer_storage_info(link, schema=schema)
link_col = link_psi.column_name
selects.append(textwrap.dedent('''\
(SELECT
{id}::uuid AS __sobj_id__,
{src} as source,
{tgt} as target
FROM {table})
''').format(
id=ql(str(link.id)),
src=common.quote_ident('id'),
tgt=common.quote_ident(link_col),
table=common.get_backend_name(
schema,
link.get_source(schema),
aspect=aspect,
),
))
return '(' + '\nUNION ALL\n '.join(selects) + ') as q'
def get_trigger_name(self, schema, target,
disposition, deferred=False, inline=False):
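        # The trigger name encodes the disposition (target vs. source
        # deletion), timing (deferred vs. immediate) and link storage
        # (inline column vs. out-of-line link table), so every combination
        # gets a distinct, stable name.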
if disposition == 'target':
aspect = 'target-del'
else:
aspect = 'source-del'
if deferred:
aspect += '-def'
else:
aspect += '-imm'
if inline:
aspect += '-inl'
else:
aspect += '-otl'
aspect += '-t'
return common.get_backend_name(
schema, target, catenate=False, aspect=aspect)[1]
def get_trigger_proc_name(self, schema, target,
disposition, deferred=False, inline=False):
if disposition == 'target':
aspect = 'target-del'
else:
aspect = 'source-del'
if deferred:
aspect += '-def'
else:
aspect += '-imm'
if inline:
aspect += '-inl'
else:
aspect += '-otl'
aspect += '-f'
return common.get_backend_name(
schema, target, catenate=False, aspect=aspect)
def get_trigger_proc_text(self, target, links, *,
disposition, inline, schema):
if inline:
return self._get_inline_link_trigger_proc_text(
target, links, disposition=disposition, schema=schema)
else:
return self._get_outline_link_trigger_proc_text(
target, links, disposition=disposition, schema=schema)
def _get_outline_link_trigger_proc_text(
self, target, links, *, disposition, schema):
chunks = []
DA = s_links.LinkTargetDeleteAction
if disposition == 'target':
groups = itertools.groupby(
links, lambda l: l.get_on_target_delete(schema))
near_endpoint, far_endpoint = 'target', 'source'
else:
groups = [(DA.Allow, links)]
near_endpoint, far_endpoint = 'source', 'target'
for action, links in groups:
if action is DA.Restrict or action is DA.DeferredRestrict:
# Inherited link targets with restrict actions are
# elided by apply() to enable us to use inhviews here
# when looking for live references.
tables = self._get_link_table_union(
schema, links, include_children=True)
text = textwrap.dedent('''\
SELECT
q.__sobj_id__, q.source, q.target
INTO link_type_id, srcid, tgtid
FROM
{tables}
WHERE
q.{near_endpoint} = OLD.{id}
LIMIT 1;
IF FOUND THEN
SELECT
edgedb.shortname_from_fullname(link.name),
edgedb._get_schema_object_name(link.{far_endpoint})
INTO linkname, endname
FROM
edgedb."_SchemaLink" AS link
WHERE
link.id = link_type_id;
RAISE foreign_key_violation
USING
TABLE = TG_TABLE_NAME,
SCHEMA = TG_TABLE_SCHEMA,
MESSAGE = 'deletion of {tgtname} (' || tgtid
|| ') is prohibited by link target policy',
DETAIL = 'Object is still referenced in link '
|| linkname || ' of ' || endname || ' ('
|| srcid || ').';
END IF;
''').format(
tables=tables,
id='id',
tgtname=target.get_displayname(schema),
near_endpoint=near_endpoint,
far_endpoint=far_endpoint,
)
chunks.append(text)
elif action == s_links.LinkTargetDeleteAction.Allow:
for link in links:
link_table = common.get_backend_name(
schema, link)
                    # Since the 'required' constraint on multi links is
                    # enforced manually on the query side (and not through
                    # constraints/triggers of its own), we also need to
                    # enforce it manually when deleting a required multi
                    # link.
if link.get_required(schema) and disposition == 'target':
required_text = textwrap.dedent('''\
SELECT q.source INTO srcid
FROM {link_table} as q
WHERE q.target = OLD.{id}
AND NOT EXISTS (
SELECT FROM {link_table} as q2
WHERE q.source = q2.source
AND q2.target != OLD.{id}
);
IF FOUND THEN
RAISE not_null_violation
USING
TABLE = TG_TABLE_NAME,
SCHEMA = TG_TABLE_SCHEMA,
MESSAGE = 'missing value',
COLUMN = '{link_id}';
END IF;
''').format(
link_table=link_table,
link_id=str(link.id),
id='id'
)
chunks.append(required_text)
# Otherwise just delete it from the link table.
text = textwrap.dedent('''\
DELETE FROM
{link_table}
WHERE
{endpoint} = OLD.{id};
''').format(
link_table=link_table,
endpoint=common.quote_ident(near_endpoint),
id='id'
)
chunks.append(text)
elif action == s_links.LinkTargetDeleteAction.DeleteSource:
sources = collections.defaultdict(list)
for link in links:
sources[link.get_source(schema)].append(link)
for source, source_links in sources.items():
tables = self._get_link_table_union(
schema, source_links, include_children=False)
text = textwrap.dedent('''\
DELETE FROM
{source_table}
WHERE
{source_table}.{id} IN (
SELECT source
FROM {tables}
WHERE target = OLD.{id}
);
''').format(
source_table=common.get_backend_name(schema, source),
id='id',
tables=tables,
)
chunks.append(text)
text = textwrap.dedent('''\
DECLARE
link_type_id uuid;
srcid uuid;
tgtid uuid;
linkname text;
endname text;
BEGIN
{chunks}
RETURN OLD;
END;
''').format(chunks='\n\n'.join(chunks))
return text
def _get_inline_link_trigger_proc_text(
self, target, links, *, disposition, schema):
if disposition == 'source':
raise RuntimeError(
'source disposition link target delete action trigger does '
'not make sense for inline links')
chunks = []
DA = s_links.LinkTargetDeleteAction
groups = itertools.groupby(
links, lambda l: l.get_on_target_delete(schema))
near_endpoint, far_endpoint = 'target', 'source'
for action, links in groups:
if action is DA.Restrict or action is DA.DeferredRestrict:
# Inherited link targets with restrict actions are
# elided by apply() to enable us to use inhviews here
# when looking for live references.
tables = self._get_inline_link_table_union(
schema, links, include_children=True)
text = textwrap.dedent('''\
SELECT
q.__sobj_id__, q.source, q.target
INTO link_type_id, srcid, tgtid
FROM
{tables}
WHERE
q.{near_endpoint} = OLD.{id}
LIMIT 1;
IF FOUND THEN
SELECT
edgedb.shortname_from_fullname(link.name),
edgedb._get_schema_object_name(link.{far_endpoint})
INTO linkname, endname
FROM
edgedb."_SchemaLink" AS link
WHERE
link.id = link_type_id;
RAISE foreign_key_violation
USING
TABLE = TG_TABLE_NAME,
SCHEMA = TG_TABLE_SCHEMA,
MESSAGE = 'deletion of {tgtname} (' || tgtid
|| ') is prohibited by link target policy',
DETAIL = 'Object is still referenced in link '
|| linkname || ' of ' || endname || ' ('
|| srcid || ').';
END IF;
''').format(
tables=tables,
id='id',
tgtname=target.get_displayname(schema),
near_endpoint=near_endpoint,
far_endpoint=far_endpoint,
)
chunks.append(text)
elif action == s_links.LinkTargetDeleteAction.Allow:
for link in links:
link_psi = types.get_pointer_storage_info(
link, schema=schema)
link_col = link_psi.column_name
source_table = common.get_backend_name(
schema, link.get_source(schema))
text = textwrap.dedent(f'''\
UPDATE
{source_table}
SET
{qi(link_col)} = NULL
WHERE
{qi(link_col)} = OLD.id;
''')
chunks.append(text)
elif action == s_links.LinkTargetDeleteAction.DeleteSource:
sources = collections.defaultdict(list)
for link in links:
sources[link.get_source(schema)].append(link)
for source, source_links in sources.items():
tables = self._get_inline_link_table_union(
schema, source_links, include_children=False)
text = textwrap.dedent('''\
DELETE FROM
{source_table}
WHERE
{source_table}.{id} IN (
SELECT source
FROM {tables}
WHERE target = OLD.{id}
);
''').format(
source_table=common.get_backend_name(schema, source),
id='id',
tables=tables,
)
chunks.append(text)
text = textwrap.dedent('''\
DECLARE
link_type_id uuid;
srcid uuid;
tgtid uuid;
linkname text;
endname text;
links text[];
BEGIN
{chunks}
RETURN OLD;
END;
''').format(chunks='\n\n'.join(chunks))
return text
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
if not self.link_ops and not self.changed_targets:
return schema
DA = s_links.LinkTargetDeleteAction
affected_sources = set()
affected_targets = {t for _, t in self.changed_targets}
modifications = any(
isinstance(op, RebaseObjectType) and op.removed_bases
for op, _ in self.changed_targets
)
for link_op, link, orig_schema in self.link_ops:
# If our link has a restrict policy, we don't need to update
# the target on changes to inherited links.
# Most importantly, this optimization lets us avoid updating
# the triggers for every schema::Type subtype every time a
# new object type is created containing a __type__ link.
eff_schema = (
orig_schema if isinstance(link_op, DeleteLink) else schema)
action = link.get_on_target_delete(eff_schema)
target_is_affected = not (
(action is DA.Restrict or action is DA.DeferredRestrict)
and link.get_implicit_bases(eff_schema)
)
if (
link.generic(eff_schema)
or link.is_pure_computable(eff_schema)
):
continue
source = link.get_source(eff_schema)
target = link.get_target(eff_schema)
if not isinstance(link_op, CreateLink):
modifications = True
if isinstance(link_op, DeleteLink):
current_source = orig_schema.get_by_id(source.id, None)
if (current_source is not None
and not current_source.is_view(orig_schema)):
affected_sources.add((current_source, orig_schema))
current_target = schema.get_by_id(target.id, None)
if target_is_affected and current_target is not None:
affected_targets.add(current_target)
else:
if source.is_view(schema):
continue
affected_sources.add((source, schema))
if target_is_affected:
affected_targets.add(target)
if isinstance(link_op, AlterLink):
orig_target = link.get_target(orig_schema)
if target != orig_target:
current_orig_target = schema.get_by_id(
orig_target.id, None)
if current_orig_target is not None:
affected_targets.add(current_orig_target)
for source, src_schema in affected_sources:
links = []
for link in source.get_pointers(src_schema).objects(src_schema):
if (not isinstance(link, s_links.Link)
or link.is_pure_computable(src_schema)):
continue
ptr_stor_info = types.get_pointer_storage_info(
link, schema=src_schema)
if ptr_stor_info.table_type != 'link':
continue
links.append(link)
links.sort(
key=lambda l: (l.get_on_target_delete(src_schema),
l.get_name(src_schema)))
if links or modifications:
self._update_action_triggers(
src_schema, source, links, disposition='source')
# All descendants of affected targets also need to have their
# triggers updated, so track them down.
all_affected_targets = set()
for target in affected_targets:
union_of = target.get_union_of(schema)
if union_of:
objtypes = tuple(union_of.objects(schema))
else:
objtypes = (target,)
for objtype in objtypes:
all_affected_targets.add(objtype)
for descendant in objtype.descendants(schema):
if has_table(descendant, schema):
all_affected_targets.add(descendant)
for target in all_affected_targets:
deferred_links = []
deferred_inline_links = []
links = []
inline_links = []
inbound_links = schema.get_referrers(
target, scls_type=s_links.Link, field_name='target')
# We need to look at all inbound links to all ancestors
for ancestor in target.get_ancestors(schema).objects(schema):
inbound_links |= schema.get_referrers(
ancestor, scls_type=s_links.Link, field_name='target')
for link in inbound_links:
if link.is_pure_computable(schema):
continue
action = link.get_on_target_delete(schema)
                # Enforcement of link deletion policies on targets is
                # handled by looking at the inheritance views when the
                # policy is restrict.
                # If the policy is allow or delete source, we need to
                # actually process this for each link.
if (
(action is DA.Restrict or action is DA.DeferredRestrict)
and link.get_implicit_bases(schema)
):
continue
source = link.get_source(schema)
if source.is_view(schema):
continue
ptr_stor_info = types.get_pointer_storage_info(
link, schema=schema)
if ptr_stor_info.table_type != 'link':
if action is DA.DeferredRestrict:
deferred_inline_links.append(link)
else:
inline_links.append(link)
else:
if action is DA.DeferredRestrict:
deferred_links.append(link)
else:
links.append(link)
links.sort(
key=lambda l: (l.get_on_target_delete(schema),
l.get_name(schema)))
inline_links.sort(
key=lambda l: (l.get_on_target_delete(schema),
l.get_name(schema)))
deferred_links.sort(
key=lambda l: l.get_name(schema))
deferred_inline_links.sort(
key=lambda l: l.get_name(schema))
if links or modifications:
self._update_action_triggers(
schema, target, links, disposition='target')
if inline_links or modifications:
self._update_action_triggers(
schema, target, inline_links,
disposition='target', inline=True)
if deferred_links or modifications:
self._update_action_triggers(
schema, target, deferred_links,
disposition='target', deferred=True)
if deferred_inline_links or modifications:
self._update_action_triggers(
schema, target, deferred_inline_links,
disposition='target', deferred=True,
inline=True)
return schema
def _update_action_triggers(
self,
schema,
objtype: s_objtypes.ObjectType,
links: List[s_links.Link], *,
disposition: str,
deferred: bool=False,
inline: bool=False) -> None:
table_name = common.get_backend_name(
schema, objtype, catenate=False)
trigger_name = self.get_trigger_name(
schema, objtype, disposition=disposition,
deferred=deferred, inline=inline)
proc_name = self.get_trigger_proc_name(
schema, objtype, disposition=disposition,
deferred=deferred, inline=inline)
trigger = dbops.Trigger(
name=trigger_name, table_name=table_name,
events=('delete',), procedure=proc_name,
is_constraint=True, inherit=True, deferred=deferred)
if links:
proc_text = self.get_trigger_proc_text(
objtype, links, disposition=disposition,
inline=inline, schema=schema)
trig_func = dbops.Function(
name=proc_name, text=proc_text, volatility='volatile',
returns='trigger', language='plpgsql')
self.pgops.add(dbops.CreateOrReplaceFunction(trig_func))
self.pgops.add(dbops.CreateTrigger(
trigger, neg_conditions=[dbops.TriggerExists(
trigger_name=trigger_name, table_name=table_name
)]
))
else:
self.pgops.add(
dbops.DropTrigger(
trigger,
conditions=[dbops.TriggerExists(
trigger_name=trigger_name,
table_name=table_name,
)]
)
)
self.pgops.add(
dbops.DropFunction(
name=proc_name,
args=[],
conditions=[dbops.FunctionExists(
name=proc_name,
args=[],
)]
)
)
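# Illustrative sketch, not part of the original module: the trigger and
# trigger-function names assembled by get_trigger_name() and
# get_trigger_proc_name() above encode disposition, deferral and inline-ness
# in an "aspect" suffix. The helper below is hypothetical, mirrors that string
# assembly for readability only, and is not used anywhere in this file.
def _example_trigger_aspect(disposition, deferred=False, inline=False):
    aspect = 'target-del' if disposition == 'target' else 'source-del'
    aspect += '-def' if deferred else '-imm'
    aspect += '-inl' if inline else '-otl'
    # '-t' marks the trigger name; get_trigger_proc_name() appends '-f'.
    return aspect + '-t'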
@dataclasses.dataclass
class InheritanceViewUpdate:
update_ancestors: bool = True
update_descendants: bool = False
class UpdateInheritanceViews(MetaCommand):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.view_updates = {}
self.view_deletions = {}
def apply(self, schema, context):
all_updates = set()
for obj, update_info in self.view_updates.items():
if not schema.has_object(obj.id):
continue
all_updates.add(obj)
if update_info.update_ancestors:
all_updates.update(obj.get_ancestors(schema).objects(schema))
if update_info.update_descendants:
all_updates.update(obj.descendants(schema))
graph = {}
for obj in all_updates:
objname = obj.get_name(schema)
graph[objname] = topological.DepGraphEntry(
item=obj,
deps=obj.get_bases(schema).names(schema),
extra=False,
)
ordered = topological.sort(graph, allow_unresolved=True)
for obj in reversed(list(ordered)):
if has_table(obj, schema):
self.update_inhview(schema, obj)
for obj, obj_schema in self.view_deletions.items():
self.delete_inhview(obj_schema, obj)
def _get_select_from(self, schema, obj, ptrnames):
if isinstance(obj, s_sources.Source):
ptrs = dict(obj.get_pointers(schema).items(schema))
cols = []
for ptrname, alias in ptrnames.items():
ptr = ptrs[ptrname]
ptr_stor_info = types.get_pointer_storage_info(
ptr,
link_bias=isinstance(obj, s_links.Link),
schema=schema,
)
cols.append((ptr_stor_info.column_name, alias))
else:
cols = list(ptrnames.items())
coltext = ',\n'.join(
f'{qi(col)} AS {qi(alias)}' for col, alias in cols)
tabname = common.get_backend_name(
schema,
obj,
catenate=False,
aspect='table',
)
return textwrap.dedent(f'''\
(SELECT
{coltext}
FROM
{q(*tabname)}
)
''')
def update_inhview(self, schema, obj):
inhview_name = common.get_backend_name(
schema, obj, catenate=False, aspect='inhview')
ptrs = {}
if isinstance(obj, s_sources.Source):
pointers = list(obj.get_pointers(schema).items(schema))
pointers.sort(key=lambda p: p[1].id)
for ptrname, ptr in pointers:
ptr_stor_info = types.get_pointer_storage_info(
ptr,
link_bias=isinstance(obj, s_links.Link),
schema=schema,
)
if (
isinstance(obj, s_links.Link)
or ptr_stor_info.table_type == 'ObjectType'
):
ptrs[ptrname] = ptr_stor_info.column_name
else:
# MULTI PROPERTY
ptrs['source'] = 'source'
ptrs['target'] = 'target'
components = [self._get_select_from(schema, obj, ptrs)]
components.extend(
self._get_select_from(schema, descendant, ptrs)
for descendant in obj.descendants(schema)
if has_table(descendant, schema)
)
query = '\nUNION ALL\n'.join(components)
view = dbops.View(
name=inhview_name,
query=query,
)
self.pgops.add(
dbops.DropView(
inhview_name,
priority=1,
conditions=[dbops.ViewExists(inhview_name)],
),
)
self.pgops.add(
dbops.CreateView(
view=view,
priority=1,
),
)
def delete_inhview(self, schema, obj):
inhview_name = common.get_backend_name(
schema, obj, catenate=False, aspect='inhview')
self.pgops.add(
dbops.DropView(
inhview_name,
conditions=[dbops.ViewExists(inhview_name)],
priority=1,
),
)
class ModuleMetaCommand(ObjectMetaCommand):
pass
class CreateModule(ModuleMetaCommand, adapts=s_mod.CreateModule):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = CompositeObjectMetaCommand.apply(self, schema, context)
return s_mod.CreateModule.apply(self, schema, context)
class AlterModule(ModuleMetaCommand, adapts=s_mod.AlterModule):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_mod.AlterModule.apply(self, schema, context=context)
return CompositeObjectMetaCommand.apply(self, schema, context)
class DeleteModule(ModuleMetaCommand, adapts=s_mod.DeleteModule):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = CompositeObjectMetaCommand.apply(self, schema, context)
return s_mod.DeleteModule.apply(self, schema, context)
class CreateDatabase(ObjectMetaCommand, adapts=s_db.CreateDatabase):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_db.CreateDatabase.apply(self, schema, context)
db = self.scls
tenant_id = self._get_tenant_id(context)
db_name = common.get_database_backend_name(
str(self.classname), tenant_id=tenant_id)
tpl_name = common.get_database_backend_name(
self.template or edbdef.EDGEDB_TEMPLATE_DB, tenant_id=tenant_id)
self.pgops.add(
dbops.CreateDatabase(
dbops.Database(
db_name,
metadata=dict(
id=str(db.id),
tenant_id=tenant_id,
builtin=self.get_attribute_value('builtin'),
name=str(self.classname),
),
),
template=tpl_name,
)
)
return schema
class DropDatabase(ObjectMetaCommand, adapts=s_db.DropDatabase):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_db.DropDatabase.apply(self, schema, context)
tenant_id = self._get_tenant_id(context)
db_name = common.get_database_backend_name(
str(self.classname), tenant_id=tenant_id)
self.pgops.add(dbops.DropDatabase(db_name))
return schema
class CreateRole(ObjectMetaCommand, adapts=s_roles.CreateRole):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_roles.CreateRole.apply(self, schema, context)
role = self.scls
schema = ObjectMetaCommand.apply(self, schema, context)
membership = list(role.get_bases(schema).names(schema))
passwd = role.get_password(schema)
superuser_flag = False
members = set()
role_name = str(role.get_name(schema))
backend_params = self._get_backend_params(context)
capabilities = backend_params.instance_params.capabilities
tenant_id = backend_params.instance_params.tenant_id
if role.get_superuser(schema):
membership.append(edbdef.EDGEDB_SUPERGROUP)
# If the cluster is not exposing an explicit superuser role,
# we will make the created Postgres role superuser if we can
if not backend_params.instance_params.base_superuser:
superuser_flag = (
capabilities
& pgcluster.BackendCapabilities.SUPERUSER_ACCESS
)
if backend_params.session_authorization_role is not None:
# When we connect to the backend via a proxy role, we
# must ensure that role is a member of _every_ EdgeDB
# role so that `SET ROLE` can work properly.
members.add(backend_params.session_authorization_role)
role = dbops.Role(
name=common.get_role_backend_name(role_name, tenant_id=tenant_id),
allow_login=True,
superuser=superuser_flag,
password=passwd,
membership=[
common.get_role_backend_name(parent_role, tenant_id=tenant_id)
for parent_role in membership
],
metadata=dict(
id=str(role.id),
name=role_name,
tenant_id=tenant_id,
password_hash=passwd,
builtin=role.get_builtin(schema),
),
)
self.pgops.add(dbops.CreateRole(role))
return schema
class AlterRole(ObjectMetaCommand, adapts=s_roles.AlterRole):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_roles.AlterRole.apply(self, schema, context)
role = self.scls
schema = ObjectMetaCommand.apply(self, schema, context)
backend_params = self._get_backend_params(context)
capabilities = backend_params.instance_params.capabilities
tenant_id = backend_params.instance_params.tenant_id
instance_params = backend_params.instance_params
role_name = str(role.get_name(schema))
kwargs = {}
if self.has_attribute_value('password'):
passwd = self.get_attribute_value('password')
kwargs['password'] = passwd
kwargs['metadata'] = dict(
id=str(role.id),
name=role_name,
tenant_id=tenant_id,
password_hash=passwd,
builtin=role.get_builtin(schema),
)
pg_role_name = common.get_role_backend_name(
role_name, tenant_id=tenant_id)
if self.has_attribute_value('superuser'):
membership = list(role.get_bases(schema).names(schema))
membership.append(edbdef.EDGEDB_SUPERGROUP)
self.pgops.add(
dbops.AlterRoleAddMembership(
name=pg_role_name,
membership=[
common.get_role_backend_name(
parent_role, tenant_id=tenant_id)
for parent_role in membership
],
)
)
superuser_flag = False
# If the cluster is not exposing an explicit superuser role,
# we will make the modified Postgres role superuser if we can
if not instance_params.base_superuser:
superuser_flag = (
capabilities
& pgcluster.BackendCapabilities.SUPERUSER_ACCESS
)
kwargs['superuser'] = superuser_flag
dbrole = dbops.Role(name=pg_role_name, **kwargs)
self.pgops.add(dbops.AlterRole(dbrole))
return schema
class RebaseRole(ObjectMetaCommand, adapts=s_roles.RebaseRole):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_roles.RebaseRole.apply(self, schema, context)
role = self.scls
schema = ObjectMetaCommand.apply(self, schema, context)
tenant_id = self._get_tenant_id(context)
for dropped in self.removed_bases:
self.pgops.add(dbops.AlterRoleDropMember(
name=common.get_role_backend_name(
str(dropped.name), tenant_id=tenant_id),
member=common.get_role_backend_name(
str(role.get_name(schema)), tenant_id=tenant_id),
))
for bases, _pos in self.added_bases:
for added in bases:
self.pgops.add(dbops.AlterRoleAddMember(
name=common.get_role_backend_name(
str(added.name), tenant_id=tenant_id),
member=common.get_role_backend_name(
str(role.get_name(schema)), tenant_id=tenant_id),
))
return schema
class DeleteRole(ObjectMetaCommand, adapts=s_roles.DeleteRole):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_roles.DeleteRole.apply(self, schema, context)
schema = ObjectMetaCommand.apply(self, schema, context)
tenant_id = self._get_tenant_id(context)
self.pgops.add(dbops.DropRole(
common.get_role_backend_name(
str(self.classname), tenant_id=tenant_id)))
return schema
class CreateExtensionPackage(
ObjectMetaCommand,
adapts=s_exts.CreateExtensionPackage,
):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_exts.CreateExtensionPackage.apply(self, schema, context)
schema = ObjectMetaCommand.apply(self, schema, context)
ext_id = str(self.scls.id)
name__internal = str(self.scls.get_name(schema))
name = self.scls.get_displayname(schema)
version = self.scls.get_version(schema)._asdict()
version['stage'] = version['stage'].name.lower()
tenant_id = self._get_tenant_id(context)
tpl_db_name = common.get_database_backend_name(
edbdef.EDGEDB_TEMPLATE_DB, tenant_id=tenant_id)
self.pgops.add(
dbops.UpdateMetadataSection(
dbops.Database(name=tpl_db_name),
section='ExtensionPackage',
metadata={
ext_id: {
'id': ext_id,
'name': name,
'name__internal': name__internal,
'script': self.scls.get_script(schema),
'version': version,
'builtin': self.scls.get_builtin(schema),
'internal': self.scls.get_internal(schema),
}
}
)
)
return schema
class DeleteExtensionPackage(
ObjectMetaCommand,
adapts=s_exts.DeleteExtensionPackage,
):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_exts.DeleteExtensionPackage.apply(self, schema, context)
schema = ObjectMetaCommand.apply(self, schema, context)
tenant_id = self._get_tenant_id(context)
tpl_db_name = common.get_database_backend_name(
edbdef.EDGEDB_TEMPLATE_DB, tenant_id=tenant_id)
ext_id = str(self.scls.id)
self.pgops.add(
dbops.UpdateMetadataSection(
dbops.Database(name=tpl_db_name),
section='ExtensionPackage',
metadata={
ext_id: None
}
)
)
return schema
class CreateExtension(
CreateObject,
adapts=s_exts.CreateExtension,
):
pass
class DeleteExtension(
DeleteObject,
adapts=s_exts.DeleteExtension,
):
pass
class DeltaRoot(MetaCommand, adapts=sd.DeltaRoot):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._renames = {}
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
self.update_endpoint_delete_actions = UpdateEndpointDeleteActions()
self.update_inhviews = UpdateInheritanceViews()
schema = sd.DeltaRoot.apply(self, schema, context)
schema = MetaCommand.apply(self, schema, context)
self.update_endpoint_delete_actions.apply(schema, context)
self.pgops.add(self.update_endpoint_delete_actions)
self.update_inhviews.apply(schema, context)
self.pgops.add(self.update_inhviews)
return schema
def is_material(self):
return True
def generate(self, block: dbops.PLBlock) -> None:
for op in self.serialize_ops():
op.generate(block)
def serialize_ops(self):
queues = {}
self._serialize_ops(self, queues)
queues = (i[1] for i in sorted(queues.items(), key=lambda i: i[0]))
return itertools.chain.from_iterable(queues)
def _serialize_ops(self, obj, queues):
for op in obj.pgops:
if isinstance(op, MetaCommand):
self._serialize_ops(op, queues)
else:
queue = queues.get(op.priority)
if not queue:
queues[op.priority] = queue = []
queue.append(op)
class MigrationCommand(ObjectMetaCommand):
pass
class CreateMigration(
MigrationCommand,
CreateObject,
adapts=s_migrations.CreateMigration,
):
pass
class AlterMigration(
MigrationCommand,
AlterObject,
adapts=s_migrations.AlterMigration,
):
pass
class DeleteMigration(
MigrationCommand,
DeleteObject,
adapts=s_migrations.DeleteMigration,
):
pass
| apache-2.0 | -3,232,752,061,285,571,600 | 33.091024 | 79 | 0.539443 | false |
wasade/qiime | qiime/remote.py | 1 | 13012 | #!/usr/bin/env python
from __future__ import division
__author__ = "Jai Ram Rideout"
__copyright__ = "Copyright 2012, The QIIME project"
__credits__ = ["Jai Ram Rideout"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "Jai Ram Rideout"
__email__ = "[email protected]"
"""Contains functionality to interact with remote services."""
from collections import defaultdict
from csv import writer
from re import sub
from socket import gaierror
from StringIO import StringIO
from burrito.util import ApplicationNotFoundError
def raise_gdata_not_found_error(*args, **kwargs):
raise ApplicationNotFoundError("gdata cannot be found.\nIs it installed? "
"Is it in your $PYTHONPATH?\nThis is an optional QIIME "
"dependency, but is required if you plan to use QIIME's remote "
"mapping file features. For more information, please see "
"http://qiime.org/install/install.html.")
# Load gdata if it's available. If it's not, skip it but set up to raise errors
# if the user tries to use it.
try:
    from gdata.spreadsheet import SpreadsheetsCellsFeedFromString
    from gdata.spreadsheet import SpreadsheetsListFeedFromString
    from gdata.spreadsheet.service import CellQuery
    from gdata.spreadsheet.service import SpreadsheetsService
except ImportError:
    # Set functions which cannot be imported to raise_gdata_not_found_error.
    SpreadsheetsCellsFeedFromString = SpreadsheetsListFeedFromString = \
        CellQuery = SpreadsheetsService = raise_gdata_not_found_error
class GoogleSpreadsheetError(Exception):
pass
class GoogleSpreadsheetConnectionError(Exception):
pass
def load_google_spreadsheet(spreadsheet_key, worksheet_name=None):
"""Downloads and exports a Google Spreadsheet in TSV format.
Returns a string containing the spreadsheet contents in TSV format (e.g.
for writing out to a file or parsing).
The first line is assumed to be the spreadsheet header (i.e. containing
column names), which can optionally be followed by one or more comment
lines (starting with '#'). Only the first cell of a comment line will be
parsed (to keep exported spreadsheets consistent with QIIME mapping files'
comments). The (optional) comments section is then followed by the
spreadsheet data.
Some of this code is based on the following websites, as well as the
gdata.spreadsheet.text_db module:
http://www.payne.org/index.php/Reading_Google_Spreadsheets_in_Python
http://stackoverflow.com/a/12031835
Arguments:
spreadsheet_key - the key used to identify the spreadsheet (a string).
Can either be a key or a URL containing the key
worksheet_name - the name of the worksheet to load data from (a
string). If not supplied, will use first worksheet in the
spreadsheet
"""
spreadsheet_key = _extract_spreadsheet_key_from_url(spreadsheet_key)
gd_client = SpreadsheetsService()
try:
worksheets_feed = gd_client.GetWorksheetsFeed(spreadsheet_key,
visibility='public',
projection='basic')
except gaierror:
raise GoogleSpreadsheetConnectionError("Could not establish "
"connection with server. Do "
"you have an active Internet "
"connection?")
if len(worksheets_feed.entry) < 1:
raise GoogleSpreadsheetError("The Google Spreadsheet with key '%s' "
"does not have any worksheets associated "
"with it." % spreadsheet_key)
# Find worksheet that will be exported. If a name has not been provided,
# use the first worksheet.
worksheet = None
if worksheet_name is not None:
for sheet in worksheets_feed.entry:
if sheet.title.text == worksheet_name:
worksheet = sheet
if worksheet is None:
raise GoogleSpreadsheetError("The worksheet name '%s' could not "
"be found in the Google Spreadsheet "
"with key '%s'."
% (worksheet_name, spreadsheet_key))
else:
# Choose the first one.
worksheet = worksheets_feed.entry[0]
# Extract the ID of the worksheet.
worksheet_id = worksheet.id.text.split('/')[-1]
# Now that we have a spreadsheet key and worksheet ID, we can read the
# data. First get the headers (first row). We need this in order to grab
# the rest of the actual data in the correct order (it is returned
# unordered).
headers = _get_spreadsheet_headers(gd_client, spreadsheet_key,
worksheet_id)
if len(headers) < 1:
raise GoogleSpreadsheetError("Could not load spreadsheet header (it "
"appears to be empty). Is your Google "
"Spreadsheet with key '%s' empty?"
% spreadsheet_key)
# Loop through the rest of the rows and build up a list of data (in the
# same row/col order found in the spreadsheet).
spreadsheet_lines = _export_spreadsheet(gd_client, spreadsheet_key,
worksheet_id, headers)
out_lines = StringIO()
tsv_writer = writer(out_lines, delimiter='\t', lineterminator='\n')
tsv_writer.writerows(spreadsheet_lines)
return out_lines.getvalue()
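# Hedged usage sketch (not part of the original module): how a caller might
# fetch a worksheet and split the exported TSV into a header and data rows.
# The spreadsheet key and worksheet name below are placeholders.
def _example_load_spreadsheet():
    tsv = load_google_spreadsheet('0AnExampleSpreadsheetKey',
                                  worksheet_name='Sheet1')
    lines = [line.split('\t') for line in tsv.splitlines()]
    header, data = lines[0], lines[1:]
    return header, data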
def _extract_spreadsheet_key_from_url(url):
"""Extracts a key from a URL in the form '...key=some_key&foo=42...
If the URL doesn't look valid, assumes the URL is the key and returns it
unmodified.
"""
result = url
if 'key=' in url:
result = url.split('key=')[-1].split('#')[0].split('&')[0]
return result
def _get_spreadsheet_headers(client, spreadsheet_key, worksheet_id):
"""Returns a list of headers (the first line of the spreadsheet).
Will be in the order they appear in the spreadsheet.
"""
headers = []
query = CellQuery()
query.max_row = '1'
query.min_row = '1'
feed = client.GetCellsFeed(spreadsheet_key, worksheet_id, query=query,
visibility='public', projection='values')
# Wish python had a do-while...
while True:
for entry in feed.entry:
headers.append(entry.content.text)
# Get the next set of cells if needed.
next_link = feed.GetNextLink()
if next_link:
feed = client.Get(next_link.href,
converter=SpreadsheetsCellsFeedFromString)
else:
break
return headers
def _export_spreadsheet(client, spreadsheet_key, worksheet_id, headers):
"""Returns a list of lists containing the entire spreadsheet.
This will include the header, any comment lines, and the spreadsheet data.
Blank cells are represented as None. Data will only be read up to the first
blank line that is encountered (this is a limitation of the Google
Spreadsheet API).
Comments are only supported after the header and before any real data is
encountered. The lines must start with [optional whitespace] '#' and only
the first cell is kept in that case (to avoid many empty cells after the
comment cell, which mimics QIIME's mapping file format).
Only cell data that falls under the supplied headers will be included.
"""
# Convert the headers into Google's internal "cleaned" representation.
# These will be used as lookups to pull out cell data.
cleaned_headers = _get_cleaned_headers(headers)
# List feed skips header and returns rows in the order they appear in the
# spreadsheet.
spreadsheet_lines = [headers]
rows_feed = client.GetListFeed(spreadsheet_key, worksheet_id,
visibility='public', projection='values')
while True:
found_data = False
for row in rows_feed.entry:
line = []
# Loop through our headers and use the cleaned version to look up
# the cell data. In certain cases (if the original header was blank
# or only contained special characters) we will not be able to map
# our header, so the best we can do is tell the user to change the
# name of their header to be something simple/alphanumeric.
for header_idx, (header, cleaned_header) in \
enumerate(zip(headers, cleaned_headers)):
try:
cell_data = row.custom[cleaned_header].text
except KeyError:
raise GoogleSpreadsheetError("Could not map header '%s' "
"to Google Spreadsheet's internal representation "
"of the header. We suggest changing the name of "
"the header in your Google Spreadsheet to be "
"alphanumeric if possible, as this will likely "
"solve the issue. Note that the name isn't "
"*required* to be alphanumeric, but it may fix "
"issues with converting to Google Spreadsheet's "
"internal format in some cases." % header)
# Special handling of comments (if it's a comment, only keep
# that cell to avoid several blank cells following it).
if not found_data and header_idx == 0 and \
cell_data.lstrip().startswith('#'):
line.append(cell_data)
break
else:
line.append(cell_data)
found_data = True
spreadsheet_lines.append(line)
# Get the next set of rows if necessary.
next_link = rows_feed.GetNextLink()
if next_link:
rows_feed = client.Get(next_link.href,
converter=SpreadsheetsListFeedFromString)
else:
break
return spreadsheet_lines
def _get_cleaned_headers(headers):
"""Creates a list of "cleaned" headers which spreadsheets accept.
A Google Spreadsheet converts the header names into a "cleaned" internal
representation, which must be used to reference a cell at a particular
header/column. They are all lower case and contain no spaces or special
characters. If two columns have the same name after being sanitized, the
    columns further to the right have _2, _3, _4, etc. appended to them.
If there are column names which consist of all special characters, or if
the column header is blank, an obfuscated value will be used for a column
name. This method does not handle blank column names or column names with
only special characters.
Taken from gdata.spreadsheet.text_db.ConvertStringsToColumnHeaders and
modified to handle headers with pound signs or that start with numbers, as
well as correctly handle duplicate cleaned headers.
"""
cleaned_headers = []
for header in headers:
# Google strips special characters, whitespace, and underscores first,
# and then strips any *leading* digits. This order is extremely
# important!
sanitized = sub(r'^\d+', '', sub(r'[\W_]', '', header.lower()))
if len(sanitized) > 0:
cleaned_headers.append(sanitized)
else:
raise GoogleSpreadsheetError("Encountered a header '%s' that was "
"either blank or consisted only of special characters. "
"Could not map the header to the internal representation "
"used by the Google Spreadsheet. Please change the header "
"to consist of at least one alphanumeric character."
% header)
# When the same sanitized header appears multiple times in the first row
# of a spreadsheet, _n is appended to the name to make it unique.
header_count = defaultdict(int)
results = []
for header, cleaned_header in zip(headers, cleaned_headers):
new_header = cleaned_header
if header_count[cleaned_header] > 0:
# Google's numbering starts from _2, hence the +1.
new_header = '%s_%d' % (cleaned_header,
header_count[cleaned_header] + 1)
header_count[cleaned_header] += 1
results.append(new_header)
return results
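# Hedged illustration (not part of the original module): for the header row
# ['#SampleID', 'Sample ID', 'Treatment'], _get_cleaned_headers should return
# ['sampleid', 'sampleid_2', 'treatment'] -- special characters and whitespace
# are stripped, and the duplicate sanitized name receives a '_2' suffix.
def _example_cleaned_headers():
    return _get_cleaned_headers(['#SampleID', 'Sample ID', 'Treatment'])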
| gpl-2.0 | -3,657,277,499,824,228,400 | 41.943894 | 100 | 0.600599 | false |
olafhauk/mne-python | mne/utils/numerics.py | 4 | 36095 | # -*- coding: utf-8 -*-
"""Some utility functions."""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
from contextlib import contextmanager
import hashlib
from io import BytesIO, StringIO
from math import sqrt
import numbers
import operator
import os
import os.path as op
from math import ceil
import shutil
import sys
from datetime import datetime, timedelta, timezone
import numpy as np
from scipy import sparse
from ._logging import logger, warn, verbose
from .check import check_random_state, _ensure_int, _validate_type
from ..fixes import _infer_dimension_, svd_flip, stable_cumsum, _safe_svd
from .docs import fill_doc
def split_list(v, n, idx=False):
"""Split list in n (approx) equal pieces, possibly giving indices."""
n = int(n)
tot = len(v)
sz = tot // n
start = stop = 0
for i in range(n - 1):
stop += sz
yield (np.arange(start, stop), v[start:stop]) if idx else v[start:stop]
start += sz
    yield (np.arange(start, tot), v[start:]) if idx else v[start:]
def array_split_idx(ary, indices_or_sections, axis=0, n_per_split=1):
"""Do what numpy.array_split does, but add indices."""
# this only works for indices_or_sections as int
indices_or_sections = _ensure_int(indices_or_sections)
ary_split = np.array_split(ary, indices_or_sections, axis=axis)
idx_split = np.array_split(np.arange(ary.shape[axis]), indices_or_sections)
idx_split = (np.arange(sp[0] * n_per_split, (sp[-1] + 1) * n_per_split)
for sp in idx_split)
return zip(idx_split, ary_split)
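# Hedged usage sketch (not part of the original module): chunking a toy array
# with split_list and array_split_idx; the expected pieces are noted inline.
def _example_split():
    data = np.arange(10)
    # Three roughly equal pieces: [0, 1, 2], [3, 4, 5], [6, 7, 8, 9]
    pieces = [list(piece) for piece in split_list(data, 3)]
    # array_split_idx additionally yields the index array for each piece
    idx_pieces = [(idx.tolist(), chunk.tolist())
                  for idx, chunk in array_split_idx(data, 2)]
    return pieces, idx_pieces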
def create_chunks(sequence, size):
"""Generate chunks from a sequence.
Parameters
----------
sequence : iterable
Any iterable object
size : int
The chunksize to be returned
"""
return (sequence[p:p + size] for p in range(0, len(sequence), size))
def sum_squared(X):
"""Compute norm of an array.
Parameters
----------
X : array
Data whose norm must be found.
Returns
-------
value : float
Sum of squares of the input array X.
"""
X_flat = X.ravel(order='F' if np.isfortran(X) else 'C')
return np.dot(X_flat, X_flat)
def _compute_row_norms(data):
"""Compute scaling based on estimated norm."""
norms = np.sqrt(np.sum(data ** 2, axis=1))
norms[norms == 0] = 1.0
return norms
def _reg_pinv(x, reg=0, rank='full', rcond=1e-15):
"""Compute a regularized pseudoinverse of Hermitian matrices.
Regularization is performed by adding a constant value to each diagonal
element of the matrix before inversion. This is known as "diagonal
loading". The loading factor is computed as ``reg * np.trace(x) / len(x)``.
The pseudo-inverse is computed through SVD decomposition and inverting the
singular values. When the matrix is rank deficient, some singular values
will be close to zero and will not be used during the inversion. The number
of singular values to use can either be manually specified or automatically
estimated.
Parameters
----------
x : ndarray, shape (..., n, n)
Square, Hermitian matrices to invert.
reg : float
Regularization parameter. Defaults to 0.
rank : int | None | 'full'
This controls the effective rank of the covariance matrix when
computing the inverse. The rank can be set explicitly by specifying an
integer value. If ``None``, the rank will be automatically estimated.
Since applying regularization will always make the covariance matrix
full rank, the rank is estimated before regularization in this case. If
'full', the rank will be estimated after regularization and hence
will mean using the full rank, unless ``reg=0`` is used.
Defaults to 'full'.
rcond : float | 'auto'
Cutoff for detecting small singular values when attempting to estimate
        the rank of the matrix (``rank=None``). Singular values smaller than
the cutoff are set to zero. When set to 'auto', a cutoff based on
floating point precision will be used. Defaults to 1e-15.
Returns
-------
x_inv : ndarray, shape (..., n, n)
The inverted matrix.
loading_factor : float
Value added to the diagonal of the matrix during regularization.
rank : int
If ``rank`` was set to an integer value, this value is returned,
else the estimated rank of the matrix, before regularization, is
returned.
"""
from ..rank import _estimate_rank_from_s
if rank is not None and rank != 'full':
rank = int(operator.index(rank))
if x.ndim < 2 or x.shape[-2] != x.shape[-1]:
raise ValueError('Input matrix must be square.')
if not np.allclose(x, x.conj().swapaxes(-2, -1)):
raise ValueError('Input matrix must be Hermitian (symmetric)')
assert x.ndim >= 2 and x.shape[-2] == x.shape[-1]
n = x.shape[-1]
# Decompose the matrix, not necessarily positive semidefinite
from mne.fixes import svd
U, s, Vh = svd(x, hermitian=True)
# Estimate the rank before regularization
tol = 'auto' if rcond == 'auto' else rcond * s[..., :1]
rank_before = _estimate_rank_from_s(s, tol)
# Decompose the matrix again after regularization
loading_factor = reg * np.mean(s, axis=-1)
if reg:
U, s, Vh = svd(
x + loading_factor[..., np.newaxis, np.newaxis] * np.eye(n),
hermitian=True)
# Estimate the rank after regularization
tol = 'auto' if rcond == 'auto' else rcond * s[..., :1]
rank_after = _estimate_rank_from_s(s, tol)
# Warn the user if both all parameters were kept at their defaults and the
# matrix is rank deficient.
if (rank_after < n).any() and reg == 0 and \
rank == 'full' and rcond == 1e-15:
warn('Covariance matrix is rank-deficient and no regularization is '
'done.')
elif isinstance(rank, int) and rank > n:
raise ValueError('Invalid value for the rank parameter (%d) given '
'the shape of the input matrix (%d x %d).' %
(rank, x.shape[0], x.shape[1]))
# Pick the requested number of singular values
mask = np.arange(s.shape[-1]).reshape((1,) * (x.ndim - 2) + (-1,))
if rank is None:
cmp = ret = rank_before
elif rank == 'full':
cmp = rank_after
ret = rank_before
else:
cmp = ret = rank
mask = mask < np.asarray(cmp)[..., np.newaxis]
mask &= s > 0
# Invert only non-zero singular values
s_inv = np.zeros(s.shape)
s_inv[mask] = 1. / s[mask]
# Compute the pseudo inverse
x_inv = np.matmul(U * s_inv[..., np.newaxis, :], Vh)
return x_inv, loading_factor, ret
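# Hedged usage sketch (not part of the original module): inverting a toy
# rank-deficient Hermitian matrix with 5% diagonal loading; with rank=None the
# rank is estimated before regularization, so it should come back as 1 here.
def _example_reg_pinv():
    rng = np.random.RandomState(0)
    v = rng.randn(3)
    cov = np.outer(v, v)  # symmetric, rank 1
    cov_inv, loading, rank = _reg_pinv(cov, reg=0.05, rank=None)
    return cov_inv, loading, rank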
def _gen_events(n_epochs):
"""Generate event structure from number of epochs."""
events = np.c_[np.arange(n_epochs), np.zeros(n_epochs, int),
np.ones(n_epochs, int)]
return events
def _reject_data_segments(data, reject, flat, decim, info, tstep):
"""Reject data segments using peak-to-peak amplitude."""
from ..epochs import _is_good
from ..io.pick import channel_indices_by_type
data_clean = np.empty_like(data)
idx_by_type = channel_indices_by_type(info)
step = int(ceil(tstep * info['sfreq']))
if decim is not None:
step = int(ceil(step / float(decim)))
this_start = 0
this_stop = 0
drop_inds = []
for first in range(0, data.shape[1], step):
last = first + step
data_buffer = data[:, first:last]
if data_buffer.shape[1] < (last - first):
break # end of the time segment
if _is_good(data_buffer, info['ch_names'], idx_by_type, reject,
flat, ignore_chs=info['bads']):
this_stop = this_start + data_buffer.shape[1]
data_clean[:, this_start:this_stop] = data_buffer
this_start += data_buffer.shape[1]
else:
logger.info("Artifact detected in [%d, %d]" % (first, last))
drop_inds.append((first, last))
data = data_clean[:, :this_stop]
if not data.any():
raise RuntimeError('No clean segment found. Please '
'consider updating your rejection '
'thresholds.')
return data, drop_inds
def _get_inst_data(inst):
"""Get data view from MNE object instance like Raw, Epochs or Evoked."""
from ..io.base import BaseRaw
from ..epochs import BaseEpochs
from .. import Evoked
from ..time_frequency.tfr import _BaseTFR
_validate_type(inst, (BaseRaw, BaseEpochs, Evoked, _BaseTFR), "Instance")
if not inst.preload:
inst.load_data()
return inst._data
def compute_corr(x, y):
"""Compute pearson correlations between a vector and a matrix."""
if len(x) == 0 or len(y) == 0:
raise ValueError('x or y has zero length')
X = np.array(x, float)
Y = np.array(y, float)
X -= X.mean(0)
Y -= Y.mean(0)
x_sd = X.std(0, ddof=1)
# if covariance matrix is fully expanded, Y needs a
# transpose / broadcasting else Y is correct
y_sd = Y.std(0, ddof=1)[:, None if X.shape == Y.shape else Ellipsis]
return (np.dot(X.T, Y) / float(len(X) - 1)) / (x_sd * y_sd)
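# Hedged usage sketch (not part of the original module): correlating a vector
# with the columns of a matrix; the first column is a noisy copy of x (value
# near 1), the second is independent noise (value near 0).
def _example_compute_corr():
    rng = np.random.RandomState(42)
    x = rng.randn(100)
    y = np.c_[x + 0.1 * rng.randn(100), rng.randn(100)]
    return compute_corr(x, y)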
@fill_doc
def random_permutation(n_samples, random_state=None):
"""Emulate the randperm matlab function.
It returns a vector containing a random permutation of the
    integers between 0 and n_samples-1. It returns the same random numbers
    as MATLAB's randperm function whenever the random_state is the same
    as MATLAB's random seed.
    This function is useful for comparing against MATLAB scripts
    which use the randperm function.
    Note: the randperm(n_samples) MATLAB function generates a random
    sequence between 1 and n_samples, whereas the
    random_permutation(n_samples, random_state) function generates
    a random sequence between 0 and n_samples-1, that is:
    randperm(n_samples) = random_permutation(n_samples, random_state) - 1
Parameters
----------
n_samples : int
End point of the sequence to be permuted (excluded, i.e., the end point
is equal to n_samples-1)
%(random_state)s
Returns
-------
randperm : ndarray, int
Randomly permuted sequence between 0 and n-1.
"""
rng = check_random_state(random_state)
# This can't just be rng.permutation(n_samples) because it's not identical
# to what MATLAB produces
idx = rng.uniform(size=n_samples)
randperm = np.argsort(idx)
return randperm
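# Hedged usage sketch (not part of the original module): per the docstring,
# adding 1 to the result should reproduce MATLAB's randperm(5) for the same
# seed; the exact values depend on the shared RNG state.
def _example_random_permutation():
    return random_permutation(5, random_state=42) + 1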
@verbose
def _apply_scaling_array(data, picks_list, scalings, verbose=None):
"""Scale data type-dependently for estimation."""
scalings = _check_scaling_inputs(data, picks_list, scalings)
if isinstance(scalings, dict):
logger.debug(' Scaling using mapping %s.' % (scalings,))
picks_dict = dict(picks_list)
scalings = [(picks_dict[k], v) for k, v in scalings.items()
if k in picks_dict]
for idx, scaling in scalings:
data[idx, :] *= scaling # F - order
else:
logger.debug(' Scaling using computed norms.')
data *= scalings[:, np.newaxis] # F - order
def _invert_scalings(scalings):
if isinstance(scalings, dict):
scalings = {k: 1. / v for k, v in scalings.items()}
elif isinstance(scalings, np.ndarray):
scalings = 1. / scalings
return scalings
def _undo_scaling_array(data, picks_list, scalings):
scalings = _invert_scalings(_check_scaling_inputs(data, picks_list,
scalings))
return _apply_scaling_array(data, picks_list, scalings, verbose=False)
@contextmanager
def _scaled_array(data, picks_list, scalings):
"""Scale, use, unscale array."""
_apply_scaling_array(data, picks_list=picks_list, scalings=scalings)
try:
yield
finally:
_undo_scaling_array(data, picks_list=picks_list, scalings=scalings)
def _apply_scaling_cov(data, picks_list, scalings):
"""Scale resulting data after estimation."""
scalings = _check_scaling_inputs(data, picks_list, scalings)
scales = None
if isinstance(scalings, dict):
n_channels = len(data)
covinds = list(zip(*picks_list))[1]
assert len(data) == sum(len(k) for k in covinds)
assert list(sorted(np.concatenate(covinds))) == list(range(len(data)))
scales = np.zeros(n_channels)
for ch_t, idx in picks_list:
scales[idx] = scalings[ch_t]
elif isinstance(scalings, np.ndarray):
if len(scalings) != len(data):
raise ValueError('Scaling factors and data are of incompatible '
'shape')
scales = scalings
elif scalings is None:
pass
else:
raise RuntimeError('Arff...')
if scales is not None:
assert np.sum(scales == 0.) == 0
data *= (scales[None, :] * scales[:, None])
def _undo_scaling_cov(data, picks_list, scalings):
scalings = _invert_scalings(_check_scaling_inputs(data, picks_list,
scalings))
return _apply_scaling_cov(data, picks_list, scalings)
def _check_scaling_inputs(data, picks_list, scalings):
"""Aux function."""
rescale_dict_ = dict(mag=1e15, grad=1e13, eeg=1e6)
scalings_ = None
if isinstance(scalings, str) and scalings == 'norm':
scalings_ = 1. / _compute_row_norms(data)
elif isinstance(scalings, dict):
rescale_dict_.update(scalings)
scalings_ = rescale_dict_
elif isinstance(scalings, np.ndarray):
scalings_ = scalings
elif scalings is None:
pass
else:
raise NotImplementedError("No way! That's not a rescaling "
'option: %s' % scalings)
return scalings_
def hashfunc(fname, block_size=1048576, hash_type="md5"): # 2 ** 20
"""Calculate the hash for a file.
Parameters
----------
fname : str
Filename.
    block_size : int
        Block size to use when reading.
    hash_type : str
        The hash algorithm to use ("md5" or "sha1").
Returns
-------
hash_ : str
The hexadecimal digest of the hash.
"""
if hash_type == "md5":
hasher = hashlib.md5()
elif hash_type == "sha1":
hasher = hashlib.sha1()
with open(fname, 'rb') as fid:
while True:
data = fid.read(block_size)
if not data:
break
hasher.update(data)
return hasher.hexdigest()
def _replace_md5(fname):
"""Replace a file based on MD5sum."""
# adapted from sphinx-gallery
assert fname.endswith('.new')
fname_old = fname[:-4]
if op.isfile(fname_old) and hashfunc(fname) == hashfunc(fname_old):
os.remove(fname)
else:
shutil.move(fname, fname_old)
def create_slices(start, stop, step=None, length=1):
"""Generate slices of time indexes.
Parameters
----------
start : int
Index where first slice should start.
stop : int
Index where last slice should maximally end.
    length : int
        Number of time samples included in a given slice.
    step : int | None
        Number of time samples separating two slices.
        If step = None, step = length.
Returns
-------
slices : list
List of slice objects.
"""
# default parameters
if step is None:
step = length
# slicing
slices = [slice(t, t + length, 1) for t in
range(start, stop - length + 1, step)]
return slices
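# Hedged usage sketch (not part of the original module): three overlapping
# 100-sample windows, one starting every 50 samples, i.e.
# [slice(0, 100, 1), slice(50, 150, 1), slice(100, 200, 1)].
def _example_create_slices():
    return create_slices(0, 200, step=50, length=100)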
def _time_mask(times, tmin=None, tmax=None, sfreq=None, raise_error=True,
include_tmax=True):
"""Safely find sample boundaries."""
orig_tmin = tmin
orig_tmax = tmax
tmin = -np.inf if tmin is None else tmin
tmax = np.inf if tmax is None else tmax
if not np.isfinite(tmin):
tmin = times[0]
if not np.isfinite(tmax):
tmax = times[-1]
include_tmax = True # ignore this param when tmax is infinite
if sfreq is not None:
# Push to a bit past the nearest sample boundary first
sfreq = float(sfreq)
tmin = int(round(tmin * sfreq)) / sfreq - 0.5 / sfreq
tmax = int(round(tmax * sfreq)) / sfreq
tmax += (0.5 if include_tmax else -0.5) / sfreq
else:
assert include_tmax # can only be used when sfreq is known
if raise_error and tmin > tmax:
raise ValueError('tmin (%s) must be less than or equal to tmax (%s)'
% (orig_tmin, orig_tmax))
mask = (times >= tmin)
mask &= (times <= tmax)
if raise_error and not mask.any():
extra = '' if include_tmax else 'when include_tmax=False '
raise ValueError('No samples remain when using tmin=%s and tmax=%s %s'
'(original time bounds are [%s, %s])'
% (orig_tmin, orig_tmax, extra, times[0], times[-1]))
return mask
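# Hedged usage sketch (not part of the original module): selecting the samples
# between 0.2 s and 0.5 s on a 10 Hz time axis; the bounds are padded by half
# a sample as done above, so both endpoints are kept.
def _example_time_mask():
    times = np.arange(10) / 10.
    mask = _time_mask(times, tmin=0.2, tmax=0.5, sfreq=10.)
    return times[mask]  # approximately [0.2, 0.3, 0.4, 0.5]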
def _freq_mask(freqs, sfreq, fmin=None, fmax=None, raise_error=True):
"""Safely find frequency boundaries."""
orig_fmin = fmin
orig_fmax = fmax
fmin = -np.inf if fmin is None else fmin
fmax = np.inf if fmax is None else fmax
if not np.isfinite(fmin):
fmin = freqs[0]
if not np.isfinite(fmax):
fmax = freqs[-1]
if sfreq is None:
raise ValueError('sfreq can not be None')
# Push 0.5/sfreq past the nearest frequency boundary first
sfreq = float(sfreq)
fmin = int(round(fmin * sfreq)) / sfreq - 0.5 / sfreq
fmax = int(round(fmax * sfreq)) / sfreq + 0.5 / sfreq
if raise_error and fmin > fmax:
raise ValueError('fmin (%s) must be less than or equal to fmax (%s)'
% (orig_fmin, orig_fmax))
mask = (freqs >= fmin)
mask &= (freqs <= fmax)
if raise_error and not mask.any():
raise ValueError('No frequencies remain when using fmin=%s and '
'fmax=%s (original frequency bounds are [%s, %s])'
% (orig_fmin, orig_fmax, freqs[0], freqs[-1]))
return mask
def grand_average(all_inst, interpolate_bads=True, drop_bads=True):
"""Make grand average of a list of Evoked or AverageTFR data.
For :class:`mne.Evoked` data, the function interpolates bad channels based
on the ``interpolate_bads`` parameter. If ``interpolate_bads`` is True,
the grand average file will contain good channels and the bad channels
interpolated from the good MEG/EEG channels.
For :class:`mne.time_frequency.AverageTFR` data, the function takes the
subset of channels not marked as bad in any of the instances.
The ``grand_average.nave`` attribute will be equal to the number
of evoked datasets used to calculate the grand average.
.. note:: A grand average evoked should not be used for source
localization.
Parameters
----------
all_inst : list of Evoked or AverageTFR
The evoked datasets.
interpolate_bads : bool
If True, bad MEG and EEG channels are interpolated. Ignored for
AverageTFR.
drop_bads : bool
If True, drop all bad channels marked as bad in any data set.
If neither interpolate_bads nor drop_bads is True, in the output file,
every channel marked as bad in at least one of the input files will be
marked as bad, but no interpolation or dropping will be performed.
Returns
-------
grand_average : Evoked | AverageTFR
The grand average data. Same type as input.
Notes
-----
.. versionadded:: 0.11.0
"""
# check if all elements in the given list are evoked data
from ..evoked import Evoked
from ..time_frequency import AverageTFR
from ..channels.channels import equalize_channels
if not all_inst:
raise ValueError('Please pass a list of Evoked or AverageTFR objects.')
elif len(all_inst) == 1:
warn('Only a single dataset was passed to mne.grand_average().')
inst_type = type(all_inst[0])
_validate_type(all_inst[0], (Evoked, AverageTFR), 'All elements')
for inst in all_inst:
_validate_type(inst, inst_type, 'All elements', 'of the same type')
# Copy channels to leave the original evoked datasets intact.
all_inst = [inst.copy() for inst in all_inst]
# Interpolates if necessary
if isinstance(all_inst[0], Evoked):
if interpolate_bads:
all_inst = [inst.interpolate_bads() if len(inst.info['bads']) > 0
else inst for inst in all_inst]
from ..evoked import combine_evoked as combine
else: # isinstance(all_inst[0], AverageTFR):
from ..time_frequency.tfr import combine_tfr as combine
if drop_bads:
bads = list({b for inst in all_inst for b in inst.info['bads']})
if bads:
for inst in all_inst:
inst.drop_channels(bads)
equalize_channels(all_inst, copy=False)
# make grand_average object using combine_[evoked/tfr]
grand_average = combine(all_inst, weights='equal')
# change the grand_average.nave to the number of Evokeds
grand_average.nave = len(all_inst)
# change comment field
grand_average.comment = "Grand average (n = %d)" % grand_average.nave
return grand_average
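# Hedged usage sketch (not part of the original module): averaging evoked
# responses read from a list of hypothetical -ave.fif file names.
def _example_grand_average(fnames):
    import mne
    evokeds = [mne.read_evokeds(fname, condition=0) for fname in fnames]
    return grand_average(evokeds, interpolate_bads=True, drop_bads=True)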
def object_hash(x, h=None):
"""Hash a reasonable python object.
Parameters
----------
x : object
Object to hash. Can be anything comprised of nested versions of:
{dict, list, tuple, ndarray, str, bytes, float, int, None}.
h : hashlib HASH object | None
Optional, object to add the hash to. None creates an MD5 hash.
Returns
-------
digest : int
The digest resulting from the hash.
"""
if h is None:
h = hashlib.md5()
if hasattr(x, 'keys'):
# dict-like types
keys = _sort_keys(x)
for key in keys:
object_hash(key, h)
object_hash(x[key], h)
elif isinstance(x, bytes):
# must come before "str" below
h.update(x)
elif isinstance(x, (str, float, int, type(None))):
h.update(str(type(x)).encode('utf-8'))
h.update(str(x).encode('utf-8'))
elif isinstance(x, (np.ndarray, np.number, np.bool_)):
x = np.asarray(x)
h.update(str(x.shape).encode('utf-8'))
h.update(str(x.dtype).encode('utf-8'))
h.update(x.tobytes())
elif isinstance(x, datetime):
        object_hash(_dt_to_stamp(x), h)
elif hasattr(x, '__len__'):
# all other list-like types
h.update(str(type(x)).encode('utf-8'))
for xx in x:
object_hash(xx, h)
else:
raise RuntimeError('unsupported type: %s (%s)' % (type(x), x))
return int(h.hexdigest(), 16)
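# Hedged usage sketch (not part of the original module): two objects with the
# same nested content hash to the same digest, regardless of identity.
def _example_object_hash():
    a = dict(kind='raw', sfreq=1000., picks=np.arange(3))
    b = dict(kind='raw', sfreq=1000., picks=np.arange(3))
    return object_hash(a) == object_hash(b)  # True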
def object_size(x, memo=None):
"""Estimate the size of a reasonable python object.
Parameters
----------
x : object
Object to approximate the size of.
Can be anything comprised of nested versions of:
{dict, list, tuple, ndarray, str, bytes, float, int, None}.
memo : dict | None
The memodict.
Returns
-------
size : int
The estimated size in bytes of the object.
"""
# Note: this will not process object arrays properly (since those only)
# hold references
if memo is None:
memo = dict()
id_ = id(x)
if id_ in memo:
return 0 # do not add already existing ones
if isinstance(x, (bytes, str, int, float, type(None))):
size = sys.getsizeof(x)
elif isinstance(x, np.ndarray):
# On newer versions of NumPy, just doing sys.getsizeof(x) works,
# but on older ones you always get something small :(
size = sys.getsizeof(np.array([]))
if x.base is None or id(x.base) not in memo:
size += x.nbytes
elif isinstance(x, np.generic):
size = x.nbytes
elif isinstance(x, dict):
size = sys.getsizeof(x)
for key, value in x.items():
size += object_size(key, memo)
size += object_size(value, memo)
elif isinstance(x, (list, tuple)):
size = sys.getsizeof(x) + sum(object_size(xx, memo) for xx in x)
elif isinstance(x, datetime):
size = object_size(_dt_to_stamp(x), memo)
elif sparse.isspmatrix_csc(x) or sparse.isspmatrix_csr(x):
size = sum(sys.getsizeof(xx)
for xx in [x, x.data, x.indices, x.indptr])
else:
raise RuntimeError('unsupported type: %s (%s)' % (type(x), x))
memo[id_] = size
return size
def _sort_keys(x):
"""Sort and return keys of dict."""
keys = list(x.keys()) # note: not thread-safe
idx = np.argsort([str(k) for k in keys])
keys = [keys[ii] for ii in idx]
return keys
def _array_equal_nan(a, b):
try:
np.testing.assert_array_equal(a, b)
except AssertionError:
return False
return True
def object_diff(a, b, pre=''):
"""Compute all differences between two python variables.
Parameters
----------
a : object
Currently supported: dict, list, tuple, ndarray, int, str, bytes,
float, StringIO, BytesIO.
b : object
Must be same type as ``a``.
pre : str
String to prepend to each line.
Returns
-------
diffs : str
A string representation of the differences.
"""
out = ''
if type(a) != type(b):
# Deal with NamedInt and NamedFloat
for sub in (int, float):
if isinstance(a, sub) and isinstance(b, sub):
break
else:
return pre + ' type mismatch (%s, %s)\n' % (type(a), type(b))
if isinstance(a, dict):
k1s = _sort_keys(a)
k2s = _sort_keys(b)
m1 = set(k2s) - set(k1s)
if len(m1):
out += pre + ' left missing keys %s\n' % (m1)
for key in k1s:
if key not in k2s:
out += pre + ' right missing key %s\n' % key
else:
out += object_diff(a[key], b[key],
pre=(pre + '[%s]' % repr(key)))
elif isinstance(a, (list, tuple)):
if len(a) != len(b):
out += pre + ' length mismatch (%s, %s)\n' % (len(a), len(b))
else:
for ii, (xx1, xx2) in enumerate(zip(a, b)):
out += object_diff(xx1, xx2, pre + '[%s]' % ii)
elif isinstance(a, float):
if not _array_equal_nan(a, b):
out += pre + ' value mismatch (%s, %s)\n' % (a, b)
elif isinstance(a, (str, int, bytes, np.generic)):
if a != b:
out += pre + ' value mismatch (%s, %s)\n' % (a, b)
elif a is None:
if b is not None:
out += pre + ' left is None, right is not (%s)\n' % (b)
elif isinstance(a, np.ndarray):
if not _array_equal_nan(a, b):
out += pre + ' array mismatch\n'
elif isinstance(a, (StringIO, BytesIO)):
if a.getvalue() != b.getvalue():
out += pre + ' StringIO mismatch\n'
elif isinstance(a, datetime):
if (a - b).total_seconds() != 0:
out += pre + ' datetime mismatch\n'
elif sparse.isspmatrix(a):
# sparsity and sparse type of b vs a already checked above by type()
if b.shape != a.shape:
            out += pre + (' sparse matrix a and b shape mismatch '
                          '(%s vs %s)' % (a.shape, b.shape))
else:
c = a - b
c.eliminate_zeros()
if c.nnz > 0:
out += pre + (' sparse matrix a and b differ on %s '
'elements' % c.nnz)
elif hasattr(a, '__getstate__'):
out += object_diff(a.__getstate__(), b.__getstate__(), pre)
else:
raise RuntimeError(pre + ': unsupported type %s (%s)' % (type(a), a))
return out
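# --- Editor's note: illustrative usage sketch, not part of the original module. ---
# A minimal demonstration of object_diff on two nested dicts; the keys and
# values are hypothetical and the call assumes numpy is imported as np above.
def _example_object_diff():
    a = {'sfreq': 1000.0, 'ch_names': ['EEG 001', 'EEG 002'], 'data': np.zeros(3)}
    b = {'sfreq': 1000.0, 'ch_names': ['EEG 001', 'EEG 003'], 'data': np.zeros(3)}
    # Only the second channel name differs, so the returned report contains a
    # single "value mismatch" line prefixed with info['ch_names'][1].
    return object_diff(a, b, pre='info')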
class _PCA(object):
"""Principal component analysis (PCA)."""
# Adapted from sklearn and stripped down to just use linalg.svd
# and make it easier to later provide a "center" option if we want
def __init__(self, n_components=None, whiten=False):
self.n_components = n_components
self.whiten = whiten
def fit_transform(self, X, y=None):
X = X.copy()
U, S, _ = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0] - 1)
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
if self.n_components is None:
n_components = min(X.shape)
else:
n_components = self.n_components
n_samples, n_features = X.shape
if n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
elif not 0 <= n_components <= min(n_samples, n_features):
raise ValueError("n_components=%r must be between 0 and "
"min(n_samples, n_features)=%r with "
"svd_solver='full'"
% (n_components, min(n_samples, n_features)))
elif n_components >= 1:
if not isinstance(n_components, (numbers.Integral, np.integer)):
raise ValueError("n_components=%r must be of type int "
"when greater than or equal to 1, "
"was of type=%r"
% (n_components, type(n_components)))
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = _safe_svd(X, full_matrices=False)
# flip eigenvectors' sign to enforce deterministic output
U, V = svd_flip(U, V)
components_ = V
# Get variance explained by singular values
explained_variance_ = (S ** 2) / (n_samples - 1)
total_var = explained_variance_.sum()
explained_variance_ratio_ = explained_variance_ / total_var
singular_values_ = S.copy() # Store the singular values.
# Postprocess the number of components required
if n_components == 'mle':
n_components = \
_infer_dimension_(explained_variance_, n_samples, n_features)
elif 0 < n_components < 1.0:
# number of components for which the cumulated explained
# variance percentage is superior to the desired threshold
ratio_cumsum = stable_cumsum(explained_variance_ratio_)
n_components = np.searchsorted(ratio_cumsum, n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < min(n_features, n_samples):
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
self.n_samples_, self.n_features_ = n_samples, n_features
self.components_ = components_[:n_components]
self.n_components_ = n_components
self.explained_variance_ = explained_variance_[:n_components]
self.explained_variance_ratio_ = \
explained_variance_ratio_[:n_components]
self.singular_values_ = singular_values_[:n_components]
return U, S, V
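# --- Editor's note: illustrative usage sketch, not part of the original module. ---
# Reducing 100 hypothetical observations of 5 features to 2 whitened components;
# assumes numpy is imported as np at module level.
def _example_pca():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 5)
    pca = _PCA(n_components=2, whiten=True)
    X_red = pca.fit_transform(X)
    return X_red.shape  # (100, 2)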
def _mask_to_onsets_offsets(mask):
"""Group boolean mask into contiguous onset:offset pairs."""
assert mask.dtype == bool and mask.ndim == 1
mask = mask.astype(int)
diff = np.diff(mask)
onsets = np.where(diff > 0)[0] + 1
if mask[0]:
onsets = np.concatenate([[0], onsets])
offsets = np.where(diff < 0)[0] + 1
if mask[-1]:
offsets = np.concatenate([offsets, [len(mask)]])
assert len(onsets) == len(offsets)
return onsets, offsets
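# --- Editor's note: illustrative usage sketch, not part of the original module. ---
# The hypothetical mask below has runs of True at indices 1-2 and 4, so the
# function returns onsets [1, 4] and offsets [3, 5] (offsets are exclusive).
def _example_mask_to_onsets_offsets():
    mask = np.array([False, True, True, False, True])
    return _mask_to_onsets_offsets(mask)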
def _julian_to_dt(jd):
"""Convert Julian integer to a datetime object.
Parameters
----------
jd : int
        Julian date - number of days since Julian day 0.
        Julian day number 0 is assigned to the day starting at
        noon on January 1, 4713 BC in the proleptic Julian calendar
        (November 24, 4714 BC in the proleptic Gregorian calendar).
Returns
-------
jd_date : datetime
Datetime representation of jd
"""
# https://aa.usno.navy.mil/data/docs/JulianDate.php
# Thursday, A.D. 1970 Jan 1 12:00:00.0 2440588.000000
jd_t0 = 2440588
datetime_t0 = datetime(1970, 1, 1, 12, 0, 0, 0, tzinfo=timezone.utc)
dt = timedelta(days=(jd - jd_t0))
return datetime_t0 + dt
def _dt_to_julian(jd_date):
"""Convert datetime object to a Julian integer.
Parameters
----------
jd_date : datetime
Returns
-------
    jd : int
        Julian date corresponding to jd_date
        - number of days since Julian day 0.
        Julian day number 0 is assigned to the day starting at
        noon on January 1, 4713 BC in the proleptic Julian calendar
        (November 24, 4714 BC in the proleptic Gregorian calendar).
"""
# https://aa.usno.navy.mil/data/docs/JulianDate.php
# Thursday, A.D. 1970 Jan 1 12:00:00.0 2440588.000000
jd_t0 = 2440588
datetime_t0 = datetime(1970, 1, 1, 12, 0, 0, 0, tzinfo=timezone.utc)
dt = jd_date - datetime_t0
return jd_t0 + dt.days
def _cal_to_julian(year, month, day):
"""Convert calendar date (year, month, day) to a Julian integer.
Parameters
----------
year : int
Year as an integer.
month : int
Month as an integer.
day : int
Day as an integer.
Returns
-------
jd: int
Julian date.
"""
return int(_dt_to_julian(datetime(year, month, day, 12, 0, 0,
tzinfo=timezone.utc)))
def _julian_to_cal(jd):
"""Convert calendar date (year, month, day) to a Julian integer.
Parameters
----------
jd: int, float
Julian date.
Returns
-------
year : int
Year as an integer.
month : int
Month as an integer.
day : int
Day as an integer.
"""
tmp_date = _julian_to_dt(jd)
return tmp_date.year, tmp_date.month, tmp_date.day
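# --- Editor's note: illustrative round-trip check, not part of the original module. ---
# It relies only on the Unix-epoch reference cited above (JD 2440588 is
# 1970-01-01 12:00 UTC); the dates used are otherwise arbitrary.
def _example_julian_round_trip():
    jd = _cal_to_julian(1970, 1, 1)   # 2440588
    return jd, _julian_to_cal(jd)     # (2440588, (1970, 1, 1))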
def _check_dt(dt):
if not isinstance(dt, datetime) or dt.tzinfo is None or \
dt.tzinfo is not timezone.utc:
raise ValueError('Date must be datetime object in UTC: %r' % (dt,))
def _dt_to_stamp(inp_date):
"""Convert a datetime object to a timestamp."""
_check_dt(inp_date)
return int(inp_date.timestamp() // 1), inp_date.microsecond
def _stamp_to_dt(utc_stamp):
"""Convert timestamp to datetime object in Windows-friendly way."""
    # On Windows the smallest timestamp fromtimestamp() accepts is 86400 (one
    # day), so build the datetime from the epoch plus a timedelta instead.
stamp = [int(s) for s in utc_stamp]
if len(stamp) == 1: # In case there is no microseconds information
stamp.append(0)
return (datetime.fromtimestamp(0, tz=timezone.utc) +
timedelta(0, stamp[0], stamp[1])) # day, sec, µs
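# --- Editor's note: illustrative round-trip sketch, not part of the original module. ---
# Timestamps are exchanged as (seconds, microseconds) pairs; the example date
# is hypothetical and must be timezone-aware UTC to pass _check_dt.
def _example_stamp_round_trip():
    dt = datetime(2020, 1, 1, tzinfo=timezone.utc)
    stamp = _dt_to_stamp(dt)          # (1577836800, 0)
    return _stamp_to_dt(stamp) == dt  # True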
class _ReuseCycle(object):
"""Cycle over a variable, preferring to reuse earlier indices.
Requires the values in ``x`` to be hashable and unique. This holds
nicely for matplotlib's color cycle, which gives HTML hex color strings.
"""
def __init__(self, x):
self.indices = list()
self.popped = dict()
assert len(x) > 0
self.x = x
def __iter__(self):
while True:
yield self.__next__()
def __next__(self):
if not len(self.indices):
self.indices = list(range(len(self.x)))
self.popped = dict()
idx = self.indices.pop(0)
val = self.x[idx]
assert val not in self.popped
self.popped[val] = idx
return val
def restore(self, val):
try:
idx = self.popped.pop(val)
except KeyError:
warn('Could not find value: %s' % (val,))
else:
loc = np.searchsorted(self.indices, idx)
self.indices.insert(loc, idx)
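# --- Editor's note: illustrative usage sketch, not part of the original module. ---
# The three hex colors are hypothetical stand-ins for a matplotlib color cycle.
def _example_reuse_cycle():
    colors = _ReuseCycle(['#1f77b4', '#ff7f0e', '#2ca02c'])
    first = next(colors)    # '#1f77b4'
    second = next(colors)   # '#ff7f0e'
    colors.restore(first)   # hand the first color back
    return next(colors)     # '#1f77b4' again, reused before '#2ca02c'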
| bsd-3-clause | 3,660,217,680,164,539,400 | 33.115312 | 79 | 0.591345 | false |
tiefpunkt/thingstore | thingstore/models.py | 1 | 2775 | from django.db import models
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.utils.timezone import now, utc
import calendar, datetime, string, random
# Create your models here.
class Thing(models.Model):
name = models.CharField(max_length=255)
location = models.CharField(max_length=255, blank=True)
description = models.TextField(blank=True)
owner = models.ForeignKey(User)
def __unicode__(self):
return self.name;
def get_absolute_url(self):
return reverse('thingstore.views.thing', args=[str(self.id)])
class Metric(models.Model):
thing = models.ForeignKey(Thing, related_name='metrics')
name = models.CharField(max_length=255)
unit = models.CharField(max_length=64, blank=True)
class Meta:
unique_together = (("name","thing"),)
def __unicode__(self):
return self.name;
""" Return most recent value for metric """
@property
def current_value(self):
try:
return Value.objects.filter(metric = self)[:1].get().value
except Value.DoesNotExist:
return None
""" set current value by adding a new Value with current timestamp"""
@current_value.setter
def current_value(self, value):
v = Value(metric = self, value = value)
v.save()
""" Return datetime of last update """
@property
def last_update(self):
try:
return Value.objects.filter(metric = self)[:1].get().timestamp
except Value.DoesNotExist:
return None
""" Returns a list of Value objects for the last $timeframe_hours
plus the one Value before the timeframe if existing """
def getValues(self, timeframe_hours):
try:
# Get all values within the timeframe
r_values = Value.objects.filter(metric = self, timestamp__gte = now()-datetime.timedelta(hours=timeframe_hours)).order_by('timestamp')
r_list = [ values for values in r_values]
# The invisible Value outside of the Timeframe
inv_value = Value.objects.filter(metric = self, timestamp__lt = now()-datetime.timedelta(hours=timeframe_hours)).order_by('-timestamp')[:1]
if inv_value.count():
vr_list = list(inv_value) + list(r_values)
return vr_list
return r_list
except:
return None;
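# --- Editor's note: illustrative usage sketch, not part of the original model code. ---
# Setting Metric.current_value stores a timestamped Value row behind the scenes;
# the metric instance passed in is hypothetical and must already be saved.
def _example_record_reading(metric):
    metric.current_value = 21.5                  # inserts a Value with now()
    return metric.current_value, metric.last_update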
class Value(models.Model):
metric = models.ForeignKey(Metric, related_name='values')
value = models.FloatField()
timestamp = models.DateTimeField(default=now)
@property
def js_time(self):
return calendar.timegm(self.timestamp.timetuple())*1000
class Meta:
ordering = ['-timestamp']
class APIKey(models.Model):
token = models.CharField(max_length=255, unique = True)
user = models.ForeignKey(User, related_name='apikeys')
@classmethod
def create(cls, user):
apikey = cls(user=user)
apikey.token = ''.join(random.sample(string.lowercase+string.uppercase+string.digits,32))
return apikey
| mit | -3,062,768,217,435,428,000 | 28.83871 | 142 | 0.718198 | false |
vbshah1992/microblog | flask/lib/python2.7/site-packages/pip-1.5.4-py2.7.egg/pip/_vendor/html5lib/filters/lint.py | 250 | 4062 | from __future__ import absolute_import, division, unicode_literals
from gettext import gettext
_ = gettext
from . import _base
from ..constants import cdataElements, rcdataElements, voidElements
from ..constants import spaceCharacters
spaceCharacters = "".join(spaceCharacters)
class LintError(Exception):
pass
class Filter(_base.Filter):
def __iter__(self):
open_elements = []
contentModelFlag = "PCDATA"
for token in _base.Filter.__iter__(self):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
name = token["name"]
if contentModelFlag != "PCDATA":
raise LintError(_("StartTag not in PCDATA content model flag: %s") % name)
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %r") % name)
if not name:
raise LintError(_("Empty tag name"))
if type == "StartTag" and name in voidElements:
raise LintError(_("Void element reported as StartTag token: %s") % name)
elif type == "EmptyTag" and name not in voidElements:
raise LintError(_("Non-void element reported as EmptyTag token: %s") % token["name"])
if type == "StartTag":
open_elements.append(name)
for name, value in token["data"]:
if not isinstance(name, str):
raise LintError(_("Attribute name is not a string: %r") % name)
if not name:
raise LintError(_("Empty attribute name"))
if not isinstance(value, str):
raise LintError(_("Attribute value is not a string: %r") % value)
if name in cdataElements:
contentModelFlag = "CDATA"
elif name in rcdataElements:
contentModelFlag = "RCDATA"
elif name == "plaintext":
contentModelFlag = "PLAINTEXT"
elif type == "EndTag":
name = token["name"]
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %r") % name)
if not name:
raise LintError(_("Empty tag name"))
if name in voidElements:
raise LintError(_("Void element reported as EndTag token: %s") % name)
start_name = open_elements.pop()
if start_name != name:
raise LintError(_("EndTag (%s) does not match StartTag (%s)") % (name, start_name))
contentModelFlag = "PCDATA"
elif type == "Comment":
if contentModelFlag != "PCDATA":
raise LintError(_("Comment not in PCDATA content model flag"))
elif type in ("Characters", "SpaceCharacters"):
data = token["data"]
if not isinstance(data, str):
raise LintError(_("Attribute name is not a string: %r") % data)
if not data:
raise LintError(_("%s token with empty data") % type)
if type == "SpaceCharacters":
data = data.strip(spaceCharacters)
if data:
raise LintError(_("Non-space character(s) found in SpaceCharacters token: ") % data)
elif type == "Doctype":
name = token["name"]
if contentModelFlag != "PCDATA":
raise LintError(_("Doctype not in PCDATA content model flag: %s") % name)
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %r") % name)
# XXX: what to do with token["data"] ?
elif type in ("ParseError", "SerializeError"):
pass
else:
raise LintError(_("Unknown token type: %s") % type)
yield token
| bsd-3-clause | 2,923,873,812,112,633,300 | 42.677419 | 108 | 0.510094 | false |
FIWARE-TMForum/business-ecosystem-charging-backend | src/wstore/__init__.py | 1 | 2300 | # -*- coding: utf-8 -*-
# Copyright (c) 2016 - 2017 CoNWeT Lab., Universidad Politécnica de Madrid
# This file belongs to the business-charging-backend
# of the Business API Ecosystem.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import sys
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from wstore.models import Context
from wstore.store_commons.utils.url import is_valid_url
from wstore.ordering.inventory_client import InventoryClient
from wstore.rss_adaptor.rss_manager import ProviderManager
testing = sys.argv[1:2] == ['test']
if not testing:
# Validate that a correct site and local_site has been provided
if not is_valid_url(settings.SITE) or not is_valid_url(settings.LOCAL_SITE):
        raise ImproperlyConfigured('SITE and LOCAL_SITE settings must be valid URLs')
# Create context object if it does not exists
if not len(Context.objects.all()):
Context.objects.create()
inventory = InventoryClient()
inventory.create_inventory_subscription()
# Create RSS default aggregator and provider
credentials = {
'user': settings.STORE_NAME,
'roles': [settings.ADMIN_ROLE],
'email': settings.WSTOREMAIL
}
prov_manager = ProviderManager(credentials)
try:
prov_manager.register_aggregator({
'aggregatorId': settings.WSTOREMAIL,
'aggregatorName': settings.STORE_NAME,
'defaultAggregator': True
})
    except Exception as e:  # A conflict error means the aggregator is already registered
if e.response.status_code != 409:
raise e
| agpl-3.0 | -815,755,948,076,390,000 | 34.921875 | 104 | 0.721618 | false |
aequitas/home-assistant | homeassistant/components/conversation/__init__.py | 7 | 5514 | """Support for functionality to have conversations with Home Assistant."""
import logging
import re
import voluptuous as vol
from homeassistant import core
from homeassistant.components import http
from homeassistant.components.cover import (
INTENT_CLOSE_COVER, INTENT_OPEN_COVER)
from homeassistant.components.http.data_validator import RequestDataValidator
from homeassistant.const import EVENT_COMPONENT_LOADED
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv, intent
from homeassistant.loader import bind_hass
from homeassistant.setup import ATTR_COMPONENT
from .util import create_matcher
_LOGGER = logging.getLogger(__name__)
ATTR_TEXT = 'text'
DOMAIN = 'conversation'
REGEX_TURN_COMMAND = re.compile(r'turn (?P<name>(?: |\w)+) (?P<command>\w+)')
REGEX_TYPE = type(re.compile(''))
UTTERANCES = {
'cover': {
INTENT_OPEN_COVER: ['Open [the] [a] [an] {name}[s]'],
INTENT_CLOSE_COVER: ['Close [the] [a] [an] {name}[s]']
}
}
SERVICE_PROCESS = 'process'
SERVICE_PROCESS_SCHEMA = vol.Schema({
vol.Required(ATTR_TEXT): cv.string,
})
CONFIG_SCHEMA = vol.Schema({DOMAIN: vol.Schema({
vol.Optional('intents'): vol.Schema({
cv.string: vol.All(cv.ensure_list, [cv.string])
})
})}, extra=vol.ALLOW_EXTRA)
@core.callback
@bind_hass
def async_register(hass, intent_type, utterances):
"""Register utterances and any custom intents.
Registrations don't require conversations to be loaded. They will become
active once the conversation component is loaded.
"""
intents = hass.data.get(DOMAIN)
if intents is None:
intents = hass.data[DOMAIN] = {}
conf = intents.get(intent_type)
if conf is None:
conf = intents[intent_type] = []
for utterance in utterances:
if isinstance(utterance, REGEX_TYPE):
conf.append(utterance)
else:
conf.append(create_matcher(utterance))
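# --- Editor's note: illustrative sketch, not part of the original component. ---
# Registering a custom intent with bracketed optional words; the intent name
# 'HassLightsDim' and the sentences are hypothetical.
def _example_register_custom_intent(hass):
    async_register(hass, 'HassLightsDim', [
        'Dim [the] {name}[s]',
        'Dim [the] {name}[s] to {brightness} percent',
    ])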
async def async_setup(hass, config):
"""Register the process service."""
config = config.get(DOMAIN, {})
intents = hass.data.get(DOMAIN)
if intents is None:
intents = hass.data[DOMAIN] = {}
for intent_type, utterances in config.get('intents', {}).items():
conf = intents.get(intent_type)
if conf is None:
conf = intents[intent_type] = []
conf.extend(create_matcher(utterance) for utterance in utterances)
async def process(service):
"""Parse text into commands."""
text = service.data[ATTR_TEXT]
_LOGGER.debug('Processing: <%s>', text)
try:
await _process(hass, text)
except intent.IntentHandleError as err:
_LOGGER.error('Error processing %s: %s', text, err)
hass.services.async_register(
DOMAIN, SERVICE_PROCESS, process, schema=SERVICE_PROCESS_SCHEMA)
hass.http.register_view(ConversationProcessView)
    # We strip the trailing 's' from the name because our state matcher will
    # fail if that letter is missing. By removing the 's' we can match both
    # singular and plural names.
async_register(hass, intent.INTENT_TURN_ON, [
'Turn [the] [a] {name}[s] on',
'Turn on [the] [a] [an] {name}[s]',
])
async_register(hass, intent.INTENT_TURN_OFF, [
'Turn [the] [a] [an] {name}[s] off',
'Turn off [the] [a] [an] {name}[s]',
])
async_register(hass, intent.INTENT_TOGGLE, [
'Toggle [the] [a] [an] {name}[s]',
'[the] [a] [an] {name}[s] toggle',
])
@callback
def register_utterances(component):
"""Register utterances for a component."""
if component not in UTTERANCES:
return
for intent_type, sentences in UTTERANCES[component].items():
async_register(hass, intent_type, sentences)
@callback
def component_loaded(event):
"""Handle a new component loaded."""
register_utterances(event.data[ATTR_COMPONENT])
hass.bus.async_listen(EVENT_COMPONENT_LOADED, component_loaded)
# Check already loaded components.
for component in hass.config.components:
register_utterances(component)
return True
async def _process(hass, text):
"""Process a line of text."""
intents = hass.data.get(DOMAIN, {})
for intent_type, matchers in intents.items():
for matcher in matchers:
match = matcher.match(text)
if not match:
continue
response = await hass.helpers.intent.async_handle(
DOMAIN, intent_type,
{key: {'value': value} for key, value
in match.groupdict().items()}, text)
return response
class ConversationProcessView(http.HomeAssistantView):
"""View to retrieve shopping list content."""
url = '/api/conversation/process'
name = "api:conversation:process"
@RequestDataValidator(vol.Schema({
vol.Required('text'): str,
}))
async def post(self, request, data):
"""Send a request for processing."""
hass = request.app['hass']
try:
intent_result = await _process(hass, data['text'])
except intent.IntentHandleError as err:
intent_result = intent.IntentResponse()
intent_result.async_set_speech(str(err))
if intent_result is None:
intent_result = intent.IntentResponse()
intent_result.async_set_speech("Sorry, I didn't understand that")
return self.json(intent_result)
| apache-2.0 | 7,398,157,632,579,395,000 | 28.967391 | 77 | 0.63765 | false |
valentin-krasontovitsch/ansible | lib/ansible/plugins/doc_fragments/docker.py | 5 | 5695 | # -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# Docker doc fragment
DOCUMENTATION = r'''
options:
docker_host:
description:
- The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
TCP connection string. For example, C(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
the module will automatically replace C(tcp) in the connection URL with C(https).
- If the value is not specified in the task, the value of environment variable C(DOCKER_HOST) will be used
instead. If the environment variable is not set, the default value will be used.
type: str
default: unix://var/run/docker.sock
aliases: [ docker_url ]
tls_hostname:
description:
- When verifying the authenticity of the Docker Host server, provide the expected name of the server.
- If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_HOSTNAME) will
be used instead. If the environment variable is not set, the default value will be used.
type: str
default: localhost
api_version:
description:
- The version of the Docker API running on the Docker Host.
- Defaults to the latest version of the API supported by docker-py.
- If the value is not specified in the task, the value of environment variable C(DOCKER_API_VERSION) will be
used instead. If the environment variable is not set, the default value will be used.
type: str
default: auto
aliases: [ docker_api_version ]
timeout:
description:
- The maximum amount of time in seconds to wait on a response from the API.
- If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) will be used
instead. If the environment variable is not set, the default value will be used.
type: int
default: 60
cacert_path:
description:
- Use a CA certificate when performing server verification by providing the path to a CA certificate file.
- If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
the file C(ca.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
type: str
aliases: [ tls_ca_cert ]
cert_path:
description:
- Path to the client's TLS certificate file.
- If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
the file C(cert.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
type: str
aliases: [ tls_client_cert ]
key_path:
description:
- Path to the client's TLS key file.
- If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
the file C(key.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
type: str
aliases: [ tls_client_key ]
ssl_version:
description:
- Provide a valid SSL version number. Default value determined by ssl.py module.
- If the value is not specified in the task, the value of environment variable C(DOCKER_SSL_VERSION) will be
used instead.
type: str
tls:
description:
- Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
server.
- If the value is not specified in the task, the value of environment variable C(DOCKER_TLS) will be used
instead. If the environment variable is not set, the default value will be used.
type: bool
default: false
tls_verify:
description:
- Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
- If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_VERIFY) will be
used instead. If the environment variable is not set, the default value will be used.
type: bool
default: false
debug:
description:
- Debug mode
type: bool
default: false
notes:
- Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
You can define C(DOCKER_HOST), C(DOCKER_TLS_HOSTNAME), C(DOCKER_API_VERSION), C(DOCKER_CERT_PATH), C(DOCKER_SSL_VERSION),
C(DOCKER_TLS), C(DOCKER_TLS_VERIFY) and C(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped
with the product that sets up the environment. It will set these variables for you. See
U(https://docker-py.readthedocs.io/en/stable/machine/) for more details.
- When connecting to Docker daemon with TLS, you might need to install additional Python packages.
For the Docker SDK for Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(pip).
- Note that the Docker SDK for Python only allows to specify the path to the Docker configuration for very few functions.
In general, it will use C($HOME/docker/config.json) if the C(DOCKER_CONFIG) environment variable is not specified,
and use C($DOCKER_CONFIG/config.json) otherwise.
'''
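# --- Editor's note: illustrative, hedged example of how the shared options above
# are typically supplied in a task; the module name (docker_container) and all
# values are hypothetical and not defined by this fragment:
#
#   - name: Manage a container on a TLS-verified remote daemon
#     docker_container:
#       name: web
#       image: nginx:alpine
#       docker_host: tcp://192.0.2.23:2376
#       tls_verify: yes
#       cacert_path: /etc/docker/certs/ca.pem
#       cert_path: /etc/docker/certs/cert.pem
#       key_path: /etc/docker/certs/key.pem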
| gpl-3.0 | -6,953,489,562,432,565,000 | 53.759615 | 127 | 0.661457 | false |
mbauskar/sapphire-erpnext | erpnext/stock/get_item_details.py | 1 | 15380 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, throw
from frappe.utils import flt, cint, add_days, cstr
import json
from erpnext.accounts.doctype.pricing_rule.pricing_rule import get_pricing_rule_for_item
from erpnext.setup.utils import get_exchange_rate
from frappe.model.meta import get_field_precision
@frappe.whitelist()
def get_item_details(args):
"""
args = {
"item_code": "",
"warehouse": None,
"customer": "",
"conversion_rate": 1.0,
"selling_price_list": None,
"price_list_currency": None,
"plc_conversion_rate": 1.0,
"parenttype": "",
"parent": "",
"supplier": None,
"transaction_date": None,
"conversion_rate": 1.0,
"buying_price_list": None,
"is_subcontracted": "Yes" / "No",
"transaction_type": "selling",
"ignore_pricing_rule": 0/1
"project_name": "",
}
"""
args = process_args(args)
item_doc = frappe.get_doc("Item", args.item_code)
item = item_doc
validate_item_details(args, item)
out = get_basic_details(args, item)
get_party_item_code(args, item_doc, out)
if out.get("warehouse"):
out.update(get_available_qty(args.item_code, out.warehouse))
out.update(get_projected_qty(item.name, out.warehouse))
get_price_list_rate(args, item_doc, out)
if args.transaction_type == "selling" and cint(args.is_pos):
out.update(get_pos_profile_item_details(args.company, args))
# update args with out, if key or value not exists
for key, value in out.iteritems():
if args.get(key) is None:
args[key] = value
out.update(get_pricing_rule_for_item(args))
if args.get("parenttype") in ("Sales Invoice", "Delivery Note"):
if item_doc.has_serial_no == 1 and not args.serial_no:
out.serial_no = get_serial_nos_by_fifo(args, item_doc)
if args.transaction_date and item.lead_time_days:
out.schedule_date = out.lead_time_date = add_days(args.transaction_date,
item.lead_time_days)
if args.get("is_subcontracted") == "Yes":
out.bom = get_default_bom(args.item_code)
return out
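# --- Editor's note: illustrative call sketch, not part of the original module. ---
# All field values below are hypothetical and must exist in the target site;
# running it requires an initialised frappe/erpnext environment.
def _example_get_item_details():
    return get_item_details({
        "item_code": "ITEM-0001",
        "company": "My Company",
        "customer": "My Customer",
        "currency": "INR",
        "conversion_rate": 1.0,
        "price_list": "Standard Selling",
        "price_list_currency": "INR",
        "plc_conversion_rate": 1.0,
        "parenttype": "Sales Invoice",
        "transaction_type": "selling",
        "qty": 1
    })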
def process_args(args):
if isinstance(args, basestring):
args = json.loads(args)
args = frappe._dict(args)
if not args.get("transaction_type"):
if args.get("parenttype")=="Material Request" or \
frappe.get_meta(args.get("parenttype")).get_field("supplier"):
args.transaction_type = "buying"
else:
args.transaction_type = "selling"
if not args.get("price_list"):
args.price_list = args.get("selling_price_list") or args.get("buying_price_list")
if args.barcode:
args.item_code = get_item_code(barcode=args.barcode)
elif not args.item_code and args.serial_no:
args.item_code = get_item_code(serial_no=args.serial_no)
return args
@frappe.whitelist()
def get_item_code(barcode=None, serial_no=None):
if barcode:
item_code = frappe.db.get_value("Item", {"barcode": barcode})
if not item_code:
frappe.throw(_("No Item with Barcode {0}").format(barcode))
elif serial_no:
item_code = frappe.db.get_value("Serial No", serial_no, "item_code")
if not item_code:
frappe.throw(_("No Item with Serial No {0}").format(serial_no))
return item_code
def validate_item_details(args, item):
if not args.company:
throw(_("Please specify Company"))
from erpnext.stock.doctype.item.item import validate_end_of_life
validate_end_of_life(item.name, item.end_of_life)
if args.transaction_type == "selling":
# validate if sales item or service item
if args.get("order_type") == "Maintenance":
if item.is_service_item != 1:
throw(_("Item {0} must be a Service Item.").format(item.name))
elif item.is_sales_item != 1:
throw(_("Item {0} must be a Sales Item").format(item.name))
if cint(item.has_variants):
throw(_("Item {0} is a template, please select one of its variants").format(item.name))
elif args.transaction_type == "buying" and args.parenttype != "Material Request":
# validate if purchase item or subcontracted item
if item.is_purchase_item != 1:
throw(_("Item {0} must be a Purchase Item").format(item.name))
if args.get("is_subcontracted") == "Yes" and item.is_sub_contracted_item != 1:
throw(_("Item {0} must be a Sub-contracted Item").format(item.name))
def get_basic_details(args, item):
if not item:
item = frappe.get_doc("Item", args.get("item_code"))
if item.variant_of:
item.update_template_tables()
from frappe.defaults import get_user_default_as_list
user_default_warehouse_list = get_user_default_as_list('warehouse')
user_default_warehouse = user_default_warehouse_list[0] \
if len(user_default_warehouse_list)==1 else ""
out = frappe._dict({
"item_code": item.name,
"item_name": item.item_name,
"description": cstr(item.description).strip(),
"image": cstr(item.image).strip(),
"warehouse": user_default_warehouse or args.warehouse or item.default_warehouse,
"income_account": get_default_income_account(args, item),
"expense_account": get_default_expense_account(args, item),
"cost_center": get_default_cost_center(args, item),
"batch_no": None,
"item_tax_rate": json.dumps(dict(([d.tax_type, d.tax_rate] for d in
item.get("taxes")))),
"uom": item.stock_uom,
"min_order_qty": flt(item.min_order_qty) if args.parenttype == "Material Request" else "",
"conversion_factor": 1.0,
"qty": args.qty or 1.0,
"stock_qty": 1.0,
"price_list_rate": 0.0,
"base_price_list_rate": 0.0,
"rate": 0.0,
"base_rate": 0.0,
"amount": 0.0,
"base_amount": 0.0,
"net_rate": 0.0,
"net_amount": 0.0,
"discount_percentage": 0.0
})
# if default specified in item is for another company, fetch from company
for d in [["Account", "income_account", "default_income_account"], ["Account", "expense_account", "default_expense_account"],
["Cost Center", "cost_center", "cost_center"], ["Warehouse", "warehouse", ""]]:
company = frappe.db.get_value(d[0], out.get(d[1]), "company")
if not out[d[1]] or (company and args.company != company):
out[d[1]] = frappe.db.get_value("Company", args.company, d[2]) if d[2] else None
for fieldname in ("item_name", "item_group", "barcode", "brand", "stock_uom"):
out[fieldname] = item.get(fieldname)
return out
def get_default_income_account(args, item):
return (item.income_account
or args.income_account
or frappe.db.get_value("Item Group", item.item_group, "default_income_account"))
def get_default_expense_account(args, item):
return (item.expense_account
or args.expense_account
or frappe.db.get_value("Item Group", item.item_group, "default_expense_account"))
def get_default_cost_center(args, item):
return (frappe.db.get_value("Project", args.get("project_name"), "cost_center")
or (item.selling_cost_center if args.get("transaction_type") == "selling" else item.buying_cost_center)
or frappe.db.get_value("Item Group", item.item_group, "default_cost_center")
or args.get("cost_center"))
def get_price_list_rate(args, item_doc, out):
meta = frappe.get_meta(args.parenttype)
if meta.get_field("currency"):
validate_price_list(args)
validate_conversion_rate(args, meta)
price_list_rate = get_price_list_rate_for(args, item_doc.name)
if not price_list_rate and item_doc.variant_of:
price_list_rate = get_price_list_rate_for(args, item_doc.variant_of)
if not price_list_rate:
if args.price_list and args.rate:
insert_item_price(args)
return {}
out.price_list_rate = flt(price_list_rate) * flt(args.plc_conversion_rate) \
/ flt(args.conversion_rate)
if not out.price_list_rate and args.transaction_type == "buying":
from erpnext.stock.doctype.item.item import get_last_purchase_details
out.update(get_last_purchase_details(item_doc.name,
args.parent, args.conversion_rate))
def insert_item_price(args):
"""Insert Item Price if Price List and Price List Rate are specified and currency is the same"""
if frappe.db.get_value("Price List", args.price_list, "currency") == args.currency \
and cint(frappe.db.get_single_value("Stock Settings", "auto_insert_price_list_rate_if_missing")):
if frappe.has_permission("Item Price", "write"):
price_list_rate = args.rate / args.conversion_factor \
if args.get("conversion_factor") else args.rate
item_price = frappe.get_doc({
"doctype": "Item Price",
"price_list": args.price_list,
"item_code": args.item_code,
"currency": args.currency,
"price_list_rate": price_list_rate
})
item_price.insert()
frappe.msgprint("Item Price added for {0} in Price List {1}".format(args.item_code,
args.price_list))
def get_price_list_rate_for(args, item_code):
return frappe.db.get_value("Item Price",
{"price_list": args.price_list, "item_code": item_code}, "price_list_rate")
def validate_price_list(args):
if args.get("price_list"):
if not frappe.db.get_value("Price List",
{"name": args.price_list, args.transaction_type: 1, "enabled": 1}):
throw(_("Price List {0} is disabled").format(args.price_list))
else:
throw(_("Price List not selected"))
def validate_conversion_rate(args, meta):
from erpnext.controllers.accounts_controller import validate_conversion_rate
if (not args.conversion_rate
and args.currency==frappe.db.get_value("Company", args.company, "default_currency")):
args.conversion_rate = 1.0
# validate currency conversion rate
validate_conversion_rate(args.currency, args.conversion_rate,
meta.get_label("conversion_rate"), args.company)
args.conversion_rate = flt(args.conversion_rate,
get_field_precision(meta.get_field("conversion_rate"),
frappe._dict({"fields": args})))
# validate price list currency conversion rate
if not args.get("price_list_currency"):
throw(_("Price List Currency not selected"))
else:
validate_conversion_rate(args.price_list_currency, args.plc_conversion_rate,
meta.get_label("plc_conversion_rate"), args.company)
args.plc_conversion_rate = flt(args.plc_conversion_rate,
get_field_precision(meta.get_field("plc_conversion_rate"),
frappe._dict({"fields": args})))
def get_party_item_code(args, item_doc, out):
if args.transaction_type == "selling":
customer_item_code = item_doc.get("customer_items", {"customer_name": args.customer})
out.customer_item_code = customer_item_code[0].ref_code if customer_item_code else None
else:
item_supplier = item_doc.get("supplier_items", {"supplier": args.supplier})
out.supplier_part_no = item_supplier[0].supplier_part_no if item_supplier else None
def get_pos_profile_item_details(company, args, pos_profile=None):
res = frappe._dict()
if not pos_profile:
pos_profile = get_pos_profile(company)
if pos_profile:
for fieldname in ("income_account", "cost_center", "warehouse", "expense_account"):
if not args.get(fieldname) and pos_profile.get(fieldname):
res[fieldname] = pos_profile.get(fieldname)
if res.get("warehouse"):
res.actual_qty = get_available_qty(args.item_code,
res.warehouse).get("actual_qty")
return res
@frappe.whitelist()
def get_pos_profile(company):
pos_profile = frappe.db.sql("""select * from `tabPOS Profile` where user = %s
and company = %s""", (frappe.session['user'], company), as_dict=1)
if not pos_profile:
pos_profile = frappe.db.sql("""select * from `tabPOS Profile`
where ifnull(user,'') = '' and company = %s""", company, as_dict=1)
return pos_profile and pos_profile[0] or None
def get_serial_nos_by_fifo(args, item_doc):
if frappe.db.get_single_value("Stock Settings", "automatically_set_serial_nos_based_on_fifo"):
return "\n".join(frappe.db.sql_list("""select name from `tabSerial No`
where item_code=%(item_code)s and warehouse=%(warehouse)s
order by timestamp(purchase_date, purchase_time) asc limit %(qty)s""", {
"item_code": args.item_code,
"warehouse": args.warehouse,
"qty": abs(cint(args.qty))
}))
def get_actual_batch_qty(batch_no,warehouse,item_code):
actual_batch_qty = 0
if batch_no:
actual_batch_qty = flt(frappe.db.sql("""select sum(actual_qty)
from `tabStock Ledger Entry`
where warehouse=%s and item_code=%s and batch_no=%s""",
(warehouse, item_code, batch_no))[0][0])
return actual_batch_qty
@frappe.whitelist()
def get_conversion_factor(item_code, uom):
variant_of = frappe.db.get_value("Item", item_code, "variant_of")
filters = {"parent": item_code, "uom": uom}
if variant_of:
filters["parent"] = ("in", (item_code, variant_of))
return {"conversion_factor": frappe.db.get_value("UOM Conversion Detail",
filters, "conversion_factor")}
@frappe.whitelist()
def get_projected_qty(item_code, warehouse):
return {"projected_qty": frappe.db.get_value("Bin",
{"item_code": item_code, "warehouse": warehouse}, "projected_qty")}
@frappe.whitelist()
def get_available_qty(item_code, warehouse):
return frappe.db.get_value("Bin", {"item_code": item_code, "warehouse": warehouse},
["projected_qty", "actual_qty"], as_dict=True) or {}
@frappe.whitelist()
def get_batch_qty(batch_no,warehouse,item_code):
actual_batch_qty = get_actual_batch_qty(batch_no,warehouse,item_code)
if batch_no:
return {'actual_batch_qty': actual_batch_qty}
@frappe.whitelist()
def apply_price_list(args):
"""
args = {
"item_list": [{"doctype": "", "name": "", "item_code": "", "brand": "", "item_group": ""}, ...],
"conversion_rate": 1.0,
"selling_price_list": None,
"price_list_currency": None,
"plc_conversion_rate": 1.0,
"parenttype": "",
"parent": "",
"supplier": None,
"transaction_date": None,
"conversion_rate": 1.0,
"buying_price_list": None,
"transaction_type": "selling",
"ignore_pricing_rule": 0/1
}
"""
args = process_args(args)
parent = get_price_list_currency_and_exchange_rate(args)
children = []
if "item_list" in args:
item_list = args.get("item_list")
del args["item_list"]
args.update(parent)
for item in item_list:
args_copy = frappe._dict(args.copy())
args_copy.update(item)
item_details = apply_price_list_on_item(args_copy)
children.append(item_details)
return {
"parent": parent,
"children": children
}
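# --- Editor's note: illustrative call sketch, not part of the original module. ---
# The single-row item_list and the other values are hypothetical; the call
# assumes an initialised frappe/erpnext site containing these records.
def _example_apply_price_list():
    return apply_price_list({
        "item_list": [{"doctype": "Sales Invoice Item", "name": "row-1",
                       "item_code": "ITEM-0001"}],
        "company": "My Company",
        "currency": "INR",
        "price_list": "Standard Selling",
        "price_list_currency": "INR",
        "plc_conversion_rate": 1.0,
        "conversion_rate": 1.0,
        "parenttype": "Sales Invoice",
        "transaction_type": "selling",
    })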
def apply_price_list_on_item(args):
item_details = frappe._dict()
item_doc = frappe.get_doc("Item", args.item_code)
get_price_list_rate(args, item_doc, item_details)
item_details.update(get_pricing_rule_for_item(args))
return item_details
def get_price_list_currency(price_list):
if price_list:
result = frappe.db.get_value("Price List", {"name": price_list,
"enabled": 1}, ["name", "currency"], as_dict=True)
if not result:
throw(_("Price List {0} is disabled").format(price_list))
return result.currency
def get_price_list_currency_and_exchange_rate(args):
price_list_currency = get_price_list_currency(args.price_list)
plc_conversion_rate = args.plc_conversion_rate
if (not plc_conversion_rate) or (price_list_currency and args.price_list_currency \
and price_list_currency != args.price_list_currency):
plc_conversion_rate = get_exchange_rate(price_list_currency, args.currency) or plc_conversion_rate
return {
"price_list_currency": price_list_currency,
"plc_conversion_rate": plc_conversion_rate
}
@frappe.whitelist()
def get_default_bom(item_code=None):
if item_code:
bom = frappe.db.get_value("BOM", {"docstatus": 1, "is_default": 1, "is_active": 1, "item": item_code})
if bom:
return bom
else:
frappe.throw(_("No default BOM exists for Item {0}").format(item_code))
| agpl-3.0 | 3,590,138,322,818,248,700 | 33.407159 | 126 | 0.692588 | false |
Krossom/python-for-android | python-modules/twisted/twisted/words/test/test_irc.py | 49 | 58572 | # Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.words.protocols.irc}.
"""
import time
from twisted.trial import unittest
from twisted.trial.unittest import TestCase
from twisted.words.protocols import irc
from twisted.words.protocols.irc import IRCClient
from twisted.internet import protocol
from twisted.test.proto_helpers import StringTransport, StringIOWithoutClosing
class ModeParsingTests(unittest.TestCase):
"""
Tests for L{twisted.words.protocols.irc.parseModes}.
"""
paramModes = ('klb', 'b')
def test_emptyModes(self):
"""
Parsing an empty mode string raises L{irc.IRCBadModes}.
"""
self.assertRaises(irc.IRCBadModes, irc.parseModes, '', [])
def test_emptyModeSequence(self):
"""
Parsing a mode string that contains an empty sequence (either a C{+} or
C{-} followed directly by another C{+} or C{-}, or not followed by
anything at all) raises L{irc.IRCBadModes}.
"""
self.assertRaises(irc.IRCBadModes, irc.parseModes, '++k', [])
self.assertRaises(irc.IRCBadModes, irc.parseModes, '-+k', [])
self.assertRaises(irc.IRCBadModes, irc.parseModes, '+', [])
self.assertRaises(irc.IRCBadModes, irc.parseModes, '-', [])
def test_malformedModes(self):
"""
Parsing a mode string that does not start with C{+} or C{-} raises
L{irc.IRCBadModes}.
"""
self.assertRaises(irc.IRCBadModes, irc.parseModes, 'foo', [])
self.assertRaises(irc.IRCBadModes, irc.parseModes, '%', [])
def test_nullModes(self):
"""
Parsing a mode string that contains no mode characters raises
L{irc.IRCBadModes}.
"""
self.assertRaises(irc.IRCBadModes, irc.parseModes, '+', [])
self.assertRaises(irc.IRCBadModes, irc.parseModes, '-', [])
def test_singleMode(self):
"""
Parsing a single mode setting with no parameters results in that mode,
with no parameters, in the "added" direction and no modes in the
"removed" direction.
"""
added, removed = irc.parseModes('+s', [])
self.assertEquals(added, [('s', None)])
self.assertEquals(removed, [])
added, removed = irc.parseModes('-s', [])
self.assertEquals(added, [])
self.assertEquals(removed, [('s', None)])
def test_singleDirection(self):
"""
Parsing a single-direction mode setting with multiple modes and no
parameters, results in all modes falling into the same direction group.
"""
added, removed = irc.parseModes('+stn', [])
self.assertEquals(added, [('s', None),
('t', None),
('n', None)])
self.assertEquals(removed, [])
added, removed = irc.parseModes('-nt', [])
self.assertEquals(added, [])
self.assertEquals(removed, [('n', None),
('t', None)])
def test_multiDirection(self):
"""
Parsing a multi-direction mode setting with no parameters.
"""
added, removed = irc.parseModes('+s-n+ti', [])
self.assertEquals(added, [('s', None),
('t', None),
('i', None)])
self.assertEquals(removed, [('n', None)])
def test_consecutiveDirection(self):
"""
Parsing a multi-direction mode setting containing two consecutive mode
sequences with the same direction results in the same result as if
there were only one mode sequence in the same direction.
"""
added, removed = irc.parseModes('+sn+ti', [])
self.assertEquals(added, [('s', None),
('n', None),
('t', None),
('i', None)])
self.assertEquals(removed, [])
def test_mismatchedParams(self):
"""
If the number of mode parameters does not match the number of modes
expecting parameters, L{irc.IRCBadModes} is raised.
"""
self.assertRaises(irc.IRCBadModes,
irc.parseModes,
'+k', [],
self.paramModes)
self.assertRaises(irc.IRCBadModes,
irc.parseModes,
'+kl', ['foo', '10', 'lulz_extra_param'],
self.paramModes)
def test_parameters(self):
"""
Modes which require parameters are parsed and paired with their relevant
parameter, modes which do not require parameters do not consume any of
the parameters.
"""
added, removed = irc.parseModes(
'+klbb',
['somekey', '42', 'nick!user@host', 'other!*@*'],
self.paramModes)
self.assertEquals(added, [('k', 'somekey'),
('l', '42'),
('b', 'nick!user@host'),
('b', 'other!*@*')])
self.assertEquals(removed, [])
added, removed = irc.parseModes(
'-klbb',
['nick!user@host', 'other!*@*'],
self.paramModes)
self.assertEquals(added, [])
self.assertEquals(removed, [('k', None),
('l', None),
('b', 'nick!user@host'),
('b', 'other!*@*')])
# Mix a no-argument mode in with argument modes.
added, removed = irc.parseModes(
'+knbb',
['somekey', 'nick!user@host', 'other!*@*'],
self.paramModes)
self.assertEquals(added, [('k', 'somekey'),
('n', None),
('b', 'nick!user@host'),
('b', 'other!*@*')])
self.assertEquals(removed, [])
stringSubjects = [
"Hello, this is a nice string with no complications.",
"xargs%(NUL)smight%(NUL)slike%(NUL)sthis" % {'NUL': irc.NUL },
"embedded%(CR)snewline%(CR)s%(NL)sFUN%(NL)s" % {'CR': irc.CR,
'NL': irc.NL},
"escape!%(X)s escape!%(M)s %(X)s%(X)sa %(M)s0" % {'X': irc.X_QUOTE,
'M': irc.M_QUOTE}
]
class QuotingTest(unittest.TestCase):
def test_lowquoteSanity(self):
"""Testing client-server level quote/dequote"""
for s in stringSubjects:
self.failUnlessEqual(s, irc.lowDequote(irc.lowQuote(s)))
def test_ctcpquoteSanity(self):
"""Testing CTCP message level quote/dequote"""
for s in stringSubjects:
self.failUnlessEqual(s, irc.ctcpDequote(irc.ctcpQuote(s)))
class Dispatcher(irc._CommandDispatcherMixin):
"""
A dispatcher that exposes one known command and handles unknown commands.
"""
prefix = 'disp'
def disp_working(self, a, b):
"""
A known command that returns its input.
"""
return a, b
def disp_unknown(self, name, a, b):
"""
Handle unknown commands by returning their name and inputs.
"""
return name, a, b
class DispatcherTests(unittest.TestCase):
"""
Tests for L{irc._CommandDispatcherMixin}.
"""
def test_dispatch(self):
"""
Dispatching a command invokes the correct handler.
"""
disp = Dispatcher()
args = (1, 2)
res = disp.dispatch('working', *args)
self.assertEquals(res, args)
def test_dispatchUnknown(self):
"""
Dispatching an unknown command invokes the default handler.
"""
disp = Dispatcher()
name = 'missing'
args = (1, 2)
res = disp.dispatch(name, *args)
self.assertEquals(res, (name,) + args)
def test_dispatchMissingUnknown(self):
"""
Dispatching an unknown command, when no default handler is present,
results in an exception being raised.
"""
disp = Dispatcher()
disp.disp_unknown = None
self.assertRaises(irc.UnhandledCommand, disp.dispatch, 'bar')
class ServerSupportedFeatureTests(unittest.TestCase):
"""
Tests for L{ServerSupportedFeatures} and related functions.
"""
def test_intOrDefault(self):
"""
L{_intOrDefault} converts values to C{int} if possible, otherwise
returns a default value.
"""
self.assertEquals(irc._intOrDefault(None), None)
self.assertEquals(irc._intOrDefault([]), None)
self.assertEquals(irc._intOrDefault(''), None)
self.assertEquals(irc._intOrDefault('hello', 5), 5)
self.assertEquals(irc._intOrDefault('123'), 123)
self.assertEquals(irc._intOrDefault(123), 123)
def test_splitParam(self):
"""
L{ServerSupportedFeatures._splitParam} splits ISUPPORT parameters
into key and values. Parameters without a separator are split into a
key and a list containing only the empty string. Escaped parameters
are unescaped.
"""
params = [('FOO', ('FOO', [''])),
('FOO=', ('FOO', [''])),
('FOO=1', ('FOO', ['1'])),
('FOO=1,2,3', ('FOO', ['1', '2', '3'])),
('FOO=A\\x20B', ('FOO', ['A B'])),
('FOO=\\x5Cx', ('FOO', ['\\x'])),
('FOO=\\', ('FOO', ['\\'])),
('FOO=\\n', ('FOO', ['\\n']))]
_splitParam = irc.ServerSupportedFeatures._splitParam
for param, expected in params:
res = _splitParam(param)
self.assertEquals(res, expected)
self.assertRaises(ValueError, _splitParam, 'FOO=\\x')
self.assertRaises(ValueError, _splitParam, 'FOO=\\xNN')
self.assertRaises(ValueError, _splitParam, 'FOO=\\xN')
self.assertRaises(ValueError, _splitParam, 'FOO=\\x20\\x')
def test_splitParamArgs(self):
"""
L{ServerSupportedFeatures._splitParamArgs} splits ISUPPORT parameter
arguments into key and value. Arguments without a separator are
split into a key and an empty string.
"""
res = irc.ServerSupportedFeatures._splitParamArgs(['A:1', 'B:2', 'C:', 'D'])
self.assertEquals(res, [('A', '1'),
('B', '2'),
('C', ''),
('D', '')])
def test_splitParamArgsProcessor(self):
"""
L{ServerSupportedFeatures._splitParamArgs} uses the argument processor
        passed to it to convert ISUPPORT argument values to some more suitable
form.
"""
res = irc.ServerSupportedFeatures._splitParamArgs(['A:1', 'B:2', 'C'],
irc._intOrDefault)
self.assertEquals(res, [('A', 1),
('B', 2),
('C', None)])
def test_parsePrefixParam(self):
"""
L{ServerSupportedFeatures._parsePrefixParam} parses the ISUPPORT PREFIX
parameter into a mapping from modes to prefix symbols, returns
C{None} if there is no parseable prefix parameter or raises
C{ValueError} if the prefix parameter is malformed.
"""
_parsePrefixParam = irc.ServerSupportedFeatures._parsePrefixParam
self.assertEquals(_parsePrefixParam(''), None)
self.assertRaises(ValueError, _parsePrefixParam, 'hello')
self.assertEquals(_parsePrefixParam('(ov)@+'),
{'o': ('@', 0),
'v': ('+', 1)})
def test_parseChanModesParam(self):
"""
L{ServerSupportedFeatures._parseChanModesParam} parses the ISUPPORT
CHANMODES parameter into a mapping from mode categories to mode
characters. Passing fewer than 4 parameters results in the empty string
for the relevant categories. Passing more than 4 parameters raises
C{ValueError}.
"""
_parseChanModesParam = irc.ServerSupportedFeatures._parseChanModesParam
self.assertEquals(
_parseChanModesParam([]),
{'addressModes': '',
'param': '',
'setParam': '',
'noParam': ''})
self.assertEquals(
_parseChanModesParam(['b', 'k', 'l', 'imnpst']),
{'addressModes': 'b',
'param': 'k',
'setParam': 'l',
'noParam': 'imnpst'})
self.assertEquals(
_parseChanModesParam(['b', 'k', 'l']),
{'addressModes': 'b',
'param': 'k',
'setParam': 'l',
'noParam': ''})
self.assertRaises(
ValueError,
_parseChanModesParam, ['a', 'b', 'c', 'd', 'e'])
def test_parse(self):
"""
L{ServerSupportedFeatures.parse} changes the internal state of the
instance to reflect the features indicated by the parsed ISUPPORT
parameters, including unknown parameters and unsetting previously set
parameters.
"""
supported = irc.ServerSupportedFeatures()
supported.parse(['MODES=4',
'CHANLIMIT=#:20,&:10',
'INVEX',
'EXCEPTS=Z',
'UNKNOWN=A,B,C'])
self.assertEquals(supported.getFeature('MODES'), 4)
self.assertEquals(supported.getFeature('CHANLIMIT'),
[('#', 20),
('&', 10)])
self.assertEquals(supported.getFeature('INVEX'), 'I')
self.assertEquals(supported.getFeature('EXCEPTS'), 'Z')
self.assertEquals(supported.getFeature('UNKNOWN'), ('A', 'B', 'C'))
self.assertTrue(supported.hasFeature('INVEX'))
supported.parse(['-INVEX'])
self.assertFalse(supported.hasFeature('INVEX'))
# Unsetting a previously unset parameter should not be a problem.
supported.parse(['-INVEX'])
def _parse(self, features):
"""
Parse all specified features according to the ISUPPORT specifications.
@type features: C{list} of C{(featureName, value)}
@param features: Feature names and values to parse
@rtype: L{irc.ServerSupportedFeatures}
"""
supported = irc.ServerSupportedFeatures()
features = ['%s=%s' % (name, value or '')
for name, value in features]
supported.parse(features)
return supported
def _parseFeature(self, name, value=None):
"""
Parse a feature, with the given name and value, according to the
ISUPPORT specifications and return the parsed value.
"""
supported = self._parse([(name, value)])
return supported.getFeature(name)
def _testIntOrDefaultFeature(self, name, default=None):
"""
Perform some common tests on a feature known to use L{_intOrDefault}.
"""
self.assertEquals(
self._parseFeature(name, None),
default)
self.assertEquals(
self._parseFeature(name, 'notanint'),
default)
self.assertEquals(
self._parseFeature(name, '42'),
42)
def _testFeatureDefault(self, name, features=None):
"""
Features known to have default values are reported as being present by
L{irc.ServerSupportedFeatures.hasFeature}, and their value defaults
correctly, when they don't appear in an ISUPPORT message.
"""
default = irc.ServerSupportedFeatures()._features[name]
if features is None:
features = [('DEFINITELY_NOT', 'a_feature')]
supported = self._parse(features)
self.assertTrue(supported.hasFeature(name))
self.assertEquals(supported.getFeature(name), default)
def test_support_CHANMODES(self):
"""
The CHANMODES ISUPPORT parameter is parsed into a C{dict} giving the
four mode categories, C{'addressModes'}, C{'param'}, C{'setParam'}, and
C{'noParam'}.
"""
self._testFeatureDefault('CHANMODES')
self._testFeatureDefault('CHANMODES', [('CHANMODES', 'b,,lk,')])
self._testFeatureDefault('CHANMODES', [('CHANMODES', 'b,,lk,ha,ha')])
self.assertEquals(
self._parseFeature('CHANMODES', ''),
{'addressModes': '',
'param': '',
'setParam': '',
'noParam': ''})
self.assertEquals(
self._parseFeature('CHANMODES', ',A'),
{'addressModes': '',
'param': 'A',
'setParam': '',
'noParam': ''})
self.assertEquals(
self._parseFeature('CHANMODES', 'A,Bc,Def,Ghij'),
{'addressModes': 'A',
'param': 'Bc',
'setParam': 'Def',
'noParam': 'Ghij'})
def test_support_IDCHAN(self):
"""
The IDCHAN support parameter is parsed into a sequence of two-tuples
giving channel prefix and ID length pairs.
"""
self.assertEquals(
self._parseFeature('IDCHAN', '!:5'),
[('!', '5')])
def test_support_MAXLIST(self):
"""
The MAXLIST support parameter is parsed into a sequence of two-tuples
giving modes and their limits.
"""
self.assertEquals(
self._parseFeature('MAXLIST', 'b:25,eI:50'),
[('b', 25), ('eI', 50)])
# A non-integer parameter argument results in None.
self.assertEquals(
self._parseFeature('MAXLIST', 'b:25,eI:50,a:3.1415'),
[('b', 25), ('eI', 50), ('a', None)])
self.assertEquals(
self._parseFeature('MAXLIST', 'b:25,eI:50,a:notanint'),
[('b', 25), ('eI', 50), ('a', None)])
def test_support_NETWORK(self):
"""
The NETWORK support parameter is parsed as the network name, as
specified by the server.
"""
self.assertEquals(
self._parseFeature('NETWORK', 'IRCNet'),
'IRCNet')
def test_support_SAFELIST(self):
"""
The SAFELIST support parameter is parsed into a boolean indicating
whether the safe "list" command is supported or not.
"""
self.assertEquals(
self._parseFeature('SAFELIST'),
True)
def test_support_STATUSMSG(self):
"""
The STATUSMSG support parameter is parsed into a string of channel
status that support the exclusive channel notice method.
"""
self.assertEquals(
self._parseFeature('STATUSMSG', '@+'),
'@+')
def test_support_TARGMAX(self):
"""
The TARGMAX support parameter is parsed into a dictionary, mapping
strings to integers, of the maximum number of targets for a particular
command.
"""
self.assertEquals(
self._parseFeature('TARGMAX', 'PRIVMSG:4,NOTICE:3'),
{'PRIVMSG': 4,
'NOTICE': 3})
# A non-integer parameter argument results in None.
self.assertEquals(
self._parseFeature('TARGMAX', 'PRIVMSG:4,NOTICE:3,KICK:3.1415'),
{'PRIVMSG': 4,
'NOTICE': 3,
'KICK': None})
self.assertEquals(
self._parseFeature('TARGMAX', 'PRIVMSG:4,NOTICE:3,KICK:notanint'),
{'PRIVMSG': 4,
'NOTICE': 3,
'KICK': None})
def test_support_NICKLEN(self):
"""
The NICKLEN support parameter is parsed into an integer value
indicating the maximum length of a nickname the client may use,
otherwise, if the parameter is missing or invalid, the default value
(as specified by RFC 1459) is used.
"""
default = irc.ServerSupportedFeatures()._features['NICKLEN']
self._testIntOrDefaultFeature('NICKLEN', default)
def test_support_CHANNELLEN(self):
"""
The CHANNELLEN support parameter is parsed into an integer value
indicating the maximum channel name length, otherwise, if the
parameter is missing or invalid, the default value (as specified by
RFC 1459) is used.
"""
default = irc.ServerSupportedFeatures()._features['CHANNELLEN']
self._testIntOrDefaultFeature('CHANNELLEN', default)
def test_support_CHANTYPES(self):
"""
The CHANTYPES support parameter is parsed into a tuple of
valid channel prefix characters.
"""
self._testFeatureDefault('CHANTYPES')
self.assertEquals(
self._parseFeature('CHANTYPES', '#&%'),
('#', '&', '%'))
def test_support_KICKLEN(self):
"""
The KICKLEN support parameter is parsed into an integer value
indicating the maximum length of a kick message a client may use.
"""
self._testIntOrDefaultFeature('KICKLEN')
def test_support_PREFIX(self):
"""
The PREFIX support parameter is parsed into a dictionary mapping
modes to two-tuples of status symbol and priority.
"""
self._testFeatureDefault('PREFIX')
self._testFeatureDefault('PREFIX', [('PREFIX', 'hello')])
self.assertEquals(
self._parseFeature('PREFIX', None),
None)
self.assertEquals(
self._parseFeature('PREFIX', '(ohv)@%+'),
{'o': ('@', 0),
'h': ('%', 1),
'v': ('+', 2)})
self.assertEquals(
self._parseFeature('PREFIX', '(hov)@%+'),
{'o': ('%', 1),
'h': ('@', 0),
'v': ('+', 2)})
def test_support_TOPICLEN(self):
"""
The TOPICLEN support parameter is parsed into an integer value
indicating the maximum length of a topic a client may set.
"""
self._testIntOrDefaultFeature('TOPICLEN')
def test_support_MODES(self):
"""
The MODES support parameter is parsed into an integer value
indicating the maximum number of "variable" modes (defined as being
modes from C{addressModes}, C{param} or C{setParam} categories for
the C{CHANMODES} ISUPPORT parameter) which may be set on a channel
by a single MODE command from a client.
"""
self._testIntOrDefaultFeature('MODES')
def test_support_EXCEPTS(self):
"""
The EXCEPTS support parameter is parsed into the mode character
to be used for "ban exception" modes. If no parameter is specified
then the character C{e} is assumed.
"""
self.assertEquals(
self._parseFeature('EXCEPTS', 'Z'),
'Z')
self.assertEquals(
self._parseFeature('EXCEPTS'),
'e')
def test_support_INVEX(self):
"""
The INVEX support parameter is parsed into the mode character to be
used for "invite exception" modes. If no parameter is specified then
the character C{I} is assumed.
"""
self.assertEquals(
self._parseFeature('INVEX', 'Z'),
'Z')
self.assertEquals(
self._parseFeature('INVEX'),
'I')
class IRCClientWithoutLogin(irc.IRCClient):
performLogin = 0
class CTCPTest(unittest.TestCase):
def setUp(self):
self.file = StringIOWithoutClosing()
self.transport = protocol.FileWrapper(self.file)
self.client = IRCClientWithoutLogin()
self.client.makeConnection(self.transport)
def test_ERRMSG(self):
"""Testing CTCP query ERRMSG.
Not because this is an especially important case in the
field, but it does go through the entire dispatch/decode/encode
process.
"""
errQuery = (":[email protected] PRIVMSG #theChan :"
"%(X)cERRMSG t%(X)c%(EOL)s"
% {'X': irc.X_DELIM,
'EOL': irc.CR + irc.LF})
errReply = ("NOTICE nick :%(X)cERRMSG t :"
"No error has occoured.%(X)c%(EOL)s"
% {'X': irc.X_DELIM,
'EOL': irc.CR + irc.LF})
self.client.dataReceived(errQuery)
reply = self.file.getvalue()
self.failUnlessEqual(errReply, reply)
def test_noNumbersVERSION(self):
"""
If attributes for version information on L{IRCClient} are set to
C{None}, the parts of the CTCP VERSION response they correspond to
are omitted.
"""
self.client.versionName = "FrobozzIRC"
self.client.ctcpQuery_VERSION("[email protected]", "#theChan", None)
versionReply = ("NOTICE nick :%(X)cVERSION %(vname)s::"
"%(X)c%(EOL)s"
% {'X': irc.X_DELIM,
'EOL': irc.CR + irc.LF,
'vname': self.client.versionName})
reply = self.file.getvalue()
self.assertEquals(versionReply, reply)
def test_fullVERSION(self):
"""
The response to a CTCP VERSION query includes the version number and
environment information, as specified by L{IRCClient.versionNum} and
L{IRCClient.versionEnv}.
"""
self.client.versionName = "FrobozzIRC"
self.client.versionNum = "1.2g"
self.client.versionEnv = "ZorkOS"
self.client.ctcpQuery_VERSION("[email protected]", "#theChan", None)
versionReply = ("NOTICE nick :%(X)cVERSION %(vname)s:%(vnum)s:%(venv)s"
"%(X)c%(EOL)s"
% {'X': irc.X_DELIM,
'EOL': irc.CR + irc.LF,
'vname': self.client.versionName,
'vnum': self.client.versionNum,
'venv': self.client.versionEnv})
reply = self.file.getvalue()
self.assertEquals(versionReply, reply)
def tearDown(self):
self.transport.loseConnection()
self.client.connectionLost()
del self.client
del self.transport
class NoticingClient(IRCClientWithoutLogin, object):
methods = {
'created': ('when',),
'yourHost': ('info',),
'myInfo': ('servername', 'version', 'umodes', 'cmodes'),
'luserClient': ('info',),
'bounce': ('info',),
'isupport': ('options',),
'luserChannels': ('channels',),
'luserOp': ('ops',),
'luserMe': ('info',),
'receivedMOTD': ('motd',),
'privmsg': ('user', 'channel', 'message'),
'joined': ('channel',),
'left': ('channel',),
'noticed': ('user', 'channel', 'message'),
'modeChanged': ('user', 'channel', 'set', 'modes', 'args'),
'pong': ('user', 'secs'),
'signedOn': (),
'kickedFrom': ('channel', 'kicker', 'message'),
'nickChanged': ('nick',),
'userJoined': ('user', 'channel'),
'userLeft': ('user', 'channel'),
'userKicked': ('user', 'channel', 'kicker', 'message'),
'action': ('user', 'channel', 'data'),
'topicUpdated': ('user', 'channel', 'newTopic'),
'userRenamed': ('oldname', 'newname')}
def __init__(self, *a, **kw):
# IRCClient.__init__ is deliberately not called here: historically it did
# not exist, so nothing may be initialised in it that subclasses which do
# not (or cannot) invoke the base implementation would depend on. Any
# protocol initialisation should happen in connectionMade instead.
self.calls = []
def __getattribute__(self, name):
if name.startswith('__') and name.endswith('__'):
return super(NoticingClient, self).__getattribute__(name)
try:
args = super(NoticingClient, self).__getattribute__('methods')[name]
except KeyError:
return super(NoticingClient, self).__getattribute__(name)
else:
return self.makeMethod(name, args)
def makeMethod(self, fname, args):
def method(*a, **kw):
if len(a) > len(args):
raise TypeError("TypeError: %s() takes %d arguments "
"(%d given)" % (fname, len(args), len(a)))
for (name, value) in zip(args, a):
if name in kw:
raise TypeError("TypeError: %s() got multiple values "
"for keyword argument '%s'" % (fname, name))
else:
kw[name] = value
if len(kw) != len(args):
raise TypeError("TypeError: %s() takes %d arguments "
"(%d given)" % (fname, len(args), len(a)))
self.calls.append((fname, kw))
return method
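# A brief illustration (not part of the original tests) of what the generated
# methods record: assuming a connected NoticingClient that receives a JOIN
# for "#chan", accessing client.joined returns a method built by
# makeMethod('joined', ('channel',)), and calling client.joined('#chan')
# appends ('joined', {'channel': '#chan'}) to client.calls. Positional
# arguments are matched to the names declared in the 'methods' table above.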
def pop(dict, key, default):
try:
value = dict[key]
except KeyError:
return default
else:
del dict[key]
return value
class ClientImplementationTests(unittest.TestCase):
def setUp(self):
self.file = StringIOWithoutClosing()
self.transport = protocol.FileWrapper(self.file)
self.client = NoticingClient()
self.client.makeConnection(self.transport)
def tearDown(self):
self.transport.loseConnection()
self.client.connectionLost()
del self.client
del self.transport
def _serverTestImpl(self, code, msg, func, **kw):
host = pop(kw, 'host', 'server.host')
nick = pop(kw, 'nick', 'nickname')
args = pop(kw, 'args', '')
message = (":" +
host + " " +
code + " " +
nick + " " +
args + " :" +
msg + "\r\n")
self.client.dataReceived(message)
self.assertEquals(
self.client.calls,
[(func, kw)])
def testYourHost(self):
msg = "Your host is some.host[blah.blah/6667], running version server-version-3"
self._serverTestImpl("002", msg, "yourHost", info=msg)
def testCreated(self):
msg = "This server was cobbled together Fri Aug 13 18:00:25 UTC 2004"
self._serverTestImpl("003", msg, "created", when=msg)
def testMyInfo(self):
msg = "server.host server-version abcDEF bcdEHI"
self._serverTestImpl("004", msg, "myInfo",
servername="server.host",
version="server-version",
umodes="abcDEF",
cmodes="bcdEHI")
def testLuserClient(self):
msg = "There are 9227 victims and 9542 hiding on 24 servers"
self._serverTestImpl("251", msg, "luserClient",
info=msg)
def _sendISUPPORT(self):
args = ("MODES=4 CHANLIMIT=#:20 NICKLEN=16 USERLEN=10 HOSTLEN=63 "
"TOPICLEN=450 KICKLEN=450 CHANNELLEN=30 KEYLEN=23 CHANTYPES=# "
"PREFIX=(ov)@+ CASEMAPPING=ascii CAPAB IRCD=dancer")
msg = "are available on this server"
self._serverTestImpl("005", msg, "isupport", args=args,
options=['MODES=4',
'CHANLIMIT=#:20',
'NICKLEN=16',
'USERLEN=10',
'HOSTLEN=63',
'TOPICLEN=450',
'KICKLEN=450',
'CHANNELLEN=30',
'KEYLEN=23',
'CHANTYPES=#',
'PREFIX=(ov)@+',
'CASEMAPPING=ascii',
'CAPAB',
'IRCD=dancer'])
def test_ISUPPORT(self):
"""
The client parses ISUPPORT messages sent by the server and calls
L{IRCClient.isupport}.
"""
self._sendISUPPORT()
def testBounce(self):
msg = "Try server some.host, port 321"
self._serverTestImpl("010", msg, "bounce",
info=msg)
def testLuserChannels(self):
args = "7116"
msg = "channels formed"
self._serverTestImpl("254", msg, "luserChannels", args=args,
channels=int(args))
def testLuserOp(self):
args = "34"
msg = "flagged staff members"
self._serverTestImpl("252", msg, "luserOp", args=args,
ops=int(args))
def testLuserMe(self):
msg = "I have 1937 clients and 0 servers"
self._serverTestImpl("255", msg, "luserMe",
info=msg)
def test_receivedMOTD(self):
"""
Lines received in I{RPL_MOTDSTART} and I{RPL_MOTD} are delivered to
L{IRCClient.receivedMOTD} when I{RPL_ENDOFMOTD} is received.
"""
lines = [
":host.name 375 nickname :- host.name Message of the Day -",
":host.name 372 nickname :- Welcome to host.name",
":host.name 376 nickname :End of /MOTD command."]
for L in lines:
self.assertEquals(self.client.calls, [])
self.client.dataReceived(L + '\r\n')
self.assertEquals(
self.client.calls,
[("receivedMOTD", {"motd": ["host.name Message of the Day -", "Welcome to host.name"]})])
# After the motd is delivered, the tracking variable should be
# reset.
self.assertIdentical(self.client.motd, None)
def test_withoutMOTDSTART(self):
"""
If L{IRCClient} receives I{RPL_MOTD} and I{RPL_ENDOFMOTD} without
receiving I{RPL_MOTDSTART}, L{IRCClient.receivedMOTD} is still
called with a list of MOTD lines.
"""
lines = [
":host.name 372 nickname :- Welcome to host.name",
":host.name 376 nickname :End of /MOTD command."]
for L in lines:
self.client.dataReceived(L + '\r\n')
self.assertEquals(
self.client.calls,
[("receivedMOTD", {"motd": ["Welcome to host.name"]})])
def _clientTestImpl(self, sender, group, type, msg, func, **kw):
ident = pop(kw, 'ident', 'ident')
host = pop(kw, 'host', 'host')
wholeUser = sender + '!' + ident + '@' + host
message = (":" +
wholeUser + " " +
type + " " +
group + " :" +
msg + "\r\n")
self.client.dataReceived(message)
self.assertEquals(
self.client.calls,
[(func, kw)])
self.client.calls = []
def testPrivmsg(self):
msg = "Tooty toot toot."
self._clientTestImpl("sender", "#group", "PRIVMSG", msg, "privmsg",
ident="ident", host="host",
# Expected results below
user="sender!ident@host",
channel="#group",
message=msg)
self._clientTestImpl("sender", "recipient", "PRIVMSG", msg, "privmsg",
ident="ident", host="host",
# Expected results below
user="sender!ident@host",
channel="recipient",
message=msg)
def test_getChannelModeParams(self):
"""
L{IRCClient.getChannelModeParams} uses ISUPPORT information, either
given by the server or defaults, to determine which channel modes
require arguments when being added or removed.
"""
add, remove = map(sorted, self.client.getChannelModeParams())
self.assertEquals(add, ['b', 'h', 'k', 'l', 'o', 'v'])
self.assertEquals(remove, ['b', 'h', 'o', 'v'])
def removeFeature(name):
name = '-' + name
msg = "are available on this server"
self._serverTestImpl(
'005', msg, 'isupport', args=name, options=[name])
self.assertIdentical(
self.client.supported.getFeature(name), None)
self.client.calls = []
# Remove CHANMODES feature, causing getFeature('CHANMODES') to return
# None.
removeFeature('CHANMODES')
add, remove = map(sorted, self.client.getChannelModeParams())
self.assertEquals(add, ['h', 'o', 'v'])
self.assertEquals(remove, ['h', 'o', 'v'])
# Remove PREFIX feature, causing getFeature('PREFIX') to return None.
removeFeature('PREFIX')
add, remove = map(sorted, self.client.getChannelModeParams())
self.assertEquals(add, [])
self.assertEquals(remove, [])
# Restore ISUPPORT features.
self._sendISUPPORT()
self.assertNotIdentical(
self.client.supported.getFeature('PREFIX'), None)
def test_getUserModeParams(self):
"""
L{IRCClient.getUserModeParams} returns a list of user modes (modes that
the user sets on themself, outside of channel modes) that require
parameters when added and removed, respectively.
"""
add, remove = map(sorted, self.client.getUserModeParams())
self.assertEquals(add, [])
self.assertEquals(remove, [])
def _sendModeChange(self, msg, args='', target=None):
"""
Build a MODE string and send it to the client.
"""
if target is None:
target = '#chan'
message = ":[email protected] MODE %s %s %s\r\n" % (
target, msg, args)
self.client.dataReceived(message)
def _parseModeChange(self, results, target=None):
"""
Parse the results, run some common assertions, and return the data to check.
"""
if target is None:
target = '#chan'
for n, result in enumerate(results):
method, data = result
self.assertEquals(method, 'modeChanged')
self.assertEquals(data['user'], '[email protected]')
self.assertEquals(data['channel'], target)
results[n] = tuple([data[key] for key in ('set', 'modes', 'args')])
return results
def _checkModeChange(self, expected, target=None):
"""
Compare the expected result with the one returned by the client.
"""
result = self._parseModeChange(self.client.calls, target)
self.assertEquals(result, expected)
self.client.calls = []
def test_modeMissingDirection(self):
"""
Mode strings that do not begin with a directional character, C{'+'} or
C{'-'}, have C{'+'} automatically prepended.
"""
self._sendModeChange('s')
self._checkModeChange([(True, 's', (None,))])
def test_noModeParameters(self):
"""
No parameters are passed to L{IRCClient.modeChanged} for modes that
don't take any parameters.
"""
self._sendModeChange('-s')
self._checkModeChange([(False, 's', (None,))])
self._sendModeChange('+n')
self._checkModeChange([(True, 'n', (None,))])
def test_oneModeParameter(self):
"""
Parameters are passed to L{IRCClient.modeChanged} for modes that take
parameters.
"""
self._sendModeChange('+o', 'a_user')
self._checkModeChange([(True, 'o', ('a_user',))])
self._sendModeChange('-o', 'a_user')
self._checkModeChange([(False, 'o', ('a_user',))])
def test_mixedModes(self):
"""
Mixing adding and removing modes that do and don't take parameters
invokes L{IRCClient.modeChanged} with mode characters and parameters
that match up.
"""
self._sendModeChange('+osv', 'a_user another_user')
self._checkModeChange([(True, 'osv', ('a_user', None, 'another_user'))])
self._sendModeChange('+v-os', 'a_user another_user')
self._checkModeChange([(True, 'v', ('a_user',)),
(False, 'os', ('another_user', None))])
def test_tooManyModeParameters(self):
"""
Passing an argument to modes that take no parameters results in
L{IRCClient.modeChanged} not being called and an error being logged.
"""
self._sendModeChange('+s', 'wrong')
self._checkModeChange([])
errors = self.flushLoggedErrors(irc.IRCBadModes)
self.assertEquals(len(errors), 1)
self.assertSubstring(
'Too many parameters', errors[0].getErrorMessage())
def test_tooFewModeParameters(self):
"""
Passing no arguments to modes that do take parameters results in
L{IRCClient.modeChanged} not being called and an error being logged.
"""
self._sendModeChange('+o')
self._checkModeChange([])
errors = self.flushLoggedErrors(irc.IRCBadModes)
self.assertEquals(len(errors), 1)
self.assertSubstring(
'Not enough parameters', errors[0].getErrorMessage())
def test_userMode(self):
"""
A C{MODE} message whose target is our user (the nickname of our user,
to be precise), as opposed to a channel, will be parsed according to
the modes specified by L{IRCClient.getUserModeParams}.
"""
target = self.client.nickname
# Mode "o" on channels is supposed to take a parameter, but since this
# is not a channel this will not cause an exception.
self._sendModeChange('+o', target=target)
self._checkModeChange([(True, 'o', (None,))], target=target)
def getUserModeParams():
return ['Z', '']
# Introduce our own user mode that takes an argument.
self.patch(self.client, 'getUserModeParams', getUserModeParams)
self._sendModeChange('+Z', 'an_arg', target=target)
self._checkModeChange([(True, 'Z', ('an_arg',))], target=target)
class BasicServerFunctionalityTestCase(unittest.TestCase):
def setUp(self):
self.f = StringIOWithoutClosing()
self.t = protocol.FileWrapper(self.f)
self.p = irc.IRC()
self.p.makeConnection(self.t)
def check(self, s):
self.assertEquals(self.f.getvalue(), s)
def testPrivmsg(self):
self.p.privmsg("this-is-sender", "this-is-recip", "this is message")
self.check(":this-is-sender PRIVMSG this-is-recip :this is message\r\n")
def testNotice(self):
self.p.notice("this-is-sender", "this-is-recip", "this is notice")
self.check(":this-is-sender NOTICE this-is-recip :this is notice\r\n")
def testAction(self):
self.p.action("this-is-sender", "this-is-recip", "this is action")
self.check(":this-is-sender ACTION this-is-recip :this is action\r\n")
def testJoin(self):
self.p.join("this-person", "#this-channel")
self.check(":this-person JOIN #this-channel\r\n")
def testPart(self):
self.p.part("this-person", "#that-channel")
self.check(":this-person PART #that-channel\r\n")
def testWhois(self):
"""
Verify that a whois by the client receives the right protocol actions
from the server.
"""
timestamp = int(time.time()-100)
hostname = self.p.hostname
req = 'requesting-nick'
targ = 'target-nick'
self.p.whois(req, targ, 'target', 'host.com',
'Target User', 'irc.host.com', 'A fake server', False,
12, timestamp, ['#fakeusers', '#fakemisc'])
expected = '\r\n'.join([
':%(hostname)s 311 %(req)s %(targ)s target host.com * :Target User',
':%(hostname)s 312 %(req)s %(targ)s irc.host.com :A fake server',
':%(hostname)s 317 %(req)s %(targ)s 12 %(timestamp)s :seconds idle, signon time',
':%(hostname)s 319 %(req)s %(targ)s :#fakeusers #fakemisc',
':%(hostname)s 318 %(req)s %(targ)s :End of WHOIS list.',
'']) % dict(hostname=hostname, timestamp=timestamp, req=req, targ=targ)
self.check(expected)
class DummyClient(irc.IRCClient):
def __init__(self):
self.lines = []
def sendLine(self, m):
self.lines.append(m)
class ClientMsgTests(unittest.TestCase):
def setUp(self):
self.client = DummyClient()
def testSingleLine(self):
self.client.msg('foo', 'bar')
self.assertEquals(self.client.lines, ['PRIVMSG foo :bar'])
def testDodgyMaxLength(self):
self.assertRaises(ValueError, self.client.msg, 'foo', 'bar', 0)
self.assertRaises(ValueError, self.client.msg, 'foo', 'bar', 3)
def testMultipleLine(self):
maxLen = len('PRIVMSG foo :') + 3 + 2 # 2 for line endings
self.client.msg('foo', 'barbazbo', maxLen)
self.assertEquals(self.client.lines, ['PRIVMSG foo :bar',
'PRIVMSG foo :baz',
'PRIVMSG foo :bo'])
def testSufficientWidth(self):
msg = 'barbazbo'
maxLen = len('PRIVMSG foo :%s' % (msg,)) + 2
self.client.msg('foo', msg, maxLen)
self.assertEquals(self.client.lines, ['PRIVMSG foo :%s' % (msg,)])
self.client.lines = []
self.client.msg('foo', msg, maxLen-1)
self.assertEquals(2, len(self.client.lines))
self.client.lines = []
self.client.msg('foo', msg, maxLen+1)
self.assertEquals(1, len(self.client.lines))
def test_newlinesAtStart(self):
"""
An LF at the beginning of the message is ignored.
"""
self.client.lines = []
self.client.msg('foo', '\nbar')
self.assertEquals(self.client.lines, ['PRIVMSG foo :bar'])
def test_newlinesAtEnd(self):
"""
An LF at the end of the message is ignored.
"""
self.client.lines = []
self.client.msg('foo', 'bar\n')
self.assertEquals(self.client.lines, ['PRIVMSG foo :bar'])
def test_newlinesWithinMessage(self):
"""
An LF within a message causes a new line.
"""
self.client.lines = []
self.client.msg('foo', 'bar\n\nbaz')
self.assertEquals(self.client.lines, [
'PRIVMSG foo :bar',
'PRIVMSG foo :baz'
])
def test_consecutiveNewlines(self):
"""
Consecutive LFs do not cause a blank line.
"""
self.client.lines = []
self.client.msg('foo', 'bar\n\nbaz')
self.assertEquals(self.client.lines, [
'PRIVMSG foo :bar',
'PRIVMSG foo :baz',
])
def test_longLinesCauseNewLines(self):
"""
Lines that would break the 512-byte barrier cause two lines to be sent.
"""
# The maximum length of a line is 512 bytes, including the line prefix
# and the trailing CRLF.
maxLineLength = irc.MAX_COMMAND_LENGTH - 2 - len('PRIVMSG foo :')
self.client.msg('foo', 'o' * (maxLineLength+1))
self.assertEquals(self.client.lines, [
'PRIVMSG foo :' + maxLineLength * 'o',
'PRIVMSG foo :o',
])
def test_newlinesBeforeLineBreaking(self):
"""
IRCClient breaks on newlines before it breaks long lines.
"""
# Because MAX_COMMAND_LENGTH includes framing characters, this long
# line is slightly longer than half the permissible message size.
longline = 'o' * (irc.MAX_COMMAND_LENGTH // 2)
self.client.msg('foo', longline + '\n' + longline)
self.assertEquals(self.client.lines, [
'PRIVMSG foo :' + longline,
'PRIVMSG foo :' + longline,
])
def test_lineBreakOnWordBoundaries(self):
"""
IRCClient prefers to break long lines at word boundaries.
"""
# Because MAX_COMMAND_LENGTH includes framing characters, this long
# line is slightly longer than half the permissible message size.
longline = 'o' * (irc.MAX_COMMAND_LENGTH // 2)
self.client.msg('foo', longline + ' ' + longline)
self.assertEquals(self.client.lines, [
'PRIVMSG foo :' + longline,
'PRIVMSG foo :' + longline,
])
def testSplitSanity(self):
# Whiteboxing
self.assertRaises(ValueError, irc.split, 'foo', -1)
self.assertRaises(ValueError, irc.split, 'foo', 0)
self.assertEquals([], irc.split('', 1))
self.assertEquals([], irc.split(''))
def test_splitDelimiters(self):
"""
Test that split() skips any delimiter (space or newline) that it finds
at the very beginning of the string segment it is operating on.
Nothing should be added to the output list because of it.
"""
r = irc.split("xx yyz", 2)
self.assertEquals(['xx', 'yy', 'z'], r)
r = irc.split("xx\nyyz", 2)
self.assertEquals(['xx', 'yy', 'z'], r)
def test_splitValidatesLength(self):
"""
split() raises ValueError if given a length <= 0
"""
self.assertRaises(ValueError, irc.split, "foo", 0)
self.assertRaises(ValueError, irc.split, "foo", -1)
class ClientTests(TestCase):
"""
Tests for the protocol-level behavior of IRCClient methods intended to
be called by application code.
"""
def setUp(self):
"""
Create and connect a new L{IRCClient} to a new L{StringTransport}.
"""
self.transport = StringTransport()
self.protocol = IRCClient()
self.protocol.performLogin = False
self.protocol.makeConnection(self.transport)
# Sanity check - we don't want anything to have happened at this
# point, since we're not in a test yet.
self.assertEquals(self.transport.value(), "")
def getLastLine(self, transport):
"""
Return the last IRC message in the transport buffer.
"""
return transport.value().split('\r\n')[-2]
def test_away(self):
"""
L{IRCClient.away} sends an AWAY command with the specified message.
"""
message = "Sorry, I'm not here."
self.protocol.away(message)
expected = [
'AWAY :%s' % (message,),
'',
]
self.assertEquals(self.transport.value().split('\r\n'), expected)
def test_back(self):
"""
L{IRCClient.back} sends an AWAY command with an empty message.
"""
self.protocol.back()
expected = [
'AWAY :',
'',
]
self.assertEquals(self.transport.value().split('\r\n'), expected)
def test_whois(self):
"""
L{IRCClient.whois} sends a WHOIS message.
"""
self.protocol.whois('alice')
self.assertEquals(
self.transport.value().split('\r\n'),
['WHOIS alice', ''])
def test_whoisWithServer(self):
"""
L{IRCClient.whois} sends a WHOIS message with a server name if a
value is passed for the C{server} parameter.
"""
self.protocol.whois('alice', 'example.org')
self.assertEquals(
self.transport.value().split('\r\n'),
['WHOIS example.org alice', ''])
def test_register(self):
"""
L{IRCClient.register} sends NICK and USER commands with the
username, name, hostname, server name, and real name specified.
"""
username = 'testuser'
hostname = 'testhost'
servername = 'testserver'
self.protocol.realname = 'testname'
self.protocol.password = None
self.protocol.register(username, hostname, servername)
expected = [
'NICK %s' % (username,),
'USER %s %s %s :%s' % (
username, hostname, servername, self.protocol.realname),
'']
self.assertEquals(self.transport.value().split('\r\n'), expected)
def test_registerWithPassword(self):
"""
If the C{password} attribute of L{IRCClient} is not C{None}, the
C{register} method also sends a PASS command with it as the
argument.
"""
username = 'testuser'
hostname = 'testhost'
servername = 'testserver'
self.protocol.realname = 'testname'
self.protocol.password = 'testpass'
self.protocol.register(username, hostname, servername)
expected = [
'PASS %s' % (self.protocol.password,),
'NICK %s' % (username,),
'USER %s %s %s :%s' % (
username, hostname, servername, self.protocol.realname),
'']
self.assertEquals(self.transport.value().split('\r\n'), expected)
def test_registerWithTakenNick(self):
"""
Verify that the client repeats the L{IRCClient.setNick} method with a
new value when presented with an C{ERR_NICKNAMEINUSE} while trying to
register.
"""
username = 'testuser'
hostname = 'testhost'
servername = 'testserver'
self.protocol.realname = 'testname'
self.protocol.password = 'testpass'
self.protocol.register(username, hostname, servername)
self.protocol.irc_ERR_NICKNAMEINUSE('prefix', ['param'])
lastLine = self.getLastLine(self.transport)
self.assertNotEquals(lastLine, 'NICK %s' % (username,))
# Keep chaining underscores for each collision
self.protocol.irc_ERR_NICKNAMEINUSE('prefix', ['param'])
lastLine = self.getLastLine(self.transport)
self.assertEquals(lastLine, 'NICK %s' % (username + '__',))
def test_overrideAlterCollidedNick(self):
"""
L{IRCClient.alterCollidedNick} determines how a nickname is altered upon
collision while a user is trying to change to that nickname.
"""
nick = 'foo'
self.protocol.alterCollidedNick = lambda nick: nick + '***'
self.protocol.register(nick)
self.protocol.irc_ERR_NICKNAMEINUSE('prefix', ['param'])
lastLine = self.getLastLine(self.transport)
self.assertEquals(
lastLine, 'NICK %s' % (nick + '***',))
def test_nickChange(self):
"""
When a NICK command is sent after signon, C{IRCClient.nickname} is set
to the new nickname I{after} the server sends an acknowledgement.
"""
oldnick = 'foo'
newnick = 'bar'
self.protocol.register(oldnick)
self.protocol.irc_RPL_WELCOME('prefix', ['param'])
self.protocol.setNick(newnick)
self.assertEquals(self.protocol.nickname, oldnick)
self.protocol.irc_NICK('%s!quux@qux' % (oldnick,), [newnick])
self.assertEquals(self.protocol.nickname, newnick)
def test_erroneousNick(self):
"""
Trying to register an illegal nickname results in the default legal
nickname being set, and trying to change a nickname to an illegal
nickname results in the old nickname being kept.
"""
# Registration case: change illegal nickname to erroneousNickFallback
badnick = 'foo'
self.assertEquals(self.protocol._registered, False)
self.protocol.register(badnick)
self.protocol.irc_ERR_ERRONEUSNICKNAME('prefix', ['param'])
lastLine = self.getLastLine(self.transport)
self.assertEquals(
lastLine, 'NICK %s' % (self.protocol.erroneousNickFallback,))
self.protocol.irc_RPL_WELCOME('prefix', ['param'])
self.assertEquals(self.protocol._registered, True)
self.protocol.setNick(self.protocol.erroneousNickFallback)
self.assertEquals(
self.protocol.nickname, self.protocol.erroneousNickFallback)
# Illegal nick change attempt after registration. Fall back to the old
# nickname instead of erroneousNickFallback.
oldnick = self.protocol.nickname
self.protocol.setNick(badnick)
self.protocol.irc_ERR_ERRONEUSNICKNAME('prefix', ['param'])
lastLine = self.getLastLine(self.transport)
self.assertEquals(
lastLine, 'NICK %s' % (badnick,))
self.assertEquals(self.protocol.nickname, oldnick)
def test_describe(self):
"""
L{IRCClient.describe} sends a CTCP ACTION message to the target
specified.
"""
target = 'foo'
channel = '#bar'
action = 'waves'
self.protocol.describe(target, action)
self.protocol.describe(channel, action)
expected = [
'PRIVMSG %s :\01ACTION %s\01' % (target, action),
'PRIVMSG %s :\01ACTION %s\01' % (channel, action),
'']
self.assertEquals(self.transport.value().split('\r\n'), expected)
def test_me(self):
"""
L{IRCClient.me} sends a CTCP ACTION message to the target channel
specified.
If the target does not begin with a standard channel prefix,
'#' is prepended.
"""
target = 'foo'
channel = '#bar'
action = 'waves'
self.protocol.me(target, action)
self.protocol.me(channel, action)
expected = [
'PRIVMSG %s :\01ACTION %s\01' % ('#' + target, action),
'PRIVMSG %s :\01ACTION %s\01' % (channel, action),
'']
self.assertEquals(self.transport.value().split('\r\n'), expected)
warnings = self.flushWarnings(
offendingFunctions=[self.test_me])
self.assertEquals(
warnings[0]['message'],
"me() is deprecated since Twisted 9.0. Use IRCClient.describe().")
self.assertEquals(warnings[0]['category'], DeprecationWarning)
self.assertEquals(len(warnings), 2)
def test_noticedDoesntPrivmsg(self):
"""
The default implementation of L{IRCClient.noticed} doesn't invoke
C{privmsg()}
"""
def privmsg(user, channel, message):
self.fail("privmsg() should not have been called")
self.protocol.privmsg = privmsg
self.protocol.irc_NOTICE('spam', "I don't want any spam!")
| apache-2.0 | 6,612,655,994,867,814,000 | 34.010161 | 101 | 0.552363 | false |
ofayans/freeipa | ipaclient/remote_plugins/2_49/automount.py | 8 | 34860 | #
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
# pylint: disable=unused-import
import six
from . import Command, Method, Object
from ipalib import api, parameters, output
from ipalib.parameters import DefaultFrom
from ipalib.plugable import Registry
from ipalib.text import _
from ipapython.dn import DN
from ipapython.dnsutil import DNSName
if six.PY3:
unicode = str
__doc__ = _("""
Automount
Stores automount(8) configuration for autofs(8) in IPA.
The base of an automount configuration is the configuration file auto.master.
This is also the base location in IPA. Multiple auto.master configurations
can be stored in separate locations. A location is implementation-specific
with the default being a location named 'default'. For example, you can have
locations by geographic region, by floor, by type, etc.
Automount has three basic object types: locations, maps and keys.
A location defines a set of maps anchored in auto.master. This allows you
to store multiple automount configurations. A location in itself isn't
very interesting; it is just a point to start a new automount map.
A map is roughly equivalent to a discrete automount file and provides
storage for keys.
A key is a mount point associated with a map.
When a new location is created, two maps are automatically created for
it: auto.master and auto.direct. auto.master is the root map for all
automount maps for the location. auto.direct is the default map for
direct mounts and is mounted on /-.
An automount map may contain a submount key. This key defines a mount
location within the map that references another map. This can be done
either using automountmap-add-indirect --parentmap or manually
with automountkey-add and setting info to "-type=autofs :<mapname>".
EXAMPLES:
Locations:
Create a named location, "Baltimore":
ipa automountlocation-add baltimore
Display the new location:
ipa automountlocation-show baltimore
Find available locations:
ipa automountlocation-find
Remove a named automount location:
ipa automountlocation-del baltimore
Show what the automount maps would look like if they were in the filesystem:
ipa automountlocation-tofiles baltimore
Import an existing configuration into a location:
ipa automountlocation-import baltimore /etc/auto.master
The import will fail if any duplicate entries are found. For
continuous operation where errors are ignored, use the --continue
option.
Maps:
Create a new map, "auto.share":
ipa automountmap-add baltimore auto.share
Display the new map:
ipa automountmap-show baltimore auto.share
Find maps in the location baltimore:
ipa automountmap-find baltimore
Create an indirect map with auto.share as a submount:
ipa automountmap-add-indirect baltimore --parentmap=auto.share --mount=sub auto.man
This is equivalent to:
ipa automountmap-add-indirect baltimore --mount=/man auto.man
ipa automountkey-add baltimore auto.man --key=sub --info="-fstype=autofs ldap:auto.share"
Remove the auto.share map:
ipa automountmap-del baltimore auto.share
Keys:
Create a new key for the auto.share map in location baltimore. This ties
the map we previously created to auto.master:
ipa automountkey-add baltimore auto.master --key=/share --info=auto.share
Create a new key for our auto.share map, an NFS mount for man pages:
ipa automountkey-add baltimore auto.share --key=man --info="-ro,soft,rsize=8192,wsize=8192 ipa.example.com:/shared/man"
Find all keys for the auto.share map:
ipa automountkey-find baltimore auto.share
Find all direct automount keys:
ipa automountkey-find baltimore --key=/-
Remove the man key from the auto.share map:
ipa automountkey-del baltimore auto.share --key=man
""")
register = Registry()
@register()
class automountkey(Object):
takes_params = (
parameters.Str(
'automountkey',
label=_(u'Key'),
doc=_(u'Automount key name.'),
),
parameters.Str(
'automountinformation',
label=_(u'Mount information'),
),
parameters.Str(
'description',
required=False,
primary_key=True,
label=_(u'description'),
exclude=('webui', 'cli'),
),
)
@register()
class automountlocation(Object):
takes_params = (
parameters.Str(
'cn',
primary_key=True,
label=_(u'Location'),
doc=_(u'Automount location name.'),
),
)
@register()
class automountmap(Object):
takes_params = (
parameters.Str(
'automountmapname',
primary_key=True,
label=_(u'Map'),
doc=_(u'Automount map name.'),
),
parameters.Str(
'description',
required=False,
label=_(u'Description'),
),
)
@register()
class automountkey_add(Method):
__doc__ = _("Create a new automount key.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
),
parameters.Str(
'automountmapautomountmapname',
cli_name='automountmap',
label=_(u'Map'),
),
)
takes_options = (
parameters.Str(
'automountkey',
cli_name='key',
label=_(u'Key'),
doc=_(u'Automount key name.'),
),
parameters.Str(
'automountinformation',
cli_name='info',
label=_(u'Mount information'),
),
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.Output(
'value',
unicode,
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountkey_del(Method):
__doc__ = _("Delete an automount key.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
),
parameters.Str(
'automountmapautomountmapname',
cli_name='automountmap',
label=_(u'Map'),
),
)
takes_options = (
parameters.Flag(
'continue',
doc=_(u"Continuous mode: Don't stop on errors."),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
parameters.Str(
'automountkey',
cli_name='key',
label=_(u'Key'),
doc=_(u'Automount key name.'),
),
parameters.Str(
'automountinformation',
required=False,
cli_name='info',
label=_(u'Mount information'),
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Output(
'result',
dict,
doc=_(u'List of deletions that failed'),
),
output.Output(
'value',
unicode,
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountkey_find(Method):
__doc__ = _("Search for an automount key.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
),
parameters.Str(
'automountmapautomountmapname',
cli_name='automountmap',
label=_(u'Map'),
),
parameters.Str(
'criteria',
required=False,
doc=_(u'A string searched in all relevant object attributes'),
),
)
takes_options = (
parameters.Str(
'automountkey',
required=False,
cli_name='key',
label=_(u'Key'),
doc=_(u'Automount key name.'),
),
parameters.Str(
'automountinformation',
required=False,
cli_name='info',
label=_(u'Mount information'),
),
parameters.Int(
'timelimit',
required=False,
label=_(u'Time Limit'),
doc=_(u'Time limit of search in seconds'),
),
parameters.Int(
'sizelimit',
required=False,
label=_(u'Size Limit'),
doc=_(u'Maximum number of entries returned'),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.ListOfEntries(
'result',
),
output.Output(
'count',
int,
doc=_(u'Number of entries returned'),
),
output.Output(
'truncated',
bool,
doc=_(u'True if not all results were returned'),
),
)
@register()
class automountkey_mod(Method):
__doc__ = _("Modify an automount key.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
),
parameters.Str(
'automountmapautomountmapname',
cli_name='automountmap',
label=_(u'Map'),
),
)
takes_options = (
parameters.Str(
'automountkey',
cli_name='key',
label=_(u'Key'),
doc=_(u'Automount key name.'),
),
parameters.Str(
'automountinformation',
required=False,
cli_name='info',
label=_(u'Mount information'),
),
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Str(
'delattr',
required=False,
multivalue=True,
doc=_(u'Delete an attribute/value pair. The option will be evaluated\nlast, after all sets and adds.'),
exclude=('webui',),
),
parameters.Flag(
'rights',
label=_(u'Rights'),
doc=_(u'Display the access rights of this entry (requires --all). See ipa man page for details.'),
default=False,
autofill=True,
),
parameters.Str(
'newautomountinformation',
required=False,
cli_name='newinfo',
label=_(u'New mount information'),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Str(
'rename',
required=False,
label=_(u'Rename'),
doc=_(u'Rename the automount key object'),
exclude=('webui',),
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.Output(
'value',
unicode,
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountkey_show(Method):
__doc__ = _("Display an automount key.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
),
parameters.Str(
'automountmapautomountmapname',
cli_name='automountmap',
label=_(u'Map'),
),
)
takes_options = (
parameters.Flag(
'rights',
label=_(u'Rights'),
doc=_(u'Display the access rights of this entry (requires --all). See ipa man page for details.'),
default=False,
autofill=True,
),
parameters.Str(
'automountkey',
cli_name='key',
label=_(u'Key'),
doc=_(u'Automount key name.'),
),
parameters.Str(
'automountinformation',
required=False,
cli_name='info',
label=_(u'Mount information'),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.Output(
'value',
unicode,
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountlocation_add(Method):
__doc__ = _("Create a new automount location.")
takes_args = (
parameters.Str(
'cn',
cli_name='location',
label=_(u'Location'),
doc=_(u'Automount location name.'),
),
)
takes_options = (
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.Output(
'value',
unicode,
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountlocation_del(Method):
__doc__ = _("Delete an automount location.")
takes_args = (
parameters.Str(
'cn',
multivalue=True,
cli_name='location',
label=_(u'Location'),
doc=_(u'Automount location name.'),
),
)
takes_options = (
parameters.Flag(
'continue',
doc=_(u"Continuous mode: Don't stop on errors."),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Output(
'result',
dict,
doc=_(u'List of deletions that failed'),
),
output.Output(
'value',
unicode,
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountlocation_find(Method):
__doc__ = _("Search for an automount location.")
takes_args = (
parameters.Str(
'criteria',
required=False,
doc=_(u'A string searched in all relevant object attributes'),
),
)
takes_options = (
parameters.Str(
'cn',
required=False,
cli_name='location',
label=_(u'Location'),
doc=_(u'Automount location name.'),
),
parameters.Int(
'timelimit',
required=False,
label=_(u'Time Limit'),
doc=_(u'Time limit of search in seconds'),
),
parameters.Int(
'sizelimit',
required=False,
label=_(u'Size Limit'),
doc=_(u'Maximum number of entries returned'),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'pkey_only',
required=False,
label=_(u'Primary key only'),
doc=_(u'Results should contain primary key attribute only ("location")'),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.ListOfEntries(
'result',
),
output.Output(
'count',
int,
doc=_(u'Number of entries returned'),
),
output.Output(
'truncated',
bool,
doc=_(u'True if not all results were returned'),
),
)
@register()
class automountlocation_show(Method):
__doc__ = _("Display an automount location.")
takes_args = (
parameters.Str(
'cn',
cli_name='location',
label=_(u'Location'),
doc=_(u'Automount location name.'),
),
)
takes_options = (
parameters.Flag(
'rights',
label=_(u'Rights'),
doc=_(u'Display the access rights of this entry (requires --all). See ipa man page for details.'),
default=False,
autofill=True,
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.Output(
'value',
unicode,
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountlocation_tofiles(Method):
__doc__ = _("Generate automount files for a specific location.")
takes_args = (
parameters.Str(
'cn',
cli_name='location',
label=_(u'Location'),
doc=_(u'Automount location name.'),
),
)
has_output = (
output.Output(
'result',
),
)
@register()
class automountmap_add(Method):
__doc__ = _("Create a new automount map.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
),
parameters.Str(
'automountmapname',
cli_name='map',
label=_(u'Map'),
doc=_(u'Automount map name.'),
),
)
takes_options = (
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
),
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.Output(
'value',
unicode,
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountmap_add_indirect(Method):
__doc__ = _("Create a new indirect mount point.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
),
parameters.Str(
'automountmapname',
cli_name='map',
label=_(u'Map'),
doc=_(u'Automount map name.'),
),
)
takes_options = (
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
),
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Str(
'key',
cli_name='mount',
label=_(u'Mount point'),
),
parameters.Str(
'parentmap',
required=False,
label=_(u'Parent map'),
doc=_(u'Name of parent automount map (default: auto.master).'),
default=u'auto.master',
autofill=True,
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.Output(
'value',
unicode,
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountmap_del(Method):
__doc__ = _("Delete an automount map.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
),
parameters.Str(
'automountmapname',
multivalue=True,
cli_name='map',
label=_(u'Map'),
doc=_(u'Automount map name.'),
),
)
takes_options = (
parameters.Flag(
'continue',
doc=_(u"Continuous mode: Don't stop on errors."),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Output(
'result',
dict,
doc=_(u'List of deletions that failed'),
),
output.Output(
'value',
unicode,
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountmap_find(Method):
__doc__ = _("Search for an automount map.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
),
parameters.Str(
'criteria',
required=False,
doc=_(u'A string searched in all relevant object attributes'),
),
)
takes_options = (
parameters.Str(
'automountmapname',
required=False,
cli_name='map',
label=_(u'Map'),
doc=_(u'Automount map name.'),
),
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
),
parameters.Int(
'timelimit',
required=False,
label=_(u'Time Limit'),
doc=_(u'Time limit of search in seconds'),
),
parameters.Int(
'sizelimit',
required=False,
label=_(u'Size Limit'),
doc=_(u'Maximum number of entries returned'),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'pkey_only',
required=False,
label=_(u'Primary key only'),
doc=_(u'Results should contain primary key attribute only ("map")'),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.ListOfEntries(
'result',
),
output.Output(
'count',
int,
doc=_(u'Number of entries returned'),
),
output.Output(
'truncated',
bool,
doc=_(u'True if not all results were returned'),
),
)
@register()
class automountmap_mod(Method):
__doc__ = _("Modify an automount map.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
),
parameters.Str(
'automountmapname',
cli_name='map',
label=_(u'Map'),
doc=_(u'Automount map name.'),
),
)
takes_options = (
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
),
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Str(
'delattr',
required=False,
multivalue=True,
doc=_(u'Delete an attribute/value pair. The option will be evaluated\nlast, after all sets and adds.'),
exclude=('webui',),
),
parameters.Flag(
'rights',
label=_(u'Rights'),
doc=_(u'Display the access rights of this entry (requires --all). See ipa man page for details.'),
default=False,
autofill=True,
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.Output(
'value',
unicode,
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountmap_show(Method):
__doc__ = _("Display an automount map.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
),
parameters.Str(
'automountmapname',
cli_name='map',
label=_(u'Map'),
doc=_(u'Automount map name.'),
),
)
takes_options = (
parameters.Flag(
'rights',
label=_(u'Rights'),
doc=_(u'Display the access rights of this entry (requires --all). See ipa man page for details.'),
default=False,
autofill=True,
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.Output(
'value',
unicode,
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
| gpl-3.0 | -3,637,372,465,941,941,000 | 27.457143 | 162 | 0.508176 | false |
gustavo-guimaraes/siga | backend/venv/lib/python2.7/site-packages/unidecode/x026.py | 165 | 4020 | data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'[?]', # 0x14
'[?]', # 0x15
'[?]', # 0x16
'[?]', # 0x17
'[?]', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'', # 0x4d
'', # 0x4e
'', # 0x4f
'', # 0x50
'', # 0x51
'', # 0x52
'', # 0x53
'', # 0x54
'', # 0x55
'', # 0x56
'', # 0x57
'', # 0x58
'', # 0x59
'', # 0x5a
'', # 0x5b
'', # 0x5c
'', # 0x5d
'', # 0x5e
'', # 0x5f
'', # 0x60
'', # 0x61
'', # 0x62
'', # 0x63
'', # 0x64
'', # 0x65
'', # 0x66
'', # 0x67
'', # 0x68
'', # 0x69
'', # 0x6a
'', # 0x6b
'', # 0x6c
'', # 0x6d
'', # 0x6e
'#', # 0x6f
'', # 0x70
'', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'[?]', # 0x81
'[?]', # 0x82
'[?]', # 0x83
'[?]', # 0x84
'[?]', # 0x85
'[?]', # 0x86
'[?]', # 0x87
'[?]', # 0x88
'[?]', # 0x89
'[?]', # 0x8a
'[?]', # 0x8b
'[?]', # 0x8c
'[?]', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'[?]', # 0x90
'[?]', # 0x91
'[?]', # 0x92
'[?]', # 0x93
'[?]', # 0x94
'[?]', # 0x95
'[?]', # 0x96
'[?]', # 0x97
'[?]', # 0x98
'[?]', # 0x99
'[?]', # 0x9a
'[?]', # 0x9b
'[?]', # 0x9c
'[?]', # 0x9d
'[?]', # 0x9e
'[?]', # 0x9f
'[?]', # 0xa0
'[?]', # 0xa1
'[?]', # 0xa2
'[?]', # 0xa3
'[?]', # 0xa4
'[?]', # 0xa5
'[?]', # 0xa6
'[?]', # 0xa7
'[?]', # 0xa8
'[?]', # 0xa9
'[?]', # 0xaa
'[?]', # 0xab
'[?]', # 0xac
'[?]', # 0xad
'[?]', # 0xae
'[?]', # 0xaf
'[?]', # 0xb0
'[?]', # 0xb1
'[?]', # 0xb2
'[?]', # 0xb3
'[?]', # 0xb4
'[?]', # 0xb5
'[?]', # 0xb6
'[?]', # 0xb7
'[?]', # 0xb8
'[?]', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'[?]', # 0xbc
'[?]', # 0xbd
'[?]', # 0xbe
'[?]', # 0xbf
'[?]', # 0xc0
'[?]', # 0xc1
'[?]', # 0xc2
'[?]', # 0xc3
'[?]', # 0xc4
'[?]', # 0xc5
'[?]', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| mit | 3,818,406,042,418,843,600 | 14.642023 | 16 | 0.254726 | false |
RecursiveGreen/pymod | formats/MOD.py | 1 | 9729 | import struct
from pymod.constants import *
from pymod.module import *
from pymod.tables import *
from pymod.util import *
MOD_TYPES = (
('M.K.', 'Amiga-NewTracker', 4),
('M!K!', 'Amiga-ProTracker', 4),
('M&K!', 'Amiga-NoiseTracker', 4),
('N.T.', 'Amiga-NoiseTracker?', 4), # ???, mentioned in libModplug
('CD81', '8 Channel Falcon', 8),
('OCTA', 'Amiga Oktalyzer', 8), # SchismTracker/libModplug have
('OKTA', 'Amiga Oktalyzer', 8), # 'C' or 'K', but not both
('FLT4', '4 Channel Startrekker', 4),
('FLT8', '8 Channel Startrekker', 8),
('2CHN', '2 Channel MOD', 2),
('3CHN', '3 Channel MOD', 3), # Does this show up ever?
('4CHN', '4 Channel MOD', 4),
('5CHN', '5 Channel TakeTracker', 5),
('6CHN', '6 Channel MOD', 6),
('7CHN', '7 Channel TakeTracker', 7),
('8CHN', '8 Channel MOD', 8),
('9CHN', '9 Channel TakeTracker', 9),
('10CH', '10 Channel MOD', 10),
('11CH', '11 Channel TakeTracker', 11),
('12CH', '12 Channel MOD', 12),
('13CH', '13 Channel TakeTracker', 13),
('14CH', '14 Channel MOD', 14),
('15CH', '15 Channel TakeTracker', 15),
('16CH', '16 Channel MOD', 16),
('18CH', '18 Channel MOD', 18),
('20CH', '20 Channel MOD', 20),
('22CH', '22 Channel MOD', 22),
('24CH', '24 Channel MOD', 24),
('26CH', '26 Channel MOD', 26),
('28CH', '28 Channel MOD', 28),
('30CH', '30 Channel MOD', 30),
('32CH', '32 Channel MOD', 32),
('16CN', '16 Channel MOD', 16), # Not certain where these two
('32CN', '32 Channel MOD', 32), # come from. (libModplug)
('TDZ1', '1 Channel TakeTracker', 1),
('TDZ2', '2 Channel TakeTracker', 2),
('TDZ3', '3 Channel TakeTracker', 3),
('TDZ4', '4 Channel MOD', 4),
('TDZ5', '5 Channel MOD', 5),
('TDZ6', '6 Channel MOD', 6),
('TDZ7', '7 Channel MOD', 7),
('TDZ8', '8 Channel MOD', 8),
('TDZ9', '9 Channel MOD', 9)
)
class MODNote(Note):
"""The definition of a generic MOD note and it's effects"""
def __init__(self, pattdata=[]):
if pattdata:
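            # Each 4-byte MOD pattern cell packs: the instrument number split across
            # the high nibbles of bytes 0 and 2, a 12-bit Amiga period in the low
            # nibble of byte 0 plus byte 1, and the effect command and parameter in
            # the low nibble of byte 2 plus byte 3 (mirrored by the unpacking below).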
note = self.mod_period_to_note(((pattdata[0] & 0xf) << 8) + pattdata[1])
instrument = (pattdata[0] & 0xf0) + (pattdata[2] >> 4)
voleffect = VOLFX_NONE
volparam = 0
effect = pattdata[2] & 0xf
param = pattdata[3]
super(MODNote, self).__init__(note, instrument, voleffect, volparam, effect, param)
else:
super(MODNote, self).__init__(0, 0, 0, 0, 0, 0)
def mod_period_to_note(self, period):
if period:
for num in range(NOTE_LAST + 1):
if period >= (32 * period_table[num % 12] >> (num / 12 + 2)):
return num + 1
return NOTE_NONE
def __unicode__(self):
keys = ['C-', 'C#', 'D-', 'D#', 'E-', 'F-', 'F#', 'G-', 'G#', 'A-', 'A#', 'B-']
commands = '123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if self.note == 0: ret1 = '...'
elif self.note > 0 and self.note <=120:
split = divmod(self.note-1, 12)
ret1 = '%s%s' % (keys[split[1]], str(split[0]))
elif self.note == 254: ret1 = '^^^'
elif self.note == 255: ret1 = '==='
if self.instrument: ret2 = str(self.instrument).zfill(2)
else: ret2 = '..'
# No volume columns for MOD files
ret3 = '..'
if self.effect: letter = commands[self.effect-1]
else: letter = '.'
ret4 = '%s%s' % (letter, hex(self.param)[2:].zfill(2).upper())
return '%s %s %s %s' % (ret1, ret2, ret3, ret4)
def __repr__(self):
return self.__unicode__()
class MODPattern(Pattern):
"""The definition of the MOD pattern"""
def __init__(self, file=None, rows=64, channels=4):
super(MODPattern, self).__init__(rows, channels)
if file:
self.load(file)
else:
self.data = self.empty(self.rows, self.channels)
def empty(self, rows, channels):
pattern = []
for row in range(rows):
pattern.append([])
for channel in range(channels):
pattern[row].append(MODNote())
return pattern
def load(self, file):
self.data = self.empty(self.rows, self.channels)
for row in range(self.rows):
for channel in range(self.channels):
self.data[row][channel] = MODNote(list(struct.unpack(">4B", file.read(4))))
class MODSample(Sample):
"""Definition of an MOD sample"""
def __init__(self, file=None):
super(MODSample, self).__init__()
self.modsamploadflags = SF_8 | SF_LE | SF_M | SF_PCMS
if file: self.load(file, 0)
def load(self, file, loadtype=0):
if loadtype == 0:
# Loads the MOD sample headers
modsampname = struct.unpack(">22s", file.read(22))[0]
modsamplength = struct.unpack(">H", file.read(2))[0]
modsampfinetune = struct.unpack(">b", file.read(1))[0]
modsampvolume = struct.unpack(">B", file.read(1))[0]
modsamploopbegin = struct.unpack(">H", file.read(2))[0]
modsamplooplength = struct.unpack(">H", file.read(2))[0]
# Parse it into generic Sample
self.name = modsampname
self.filename = modsampname
self.volume = MIN(modsampvolume, 64) * 4
self.length = modsamplength * 2
self.c5speed = MOD_FINETUNE(modsampfinetune)
self.loopbegin = modsamploopbegin
if modsamplooplength > 2: self.flags = self.flags | CHN_LOOP
self.loopend = self.loopbegin + modsamplooplength
elif loadtype == 1:
# . . .otherwise, load sample data
super(MODSample, self).load(file, file.tell(), self.modsamploadflags)
class MOD(Module):
"""A class that holds a generic MOD file"""
def __init__(self, filename=None):
super(MOD, self).__init__()
if not filename:
self.id = '4CHN' # /b/, for teh lulz. . .(bad joke)
self.tracker = '4 Channel MOD'
self.restartpos = 0
self.channelnum = 4
self.samplenum = 31
else:
f = open(filename, 'rb') # NOTE: MOD files should be big-endian!
self.filename = filename
f.seek(1080) # Magic number is in middle of file.
magic = struct.unpack(">4s", f.read(4))[0]
self.id = ''
for TYPE in MOD_TYPES:
if magic == TYPE[0]:
self.id = magic
self.tracker = TYPE[1]
self.channelnum = TYPE[2]
self.samplenum = 31
break
if self.id == '':
self.id = '????'
self.tracker = '*OLD* 4 Channel MOD'
self.channelnum = 4
self.samplenum = 15
f.seek(0)
self.name = struct.unpack(">20s", f.read(20))[0] # Song title (padded with NULL)
self.samples = []
for num in range(self.samplenum):
self.samples.append(MODSample(f)) # Loading sample headers
self.ordernum = struct.unpack(">B", f.read(1))[0] # Number of orders in song
self.restartpos = struct.unpack(">B", f.read(1))[0] # Restart position
self.orders = list(struct.unpack(">128B", f.read(128)))
            # Fixes for buggy Startrekker MODs. . .
fixed = 0
if self.id == 'FLT8':
for order in self.orders:
if order & 1:
fixed = 1
self.id = 'FLT4'
self.tracker = '4 Channel Startrekker (buggy)'
self.channelnum = 4
if not fixed:
for num in range(128):
self.orders[num] = self.orders[num] >> 1
self.patternnum = max(self.orders) + 1
self.tempo = 125
self.speed = 6
curpos = f.tell()
# Testing for WOW files. . .
if self.id == 'M.K.':
f.seek(0, 2)
sampsize = 0
for num in range(self.samplenum):
sampsize = sampsize + self.samples[num].length
if f.tell() == 2048 * self.patternnum + sampsize + 3132:
self.channelnum = 8
self.tracker = 'Mods Grave WOW'
f.seek(curpos)
if self.id != '????':
f.seek(4, 1) # Skip the magic id. . .
self.patterns = []
if self.patternnum:
for num in range(self.patternnum):
self.patterns.append(MODPattern(f, channels=self.channelnum))
for num in range(self.samplenum):
self.samples[num].load(f, 1) # Loading sample data
f.close()
def detect(filename):
f = open(filename, 'rb')
f.seek(1080)
magic = struct.unpack(">4s", f.read(4))[0]
f.close()
for TYPE in MOD_TYPES:
if magic == TYPE[0]:
return 2
if filename.lower().endswith('.mod') or filename.lower().startswith('mod.'):
return 1
else:
return 0
detect = staticmethod(detect)
| gpl-3.0 | 8,350,322,968,958,532,000 | 36.133588 | 98 | 0.490698 | false |
heiko-r/paparazzi | sw/tools/airframe_editor/gui_dialogs.py | 29 | 1632 | #!/usr/bin/env python
from __future__ import print_function
import gtk
from os import path
if gtk.pygtk_version < (2, 3, 90):
print("Please upgrade your pygtk")
raise SystemExit
def filechooser(pathname):
dialog = gtk.FileChooserDialog("Open ...", None,
gtk.FILE_CHOOSER_ACTION_OPEN,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
dialog.set_default_response(gtk.RESPONSE_OK)
dialog.set_current_folder(pathname)
filter = gtk.FileFilter()
filter.set_name("Airframe File")
filter.add_pattern("*.xml")
dialog.add_filter(filter)
response = dialog.run()
filename = ""
if response == gtk.RESPONSE_OK:
filename = dialog.get_filename()
elif response == gtk.RESPONSE_CANCEL:
print("No file selected")
dialog.destroy()
return filename
def error_loading_xml(s):
err_msg = gtk.MessageDialog(None, gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_ERROR, gtk.BUTTONS_CLOSE,
"Error Loading XML: " + s)
err_msg.run()
err_msg.destroy()
def about(home):
about_d = gtk.AboutDialog()
about_d.set_program_name("Paparazzi Airframe Editor")
about_d.set_version("0.1")
about_d.set_copyright("(c) GPL v2")
about_d.set_comments("Airframe Editor")
about_d.set_website("http://paparazzi.github.io")
about_d.set_logo(gtk.gdk.pixbuf_new_from_file(path.join(home, "data/pictures/penguin_icon.png")))
about_d.run()
about_d.destroy()
| gpl-2.0 | -247,942,472,923,176,500 | 27.631579 | 101 | 0.604779 | false |
kuri65536/python-for-android | python-modules/twisted/twisted/python/hook.py | 90 | 5266 |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
I define support for hookable instance methods.
These are methods which you can register pre-call and post-call external
functions to augment their functionality. People familiar with more esoteric
languages may think of these as \"method combinations\".
This could be used to add optional preconditions, user-extensible callbacks
(a-la emacs) or a thread-safety mechanism.
The four exported calls are:
- L{addPre}
- L{addPost}
- L{removePre}
- L{removePost}
All have the signature (class, methodName, callable), and the callable they
take must always have the signature (instance, *args, **kw) unless the
particular signature of the method they hook is known.
Hooks should typically not throw exceptions, however, no effort will be made by
this module to prevent them from doing so. Pre-hooks will always be called,
but post-hooks will only be called if the pre-hooks do not raise any exceptions
(they will still be called if the main method raises an exception). The return
values and exception status of the main method will be propagated (assuming
none of the hooks raise an exception). Hooks will be executed in the order in
which they are added.
"""
# System Imports
import string
### Public Interface
class HookError(Exception):
"An error which will fire when an invariant is violated."
def addPre(klass, name, func):
"""hook.addPre(klass, name, func) -> None
Add a function to be called before the method klass.name is invoked.
"""
_addHook(klass, name, PRE, func)
def addPost(klass, name, func):
"""hook.addPost(klass, name, func) -> None
Add a function to be called after the method klass.name is invoked.
"""
_addHook(klass, name, POST, func)
def removePre(klass, name, func):
"""hook.removePre(klass, name, func) -> None
Remove a function (previously registered with addPre) so that it
is no longer executed before klass.name.
"""
_removeHook(klass, name, PRE, func)
def removePost(klass, name, func):
"""hook.removePre(klass, name, func) -> None
Remove a function (previously registered with addPost) so that it
is no longer executed after klass.name.
"""
_removeHook(klass, name, POST, func)
### "Helper" functions.
hooked_func = """
import %(module)s
def %(name)s(*args, **kw):
klazz = %(module)s.%(klass)s
for preMethod in klazz.%(preName)s:
preMethod(*args, **kw)
try:
return klazz.%(originalName)s(*args, **kw)
finally:
for postMethod in klazz.%(postName)s:
postMethod(*args, **kw)
"""
_PRE = '__hook_pre_%s_%s_%s__'
_POST = '__hook_post_%s_%s_%s__'
_ORIG = '__hook_orig_%s_%s_%s__'
def _XXX(k,n,s):
"string manipulation garbage"
x = s % (string.replace(k.__module__,'.','_'), k.__name__, n)
return x
def PRE(k,n):
"(private) munging to turn a method name into a pre-hook-method-name"
return _XXX(k,n,_PRE)
def POST(k,n):
"(private) munging to turn a method name into a post-hook-method-name"
return _XXX(k,n,_POST)
def ORIG(k,n):
"(private) munging to turn a method name into an `original' identifier"
return _XXX(k,n,_ORIG)
def _addHook(klass, name, phase, func):
"(private) adds a hook to a method on a class"
_enhook(klass, name)
if not hasattr(klass, phase(klass, name)):
setattr(klass, phase(klass, name), [])
phaselist = getattr(klass, phase(klass, name))
phaselist.append(func)
def _removeHook(klass, name, phase, func):
"(private) removes a hook from a method on a class"
phaselistname = phase(klass, name)
if not hasattr(klass, ORIG(klass,name)):
raise HookError("no hooks present!")
phaselist = getattr(klass, phase(klass, name))
try: phaselist.remove(func)
except ValueError:
raise HookError("hook %s not found in removal list for %s"%
(name,klass))
if not getattr(klass, PRE(klass,name)) and not getattr(klass, POST(klass, name)):
_dehook(klass, name)
def _enhook(klass, name):
"(private) causes a certain method name to be hooked on a class"
if hasattr(klass, ORIG(klass, name)):
return
def newfunc(*args, **kw):
for preMethod in getattr(klass, PRE(klass, name)):
preMethod(*args, **kw)
try:
return getattr(klass, ORIG(klass, name))(*args, **kw)
finally:
for postMethod in getattr(klass, POST(klass, name)):
postMethod(*args, **kw)
try:
newfunc.func_name = name
except TypeError:
# Older python's don't let you do this
pass
oldfunc = getattr(klass, name).im_func
setattr(klass, ORIG(klass, name), oldfunc)
setattr(klass, PRE(klass, name), [])
setattr(klass, POST(klass, name), [])
setattr(klass, name, newfunc)
def _dehook(klass, name):
"(private) causes a certain method name no longer to be hooked on a class"
if not hasattr(klass, ORIG(klass, name)):
raise HookError("Cannot unhook!")
setattr(klass, name, getattr(klass, ORIG(klass,name)))
delattr(klass, PRE(klass,name))
delattr(klass, POST(klass,name))
delattr(klass, ORIG(klass,name))
| apache-2.0 | 7,849,887,658,224,969,000 | 28.751412 | 85 | 0.661033 | false |
leggitta/mne-python | mne/decoding/mixin.py | 19 | 1063 | class TransformerMixin(object):
"""Mixin class for all transformers in scikit-learn"""
def fit_transform(self, X, y=None, **fit_params):
"""Fit to data, then transform it
Fits transformer to X and y with optional parameters fit_params
and returns a transformed version of X.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Training set.
y : numpy array of shape [n_samples]
Target values.
Returns
-------
X_new : numpy array of shape [n_samples, n_features_new]
Transformed array.
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
if y is None:
# fit method of arity 1 (unsupervised transformation)
return self.fit(X, **fit_params).transform(X)
else:
# fit method of arity 2 (supervised transformation)
return self.fit(X, y, **fit_params).transform(X)
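# A minimal sketch of how the mixin is typically used (``CenteringTransformer``
# below is a hypothetical example, not part of this package):
#
#     import numpy as np
#
#     class CenteringTransformer(TransformerMixin):
#         def fit(self, X, y=None):
#             self.mean_ = X.mean(axis=0)
#             return self
#
#         def transform(self, X):
#             return X - self.mean_
#
#     X = np.random.rand(10, 3)
#     X_centered = CenteringTransformer().fit_transform(X)  # fit, then transform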
| bsd-3-clause | -7,794,861,591,294,883,000 | 34.433333 | 71 | 0.591722 | false |
ooici/coi-services | ion/agents/instrument/test/test_agent_persistence.py | 1 | 29918 | #!/usr/bin/env python
"""
@package ion.agents.instrument.test.test_agent_persistence
@file ion/agents.instrument/test_agent_persistence.py
@author Edward Hunter
@brief Test cases for R2 instrument agent state and config persistence between running instances.
"""
__author__ = 'Edward Hunter'
# Import pyon first for monkey patching.
# Pyon log and config objects.
from pyon.public import log
from pyon.public import CFG
from pyon.public import get_obj_registry
# Standard imports.
import sys
import time
import socket
import re
import json
import unittest
import os
from copy import deepcopy
# 3rd party imports.
import gevent
from gevent.event import AsyncResult
from nose.plugins.attrib import attr
from mock import patch
# Pyon pubsub and event support.
from pyon.event.event import EventSubscriber, EventPublisher
from pyon.ion.stream import StandaloneStreamSubscriber
from ion.services.dm.utility.granule_utils import RecordDictionaryTool
# Pyon unittest support.
from pyon.util.int_test import IonIntegrationTestCase
# Pyon exceptions.
from pyon.core.exception import BadRequest, Conflict, Timeout, ResourceError
# Agent imports.
from pyon.util.context import LocalContextMixin
from pyon.agent.agent import ResourceAgentClient
from pyon.agent.agent import ResourceAgentState
from pyon.agent.agent import ResourceAgentEvent
# Driver imports.
from ion.agents.instrument.driver_int_test_support import DriverIntegrationTestSupport
# Objects and clients.
from interface.objects import AgentCommand
from interface.services.icontainer_agent import ContainerAgentClient
from interface.services.dm.ipubsub_management_service import PubsubManagementServiceClient
from interface.services.dm.idataset_management_service import DatasetManagementServiceClient
# Alerts.
from interface.objects import StreamAlertType, AggregateStatusType
from interface.services.cei.iprocess_dispatcher_service import ProcessDispatcherServiceClient
from interface.objects import ProcessDefinition, ProcessStateEnum
from pyon.core.object import IonObjectSerializer, IonObjectDeserializer
from pyon.core.bootstrap import IonObject
"""
--with-pycc
--with-queueblame
bin/nosetests -s -v --nologcapture --with-queueblame --with-pycc ion/agents/instrument/test/test_agent_persistence.py:TestAgentPersistence
bin/nosetests -s -v --nologcapture ion/agents/instrument/test/test_agent_persistence.py:TestAgentPersistence
bin/nosetests --with-pycc -s -v --nologcapture ion/agents/instrument/test/test_agent_persistence.py:TestAgentPersistence.test_agent_config_persistence
bin/nosetests -s -v --nologcapture ion/agents/instrument/test/test_agent_persistence.py:TestAgentPersistence.test_agent_config_persistence
bin/nosetests -s -v --nologcapture ion/agents/instrument/test/test_agent_persistence.py:TestAgentPersistence.test_agent_state_persistence
bin/nosetests -s -v --nologcapture ion/agents/instrument/test/test_agent_persistence.py:TestAgentPersistence.test_agent_rparam_persistence
bin/nosetests -s -v --nologcapture ion/agents/instrument/test/test_agent_persistence.py:TestAgentPersistence.test_test_cei_launch_mode
bin/nosetests -s -v --nologcapture --with-queueblame --with-pycc ion/agents/instrument/test/test_agent_persistence.py:TestAgentPersistence.test_test_cei_launch_mode
"""
###############################################################################
# Global constants.
###############################################################################
DEV_ADDR = CFG.device.sbe37.host
DEV_PORT = CFG.device.sbe37.port
DATA_PORT = CFG.device.sbe37.port_agent_data_port
CMD_PORT = CFG.device.sbe37.port_agent_cmd_port
PA_BINARY = CFG.device.sbe37.port_agent_binary
DELIM = CFG.device.sbe37.delim
WORK_DIR = CFG.device.sbe37.workdir
DRV_URI = CFG.device.sbe37.dvr_egg
from ion.agents.instrument.test.agent_test_constants import IA_RESOURCE_ID
from ion.agents.instrument.test.agent_test_constants import IA_NAME
from ion.agents.instrument.test.agent_test_constants import IA_MOD
from ion.agents.instrument.test.agent_test_constants import IA_CLS
from ion.agents.instrument.test.load_test_driver_egg import load_egg
DVR_CONFIG = load_egg()
# Load MI modules from the egg
from mi.instrument.seabird.sbe37smb.ooicore.driver import SBE37ProtocolEvent
from mi.instrument.seabird.sbe37smb.ooicore.driver import SBE37Parameter
class FakeProcess(LocalContextMixin):
"""
A fake process used because the test case is not an ion process.
"""
name = ''
id=''
process_type = ''
@attr('HARDWARE', group='mi')
@patch.dict(CFG, {'endpoint':{'receive':{'timeout': 360}}})
@unittest.skipIf((not os.getenv('PYCC_MODE', False)) and os.getenv('CEI_LAUNCH_TEST', False), 'Skip until tests support launch port agent configurations.')
class TestAgentPersistence(IonIntegrationTestCase):
"""
"""
############################################################################
# Setup, teardown.
############################################################################
def setUp(self):
"""
Set up driver integration support.
Start port agent, add port agent cleanup.
Start container.
Start deploy services.
Define agent config.
"""
self._ia_client = None
log.info('Creating driver integration test support:')
log.info('driver uri: %s', DRV_URI)
log.info('device address: %s', DEV_ADDR)
log.info('device port: %s', DEV_PORT)
log.info('log delimiter: %s', DELIM)
log.info('work dir: %s', WORK_DIR)
self._support = DriverIntegrationTestSupport(None,
None,
DEV_ADDR,
DEV_PORT,
DATA_PORT,
CMD_PORT,
PA_BINARY,
DELIM,
WORK_DIR)
# Start port agent, add stop to cleanup.
self._start_pagent()
self.addCleanup(self._support.stop_pagent)
# Start container.
        log.info('Starting capability container.')
self._start_container()
# Bring up services in a deploy file (no need to message)
        log.info('Starting deploy services.')
self.container.start_rel_from_url('res/deploy/r2deploy.yml')
log.info('building stream configuration')
# Setup stream config.
self._build_stream_config()
# Create agent config.
self._agent_config = {
'driver_config' : DVR_CONFIG,
'stream_config' : self._stream_config,
'agent' : {'resource_id': IA_RESOURCE_ID},
'test_mode' : True,
'forget_past' : False,
'enable_persistence' : True,
'aparam_pubrate_config' :
{
'raw' : 2,
'parsed' : 2
}
}
self._ia_client = None
self._ia_pid = '1234'
self.addCleanup(self._verify_agent_reset)
self.addCleanup(self.container.state_repository.put_state,
self._ia_pid, {})
###############################################################################
# Port agent helpers.
###############################################################################
def _start_pagent(self):
"""
Construct and start the port agent.
"""
port = self._support.start_pagent()
log.info('Port agent started at port %i',port)
# Configure driver to use port agent port number.
DVR_CONFIG['comms_config'] = {
'addr' : 'localhost',
'port' : port,
'cmd_port' : CMD_PORT
}
###############################################################################
# Data stream helpers.
###############################################################################
def _build_stream_config(self):
"""
"""
# Create a pubsub client to create streams.
pubsub_client = PubsubManagementServiceClient(node=self.container.node)
dataset_management = DatasetManagementServiceClient()
# Create streams and subscriptions for each stream named in driver.
self._stream_config = {}
stream_name = 'parsed'
param_dict_name = 'ctd_parsed_param_dict'
pd_id = dataset_management.read_parameter_dictionary_by_name(param_dict_name, id_only=True)
stream_def_id = pubsub_client.create_stream_definition(name=stream_name, parameter_dictionary_id=pd_id)
pd = pubsub_client.read_stream_definition(stream_def_id).parameter_dictionary
stream_id, stream_route = pubsub_client.create_stream(name=stream_name,
exchange_point='science_data',
stream_definition_id=stream_def_id)
stream_config = dict(routing_key=stream_route.routing_key,
exchange_point=stream_route.exchange_point,
stream_id=stream_id,
stream_definition_ref=stream_def_id,
parameter_dictionary=pd)
self._stream_config[stream_name] = stream_config
stream_name = 'raw'
param_dict_name = 'ctd_raw_param_dict'
pd_id = dataset_management.read_parameter_dictionary_by_name(param_dict_name, id_only=True)
stream_def_id = pubsub_client.create_stream_definition(name=stream_name, parameter_dictionary_id=pd_id)
pd = pubsub_client.read_stream_definition(stream_def_id).parameter_dictionary
stream_id, stream_route = pubsub_client.create_stream(name=stream_name,
exchange_point='science_data',
stream_definition_id=stream_def_id)
stream_config = dict(routing_key=stream_route.routing_key,
exchange_point=stream_route.exchange_point,
stream_id=stream_id,
stream_definition_ref=stream_def_id,
parameter_dictionary=pd)
self._stream_config[stream_name] = stream_config
###############################################################################
# Agent start stop helpers.
###############################################################################
def _start_agent(self, bootmode=None):
"""
"""
container_client = ContainerAgentClient(node=self.container.node,
name=self.container.name)
agent_config = deepcopy(self._agent_config)
agent_config['bootmode'] = bootmode
self._ia_pid = container_client.spawn_process(name=IA_NAME,
module=IA_MOD,
cls=IA_CLS,
config=agent_config,
process_id=self._ia_pid)
# Start a resource agent client to talk with the instrument agent.
self._ia_client = None
self._ia_client = ResourceAgentClient(IA_RESOURCE_ID, process=FakeProcess())
log.info('Got instrument agent client %s.', str(self._ia_client))
def _stop_agent(self):
"""
"""
if self._ia_pid:
container_client = ContainerAgentClient(node=self.container.node,
name=self.container.name)
container_client.terminate_process(self._ia_pid)
if self._ia_client:
self._ia_client = None
def _verify_agent_reset(self):
"""
Check agent state and reset if necessary.
        This is called if a test fails and reset hasn't occurred.
"""
if self._ia_client is None:
return
state = self._ia_client.get_agent_state()
if state != ResourceAgentState.UNINITIALIZED:
cmd = AgentCommand(command=ResourceAgentEvent.RESET)
retval = self._ia_client.execute_agent(cmd)
self._ia_client = None
###############################################################################
# Tests.
###############################################################################
def test_agent_config_persistence(self):
"""
test_agent_config_persistence
Test that agent parameter configuration is persisted between running
instances.
"""
# Start the agent.
self._start_agent()
# We start in uninitialized state.
# In this state there is no driver process.
state = self._ia_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.UNINITIALIZED)
# Ping the agent.
retval = self._ia_client.ping_agent()
log.info(retval)
# Confirm the default agent parameters.
#{'streams': {'raw': ['quality_flag', 'ingestion_timestamp', 'port_timestamp', 'raw', 'lat', 'driver_timestamp', 'preferred_timestamp', 'lon', 'internal_timestamp', 'time'], 'parsed': ['quality_flag', 'ingestion_timestamp', 'port_timestamp', 'pressure', 'lat', 'driver_timestamp', 'conductivity', 'preferred_timestamp', 'temp', 'density', 'salinity', 'lon', 'internal_timestamp', 'time']}}
retval = self._ia_client.get_agent(['streams'])['streams']
self.assertIn('raw', retval.keys())
self.assertIn('parsed', retval.keys())
#{'pubrate': {'raw': 0, 'parsed': 0}}
retval = self._ia_client.get_agent(['pubrate'])['pubrate']
self.assertIn('raw', retval.keys())
self.assertIn('parsed', retval.keys())
self.assertEqual(retval['raw'], 2)
self.assertEqual(retval['parsed'], 2)
#{'alerts': []}
retval = self._ia_client.get_agent(['alerts'])['alerts']
self.assertEqual(retval, [])
# Define a few new parameters and set them.
# Confirm they are set.
alert_def_1 = {
'name' : 'current_warning_interval',
'stream_name' : 'parsed',
'description' : 'Current is below normal range.',
'alert_type' : StreamAlertType.WARNING,
'aggregate_type' : AggregateStatusType.AGGREGATE_DATA,
'value_id' : 'temp',
'lower_bound' : None,
'lower_rel_op' : None,
'upper_bound' : 10.0,
'upper_rel_op' : '<',
'alert_class' : 'IntervalAlert'
}
alert_def_2 = {
'name' : 'temp_alarm_interval',
'stream_name' : 'parsed',
            'description' : 'Temperature is critical.',
'alert_type' : StreamAlertType.ALARM,
'aggregate_type' : AggregateStatusType.AGGREGATE_DATA,
'value_id' : 'temp',
'lower_bound' : None,
'lower_rel_op' : None,
'upper_bound' : 20.0,
'upper_rel_op' : '<',
'alert_class' : 'IntervalAlert'
}
alert_def3 = {
'name' : 'late_data_warning',
'stream_name' : 'parsed',
'description' : 'Expected data has not arrived.',
'alert_type' : StreamAlertType.WARNING,
'aggregate_type' : AggregateStatusType.AGGREGATE_COMMS,
'time_delta' : 180,
'alert_class' : 'LateDataAlert'
}
orig_alerts = [alert_def_1,alert_def_2, alert_def3]
pubrate = {
'parsed' : 10,
'raw' : 20
}
params = {
'alerts' : orig_alerts,
'pubrate' : pubrate
}
# Set the new agent params and confirm.
self._ia_client.set_agent(params)
params = [
'alerts',
'pubrate'
]
retval = self._ia_client.get_agent(params)
pubrate = retval['pubrate']
alerts = retval['alerts']
self.assertIn('raw', pubrate.keys())
self.assertIn('parsed', pubrate.keys())
self.assertEqual(pubrate['parsed'], 10)
self.assertEqual(pubrate['raw'], 20)
count = 0
for x in alerts:
x.pop('status')
x.pop('value')
for y in orig_alerts:
if x['name'] == y['name']:
count += 1
self.assertItemsEqual(x.keys(), y.keys())
self.assertEqual(count, 3)
# Now stop and restart the agent.
self._stop_agent()
gevent.sleep(15)
self._start_agent('restart')
# We start in uninitialized state.
# In this state there is no driver process.
state = self._ia_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.UNINITIALIZED)
# Ping the agent.
retval = self._ia_client.ping_agent()
log.info(retval)
# Confirm the persisted parameters.
params = [
'alerts',
'pubrate'
]
retval = self._ia_client.get_agent(params)
pubrate = retval['pubrate']
alerts = retval['alerts']
self.assertIn('raw', pubrate.keys())
self.assertIn('parsed', pubrate.keys())
self.assertEqual(pubrate['parsed'], 10)
self.assertEqual(pubrate['raw'], 20)
count = 0
for x in alerts:
x.pop('status')
x.pop('value')
for y in orig_alerts:
if x['name'] == y['name']:
count += 1
self.assertItemsEqual(x.keys(), y.keys())
self.assertEqual(count, 3)
def test_agent_state_persistence(self):
"""
test_agent_state_persistence
Verify that agents can be restored to their prior running state.
"""
self._start_agent()
# We start in uninitialized state.
# In this state there is no driver process.
state = self._ia_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.UNINITIALIZED)
# Ping the agent.
retval = self._ia_client.ping_agent()
log.info(retval)
alert_def3 = {
'name' : 'late_data_warning',
'stream_name' : 'parsed',
'description' : 'Expected data has not arrived.',
'alert_type' : StreamAlertType.WARNING,
'aggregate_type' : AggregateStatusType.AGGREGATE_COMMS,
'time_delta' : 180,
'alert_class' : 'LateDataAlert'
}
orig_pubrate = {
'parsed' : 10,
'raw' : 20
}
params = {
'alerts' : [alert_def3],
'pubrate' : orig_pubrate
}
# Set the new agent params and confirm.
self._ia_client.set_agent(params)
# Initialize the agent.
# The agent is spawned with a driver config, but you can pass one in
        # optionally with the initialize command. This validates the driver
# config, launches a driver process and connects to it via messaging.
# If successful, we switch to the inactive state.
cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
retval = self._ia_client.execute_agent(cmd)
state = self._ia_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.INACTIVE)
# Ping the driver proc.
retval = self._ia_client.ping_resource()
log.info(retval)
cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
retval = self._ia_client.execute_agent(cmd)
state = self._ia_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.IDLE)
cmd = AgentCommand(command=ResourceAgentEvent.RUN)
retval = self._ia_client.execute_agent(cmd)
state = self._ia_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.COMMAND)
# Acquire sample returns a string, not a particle. The particle
# is created by the data handler though.
cmd = AgentCommand(command=SBE37ProtocolEvent.ACQUIRE_SAMPLE)
retval = self._ia_client.execute_resource(cmd)
# Now stop and restart the agent.
self._stop_agent()
gevent.sleep(15)
self._start_agent('restart')
timeout = gevent.Timeout(240)
timeout.start()
try:
while True:
state = self._ia_client.get_agent_state()
print '## in state: ' + state
if state == ResourceAgentState.COMMAND:
timeout.cancel()
break
else:
gevent.sleep(1)
except gevent.Timeout:
self.fail("Could not restore agent state to COMMAND.")
params = [
'alerts',
'pubrate'
]
retval = self._ia_client.get_agent(params)
alerts = retval['alerts']
pubrate = retval['pubrate']
self.assertEqual(len(alerts), 1)
self.assertEqual(alert_def3['name'], alerts[0]['name'])
self.assertEqual(pubrate['raw'], 20)
self.assertEqual(pubrate['parsed'], 10)
cmd = AgentCommand(command=ResourceAgentEvent.PAUSE)
retval = self._ia_client.execute_agent(cmd)
state = self._ia_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.STOPPED)
# Now stop and restart the agent.
self._stop_agent()
gevent.sleep(15)
self._start_agent('restart')
timeout = gevent.Timeout(240)
timeout.start()
try:
while True:
state = self._ia_client.get_agent_state()
if state == ResourceAgentState.STOPPED:
timeout.cancel()
break
else:
gevent.sleep(1)
except gevent.Timeout:
self.fail("Could not restore agent state to STOPPED.")
retval = self._ia_client.get_agent(params)
alerts = retval['alerts']
pubrate = retval['pubrate']
self.assertEqual(len(alerts), 1)
self.assertEqual(alert_def3['name'], alerts[0]['name'])
self.assertEqual(pubrate['raw'], 20)
self.assertEqual(pubrate['parsed'], 10)
# Reset the agent. This causes the driver messaging to be stopped,
# the driver process to end and switches us back to uninitialized.
cmd = AgentCommand(command=ResourceAgentEvent.RESET)
retval = self._ia_client.execute_agent(cmd)
state = self._ia_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.UNINITIALIZED)
def test_agent_rparam_persistence(self):
"""
test_agent_rparam_persistence
Verify ability to restore device configuration.
### Original values:
{'TA0': -0.0002572242, 'OUTPUTSV': False, 'NAVG': 0}
### Values after set:
{'TA0': -0.0005144484, 'OUTPUTSV': True, 'NAVG': 1}
### Restore config:
{'PTCA1': 0.6603433, 'WBOTC': 1.2024e-05, 'PCALDATE': [12, 8, 2005],
'STORETIME': False, 'CPCOR': 9.57e-08, 'PTCA2': 0.00575649,
'OUTPUTSV': True, 'SAMPLENUM': 0, 'TCALDATE': [8, 11, 2005],
'OUTPUTSAL': False, 'TA2': -9.717158e-06, 'POFFSET': 0.0,
'INTERVAL': 19733, 'SYNCWAIT': 0, 'CJ': 3.339261e-05,
'CI': 0.0001334915, 'CH': 0.1417895, 'TA0': -0.0005144484,
'TA1': 0.0003138936, 'NAVG': 1, 'TA3': 2.138735e-07, '
RCALDATE': [8, 11, 2005], 'CG': -0.987093, 'CTCOR': 3.25e-06, '
PTCB0': 24.6145, 'PTCB1': -0.0009, 'PTCB2': 0.0,
'CCALDATE': [8, 11, 2005], 'PA0': 5.916199, 'PA1': 0.4851819,
'PA2': 4.596432e-07, 'SYNCMODE': False, 'PTCA0': 276.2492,
'TXREALTIME': True, 'RTCA2': -3.022745e-08, 'RTCA1': 1.686132e-06,
'RTCA0': 0.9999862}
### Of which we have:
{'TA0': -0.0005144484, 'OUTPUTSV': True, 'NAVG': 1}
"""
self._start_agent()
# We start in uninitialized state.
# In this state there is no driver process.
state = self._ia_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.UNINITIALIZED)
# Ping the agent.
retval = self._ia_client.ping_agent()
log.info(retval)
# Initialize the agent.
# The agent is spawned with a driver config, but you can pass one in
        # optionally with the initialize command. This validates the driver
# config, launches a driver process and connects to it via messaging.
# If successful, we switch to the inactive state.
cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
retval = self._ia_client.execute_agent(cmd)
state = self._ia_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.INACTIVE)
# Ping the driver proc.
retval = self._ia_client.ping_resource()
log.info(retval)
cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
retval = self._ia_client.execute_agent(cmd)
state = self._ia_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.IDLE)
cmd = AgentCommand(command=ResourceAgentEvent.RUN)
retval = self._ia_client.execute_agent(cmd)
state = self._ia_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.COMMAND)
params = [
SBE37Parameter.OUTPUTSV,
SBE37Parameter.NAVG,
SBE37Parameter.TA0
]
retval = self._ia_client.get_resource(params)
orig_params = retval
new_params = {
SBE37Parameter.OUTPUTSV : not orig_params[SBE37Parameter.OUTPUTSV],
SBE37Parameter.NAVG : orig_params[SBE37Parameter.NAVG] + 1,
SBE37Parameter.TA0 : orig_params[SBE37Parameter.TA0] * 2
}
#print '########### orig params'
#print str(orig_params)
self._ia_client.set_resource(new_params)
retval = self._ia_client.get_resource(params)
self.assertEqual(retval[SBE37Parameter.OUTPUTSV],
new_params[SBE37Parameter.OUTPUTSV])
self.assertEqual(retval[SBE37Parameter.NAVG],
new_params[SBE37Parameter.NAVG])
delta = max(retval[SBE37Parameter.TA0],
new_params[SBE37Parameter.TA0])*.01
self.assertAlmostEqual(retval[SBE37Parameter.TA0],
new_params[SBE37Parameter.TA0], delta=delta)
#print '########### new params'
#print str(retval)
# Now stop and restart the agent.
self._stop_agent()
self._support.stop_pagent()
gevent.sleep(10)
self._start_pagent()
gevent.sleep(10)
self._start_agent('restart')
timeout = gevent.Timeout(600)
timeout.start()
try:
while True:
state = self._ia_client.get_agent_state()
if state == ResourceAgentState.COMMAND:
timeout.cancel()
break
else:
gevent.sleep(3)
except gevent.Timeout:
self.fail("Could not restore agent state to COMMAND.")
# Verify the parameters have been restored as needed.
retval = self._ia_client.get_resource(params)
#print '########### restored params'
#print str(retval)
self.assertEqual(retval[SBE37Parameter.OUTPUTSV],
new_params[SBE37Parameter.OUTPUTSV])
self.assertEqual(retval[SBE37Parameter.NAVG],
new_params[SBE37Parameter.NAVG])
delta = max(retval[SBE37Parameter.TA0],
new_params[SBE37Parameter.TA0])*.01
self.assertAlmostEqual(retval[SBE37Parameter.TA0],
new_params[SBE37Parameter.TA0], delta=delta)
# Reset the agent. This causes the driver messaging to be stopped,
# the driver process to end and switches us back to uninitialized.
cmd = AgentCommand(command=ResourceAgentEvent.RESET)
retval = self._ia_client.execute_agent(cmd)
state = self._ia_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.UNINITIALIZED)
@unittest.skip('Making CEI friendly.')
def test_cei_launch_mode(self):
pdc = ProcessDispatcherServiceClient(node=self.container.node)
p_def = ProcessDefinition(name='Agent007')
p_def.executable = {
'module' : 'ion.agents.instrument.instrument_agent',
'class' : 'InstrumentAgent'
}
p_def_id = pdc.create_process_definition(p_def)
pid = pdc.create_process(p_def_id)
def event_callback(event, *args, **kwargs):
print '######### proc %s in state %s' % (event.origin, ProcessStateEnum._str_map[event.state])
sub = EventSubscriber(event_type='ProcessLifecycleEvent',
callback=event_callback,
origin=pid,
origin_type='DispatchedProcess')
sub.start()
agent_config = deepcopy(self._agent_config)
agent_config['bootmode'] = 'restart'
pdc.schedule_process(p_def_id, process_id=pid,
configuration=agent_config)
gevent.sleep(5)
pdc.cancel_process(pid)
gevent.sleep(15)
sub.stop()
| bsd-2-clause | -5,372,083,795,564,110,000 | 37.957031 | 397 | 0.568721 | false |
akaszynski/vtkInterface | examples/02-plot/lighting.py | 1 | 1906 | """
Lighting Controls
~~~~~~~~~~~~~~~~~
Control aspects of the rendered mesh's lighting such as Ambient, Diffuse,
and Specular. These options only work if the ``lighting`` argument to
``add_mesh`` is ``True`` (it's true by default).
You can turn off all lighting by passing ``lighting=False`` to ``add_mesh``.
"""
# sphinx_gallery_thumbnail_number = 4
import pyvista as pv
from pyvista import examples
mesh = examples.download_st_helens().warp_by_scalar()
cpos = [(575848., 5128459., 22289.),
(562835.0, 5114981.5, 2294.5),
(-0.5, -0.5, 0.7)]
###############################################################################
# First, lets take a look at the mesh with default lighting conditions
mesh.plot(cpos=cpos, show_scalar_bar=False)
###############################################################################
# What about with no lighting
mesh.plot(lighting=False, cpos=cpos, show_scalar_bar=False)
###############################################################################
# Demonstration of the specular property
p = pv.Plotter(shape=(1,2), window_size=[1500, 500])
p.subplot(0,0)
p.add_mesh(mesh, show_scalar_bar=False)
p.add_text('No Specular')
p.subplot(0,1)
s = 1.0
p.add_mesh(mesh, specular=s, show_scalar_bar=False)
p.add_text('Specular of {}'.format(s))
p.link_views()
p.view_isometric()
p.show(cpos=cpos)
###############################################################################
# Just specular
mesh.plot(specular=0.5, cpos=cpos, show_scalar_bar=False)
###############################################################################
# Specular power
mesh.plot(specular=0.5, specular_power=15,
cpos=cpos, show_scalar_bar=False)
###############################################################################
# Demonstration of all three in use
mesh.plot(diffuse=0.5, specular=0.5, ambient=0.5,
cpos=cpos, show_scalar_bar=False)
| mit | 3,560,562,684,647,035,000 | 31.862069 | 79 | 0.519937 | false |
nion-software/nionswift | nion/swift/HistogramPanel.py | 1 | 37648 | # standard libraries
import functools
import gettext
import operator
import typing
# third party libraries
import numpy
# local libraries
from nion.data import Core
from nion.data import Image
from nion.swift import DisplayPanel
from nion.swift import Panel
from nion.swift.model import DisplayItem
from nion.swift.model import Graphics
from nion.ui import CanvasItem
from nion.ui import DrawingContext
from nion.ui import Widgets
from nion.utils import Binding
from nion.utils import Event
from nion.utils import Model
from nion.utils import Stream
_ = gettext.gettext
class AdornmentsCanvasItem(CanvasItem.AbstractCanvasItem):
"""A canvas item to draw the adornments on top of the histogram.
The adornments are the black and white lines shown during mouse
adjustment of the display limits.
Callers are expected to set the display_limits property and
then call update.
"""
def __init__(self):
super().__init__()
self.display_limits = (0,1)
def _repaint(self, drawing_context):
"""Repaint the canvas item. This will occur on a thread."""
# canvas size
canvas_width = self.canvas_size[1]
canvas_height = self.canvas_size[0]
left = self.display_limits[0]
right = self.display_limits[1]
# draw left display limit
if left > 0.0:
with drawing_context.saver():
drawing_context.begin_path()
drawing_context.move_to(left * canvas_width, 1)
drawing_context.line_to(left * canvas_width, canvas_height-1)
drawing_context.line_width = 2
drawing_context.stroke_style = "#000"
drawing_context.stroke()
# draw right display limit
if right < 1.0:
with drawing_context.saver():
drawing_context.begin_path()
drawing_context.move_to(right * canvas_width, 1)
drawing_context.line_to(right * canvas_width, canvas_height-1)
drawing_context.line_width = 2
drawing_context.stroke_style = "#FFF"
drawing_context.stroke()
# draw border
with drawing_context.saver():
drawing_context.begin_path()
drawing_context.move_to(0,canvas_height)
drawing_context.line_to(canvas_width,canvas_height)
drawing_context.line_width = 1
drawing_context.stroke_style = "#444"
drawing_context.stroke()
class SimpleLineGraphCanvasItem(CanvasItem.AbstractCanvasItem):
"""A canvas item to draw a simple line graph.
The caller can specify a background color by setting the background_color
property in the format of a CSS color.
The caller must update the data by setting the data property. The data must
    be a numpy array with values in the range 0 to 1. The data will be re-binned to the
width of the canvas item and plotted.
"""
def __init__(self):
super().__init__()
self.__data = None
self.__background_color = None
self.__retained_rebin_1d = dict()
@property
def data(self):
"""Return the data."""
return self.__data
@data.setter
def data(self, data):
"""Set the data and mark the canvas item for updating.
        Data should be a numpy array with values in the range 0 to 1.
"""
self.__data = data
self.update()
@property
def background_color(self):
"""Return the background color."""
return self.__background_color
@background_color.setter
def background_color(self, background_color):
"""Set the background color. Use CSS color format."""
self.__background_color = background_color
self.update()
def _repaint(self, drawing_context):
"""Repaint the canvas item. This will occur on a thread."""
# canvas size
canvas_width = self.canvas_size[1]
canvas_height = self.canvas_size[0]
# draw background
if self.background_color:
with drawing_context.saver():
drawing_context.begin_path()
drawing_context.move_to(0,0)
drawing_context.line_to(canvas_width,0)
drawing_context.line_to(canvas_width,canvas_height)
drawing_context.line_to(0,canvas_height)
drawing_context.close_path()
drawing_context.fill_style = self.background_color
drawing_context.fill()
# draw the data, if any
if (self.data is not None and len(self.data) > 0):
# draw the histogram itself
with drawing_context.saver():
drawing_context.begin_path()
binned_data = Image.rebin_1d(self.data, int(canvas_width), self.__retained_rebin_1d) if int(canvas_width) != self.data.shape[0] else self.data
for i in range(canvas_width):
drawing_context.move_to(i, canvas_height)
drawing_context.line_to(i, canvas_height * (1 - binned_data[i]))
drawing_context.line_width = 1
drawing_context.stroke_style = "#444"
drawing_context.stroke()
class ColorMapCanvasItem(CanvasItem.AbstractCanvasItem):
def __init__(self):
super().__init__()
self.update_sizing(self.sizing.with_fixed_height(4))
self.__color_map_data = None
@property
def color_map_data(self) -> numpy.ndarray:
"""Return the data."""
return self.__color_map_data
@color_map_data.setter
def color_map_data(self, data: numpy.ndarray) -> None:
"""Set the data and mark the canvas item for updating.
Data should be an ndarray of shape (256, 3) with type uint8
"""
self.__color_map_data = data
self.update()
def _repaint(self, drawing_context: DrawingContext.DrawingContext):
"""Repaint the canvas item. This will occur on a thread."""
# canvas size
canvas_width = self.canvas_size.width
canvas_height = self.canvas_size.height
with drawing_context.saver():
if self.__color_map_data is not None:
rgba_image = numpy.empty((4,) + self.__color_map_data.shape[:-1], dtype=numpy.uint32)
Image.get_rgb_view(rgba_image)[:] = self.__color_map_data[numpy.newaxis, :, :] # scalar data assigned to each component of rgb view
Image.get_alpha_view(rgba_image)[:] = 255
drawing_context.draw_image(rgba_image, 0, 0, canvas_width, canvas_height)
class HistogramCanvasItem(CanvasItem.CanvasItemComposition):
"""A canvas item to draw and control a histogram."""
def __init__(self, cursor_changed_fn: typing.Callable[[float], None]):
super().__init__()
# tell the canvas item that we want mouse events.
self.wants_mouse_events = True
# create the component canvas items: adornments and the graph.
self.__adornments_canvas_item = AdornmentsCanvasItem()
self.__simple_line_graph_canvas_item = SimpleLineGraphCanvasItem()
self.__histogram_color_map_canvas_item = ColorMapCanvasItem()
# canvas items get added back to front
column = CanvasItem.CanvasItemComposition()
column.layout = CanvasItem.CanvasItemColumnLayout()
graph_and_adornments = CanvasItem.CanvasItemComposition()
graph_and_adornments.add_canvas_item(self.__simple_line_graph_canvas_item)
graph_and_adornments.add_canvas_item(self.__adornments_canvas_item)
column.add_canvas_item(graph_and_adornments)
column.add_canvas_item(self.__histogram_color_map_canvas_item)
self.add_canvas_item(column)
# used for mouse tracking.
self.__pressed = False
self.on_set_display_limits = None
self.__cursor_changed = cursor_changed_fn
def close(self):
self._set_histogram_data(None)
super().close()
@property
def background_color(self):
"""Return the background color."""
return self.__simple_line_graph_canvas_item.background_color
@background_color.setter
def background_color(self, background_color):
"""Set the background color, in the CSS color format."""
self.__simple_line_graph_canvas_item.background_color = background_color
def _set_histogram_data(self, histogram_data):
# if the user is currently dragging the display limits, we don't want to update
# from changing data at the same time. but we _do_ want to draw the updated data.
if not self.__pressed:
self.__adornments_canvas_item.display_limits = (0, 1)
self.histogram_data = histogram_data
# make sure the adornments get updated
self.__adornments_canvas_item.update()
@property
def histogram_data(self):
return self.__simple_line_graph_canvas_item.data
@histogram_data.setter
def histogram_data(self, histogram_data):
self.__simple_line_graph_canvas_item.data = histogram_data
@property
def color_map_data(self) -> numpy.ndarray:
return self.__histogram_color_map_canvas_item.color_map_data
@color_map_data.setter
def color_map_data(self, color_map_data: numpy.ndarray) -> None:
self.__histogram_color_map_canvas_item.color_map_data = color_map_data
def __set_display_limits(self, display_limits):
self.__adornments_canvas_item.display_limits = display_limits
self.__adornments_canvas_item.update()
def mouse_double_clicked(self, x, y, modifiers):
if super().mouse_double_clicked(x, y, modifiers):
return True
self.__set_display_limits((0, 1))
if callable(self.on_set_display_limits):
self.on_set_display_limits(None)
return True
def mouse_pressed(self, x, y, modifiers):
if super().mouse_pressed(x, y, modifiers):
return True
self.__pressed = True
self.start = float(x)/self.canvas_size[1]
self.__set_display_limits((self.start, self.start))
return True
def mouse_released(self, x, y, modifiers):
if super().mouse_released(x, y, modifiers):
return True
self.__pressed = False
display_limit_range = self.__adornments_canvas_item.display_limits[1] - self.__adornments_canvas_item.display_limits[0]
if 0 < display_limit_range < 1:
if callable(self.on_set_display_limits):
self.on_set_display_limits(self.__adornments_canvas_item.display_limits)
self.__set_display_limits((0, 1))
return True
def mouse_position_changed(self, x, y, modifiers):
if callable(self.__cursor_changed):
self.__cursor_changed(x / self.canvas_size[1])
if super().mouse_position_changed(x, y, modifiers):
return True
canvas_width = self.canvas_size[1]
if self.__pressed:
current = float(x)/canvas_width
self.__set_display_limits((min(self.start, current), max(self.start, current)))
return True
def mouse_exited(self) -> bool:
if callable(self.__cursor_changed):
self.__cursor_changed(None)
return True
class HistogramWidgetData:
def __init__(self, data=None, display_range=None):
self.data = data
self.display_range = display_range
class HistogramWidget(Widgets.CompositeWidgetBase):
def __init__(self, document_controller, display_item_stream, histogram_widget_data_model, color_map_data_model, cursor_changed_fn):
super().__init__(document_controller.ui.create_column_widget(properties={"min-height": 84, "max-height": 84}))
ui = document_controller.ui
self.__display_item_stream = display_item_stream.add_ref()
self.__histogram_data_model = histogram_widget_data_model
self.__color_map_data_model = color_map_data_model
self.__display_range = None
def histogram_data_changed(key: str) -> None:
if key == "value":
histogram_widget_data = self.__histogram_data_model.value
self.__histogram_canvas_item._set_histogram_data(histogram_widget_data.data)
self.__display_range = histogram_widget_data.display_range
self.__histogram_data_property_changed_event_listener = self.__histogram_data_model.property_changed_event.listen(histogram_data_changed)
def set_display_limits(display_limits):
# display_limits in this context are in the range of 0,1
# we ask for the display_range from the display to get actual
# data values (never None), and create new display limits
# based on those data values combined with display_limits.
# then we set the display_limits on the display, which have
# the same units as the data values.
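            # Worked example (hypothetical numbers): with a display_range of
            # (0.0, 200.0) and display_limits of (0.25, 0.75), the new display
            # limits become (50.0, 150.0).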
display_item = self.__display_item_stream.value
display_data_channel = display_item.display_data_channel if display_item else None
if display_data_channel:
new_display_limits = None
if display_limits is not None and self.__display_range is not None:
data_min, data_max = self.__display_range
lower_display_limit = data_min + display_limits[0] * (data_max - data_min)
upper_display_limit = data_min + display_limits[1] * (data_max - data_min)
new_display_limits = (lower_display_limit, upper_display_limit)
command = DisplayPanel.ChangeDisplayDataChannelCommand(document_controller.document_model, display_data_channel, display_limits=new_display_limits, title=_("Change Display Limits"))
command.perform()
document_controller.push_undo_command(command)
def cursor_changed(canvas_x):
if callable(cursor_changed_fn):
cursor_changed_fn(canvas_x, self.__display_range)
# create a canvas widget for this panel and put a histogram canvas item in it.
self.__histogram_canvas_item = HistogramCanvasItem(cursor_changed)
self.__histogram_canvas_item.on_set_display_limits = set_display_limits
histogram_widget = ui.create_canvas_widget()
histogram_widget.canvas_item.add_canvas_item(self.__histogram_canvas_item)
def handle_update_color_map_data(color_map_data):
self.__histogram_canvas_item.color_map_data = color_map_data
def color_map_data_changed(key: str) -> None:
if key == "value":
self.__histogram_canvas_item.color_map_data = self.__color_map_data_model.value
self.__color_map_data_stream_listener = self.__color_map_data_model.property_changed_event.listen(color_map_data_changed)
histogram_data_changed("value")
color_map_data_changed("value")
self.content_widget.add(histogram_widget)
def close(self):
self.__color_map_data_stream_listener.close()
self.__color_map_data_stream_listener = None
self.__display_item_stream.remove_ref()
self.__display_item_stream = None
self.__histogram_canvas_item = None
self.__histogram_data_property_changed_event_listener.close()
self.__histogram_data_property_changed_event_listener = None
super().close()
def _recompute(self):
pass
@property
def _histogram_canvas_item(self):
return self.__histogram_canvas_item
@property
def _histogram_data_func_value_model(self):
# for testing
return self.__histogram_data_model
class StatisticsWidget(Widgets.CompositeWidgetBase):
def __init__(self, ui, statistics_model):
super().__init__(ui.create_column_widget(properties={"min-height": 18 * 3, "max-height": 18 * 3}))
# create property models for the UI
self._stats1_property = Model.PropertyModel(str())
self._stats2_property = Model.PropertyModel(str())
self.__statistics_model = statistics_model
def statistics_changed(key: str) -> None:
if key == "value":
statistics_data = self.__statistics_model.value
statistic_strings = list()
for key in sorted(statistics_data.keys()):
value = statistics_data[key]
if value is not None:
statistic_str = "{0} {1}".format(key, value)
else:
statistic_str = "{0} {1}".format(key, _("N/A"))
statistic_strings.append(statistic_str)
self._stats1_property.value = "\n".join(statistic_strings[:(len(statistic_strings) + 1) // 2])
self._stats2_property.value = "\n".join(statistic_strings[(len(statistic_strings) + 1) // 2:])
self.__statistics_property_changed_event_listener = self.__statistics_model.property_changed_event.listen(statistics_changed)
statistics_changed("value")
stats_column1 = ui.create_column_widget(properties={"min-width": 140, "max-width": 140})
stats_column2 = ui.create_column_widget(properties={"min-width": 140, "max-width": 140})
stats_column1_label = ui.create_label_widget()
stats_column2_label = ui.create_label_widget()
stats_column1.add(stats_column1_label)
stats_column2.add(stats_column2_label)
stats_section = ui.create_row_widget()
stats_section.add_spacing(13)
stats_section.add(stats_column1)
stats_section.add_stretch()
stats_section.add(stats_column2)
stats_section.add_spacing(13)
stats_column1_label.bind_text(Binding.PropertyBinding(self._stats1_property, "value"))
stats_column2_label.bind_text(Binding.PropertyBinding(self._stats2_property, "value"))
self.content_widget.add(stats_section)
def close(self):
self.__statistics_property_changed_event_listener.close()
self.__statistics_property_changed_event_listener = None
super().close()
@property
def _statistics_func_value_model(self):
# for testing
return self.__statistics_model
def _recompute(self):
pass
# import asyncio
class HistogramPanel(Panel.Panel):
""" A panel to present a histogram of the selected data item. """
def __init__(self, document_controller, panel_id, properties, debounce=True, sample=True):
super().__init__(document_controller, panel_id, _("Histogram"))
def calculate_region_data(display_data_and_metadata, region):
if region is not None and display_data_and_metadata is not None:
if display_data_and_metadata.is_data_1d and isinstance(region, Graphics.IntervalGraphic):
interval = region.interval
if 0 <= interval[0] < 1 and 0 < interval[1] <= 1:
start, end = int(interval[0] * display_data_and_metadata.data_shape[0]), int(interval[1] * display_data_and_metadata.data_shape[0])
if end - start >= 1:
cropped_data_and_metadata = Core.function_crop_interval(display_data_and_metadata, interval)
if cropped_data_and_metadata:
return cropped_data_and_metadata
elif display_data_and_metadata.is_data_2d and isinstance(region, Graphics.RectangleTypeGraphic):
cropped_data_and_metadata = Core.function_crop(display_data_and_metadata, region.bounds)
if cropped_data_and_metadata:
return cropped_data_and_metadata
return display_data_and_metadata
def calculate_region_data_func(display_data_and_metadata, region):
return functools.partial(calculate_region_data, display_data_and_metadata, region)
def calculate_histogram_widget_data(display_data_and_metadata_func, display_range):
bins = 320
subsample = 0 # hard coded subsample size
subsample_fraction = None # fraction of total pixels
subsample_min = 1024 # minimum subsample size
display_data_and_metadata = display_data_and_metadata_func()
display_data = display_data_and_metadata.data if display_data_and_metadata else None
if display_data is not None:
total_pixels = numpy.product(display_data.shape, dtype=numpy.uint64)
if not subsample and subsample_fraction:
subsample = min(max(total_pixels * subsample_fraction, subsample_min), total_pixels)
if subsample:
factor = total_pixels / subsample
data_sample = numpy.random.choice(display_data.reshape(numpy.product(display_data.shape, dtype=numpy.uint64)), subsample)
else:
factor = 1.0
data_sample = numpy.copy(display_data)
if display_range is None or data_sample is None:
return HistogramWidgetData()
histogram_data = factor * numpy.histogram(data_sample, range=display_range, bins=bins)[0]
histogram_max = numpy.max(histogram_data) # assumes that histogram_data is int
if histogram_max > 0:
histogram_data = histogram_data / float(histogram_max)
return HistogramWidgetData(histogram_data, display_range)
return HistogramWidgetData()
def calculate_histogram_widget_data_func(display_data_and_metadata_model_func, display_range):
return functools.partial(calculate_histogram_widget_data, display_data_and_metadata_model_func, display_range)
display_item_stream = TargetDisplayItemStream(document_controller)
display_data_channel_stream = StreamPropertyStream(display_item_stream, "display_data_channel")
region_stream = TargetRegionStream(display_item_stream)
def compare_data(a, b):
return numpy.array_equal(a.data if a else None, b.data if b else None)
display_data_and_metadata_stream = DisplayDataChannelTransientsStream(display_data_channel_stream, "display_data_and_metadata", cmp=compare_data)
display_range_stream = DisplayDataChannelTransientsStream(display_data_channel_stream, "display_range")
region_data_and_metadata_func_stream = Stream.CombineLatestStream((display_data_and_metadata_stream, region_stream), calculate_region_data_func)
histogram_widget_data_func_stream = Stream.CombineLatestStream((region_data_and_metadata_func_stream, display_range_stream), calculate_histogram_widget_data_func)
color_map_data_stream = StreamPropertyStream(display_data_channel_stream, "color_map_data", cmp=numpy.array_equal)
if debounce:
histogram_widget_data_func_stream = Stream.DebounceStream(histogram_widget_data_func_stream, 0.05, document_controller.event_loop)
if sample:
histogram_widget_data_func_stream = Stream.SampleStream(histogram_widget_data_func_stream, 0.5, document_controller.event_loop)
def cursor_changed_fn(canvas_x: float, display_range) -> None:
if not canvas_x:
document_controller.cursor_changed(None)
if display_item_stream and display_item_stream.value and canvas_x:
if display_range is not None: # can be None with empty data
displayed_intensity_calibration = display_item_stream.value.displayed_intensity_calibration
adjusted_x = display_range[0] + canvas_x * (display_range[1] - display_range[0])
adjusted_x = displayed_intensity_calibration.convert_to_calibrated_value_str(adjusted_x)
document_controller.cursor_changed([_('Intensity: ') + str(adjusted_x)])
else:
document_controller.cursor_changed(None)
self.__histogram_widget_data_model = Model.FuncStreamValueModel(histogram_widget_data_func_stream, document_controller.event_loop, value=HistogramWidgetData(), cmp=numpy.array_equal)
self.__color_map_data_model = Model.StreamValueModel(color_map_data_stream, cmp=numpy.array_equal)
self._histogram_widget = HistogramWidget(document_controller, display_item_stream, self.__histogram_widget_data_model, self.__color_map_data_model, cursor_changed_fn)
def calculate_statistics(display_data_and_metadata_func, display_data_range, region, displayed_intensity_calibration):
display_data_and_metadata = display_data_and_metadata_func()
data = display_data_and_metadata.data if display_data_and_metadata else None
data_range = display_data_range
if data is not None and data.size > 0 and displayed_intensity_calibration:
mean = numpy.mean(data)
std = numpy.std(data)
rms = numpy.sqrt(numpy.mean(numpy.square(numpy.absolute(data))))
sum_data = mean * functools.reduce(operator.mul, Image.dimensional_shape_from_shape_and_dtype(data.shape, data.dtype))
if region is None:
data_min, data_max = data_range if data_range is not None else (None, None)
else:
data_min, data_max = numpy.amin(data), numpy.amax(data)
mean_str = displayed_intensity_calibration.convert_to_calibrated_value_str(mean)
std_str = displayed_intensity_calibration.convert_to_calibrated_value_str(std)
data_min_str = displayed_intensity_calibration.convert_to_calibrated_value_str(data_min)
data_max_str = displayed_intensity_calibration.convert_to_calibrated_value_str(data_max)
rms_str = displayed_intensity_calibration.convert_to_calibrated_value_str(rms)
sum_data_str = displayed_intensity_calibration.convert_to_calibrated_value_str(sum_data)
return { "mean": mean_str, "std": std_str, "min": data_min_str, "max": data_max_str, "rms": rms_str, "sum": sum_data_str }
return dict()
def calculate_statistics_func(display_data_and_metadata_model_func, display_data_range, region, displayed_intensity_calibration):
return functools.partial(calculate_statistics, display_data_and_metadata_model_func, display_data_range, region, displayed_intensity_calibration)
display_data_range_stream = DisplayDataChannelTransientsStream(display_data_channel_stream, "data_range")
displayed_intensity_calibration_stream = StreamPropertyStream(display_item_stream, 'displayed_intensity_calibration')
statistics_func_stream = Stream.CombineLatestStream((region_data_and_metadata_func_stream, display_data_range_stream, region_stream, displayed_intensity_calibration_stream), calculate_statistics_func)
if debounce:
statistics_func_stream = Stream.DebounceStream(statistics_func_stream, 0.05, document_controller.event_loop)
if sample:
statistics_func_stream = Stream.SampleStream(statistics_func_stream, 0.5, document_controller.event_loop)
self.__statistics_model = Model.FuncStreamValueModel(statistics_func_stream, document_controller.event_loop, value=dict(), cmp=numpy.array_equal)
self._statistics_widget = StatisticsWidget(self.ui, self.__statistics_model)
# create the main column with the histogram and the statistics section
column = self.ui.create_column_widget(properties={"height": 80 + 18 * 3 + 12})
column.add(self._histogram_widget)
column.add_spacing(6)
column.add(self._statistics_widget)
column.add_spacing(6)
column.add_stretch()
# this is necessary to make the panel happy
self.widget = column
def close(self):
self.__histogram_widget_data_model.close()
self.__histogram_widget_data_model = None
self.__color_map_data_model.close()
self.__color_map_data_model = None
self.__statistics_model.close()
self.__statistics_model = None
super().close()
class TargetDataItemStream(Stream.AbstractStream):
def __init__(self, document_controller):
super().__init__()
# outgoing messages
self.value_stream = Event.Event()
# cached values
self.__value = None
# listen for selected data item changes
        self.__focused_display_item_changed_event_listener = document_controller.focused_display_item_changed_event.listen(self.__focused_display_item_changed)
# manually send the first data item changed message to set things up.
self.__focused_display_item_changed(document_controller.selected_display_item)
def close(self):
# disconnect data item binding
self.__focused_display_item_changed(None)
self.__focused_display_item_changed_event_listener.close()
self.__focused_display_item_changed_event_listener = None
super().close()
@property
def value(self):
return self.__value
def __focused_display_item_changed(self, display_item: typing.Optional[DisplayItem.DisplayItem]) -> None:
data_item = display_item.data_item if display_item else None
if data_item != self.__value:
self.__value = data_item
self.value_stream.fire(data_item)
class TargetDisplayItemStream(Stream.AbstractStream):
def __init__(self, document_controller):
super().__init__()
# outgoing messages
self.value_stream = Event.Event()
# cached values
self.__value = None
# listen for selected data item changes
self.__focused_display_item_changed_event_listener = document_controller.focused_display_item_changed_event.listen(self.__focused_display_item_changed)
# manually send the first data item changed message to set things up.
self.__focused_display_item_changed(document_controller.selected_display_item)
def close(self):
# disconnect data item binding
self.__focused_display_item_changed(None)
self.__focused_display_item_changed_event_listener.close()
self.__focused_display_item_changed_event_listener = None
super().close()
@property
def value(self):
return self.__value
def __focused_display_item_changed(self, display_item: typing.Optional[DisplayItem.DisplayItem]) -> None:
if display_item != self.__value:
self.__value = display_item
self.value_stream.fire(display_item)
class TargetRegionStream(Stream.AbstractStream):
def __init__(self, display_item_stream):
super().__init__()
# outgoing messages
self.value_stream = Event.Event()
# references
self.__display_item_stream = display_item_stream.add_ref()
# initialize
self.__display_graphic_selection_changed_event_listener = None
self.__value = None
# listen for display changes
self.__display_stream_listener = display_item_stream.value_stream.listen(self.__display_item_changed)
self.__graphic_changed_event_listener = None
self.__graphic_about_to_be_removed_event_listener = None
self.__display_item_changed(display_item_stream.value)
def close(self):
self.__display_item_changed(None)
self.__display_stream_listener.close()
self.__display_stream_listener = None
self.__display_item_stream.remove_ref()
self.__display_item_stream = None
super().close()
@property
def value(self):
return self.__value
def __display_item_changed(self, display_item):
def display_graphic_selection_changed(graphic_selection):
current_index = graphic_selection.current_index
if current_index is not None:
new_value = display_item.graphics[current_index]
if new_value != self.__value:
self.__value = new_value
def graphic_changed():
self.value_stream.fire(self.__value)
def graphic_removed():
self.__value = None
self.value_stream.fire(None)
if self.__graphic_changed_event_listener:
self.__graphic_changed_event_listener.close()
self.__graphic_changed_event_listener = None
if self.__graphic_about_to_be_removed_event_listener:
self.__graphic_about_to_be_removed_event_listener.close()
self.__graphic_about_to_be_removed_event_listener = None
if self.__value:
self.__graphic_changed_event_listener = self.__value.graphic_changed_event.listen(graphic_changed)
self.__graphic_about_to_be_removed_event_listener = self.__value.about_to_be_removed_event.listen(graphic_removed)
graphic_changed()
elif self.__value is not None:
self.__value = None
if self.__graphic_changed_event_listener:
self.__graphic_changed_event_listener.close()
self.__graphic_changed_event_listener = None
if self.__graphic_about_to_be_removed_event_listener:
self.__graphic_about_to_be_removed_event_listener.close()
self.__graphic_about_to_be_removed_event_listener = None
self.value_stream.fire(None)
if self.__graphic_changed_event_listener:
self.__graphic_changed_event_listener.close()
self.__graphic_changed_event_listener = None
if self.__graphic_about_to_be_removed_event_listener:
self.__graphic_about_to_be_removed_event_listener.close()
self.__graphic_about_to_be_removed_event_listener = None
if self.__display_graphic_selection_changed_event_listener:
self.__display_graphic_selection_changed_event_listener.close()
self.__display_graphic_selection_changed_event_listener = None
if display_item:
self.__display_graphic_selection_changed_event_listener = display_item.graphic_selection_changed_event.listen(display_graphic_selection_changed)
display_graphic_selection_changed(display_item.graphic_selection)
elif self.__value is not None:
self.__value = None
self.value_stream.fire(None)
class StreamPropertyStream(Stream.ConcatStream):
def __init__(self, stream, property_name, cmp=None):
super().__init__(stream, lambda x: Stream.PropertyChangedEventStream(x, property_name, cmp))
class DisplayDataChannelTransientsStream(Stream.AbstractStream):
# TODO: add a display_data_changed to Display class and use it here
def __init__(self, display_data_channel_stream, property_name, cmp=None):
super().__init__()
# outgoing messages
self.value_stream = Event.Event()
# initialize
self.__property_name = property_name
self.__value = None
self.__display_values_changed_listener = None
self.__next_calculated_display_values_listener = None
self.__cmp = cmp if cmp else operator.eq
# listen for display changes
self.__display_data_channel_stream = display_data_channel_stream.add_ref()
self.__display_data_channel_stream_listener = display_data_channel_stream.value_stream.listen(self.__display_data_channel_changed)
self.__display_data_channel_changed(display_data_channel_stream.value)
def close(self):
self.__display_data_channel_changed(None)
self.__display_data_channel_stream_listener.close()
self.__display_data_channel_stream_listener = None
self.__display_data_channel_stream.remove_ref()
self.__display_data_channel_stream = None
super().close()
@property
def value(self):
return self.__value
def __display_data_channel_changed(self, display_data_channel):
def display_values_changed():
display_values = display_data_channel.get_calculated_display_values(True)
new_value = getattr(display_values, self.__property_name) if display_values else None
if not self.__cmp(new_value, self.__value):
self.__value = new_value
self.value_stream.fire(self.__value)
if self.__next_calculated_display_values_listener:
self.__next_calculated_display_values_listener.close()
self.__next_calculated_display_values_listener = None
if self.__display_values_changed_listener:
self.__display_values_changed_listener.close()
self.__display_values_changed_listener = None
if display_data_channel:
# there are two listeners - the first when new display properties have triggered new display values.
# the second whenever actual new display values arrive. this ensures the display gets updated after
# the user changes it. could use some rethinking.
self.__next_calculated_display_values_listener = display_data_channel.add_calculated_display_values_listener(display_values_changed)
self.__display_values_changed_listener = display_data_channel.display_values_changed_event.listen(display_values_changed)
display_values_changed()
else:
self.__value = None
self.value_stream.fire(None)
| gpl-3.0 | -7,656,245,261,976,129,000 | 44.968254 | 208 | 0.638467 | false |
leighpauls/k2cro4 | third_party/webdriver/pylib/test/selenium/webdriver/common/google_one_box.py | 51 | 1418 | # Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from selenium.common.exceptions import NoSuchElementException
from results_page import ResultsPage
from page_loader import require_loaded
class GoogleOneBox(object):
"""This class models a page that has a google search bar."""
def __init__(self, driver, url):
self._driver = driver
self._url = url
def is_loaded(self):
try :
self._driver.find_element_by_name("q")
return True
except NoSuchElementException:
return False
def load(self):
self._driver.get(self._url)
@require_loaded
def search_for(self, search_term):
element = self._driver.find_element_by_name("q")
element.send_keys(search_term)
element.submit()
return ResultsPage(self._driver)
| bsd-3-clause | -8,576,295,042,937,328,000 | 32.761905 | 74 | 0.688293 | false |
elit3ge/SickRage | sickbeard/providers/binsearch.py | 7 | 3930 | # Author: moparisthebest <[email protected]>
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import urllib
import re
import generic
from sickbeard import logger
from sickbeard import tvcache
class BinSearchProvider(generic.NZBProvider):
def __init__(self):
generic.NZBProvider.__init__(self, "BinSearch")
self.enabled = False
self.public = True
self.cache = BinSearchCache(self)
self.urls = {'base_url': 'https://www.binsearch.info/'}
self.url = self.urls['base_url']
def isEnabled(self):
return self.enabled
class BinSearchCache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
# only poll Binsearch every 30 minutes max
self.minTime = 30
# compile and save our regular expressions
# this pulls the title from the URL in the description
self.descTitleStart = re.compile('^.*https?://www\.binsearch\.info/.b=')
self.descTitleEnd = re.compile('&.*$')
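        # Illustrative example (not from the original code): a description such as
        #   "https://www.binsearch.info/?b=Some.Show.S01E01.720p&g=alt.binaries.hdtv"
        # is reduced to "Some.Show.S01E01.720p" by stripping everything up to "b="
        # and everything from the following "&" onwards; '+' is later replaced by '.'.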
# these clean up the horrible mess of a title if the above fail
self.titleCleaners = [
re.compile('.?yEnc.?\(\d+/\d+\)$'),
re.compile(' \[\d+/\d+\] '),
]
def _get_title_and_url(self, item):
"""
Retrieves the title and URL data from the item XML node
item: An elementtree.ElementTree element representing the <item> tag of the RSS feed
Returns: A tuple containing two strings representing title and URL respectively
"""
title = item.get('description')
if title:
title = u'' + title
if self.descTitleStart.match(title):
title = self.descTitleStart.sub('', title)
title = self.descTitleEnd.sub('', title)
title = title.replace('+', '.')
else:
# just use the entire title, looks hard/impossible to parse
title = item.get('title')
if title:
for titleCleaner in self.titleCleaners:
title = titleCleaner.sub('', title)
url = item.get('link')
if url:
url = url.replace('&', '&')
return (title, url)
def updateCache(self):
# check if we should update
if not self.shouldUpdate():
return
# clear cache
self._clearCache()
# set updated
self.setLastUpdate()
cl = []
for group in ['alt.binaries.boneless','alt.binaries.misc','alt.binaries.hdtv','alt.binaries.hdtv.x264','alt.binaries.tv','alt.binaries.tvseries','alt.binaries.teevee']:
url = self.provider.url + 'rss.php?'
urlArgs = {'max': 1000,'g': group}
url += urllib.urlencode(urlArgs)
logger.log(u"BinSearch cache update URL: " + url, logger.DEBUG)
for item in self.getRSSFeed(url)['entries'] or []:
ci = self._parseItem(item)
if ci is not None:
cl.append(ci)
if len(cl) > 0:
myDB = self._getDB()
myDB.mass_action(cl)
def _checkAuth(self, data):
return data if data['feed'] and data['feed']['title'] != 'Invalid Link' else None
provider = BinSearchProvider()
| gpl-3.0 | -7,481,161,041,487,897,000 | 32.589744 | 176 | 0.599237 | false |
kchodorow/tensorflow | tensorflow/python/client/session_benchmark.py | 32 | 4750 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests and benchmarks for interacting with the `tf.Session`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from six.moves import xrange  # pylint: disable=redefined-builtin
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class SessionBenchmark(test.Benchmark):
"""Tests and benchmarks for interacting with the `tf.Session`."""
def _benchmarkFeed(self, name, target, size, iters):
"""Runs a microbenchmark to measure the cost of feeding a tensor.
Reports the median cost of feeding a tensor of `size` * `sizeof(float)`
bytes.
Args:
name: A human-readable name for logging the output.
target: The session target to use for the benchmark.
      size: The number of floating-point numbers to be fed.
iters: The number of iterations to perform.
"""
feed_val = np.random.rand(size).astype(np.float32)
times = []
with ops.Graph().as_default():
p = array_ops.placeholder(dtypes.float32, shape=[size])
# Fetch the operation rather than the tensor, to avoid measuring the time
# to fetch back the value.
no_op = array_ops.identity(p).op
with session.Session(target) as sess:
sess.run(no_op, feed_dict={p: feed_val}) # Warm-up run.
for _ in xrange(iters):
start_time = time.time()
sess.run(no_op, feed_dict={p: feed_val})
end_time = time.time()
times.append(end_time - start_time)
print("%s %d %f" % (name, size, np.median(times)))
self.report_benchmark(iters=1, wall_time=np.median(times), name=name)
def _benchmarkFetch(self, name, target, size, iters):
"""Runs a microbenchmark to measure the cost of fetching a tensor.
Reports the median cost of fetching a tensor of `size` * `sizeof(float)`
bytes.
Args:
name: A human-readable name for logging the output.
target: The session target to use for the benchmark.
size: The number of floating-point numbers to be fetched.
iters: The number of iterations to perform.
"""
times = []
with ops.Graph().as_default():
# Define the tensor to be fetched as a variable, to avoid
# constant-folding.
v = variables.Variable(random_ops.random_normal([size]))
with session.Session(target) as sess:
sess.run(v.initializer)
sess.run(v) # Warm-up run.
for _ in xrange(iters):
start_time = time.time()
sess.run(v)
end_time = time.time()
times.append(end_time - start_time)
print("%s %d %f" % (name, size, np.median(times)))
self.report_benchmark(iters=1, wall_time=np.median(times), name=name)
def benchmarkGrpcSession(self):
server = server_lib.Server.create_local_server()
self._benchmarkFeed("benchmark_session_feed_grpc_4B", server.target, 1,
10000)
session.Session.reset(server.target)
self._benchmarkFeed("benchmark_session_feed_grpc_4MB", server.target, 1
<< 20, 100)
session.Session.reset(server.target)
self._benchmarkFetch("benchmark_session_fetch_grpc_4B", server.target, 1,
20000)
session.Session.reset(server.target)
self._benchmarkFetch("benchmark_session_fetch_grpc_4MB", server.target, 1
<< 20, 100)
session.Session.reset(server.target)
def benchmarkDirectSession(self):
self._benchmarkFeed("benchmark_session_feed_direct_4B", "", 1, 5000)
self._benchmarkFeed("benchmark_session_feed_direct_4MB", "", 1 << 20, 200)
self._benchmarkFetch("benchmark_session_fetch_direct_4B", "", 1, 5000)
self._benchmarkFetch("benchmark_session_fetch_direct_4MB", "", 1 << 20, 100)
if __name__ == "__main__":
test.main()
| apache-2.0 | -6,427,539,207,456,145,000 | 39.254237 | 80 | 0.665263 | false |
tatsuy/ardupilot | Tools/LogAnalyzer/tests/TestDualGyroDrift.py | 73 | 5485 | from __future__ import print_function
from LogAnalyzer import Test,TestResult
import DataflashLog
# import scipy
# import pylab #### TEMP!!! only for dev
# from scipy import signal
class TestDualGyroDrift(Test):
'''test for gyro drift between dual IMU data'''
def __init__(self):
Test.__init__(self)
self.name = "Gyro Drift"
self.enable = False
def run(self, logdata, verbose):
self.result = TestResult()
self.result.status = TestResult.StatusType.GOOD
# if "IMU" not in logdata.channels or "IMU2" not in logdata.channels:
# self.result.status = TestResult.StatusType.NA
# return
# imuX = logdata.channels["IMU"]["GyrX"].listData
# imu2X = logdata.channels["IMU2"]["GyrX"].listData
# # NOTE: weird thing about Holger's log is that the counts of IMU+IMU2 are different
# print("length 1: %.2f, length 2: %.2f" % (len(imuX),len(imu2X)))
# #assert(len(imuX) == len(imu2X))
# # divide the curve into segments and get the average of each segment
# # we will get the diff between those averages, rather than a per-sample diff as the IMU+IMU2 arrays are often not the same length
# diffThresholdWARN = 0.03
# diffThresholdFAIL = 0.05
# nSamples = 10
# imu1XAverages, imu1YAverages, imu1ZAverages, imu2XAverages, imu2YAverages, imu2ZAverages = ([],[],[],[],[],[])
# imuXDiffAverages, imuYDiffAverages, imuZDiffAverages = ([],[],[])
# maxDiffX, maxDiffY, maxDiffZ = (0,0,0)
# sliceLength1 = len(logdata.channels["IMU"]["GyrX"].dictData.values()) / nSamples
# sliceLength2 = len(logdata.channels["IMU2"]["GyrX"].dictData.values()) / nSamples
# for i in range(0,nSamples):
# imu1XAverages.append(numpy.mean(logdata.channels["IMU"]["GyrX"].dictData.values()[i*sliceLength1:i*sliceLength1+sliceLength1]))
# imu1YAverages.append(numpy.mean(logdata.channels["IMU"]["GyrY"].dictData.values()[i*sliceLength1:i*sliceLength1+sliceLength1]))
# imu1ZAverages.append(numpy.mean(logdata.channels["IMU"]["GyrZ"].dictData.values()[i*sliceLength1:i*sliceLength1+sliceLength1]))
# imu2XAverages.append(numpy.mean(logdata.channels["IMU2"]["GyrX"].dictData.values()[i*sliceLength2:i*sliceLength2+sliceLength2]))
# imu2YAverages.append(numpy.mean(logdata.channels["IMU2"]["GyrY"].dictData.values()[i*sliceLength2:i*sliceLength2+sliceLength2]))
# imu2ZAverages.append(numpy.mean(logdata.channels["IMU2"]["GyrZ"].dictData.values()[i*sliceLength2:i*sliceLength2+sliceLength2]))
# imuXDiffAverages.append(imu2XAverages[-1]-imu1XAverages[-1])
# imuYDiffAverages.append(imu2YAverages[-1]-imu1YAverages[-1])
# imuZDiffAverages.append(imu2ZAverages[-1]-imu1ZAverages[-1])
# if abs(imuXDiffAverages[-1]) > maxDiffX:
# maxDiffX = imuXDiffAverages[-1]
# if abs(imuYDiffAverages[-1]) > maxDiffY:
# maxDiffY = imuYDiffAverages[-1]
# if abs(imuZDiffAverages[-1]) > maxDiffZ:
# maxDiffZ = imuZDiffAverages[-1]
# if max(maxDiffX,maxDiffY,maxDiffZ) > diffThresholdFAIL:
# self.result.status = TestResult.StatusType.FAIL
# self.result.statusMessage = "IMU/IMU2 gyro averages differ by more than %s radians" % diffThresholdFAIL
# elif max(maxDiffX,maxDiffY,maxDiffZ) > diffThresholdWARN:
# self.result.status = TestResult.StatusType.WARN
# self.result.statusMessage = "IMU/IMU2 gyro averages differ by more than %s radians" % diffThresholdWARN
# # pylab.plot(zip(*imuX)[0], zip(*imuX)[1], 'g')
# # pylab.plot(zip(*imu2X)[0], zip(*imu2X)[1], 'r')
# #pylab.plot(range(0,(nSamples*sliceLength1),sliceLength1), imu1ZAverages, 'b')
# print("Gyro averages1X: " + repr(imu1XAverages))
# print("Gyro averages1Y: " + repr(imu1YAverages))
# print("Gyro averages1Z: " + repr(imu1ZAverages) + "\n")
# print("Gyro averages2X: " + repr(imu2XAverages))
# print("Gyro averages2Y: " + repr(imu2YAverages))
# print("Gyro averages2Z: " + repr(imu2ZAverages) + "\n")
# print("Gyro averages diff X: " + repr(imuXDiffAverages))
# print("Gyro averages diff Y: " + repr(imuYDiffAverages))
# print("Gyro averages diff Z: " + repr(imuZDiffAverages))
# # lowpass filter using numpy
# # cutoff = 100
# # fs = 10000.0
# # b,a = scipy.signal.filter_design.butter(5,cutoff/(fs/2))
# # imuXFiltered = scipy.signal.filtfilt(b,a,zip(*imuX)[1])
# # imu2XFiltered = scipy.signal.filtfilt(b,a,zip(*imu2X)[1])
# #pylab.plot(imuXFiltered, 'r')
# # TMP: DISPLAY BEFORE+AFTER plots
# pylab.show()
# # print("imuX average before lowpass filter: %.8f" % logdata.channels["IMU"]["GyrX"].avg())
# # print("imuX average after lowpass filter: %.8f" % numpy.mean(imuXFiltered))
# # print("imu2X average before lowpass filter: %.8f" % logdata.channels["IMU2"]["GyrX"].avg())
# # print("imu2X average after lowpass filter: %.8f" % numpy.mean(imu2XFiltered))
# avg1X = logdata.channels["IMU"]["GyrX"].avg()
# avg1Y = logdata.channels["IMU"]["GyrY"].avg()
# avg1Z = logdata.channels["IMU"]["GyrZ"].avg()
# avg2X = logdata.channels["IMU2"]["GyrX"].avg()
# avg2Y = logdata.channels["IMU2"]["GyrY"].avg()
# avg2Z = logdata.channels["IMU2"]["GyrZ"].avg()
# avgRatioX = (max(avg1X,avg2X) - min(avg1X,avg2X)) / #abs(max(avg1X,avg2X) / min(avg1X,avg2X))
# avgRatioY = abs(max(avg1Y,avg2Y) / min(avg1Y,avg2Y))
# avgRatioZ = abs(max(avg1Z,avg2Z) / min(avg1Z,avg2Z))
# self.result.statusMessage = "IMU gyro avg: %.4f,%.4f,%.4f\nIMU2 gyro avg: %.4f,%.4f,%.4f\nAvg ratio: %.4f,%.4f,%.4f" % (avg1X,avg1Y,avg1Z, avg2X,avg2Y,avg2Z, avgRatioX,avgRatioY,avgRatioZ)
| gpl-3.0 | -2,669,074,225,204,437,500 | 44.330579 | 193 | 0.687511 | false |
fragro/django-postman | setup.py | 6 | 1183 | from setuptools import setup, find_packages
setup(
name='django-postman',
version=__import__('postman').__version__,
description='User-to-User messaging system for Django, with gateway to AnonymousUser,' \
' moderation and thread management, user & exchange filters, inbox/sent/archives/trash folders,' \
' support for apps: auto-complete, notification, mailer.',
long_description=open('docs/index.rst').read().split('\n----\n', 1)[0],
author='Patrick Samson',
author_email='[email protected]',
url='http://bitbucket.org/psam/django-postman/overview',
license='BSD',
packages=find_packages(exclude=('docs',)),
include_package_data=True,
keywords='django messages messaging email moderation',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Communications :: Email',
],
install_requires=[
'Django',
],
)
| bsd-3-clause | 622,612,458,379,260,200 | 37.16129 | 106 | 0.640744 | false |
konstruktoid/ansible-upstream | lib/ansible/modules/monitoring/icinga2_feature.py | 89 | 4294 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Loic Blot <[email protected]>
# Copyright (c) 2018, Ansible Project
# Sponsored by Infopro Digital. http://www.infopro-digital.com/
# Sponsored by E.T.A.I. http://www.etai.fr/
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: icinga2_feature
short_description: Manage Icinga2 feature
description:
- This module can be used to enable or disable an Icinga2 feature.
version_added: "2.3"
author: "Loic Blot (@nerzhul)"
options:
name:
description:
- This is the feature name to enable or disable.
required: True
state:
description:
- If set to C(present) and feature is disabled, then feature is enabled.
- If set to C(present) and feature is already enabled, then nothing is changed.
- If set to C(absent) and feature is enabled, then feature is disabled.
- If set to C(absent) and feature is already disabled, then nothing is changed.
choices: [ "present", "absent" ]
default: present
'''
EXAMPLES = '''
- name: Enable ido-pgsql feature
icinga2_feature:
name: ido-pgsql
state: present
- name: Disable api feature
icinga2_feature:
name: api
state: absent
'''
RETURN = '''
#
'''
import re
from ansible.module_utils.basic import AnsibleModule
class Icinga2FeatureHelper:
def __init__(self, module):
self.module = module
self._icinga2 = module.get_bin_path('icinga2', True)
self.feature_name = self.module.params['name']
self.state = self.module.params['state']
def _exec(self, args):
cmd = [self._icinga2, 'feature']
rc, out, err = self.module.run_command(cmd + args, check_rc=True)
return rc, out
def manage(self):
rc, out = self._exec(["list"])
if rc != 0:
self.module.fail_json(msg="Unable to list icinga2 features. "
"Ensure icinga2 is installed and present in binary path.")
# If feature is already in good state, just exit
if (re.search("Disabled features:.* %s[ \n]" % self.feature_name, out) and self.state == "absent") or \
(re.search("Enabled features:.* %s[ \n]" % self.feature_name, out) and self.state == "present"):
self.module.exit_json(changed=False)
if self.module.check_mode:
self.module.exit_json(changed=True)
feature_enable_str = "enable" if self.state == "present" else "disable"
rc, out = self._exec([feature_enable_str, self.feature_name])
change_applied = False
if self.state == "present":
if rc != 0:
self.module.fail_json(msg="Failed to %s feature %s."
" icinga2 command returned %s" % (feature_enable_str,
self.feature_name,
out))
if re.search("already enabled", out) is None:
change_applied = True
else:
if rc == 0:
change_applied = True
# RC is not 0 for this already disabled feature, handle it as no change applied
elif re.search("Cannot disable feature '%s'. Target file .* does not exist" % self.feature_name, out):
change_applied = False
else:
self.module.fail_json(msg="Failed to disable feature. Command returns %s" % out)
self.module.exit_json(changed=change_applied)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
state=dict(type='str', choices=["present", "absent"], default="present")
),
supports_check_mode=True
)
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
Icinga2FeatureHelper(module).manage()
if __name__ == '__main__':
main()
| gpl-3.0 | 261,821,691,930,976,100 | 31.530303 | 114 | 0.585235 | false |
Distrotech/intellij-community | python/lib/Lib/dummy_threading.py | 102 | 2900 | """Faux ``threading`` version using ``dummy_thread`` instead of ``thread``.
The module ``_dummy_threading`` is added to ``sys.modules`` in order
to not have ``threading`` considered imported. Had ``threading`` been
directly imported it would have made all subsequent imports succeed
regardless of whether ``thread`` was available which is not desired.
:Author: Brett Cannon
:Contact: [email protected]
XXX: Try to get rid of ``_dummy_threading``.
"""
from sys import modules as sys_modules
import dummy_thread
# Declaring now so as to not have to nest ``try``s to get proper clean-up.
holding_thread = False
holding_threading = False
holding__threading_local = False
try:
# Could have checked if ``thread`` was not in sys.modules and gone
# a different route, but decided to mirror technique used with
# ``threading`` below.
if 'thread' in sys_modules:
held_thread = sys_modules['thread']
holding_thread = True
# Must have some module named ``thread`` that implements its API
# in order to initially import ``threading``.
sys_modules['thread'] = sys_modules['dummy_thread']
if 'threading' in sys_modules:
# If ``threading`` is already imported, might as well prevent
# trying to import it more than needed by saving it if it is
# already imported before deleting it.
held_threading = sys_modules['threading']
holding_threading = True
del sys_modules['threading']
if '_threading_local' in sys_modules:
# If ``_threading_local`` is already imported, might as well prevent
# trying to import it more than needed by saving it if it is
# already imported before deleting it.
held__threading_local = sys_modules['_threading_local']
holding__threading_local = True
del sys_modules['_threading_local']
import threading
# Need a copy of the code kept somewhere...
sys_modules['_dummy_threading'] = sys_modules['threading']
del sys_modules['threading']
sys_modules['_dummy__threading_local'] = sys_modules['_threading_local']
del sys_modules['_threading_local']
from _dummy_threading import *
from _dummy_threading import __all__
finally:
# Put back ``threading`` if we overwrote earlier
if holding_threading:
sys_modules['threading'] = held_threading
del held_threading
del holding_threading
# Put back ``_threading_local`` if we overwrote earlier
if holding__threading_local:
sys_modules['_threading_local'] = held__threading_local
del held__threading_local
del holding__threading_local
# Put back ``thread`` if we overwrote, else del the entry we made
if holding_thread:
sys_modules['thread'] = held_thread
del held_thread
else:
del sys_modules['thread']
del holding_thread
del dummy_thread
del sys_modules
| apache-2.0 | 7,024,737,521,234,688,000 | 33.939759 | 76 | 0.674138 | false |
dssg/cincinnati2015-public | evaluation/webapp/evaluation.py | 1 | 5838 | from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import metrics
from webapp import config
def weighted_f1(scores):
f1_0 = scores["f1"][0] * scores["support"][0]
f1_1 = scores["f1"][1] * scores["support"][1]
return (f1_0 + f1_1) / (scores["support"][0] + scores["support"][1])
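# Illustrative input (assumed shape, not from the original code): `scores` is expected
# to map "f1" and "support" to per-class values indexed by label, e.g.
#   weighted_f1({"f1": {0: 0.9, 1: 0.5}, "support": {0: 900, 1: 100}})
# which returns the support-weighted average of the two per-class F1 scores.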
def plot_normalized_confusion_matrix(labels, predictions):
cutoff = 0.5
predictions_binary = np.copy(predictions)
predictions_binary[predictions_binary >= cutoff] = 1
predictions_binary[predictions_binary < cutoff] = 0
cm = metrics.confusion_matrix(labels, predictions_binary)
np.set_printoptions(precision=2)
fig = plt.figure()
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
target_names = ["No violation", "Violation"]
plt.imshow(cm_normalized, interpolation='nearest', cmap=plt.cm.Blues)
plt.title("Normalized Confusion Matrix")
plt.colorbar()
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, rotation=45)
plt.yticks(tick_marks, target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
return fig
def plot_feature_importances(feature_names, feature_importances):
importances = list(zip(feature_names, list(feature_importances)))
importances = pd.DataFrame(importances, columns=["Feature", "Importance"])
importances = importances.set_index("Feature")
importances = importances.sort(columns="Importance", ascending=False)
importances = importances[0:20]
with plt.style.context(('ggplot')):
fig, ax = plt.subplots()
importances.plot(kind="barh", legend=False, ax=ax)
plt.tight_layout()
plt.title("Feature importances (Top 20)")
return fig
def plot_growth(results):
results = pd.DataFrame(results, columns=["date", "score"])
results = results.set_index("date")
results["score"] = results["score"].astype(float)
results = results.reindex(pd.date_range(datetime(2015, 7, 28), datetime(2015, 8, 27)))
results["random"] = pd.Series(3409/float(6124), index=results.index)
with plt.style.context(('ggplot')):
fig, ax = plt.subplots(figsize=(8, 3))
results["score"].plot(legend=False, ax=ax, marker="x")
results["random"].plot(legend=False, ax=ax, style='--')
ax.set_ylabel(config.score_name)
plt.tight_layout()
ax.set_ylim(0.5, 1.0)
return fig
def precision_at_x_percent(test_labels, test_predictions, x_percent=0.01, return_cutoff=False):
cutoff_index = int(len(test_predictions) * x_percent)
cutoff_index = min(cutoff_index, len(test_predictions) -1)
sorted_by_probability = np.sort(test_predictions)[::-1]
cutoff_probability = sorted_by_probability[cutoff_index]
test_predictions_binary = np.copy(test_predictions)
test_predictions_binary[test_predictions_binary >= cutoff_probability] = 1
test_predictions_binary[test_predictions_binary < cutoff_probability] = 0
precision, _, _, _ = metrics.precision_recall_fscore_support(test_labels, test_predictions_binary)
precision = precision[1] # only interested in precision for label 1
if return_cutoff:
return precision, cutoff_probability
else:
return precision
def plot_precision_recall_n(test_labels, test_predictions):
y_score = test_predictions
precision_curve, recall_curve, pr_thresholds = metrics.precision_recall_curve(test_labels, y_score)
precision_curve = precision_curve[:-1]
recall_curve = recall_curve[:-1]
pct_above_per_thresh = []
number_scored = len(y_score)
for value in pr_thresholds:
num_above_thresh = len(y_score[y_score>=value])
pct_above_thresh = num_above_thresh / float(number_scored)
pct_above_per_thresh.append(pct_above_thresh)
pct_above_per_thresh = np.array(pct_above_per_thresh)
with plt.style.context(('ggplot')):
plt.clf()
fig, ax1 = plt.subplots()
ax1.plot(pct_above_per_thresh, precision_curve, "#000099")
ax1.set_xlabel('percent of population')
ax1.set_ylabel('precision', color="#000099")
plt.ylim([0.0, 1.0])
ax2 = ax1.twinx()
ax2.plot(pct_above_per_thresh, recall_curve, "#CC0000")
ax2.set_ylabel('recall', color="#CC0000")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.title("Precision-recall for top x%")
return fig
def plot_precision_cutoff(test_labels, test_predictions):
percent_range = [0.001* i for i in range(1, 10)] + [0.01 * i for i in range(1, 101)]
precisions_and_cutoffs = [precision_at_x_percent(test_labels, test_predictions, x_percent=p, return_cutoff=True)
for p in percent_range]
precisions, cutoffs = zip(*precisions_and_cutoffs)
with plt.style.context(('ggplot')):
fig, ax = plt.subplots()
ax.plot(percent_range, precisions, "#000099")
ax.set_xlabel('percent of population')
ax.set_ylabel('precision', color="#000099")
plt.ylim([0.0, 1.0])
ax2 = ax.twinx()
ax2.plot(percent_range, cutoffs, "#CC0000")
ax2.set_ylabel('cutoff at', color="#CC0000")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.title("Precision at x%")
return fig
def plot_ROC(test_labels, test_predictions):
fpr, tpr, thresholds = metrics.roc_curve(test_labels, test_predictions, pos_label=1)
with plt.style.context(('ggplot')):
fig, ax = plt.subplots()
ax.plot(fpr[2], tpr[2])
#ax.plot([0, 1], [0, 1], 'k--')
#plt.xlim([0.0, 1.0])
#plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
return fig
| mit | -4,625,434,244,903,208,000 | 36.909091 | 116 | 0.651936 | false |
oihane/odoo | openerp/addons/base/tests/test_orm.py | 49 | 18125 | from collections import defaultdict
from openerp.tools import mute_logger
from openerp.tests import common
UID = common.ADMIN_USER_ID
DB = common.DB
class TestORM(common.TransactionCase):
""" test special behaviors of ORM CRUD functions
    TODO: use real Exception types instead of Exception """
def setUp(self):
super(TestORM, self).setUp()
cr, uid = self.cr, self.uid
self.partner = self.registry('res.partner')
self.users = self.registry('res.users')
self.p1 = self.partner.name_create(cr, uid, 'W')[0]
self.p2 = self.partner.name_create(cr, uid, 'Y')[0]
self.ir_rule = self.registry('ir.rule')
# sample unprivileged user
employee_gid = self.ref('base.group_user')
self.uid2 = self.users.create(cr, uid, {'name': 'test user', 'login': 'test', 'groups_id': [4,employee_gid]})
@mute_logger('openerp.models')
def testAccessDeletedRecords(self):
""" Verify that accessing deleted records works as expected """
cr, uid, uid2, p1, p2 = self.cr, self.uid, self.uid2, self.p1, self.p2
self.partner.unlink(cr, uid, [p1])
# read() is expected to skip deleted records because our API is not
# transactional for a sequence of search()->read() performed from the
# client-side... a concurrent deletion could therefore cause spurious
# exceptions even when simply opening a list view!
        # /!\ Using unprivileged user to detect former side effects of ir.rules!
self.assertEqual([{'id': p2, 'name': 'Y'}], self.partner.read(cr, uid2, [p1,p2], ['name']), "read() should skip deleted records")
self.assertEqual([], self.partner.read(cr, uid2, [p1], ['name']), "read() should skip deleted records")
# Deleting an already deleted record should be simply ignored
self.assertTrue(self.partner.unlink(cr, uid, [p1]), "Re-deleting should be a no-op")
# Updating an already deleted record should raise, even as admin
with self.assertRaises(Exception):
self.partner.write(cr, uid, [p1], {'name': 'foo'})
@mute_logger('openerp.models')
def testAccessFilteredRecords(self):
""" Verify that accessing filtered records works as expected for non-admin user """
cr, uid, uid2, p1, p2 = self.cr, self.uid, self.uid2, self.p1, self.p2
partner_model = self.registry('ir.model').search(cr, uid, [('model','=','res.partner')])[0]
self.ir_rule.create(cr, uid, {'name': 'Y is invisible',
'domain_force': [('id', '!=', p1)],
'model_id': partner_model})
# search as unprivileged user
partners = self.partner.search(cr, uid2, [])
self.assertFalse(p1 in partners, "W should not be visible...")
self.assertTrue(p2 in partners, "... but Y should be visible")
# read as unprivileged user
with self.assertRaises(Exception):
self.partner.read(cr, uid2, [p1], ['name'])
# write as unprivileged user
with self.assertRaises(Exception):
self.partner.write(cr, uid2, [p1], {'name': 'foo'})
# unlink as unprivileged user
with self.assertRaises(Exception):
self.partner.unlink(cr, uid2, [p1])
# Prepare mixed case
self.partner.unlink(cr, uid, [p2])
# read mixed records: some deleted and some filtered
with self.assertRaises(Exception):
self.partner.read(cr, uid2, [p1,p2], ['name'])
# delete mixed records: some deleted and some filtered
with self.assertRaises(Exception):
self.partner.unlink(cr, uid2, [p1,p2])
def test_multi_read(self):
record_id = self.partner.create(self.cr, UID, {'name': 'MyPartner1'})
records = self.partner.read(self.cr, UID, [record_id])
self.assertIsInstance(records, list)
def test_one_read(self):
record_id = self.partner.create(self.cr, UID, {'name': 'MyPartner1'})
record = self.partner.read(self.cr, UID, record_id)
self.assertIsInstance(record, dict)
@mute_logger('openerp.models')
def test_search_read(self):
# simple search_read
self.partner.create(self.cr, UID, {'name': 'MyPartner1'})
found = self.partner.search_read(self.cr, UID, [['name', '=', 'MyPartner1']], ['name'])
self.assertEqual(len(found), 1)
self.assertEqual(found[0]['name'], 'MyPartner1')
self.assertTrue('id' in found[0])
# search_read correct order
self.partner.create(self.cr, UID, {'name': 'MyPartner2'})
found = self.partner.search_read(self.cr, UID, [['name', 'like', 'MyPartner']], ['name'], order="name")
self.assertEqual(len(found), 2)
self.assertEqual(found[0]['name'], 'MyPartner1')
self.assertEqual(found[1]['name'], 'MyPartner2')
found = self.partner.search_read(self.cr, UID, [['name', 'like', 'MyPartner']], ['name'], order="name desc")
self.assertEqual(len(found), 2)
self.assertEqual(found[0]['name'], 'MyPartner2')
self.assertEqual(found[1]['name'], 'MyPartner1')
# search_read that finds nothing
found = self.partner.search_read(self.cr, UID, [['name', '=', 'Does not exists']], ['name'])
self.assertEqual(len(found), 0)
def test_exists(self):
partner = self.partner.browse(self.cr, UID, [])
# check that records obtained from search exist
recs = partner.search([])
self.assertTrue(recs)
self.assertEqual(recs.exists(), recs)
# check that there is no record with id 0
recs = partner.browse([0])
self.assertFalse(recs.exists())
def test_groupby_date(self):
partners = dict(
A='2012-11-19',
B='2012-12-17',
C='2012-12-31',
D='2013-01-07',
E='2013-01-14',
F='2013-01-28',
G='2013-02-11',
)
all_partners = []
partners_by_day = defaultdict(set)
partners_by_month = defaultdict(set)
partners_by_year = defaultdict(set)
for name, date in partners.items():
p = self.partner.create(self.cr, UID, dict(name=name, date=date))
all_partners.append(p)
partners_by_day[date].add(p)
partners_by_month[date.rsplit('-', 1)[0]].add(p)
partners_by_year[date.split('-', 1)[0]].add(p)
def read_group(interval, domain=None):
main_domain = [('id', 'in', all_partners)]
if domain:
domain = ['&'] + main_domain + domain
else:
domain = main_domain
rg = self.partner.read_group(self.cr, self.uid, domain, ['date'], 'date' + ':' + interval)
result = {}
for r in rg:
result[r['date:' + interval]] = set(self.partner.search(self.cr, self.uid, r['__domain']))
return result
self.assertEqual(len(read_group('day')), len(partners_by_day))
self.assertEqual(len(read_group('month')), len(partners_by_month))
self.assertEqual(len(read_group('year')), len(partners_by_year))
rg = self.partner.read_group(self.cr, self.uid, [('id', 'in', all_partners)],
['date'], ['date:month', 'date:day'], lazy=False)
self.assertEqual(len(rg), len(all_partners))
class TestInherits(common.TransactionCase):
""" test the behavior of the orm for models that use _inherits;
specifically: res.users, that inherits from res.partner
"""
def setUp(self):
super(TestInherits, self).setUp()
self.partner = self.registry('res.partner')
self.user = self.registry('res.users')
def test_default(self):
""" `default_get` cannot return a dictionary or a new id """
defaults = self.user.default_get(self.cr, UID, ['partner_id'])
if 'partner_id' in defaults:
self.assertIsInstance(defaults['partner_id'], (bool, int, long))
def test_create(self):
""" creating a user should automatically create a new partner """
partners_before = self.partner.search(self.cr, UID, [])
foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo'})
foo = self.user.browse(self.cr, UID, foo_id)
self.assertNotIn(foo.partner_id.id, partners_before)
def test_create_with_ancestor(self):
""" creating a user with a specific 'partner_id' should not create a new partner """
par_id = self.partner.create(self.cr, UID, {'name': 'Foo'})
partners_before = self.partner.search(self.cr, UID, [])
foo_id = self.user.create(self.cr, UID, {'partner_id': par_id, 'login': 'foo', 'password': 'foo'})
partners_after = self.partner.search(self.cr, UID, [])
self.assertEqual(set(partners_before), set(partners_after))
foo = self.user.browse(self.cr, UID, foo_id)
self.assertEqual(foo.name, 'Foo')
self.assertEqual(foo.partner_id.id, par_id)
@mute_logger('openerp.models')
def test_read(self):
""" inherited fields should be read without any indirection """
foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo'})
foo_values, = self.user.read(self.cr, UID, [foo_id])
partner_id = foo_values['partner_id'][0]
partner_values, = self.partner.read(self.cr, UID, [partner_id])
self.assertEqual(foo_values['name'], partner_values['name'])
foo = self.user.browse(self.cr, UID, foo_id)
self.assertEqual(foo.name, foo.partner_id.name)
@mute_logger('openerp.models')
def test_copy(self):
""" copying a user should automatically copy its partner, too """
foo_id = self.user.create(self.cr, UID, {
'name': 'Foo',
'login': 'foo',
'password': 'foo',
'supplier': True,
})
foo_before, = self.user.read(self.cr, UID, [foo_id])
del foo_before['__last_update']
bar_id = self.user.copy(self.cr, UID, foo_id, {
'login': 'bar',
'password': 'bar',
})
foo_after, = self.user.read(self.cr, UID, [foo_id])
del foo_after['__last_update']
self.assertEqual(foo_before, foo_after)
foo, bar = self.user.browse(self.cr, UID, [foo_id, bar_id])
self.assertEqual(bar.name, 'Foo (copy)')
self.assertEqual(bar.login, 'bar')
self.assertEqual(foo.supplier, bar.supplier)
self.assertNotEqual(foo.id, bar.id)
self.assertNotEqual(foo.partner_id.id, bar.partner_id.id)
@mute_logger('openerp.models')
def test_copy_with_ancestor(self):
""" copying a user with 'parent_id' in defaults should not duplicate the partner """
foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo',
'login_date': '2016-01-01', 'signature': 'XXX'})
par_id = self.partner.create(self.cr, UID, {'name': 'Bar'})
foo_before, = self.user.read(self.cr, UID, [foo_id])
del foo_before['__last_update']
partners_before = self.partner.search(self.cr, UID, [])
bar_id = self.user.copy(self.cr, UID, foo_id, {'partner_id': par_id, 'login': 'bar'})
foo_after, = self.user.read(self.cr, UID, [foo_id])
del foo_after['__last_update']
partners_after = self.partner.search(self.cr, UID, [])
self.assertEqual(foo_before, foo_after)
self.assertEqual(set(partners_before), set(partners_after))
foo, bar = self.user.browse(self.cr, UID, [foo_id, bar_id])
self.assertNotEqual(foo.id, bar.id)
self.assertEqual(bar.partner_id.id, par_id)
self.assertEqual(bar.login, 'bar', "login is given from copy parameters")
self.assertFalse(bar.login_date, "login_date should not be copied from original record")
self.assertEqual(bar.name, 'Bar', "name is given from specific partner")
self.assertEqual(bar.signature, foo.signature, "signature should be copied")
CREATE = lambda values: (0, False, values)
UPDATE = lambda id, values: (1, id, values)
DELETE = lambda id: (2, id, False)
FORGET = lambda id: (3, id, False)
LINK_TO = lambda id: (4, id, False)
DELETE_ALL = lambda: (5, False, False)
REPLACE_WITH = lambda ids: (6, False, ids)
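# Illustrative note (not from the original code): the helpers above build the
# (command_code, id, values) triplets accepted by one2many/many2many writes, e.g.
#   partner.write(cr, UID, [pid], {'child_ids': [CREATE({'name': 'foo'}), DELETE(cid)]})
# where CREATE adds a new child record and DELETE removes an existing one.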
def sorted_by_id(list_of_dicts):
"sort dictionaries by their 'id' field; useful for comparisons"
return sorted(list_of_dicts, key=lambda d: d.get('id'))
class TestO2MSerialization(common.TransactionCase):
""" test the orm method 'write' on one2many fields """
def setUp(self):
super(TestO2MSerialization, self).setUp()
self.partner = self.registry('res.partner')
def test_no_command(self):
" empty list of commands yields an empty list of records "
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', [])
self.assertEqual(results, [])
def test_CREATE_commands(self):
" returns the VALUES dict as-is "
values = [{'foo': 'bar'}, {'foo': 'baz'}, {'foo': 'baq'}]
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', map(CREATE, values))
self.assertEqual(results, values)
def test_LINK_TO_command(self):
" reads the records from the database, records are returned with their ids. "
ids = [
self.partner.create(self.cr, UID, {'name': 'foo'}),
self.partner.create(self.cr, UID, {'name': 'bar'}),
self.partner.create(self.cr, UID, {'name': 'baz'})
]
commands = map(LINK_TO, ids)
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', commands, ['name'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'id': ids[0], 'name': 'foo'},
{'id': ids[1], 'name': 'bar'},
{'id': ids[2], 'name': 'baz'}
]))
def test_bare_ids_command(self):
" same as the equivalent LINK_TO commands "
ids = [
self.partner.create(self.cr, UID, {'name': 'foo'}),
self.partner.create(self.cr, UID, {'name': 'bar'}),
self.partner.create(self.cr, UID, {'name': 'baz'})
]
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', ids, ['name'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'id': ids[0], 'name': 'foo'},
{'id': ids[1], 'name': 'bar'},
{'id': ids[2], 'name': 'baz'}
]))
def test_UPDATE_command(self):
" take the in-db records and merge the provided information in "
id_foo = self.partner.create(self.cr, UID, {'name': 'foo'})
id_bar = self.partner.create(self.cr, UID, {'name': 'bar'})
id_baz = self.partner.create(self.cr, UID, {'name': 'baz', 'city': 'tag'})
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', [
LINK_TO(id_foo),
UPDATE(id_bar, {'name': 'qux', 'city': 'tagtag'}),
UPDATE(id_baz, {'name': 'quux'})
], ['name', 'city'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'id': id_foo, 'name': 'foo', 'city': False},
{'id': id_bar, 'name': 'qux', 'city': 'tagtag'},
{'id': id_baz, 'name': 'quux', 'city': 'tag'}
]))
def test_DELETE_command(self):
" deleted records are not returned at all. "
ids = [
self.partner.create(self.cr, UID, {'name': 'foo'}),
self.partner.create(self.cr, UID, {'name': 'bar'}),
self.partner.create(self.cr, UID, {'name': 'baz'})
]
commands = [DELETE(ids[0]), DELETE(ids[1]), DELETE(ids[2])]
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', commands, ['name'])
self.assertEqual(results, [])
def test_mixed_commands(self):
ids = [
self.partner.create(self.cr, UID, {'name': name})
for name in ['NObar', 'baz', 'qux', 'NOquux', 'NOcorge', 'garply']
]
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', [
CREATE({'name': 'foo'}),
UPDATE(ids[0], {'name': 'bar'}),
LINK_TO(ids[1]),
DELETE(ids[2]),
UPDATE(ids[3], {'name': 'quux',}),
UPDATE(ids[4], {'name': 'corge'}),
CREATE({'name': 'grault'}),
LINK_TO(ids[5])
], ['name'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'name': 'foo'},
{'id': ids[0], 'name': 'bar'},
{'id': ids[1], 'name': 'baz'},
{'id': ids[3], 'name': 'quux'},
{'id': ids[4], 'name': 'corge'},
{'name': 'grault'},
{'id': ids[5], 'name': 'garply'}
]))
def test_LINK_TO_pairs(self):
"LINK_TO commands can be written as pairs, instead of triplets"
ids = [
self.partner.create(self.cr, UID, {'name': 'foo'}),
self.partner.create(self.cr, UID, {'name': 'bar'}),
self.partner.create(self.cr, UID, {'name': 'baz'})
]
commands = map(lambda id: (4, id), ids)
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', commands, ['name'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'id': ids[0], 'name': 'foo'},
{'id': ids[1], 'name': 'bar'},
{'id': ids[2], 'name': 'baz'}
]))
def test_singleton_commands(self):
"DELETE_ALL can appear as a singleton"
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', [DELETE_ALL()], ['name'])
self.assertEqual(results, [])
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 8,808,555,461,813,095,000 | 41.151163 | 137 | 0.569766 | false |
anirudhSK/chromium | tools/telemetry/telemetry/core/platform/android_platform_backend_unittest.py | 2 | 3190 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import unittest
from telemetry import test
from telemetry.core import bitmap
from telemetry.core import util
from telemetry.core.platform import android_platform_backend
from telemetry.unittest import system_stub
class MockAdbCommands(object):
def __init__(self, mock_content, system_properties):
self.mock_content = mock_content
self.system_properties = system_properties
if self.system_properties.get('ro.product.cpu.abi') == None:
self.system_properties['ro.product.cpu.abi'] = 'armeabi-v7a'
def CanAccessProtectedFileContents(self):
return True
# pylint: disable=W0613
def GetProtectedFileContents(self, file_name):
return self.mock_content
def PushIfNeeded(self, host_binary, device_path):
pass
def RunShellCommand(self, command):
return []
class AndroidPlatformBackendTest(unittest.TestCase):
def setUp(self):
self._stubs = system_stub.Override(android_platform_backend,
['perf_control', 'thermal_throttle'])
def tearDown(self):
self._stubs.Restore()
@test.Disabled('chromeos')
def testGetCpuStats(self):
proc_stat_content = [
'7702 (.android.chrome) S 167 167 0 0 -1 1077936448 '
'3247 0 0 0 4 1 0 0 20 0 9 0 5603962 337379328 5867 '
'4294967295 1074458624 1074463824 3197495984 3197494152 '
'1074767676 0 4612 0 38136 4294967295 0 0 17 0 0 0 0 0 0 '
'1074470376 1074470912 1102155776']
adb_valid_proc_content = MockAdbCommands(proc_stat_content, {})
backend = android_platform_backend.AndroidPlatformBackend(
adb_valid_proc_content, False)
cpu_stats = backend.GetCpuStats('7702')
self.assertEquals(cpu_stats, {'CpuProcessTime': 5.0})
@test.Disabled('chromeos')
def testGetCpuStatsInvalidPID(self):
# Mock an empty /proc/pid/stat.
adb_empty_proc_stat = MockAdbCommands([], {})
backend = android_platform_backend.AndroidPlatformBackend(
adb_empty_proc_stat, False)
cpu_stats = backend.GetCpuStats('7702')
self.assertEquals(cpu_stats, {})
@test.Disabled
def testFramesFromMp4(self):
    mock_adb = MockAdbCommands([], {})
backend = android_platform_backend.AndroidPlatformBackend(mock_adb, False)
try:
backend.InstallApplication('avconv')
finally:
if not backend.CanLaunchApplication('avconv'):
logging.warning('Test not supported on this platform')
return # pylint: disable=W0150
vid = os.path.join(util.GetUnittestDataDir(), 'vid.mp4')
expected_timestamps = [
0,
763,
783,
940,
1715,
1732,
1842,
1926,
]
# pylint: disable=W0212
for i, timestamp_bitmap in enumerate(backend._FramesFromMp4(vid)):
timestamp, bmp = timestamp_bitmap
self.assertEquals(timestamp, expected_timestamps[i])
expected_bitmap = bitmap.Bitmap.FromPngFile(os.path.join(
util.GetUnittestDataDir(), 'frame%d.png' % i))
self.assertTrue(expected_bitmap.IsEqual(bmp))
| bsd-3-clause | -3,807,787,277,986,968,000 | 31.55102 | 78 | 0.689655 | false |
hubert667/AIR | build/scripts-2.7/summarize-learning-experiment.py | 1 | 3971 | #!/usr/bin/python
# This file is part of Lerot.
#
# Lerot is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lerot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Lerot. If not, see <http://www.gnu.org/licenses/>.
# KH, 2012/06/25
"""
Summarizes the output of an online learning experiment.
"""
try:
from include import *
except:
pass
import argparse
import os
import sys
import gzip
import yaml
from numpy import cumsum, mean, std, zeros
# parse arguments
parser = argparse.ArgumentParser(
prog="python summarize-learning-experiment.py",
description="Summarize the output of an online learning experiment.")
parser.add_argument("-g", "--discount_factor", type=float, default=0.995,
help="Discount factor to apply when evaluating online performance.")
parser.add_argument("-f", "--fold_dirs", nargs="+", required=True,
help="List all directories that contain runs of different folds for the "
"current data set. Results will be averaged over all folds and runs.")
parser.add_argument("-s", "--file_ext", default="txt.gz",
help="File extension of the files in which run results are stored.")
parser.add_argument("-o", "--output_base", #required=True,
help="Filebase for output files. Output will be stored in OUTPUT_BASE.txt"
" (numbers) and OUTPUT_BASE_(online|offline).pdf (plots).")
args = parser.parse_args()
is_initialized = False
agg_online_ndcg = None
agg_offline_ndcg = None
count_queries = 0
count_runs = 0
# for each fold and run
for fold in args.fold_dirs:
for filename in sorted(os.listdir(fold)):
if not filename.endswith(args.file_ext):
continue
filename = os.path.join(fold, filename)
if os.path.getsize(filename) == 0:
continue
if filename.endswith(".gz"):
fh = gzip.open(filename, "r")
else:
fh = open(filename, "r")
print >> sys.stderr, "Processing %s" % filename
count_runs += 1
# read data from output file
data = yaml.load(fh)
fh.close()
if not is_initialized:
count_queries = len(data["online_evaluation.NdcgEval"])
agg_online_ndcg = [ [] for i in range(count_queries) ]
agg_offline_ndcg = [ [] for i in range(count_queries) ]
is_initialized = True
# aggregate (and apply discounting)
# (i is the index of the query, i.e., i=3 means performance after the
# third query has been observed), the second index points to
# the run id
for i, value in enumerate(data["online_evaluation.NdcgEval"]):
prev = 0.0
if i > 0:
prev = agg_online_ndcg[i-1][-1]
# discount + cumsum
agg_online_ndcg[i].append(prev + args.discount_factor**i * value)
for i, value in enumerate(data["offline_test_evaluation.NdcgEval"]):
agg_offline_ndcg[i].append(value)
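# Illustrative sketch (not part of the original script): the discounted
# cumulative online score built in the loop above is equivalent to the
# following, shown on a toy run with three queries and a discount factor of 0.5.
#
#   raw = [0.4, 0.6, 0.8]          # per-query online NDCG for one run
#   gamma = 0.5
#   cumulative, prev = [], 0.0
#   for i, v in enumerate(raw):
#       prev = prev + gamma ** i * v
#       cumulative.append(prev)
#   # cumulative == [0.4, 0.7, 0.9]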
print >> sys.stderr, "Computing results for up to %d queries." % count_queries
print >> sys.stderr, "Averaging over %d folds and runs." % count_runs
# output gnuplot file:
# QUERY_COUNT OFFLINE_MEAN OFFLINE_STD ONLINE_MEAN ONLINE_STD
if args.output_base:
out_filename = "%s.txt" % args.output_base
out_fh = open(out_filename, "w")
else:
out_fh = sys.stdout
for i in range(count_queries):
print >> out_fh, "%d %.5f %.5f %.5f %.5f" % (i,
mean(agg_offline_ndcg[i]), std(agg_offline_ndcg[i]),
mean(agg_online_ndcg[i]), std(agg_online_ndcg[i]))
if args.output_base:
out_fh.close()
| gpl-3.0 | -5,586,761,019,017,056,000 | 36.462264 | 78 | 0.658776 | false |
devendermishrajio/nova_test_latest | doc/ext/nova_todo.py | 64 | 3385 | # -*- coding: utf-8 -*-
# This is a hack of the builtin todo extension, to make the todo_list
# more user friendly.
from sphinx.ext.todo import *
import re
def _(s):
return s
def process_todo_nodes(app, doctree, fromdocname):
if not app.config['todo_include_todos']:
for node in doctree.traverse(todo_node):
node.parent.remove(node)
# Replace all todolist nodes with a list of the collected todos.
# Augment each todo with a backlink to the original location.
env = app.builder.env
if not hasattr(env, 'todo_all_todos'):
env.todo_all_todos = []
# remove the item that was added in the constructor, since I'm tired of
# reading through docutils for the proper way to construct an empty list
lists = []
for i in range(5):
lists.append(nodes.bullet_list("", nodes.Text('', '')))
lists[i].remove(lists[i][0])
lists[i]['classes'].append('todo_list')
for node in doctree.traverse(todolist):
if not app.config['todo_include_todos']:
node.replace_self([])
continue
for todo_info in env.todo_all_todos:
para = nodes.paragraph()
# Create a reference
newnode = nodes.reference('', '')
filename = env.doc2path(todo_info['docname'], base=None)
link = (_('%(filename)s, line %(line_info)d') %
{'filename': filename, 'line_info': todo_info['lineno']})
innernode = nodes.emphasis(link, link)
newnode['refdocname'] = todo_info['docname']
try:
newnode['refuri'] = app.builder.get_relative_uri(
fromdocname, todo_info['docname'])
newnode['refuri'] += '#' + todo_info['target']['refid']
except NoUri:
# ignore if no URI can be determined, e.g. for LaTeX output
pass
newnode.append(innernode)
para += newnode
para['classes'].append('todo_link')
todo_entry = todo_info['todo']
env.resolve_references(todo_entry, todo_info['docname'],
app.builder)
item = nodes.list_item('', para)
todo_entry[1]['classes'].append('details')
comment = todo_entry[1]
m = re.match(r"^P(\d)", comment.astext())
priority = 5
if m:
priority = int(m.group(1))
if priority < 0:
priority = 1
if priority > 5:
priority = 5
item['classes'].append('todo_p' + str(priority))
todo_entry['classes'].append('todo_p' + str(priority))
item.append(comment)
lists[priority - 1].insert(0, item)
node.replace_self(lists)
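# Illustrative sketch (assumed usage, not part of the original extension): a
# todo whose text starts with "P<digit>" is bucketed by that priority, e.g. in
# a reStructuredText source file
#
#   .. todo:: P1 fix the race condition in the scheduler
#
# matches r"^P(\d)" above, is assigned priority 1 and the CSS class 'todo_p1',
# while a todo without the prefix falls back to the default priority of 5.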
def setup(app):
app.add_config_value('todo_include_todos', False, False)
app.add_node(todolist)
app.add_node(todo_node,
html=(visit_todo_node, depart_todo_node),
latex=(visit_todo_node, depart_todo_node),
text=(visit_todo_node, depart_todo_node))
app.add_directive('todo', Todo)
app.add_directive('todolist', TodoList)
app.connect('doctree-read', process_todos)
app.connect('doctree-resolved', process_todo_nodes)
app.connect('env-purge-doc', purge_todos)
| apache-2.0 | 8,875,676,542,032,268,000 | 31.548077 | 78 | 0.554505 | false |
zuoshifan/instimager | imager/telescope.py | 1 | 33995 | import abc
import numpy as np
import visibility
from util import util
from util import typeutil
from cora.util import hputil, units
from caput import config
def in_range(arr, min, max):
"""Check if array entries are within the given range.
Parameters
----------
arr : np.ndarray
Array to check.
min, max : scalar or np.ndarray
Minimum and maximum values to test against. Values can be in arrays
broadcastable against `arr`.
Returns
-------
val : boolean
True if all entries are within range.
"""
return (arr >= min).all() and (arr < max).all()
def out_of_range(arr, min, max):
return not in_range(arr, min, max)
def map_half_plane(arr):
arr = np.where((arr[:, 0] < 0.0)[:, np.newaxis], -arr, arr)
arr = np.where(np.logical_and(arr[:, 0] == 0.0, arr[:, 1] < 0.0)[:, np.newaxis], -arr, arr)
return arr
def _merge_keyarray(keys1, keys2, mask1=None, mask2=None):
tmask1 = mask1 if mask1 is not None else np.ones_like(keys1, dtype=np.bool)
tmask2 = mask2 if mask2 is not None else np.ones_like(keys2, dtype=np.bool)
# Merge two groups of feed arrays
cmask = np.logical_and(tmask1, tmask2)
ckeys = _remap_keyarray(keys1 + 1.0J * keys2, mask=cmask)
if mask1 is None and mask2 is None:
return ckeys
else:
return ckeys, cmask
def _remap_keyarray(keyarray, mask=None):
# Look through an array of keys and attach integer labels to each
# equivalent classes of keys (also take into account masking).
if mask is None:
mask = np.ones(keyarray.shape, np.bool)
ind = np.where(mask)
un, inv = np.unique(keyarray[ind], return_inverse=True)
fmap = -1*np.ones(keyarray.shape, dtype=np.int)
fmap[ind] = np.arange(un.size)[inv]
return fmap
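# Illustrative sketch (assumed inputs, not part of the original module):
# _remap_keyarray relabels equal keys with consecutive integers (masked
# entries become -1), e.g.
#
#   keys = np.array([[1.5, 2.0],
#                    [2.0, 1.5]])
#   _remap_keyarray(keys)
#   # -> array([[0, 1],
#   #           [1, 0]])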
def _get_indices(keyarray, mask=None):
# Return a pair of indices for each group of equivalent feed pairs
if mask is None:
mask = np.ones(keyarray.shape, np.bool)
wm = np.where(mask.ravel())[0]
keysflat = keyarray.ravel()[wm]
un, ind = np.unique(keysflat, return_index=True)
# CHANGE: np (< 1.6) does not support multiple indices in np.unravel_index
#upairs = np.array(np.unravel_index(wm[ind], keyarray.shape)).T
upairs = np.array([np.unravel_index(i1, keyarray.shape) for i1 in wm[ind] ])
#return np.sort(upairs, axis=-1) # Sort to ensure we are in upper triangle
return upairs
def max_lm(baselines, wavelengths, uwidth, vwidth=0.0):
"""Get the maximum (l,m) that a baseline is sensitive to.
Parameters
----------
baselines : np.ndarray
An array of baselines.
wavelengths : np.ndarray
An array of wavelengths.
uwidth : np.ndarray
Width of the receiver in the u-direction.
vwidth : np.ndarray
Width of the receiver in the v-direction.
Returns
-------
lmax, mmax : array_like
"""
umax = (np.abs(baselines[:, 0]) + uwidth) / wavelengths
vmax = (np.abs(baselines[:, 1]) + vwidth) / wavelengths
mmax = np.ceil(2 * np.pi * umax).astype(np.int64)
lmax = np.ceil((mmax**2 + (2*np.pi*vmax)**2)**0.5).astype(np.int64)
return lmax, mmax
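# Illustrative sketch (numbers are assumptions, not from the original module):
# a 10 m east-west baseline observed at a 0.5 m wavelength with a 2 m feed
#
#   lmax, mmax = max_lm(np.array([[10.0, 0.0]]), np.array([0.5]), 2.0, 2.0)
#   # mmax = ceil(2*pi*(10 + 2)/0.5)                -> array([151])
#   # lmax = ceil(sqrt(mmax**2 + (2*pi*2/0.5)**2))  -> array([154])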
def latlon_to_sphpol(latlon):
zenith = np.array([np.pi / 2.0 - np.radians(latlon[0]),
np.remainder(np.radians(latlon[1]), 2*np.pi)])
return zenith
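# Illustrative sketch (assumed input, not part of the original module): the
# default location of [45.0, 10.0] degrees maps to spherical polar coordinates
#
#   latlon_to_sphpol([45.0, 10.0])
#   # -> array([0.78539816, 0.17453293]), i.e. [pi/2 - radians(45), radians(10)]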
class TransitTelescope(config.Reader):
"""Base class for simulating any transit interferometer.
This is an abstract class, and several methods must be implemented before it
is usable. These are:
* `feedpositions` - a property which contains the positions of all the feeds
* `_get_unique` - calculates which baselines are identical
* `_transfer_single` - calculate the beam transfer for a single baseline+freq
* `_make_matrix_array` - makes an array of the right size to hold the
transfer functions
* `_copy_transfer_into_single` - copy a single transfer matrix into a
collection.
The last two are required for supporting polarised beam functions.
Properties
----------
freq_lower, freq_higher : scalar
The lower / upper bound of the lowest / highest frequency bands.
num_freq : scalar
The number of frequency bands (only use for setting up the frequency
binning). Generally using `nfreq` is preferred.
tsys_flat : scalar
The system temperature (in K). Override `tsys` for anything more
sophisticated.
positive_m_only: boolean
Whether to only deal with half the `m` range. In many cases we are
much less sensitive to negative-m (depending on the hemisphere, and
baseline alignment). This does not affect the beams calculated, only
how they're used in further calculation. Default: False
minlength, maxlength : scalar
Minimum and maximum baseline lengths to include (in metres).
"""
__metaclass__ = abc.ABCMeta # Enforce Abstract class
# zenith = config.Property(proptype=latlon_to_sphpol, default=[45.0, 0.0])
zenith = config.Property(proptype=latlon_to_sphpol, default=[45.0, 10.0])
freq_lower = config.Property(proptype=typeutil.nonnegative_float, default=400.0)
freq_upper = config.Property(proptype=typeutil.nonnegative_float, default=800.0)
num_freq = config.Property(proptype=typeutil.positive_int, default=50)
tsys_flat = config.Property(proptype=typeutil.nonnegative_float, default=50.0, key='tsys')
ndays = config.Property(proptype=typeutil.positive_int, default=733)
accuracy_boost = config.Property(proptype=typeutil.positive_float, default=1.0)
l_boost = config.Property(proptype=typeutil.positive_float, default=1.0)
minlength = config.Property(proptype=typeutil.nonnegative_float, default=0.0)
maxlength = config.Property(proptype=typeutil.nonnegative_float, default=1.0e7)
auto_correlations = config.Property(proptype=bool, default=False)
# def __init__(self, latitude=45, longitude=0):
# """Initialise a telescope object.
# Parameters
# ----------
# latitude, longitude : scalar
# Position on the Earths surface of the telescope (in degrees).
# """
# # NOTE: latlon_to_sphpol is automatically applied on assignment
# self.zenith = [latitude, longitude]
def __getstate__(self):
state = self.__dict__.copy()
#delkeys = ['_baselines', '_redundancy', '_frequencies'] + self._extdelkeys
for key in self.__dict__:
#if (key in delkeys) or (key[0] == "_"):
if (key[0] == "_"):
del state[key]
return state
#========= Properties related to baselines =========
_baselines = None
@property
def baselines(self):
"""The unique baselines in the telescope. Packed as [[u1, v1], [u2, v2], ...]."""
        if self._baselines is None:
self.calculate_feedpairs()
return self._baselines
_redundancy = None
@property
def redundancy(self):
"""The redundancy of each baseline (corresponds to entries in
cyl.baselines)."""
        if self._redundancy is None:
self.calculate_feedpairs()
return self._redundancy
@property
def nbase(self):
"""The number of unique baselines."""
return self.npairs
@property
def npairs(self):
"""The number of unique feed pairs."""
return self.uniquepairs.shape[0]
_uniquepairs = None
@property
def uniquepairs(self):
"""An (npairs, 2) array of the feed pairs corresponding to each baseline."""
        if self._uniquepairs is None:
self.calculate_feedpairs()
return self._uniquepairs
_feedmap = None
@property
def feedmap(self):
"""An (nfeed, nfeed) array giving the mapping between feedpairs and
the calculated baselines. Each entry is an index into the arrays of unique pairs."""
        if self._feedmap is None:
self.calculate_feedpairs()
return self._feedmap
_feedmask = None
@property
def feedmask(self):
"""An (nfeed, nfeed) array giving the entries that have been
calculated. This allows to mask out pairs we want to ignore."""
        if self._feedmask is None:
self.calculate_feedpairs()
return self._feedmask
_feedconj = None
@property
def feedconj(self):
"""An (nfeed, nfeed) array giving the feed pairs which must be complex
conjugated."""
        if self._feedconj is None:
self.calculate_feedpairs()
return self._feedconj
#===================================================
#======== Properties related to frequencies ========
_frequencies = None
@property
def frequencies(self):
"""The centre of each frequency band (in MHz)."""
        if self._frequencies is None:
self.calculate_frequencies()
return self._frequencies
def calculate_frequencies(self):
#self._frequencies = np.linspace(self.freq_lower, self.freq_upper, self.num_freq)
self._frequencies = self.freq_lower + (np.arange(self.num_freq) + 0.5) * ((self.freq_upper - self.freq_lower) / self.num_freq)
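    # Illustrative sketch (assumed defaults, not part of the original module):
    # with the default 400-800 MHz range split into 50 bands each band is 8 MHz
    # wide and the centres land at 404, 412, ..., 796 MHz:
    #
    #   400.0 + (np.arange(50) + 0.5) * (800.0 - 400.0) / 50
    #   # -> array([ 404., 412., ..., 796.])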
@property
def wavelengths(self):
"""The central wavelength of each frequency band (in metres)."""
return units.c / (1e6 * self.frequencies)
@property
def nfreq(self):
"""The number of frequency bins."""
return self.frequencies.shape[0]
#===================================================
#======== Properties related to the feeds ==========
@property
def nfeed(self):
"""The number of feeds."""
return self.feedpositions.shape[0]
#===================================================
#======= Properties related to polarisation ========
@property
def num_pol_sky(self):
"""The number of polarisation combinations on the sky that we are
considering. Should be either 1 (T=I only), 3 (T, Q, U) or 4 (T, Q, U and V).
"""
return self._npol_sky_
#===================================================
#===== Properties related to harmonic spread =======
@property
def lmax(self):
"""The maximum l the telescope is sensitive to."""
lmax, mmax = max_lm(self.baselines, self.wavelengths[-1], self.u_width, self.v_width)
return int(np.ceil(lmax.max() * self.l_boost))
@property
def mmax(self):
"""The maximum m the telescope is sensitive to."""
lmax, mmax = max_lm(self.baselines, self.wavelengths[-1], self.u_width, self.v_width)
return int(np.ceil(mmax.max() * self.l_boost))
#===================================================
#== Methods for calculating the unique baselines ===
def calculate_feedpairs(self):
"""Calculate all the unique feedpairs and their redundancies, and set
the internal state of the object.
"""
# Get unique pairs, and create mapping arrays
self._feedmap, self._feedmask = self._get_unique()
# Identify conjugate pairs
self._feedconj = np.tril(np.ones_like(self._feedmap), -1).astype(np.bool)
# Reorder and conjugate baselines such that the default feedpair
# points W->E (to ensure we use positive-m)
self._make_ew()
# Sort baselines into order
self._sort_pairs()
# Create mask of included pairs, that are not conjugated
tmask = np.logical_and(self._feedmask, np.logical_not(self._feedconj))
self._uniquepairs = _get_indices(self._feedmap, mask=tmask)
self._redundancy = np.bincount(self._feedmap[np.where(tmask)]) # Triangle mask to avoid double counting
self._baselines = self.feedpositions[self._uniquepairs[:, 0]] - self.feedpositions[self._uniquepairs[:, 1]]
def _make_ew(self):
# Reorder baselines pairs, such that the baseline vector always points E (or pure N)
tmask = np.logical_and(self._feedmask, np.logical_not(self._feedconj))
uniq = _get_indices(self._feedmap, mask=tmask)
for i in range(uniq.shape[0]):
sep = self.feedpositions[uniq[i, 0]] - self.feedpositions[uniq[i, 1]]
if sep[0] < 0.0 or (sep[0] == 0.0 and sep[1] < 0.0):
# Reorder feed pairs and conjugate mapping
# self._uniquepairs[i, 1], self._uniquepairs[i, 0] = self._uniquepairs[i, 0], self._uniquepairs[i, 1]
self._feedconj = np.where(self._feedmap == i, np.logical_not(self._feedconj), self._feedconj)
def _unique_baselines(self):
"""Map of equivalent baseline lengths, and mask of ones to exclude.
"""
# Construct array of indices
fshape = [self.nfeed, self.nfeed]
f_ind = np.indices(fshape)
# Construct array of baseline separations in complex representation
bl1 = (self.feedpositions[f_ind[0]] - self.feedpositions[f_ind[1]])
bl2 = np.around(bl1[..., 0] + 1.0J * bl1[..., 1], 7)
# Construct array of baseline lengths
blen = np.sum(bl1**2, axis=-1)**0.5
# Create mask of included baselines
mask = np.logical_and(blen >= self.minlength, blen <= self.maxlength)
# Remove the auto correlated baselines between all polarisations
if not self.auto_correlations:
mask = np.logical_and(blen > 0.0, mask)
return _remap_keyarray(bl2, mask), mask
def _unique_beams(self):
"""Map of unique beam pairs, and mask of ones to exclude.
"""
# Construct array of indices
fshape = [self.nfeed, self.nfeed]
bci, bcj = np.broadcast_arrays(self.beamclass[:, np.newaxis], self.beamclass[np.newaxis, :])
beam_map = _merge_keyarray(bci, bcj)
if self.auto_correlations:
beam_mask = np.ones(fshape, dtype=np.bool)
else:
beam_mask = np.logical_not(np.identity(self.nfeed, dtype=np.bool))
return beam_map, beam_mask
def _get_unique(self):
"""Calculate the unique baseline pairs.
All feeds are assumed to be identical. Baselines are identified if
they have the same length, and are selected such that they point East
(to ensure that the sensitivity ends up in positive-m modes).
It is also possible to select only baselines within a particular
length range by setting the `minlength` and `maxlength` properties.
Parameters
----------
fpairs : np.ndarray
An array of all the feed pairs, packed as [[i1, i2, ...], [j1, j2, ...] ].
Returns
-------
baselines : np.ndarray
An array of all the unique pairs. Packed as [ [i1, i2, ...], [j1, j2, ...]].
redundancy : np.ndarray
For each unique pair, give the number of equivalent pairs.
"""
# Fetch and merge map of unique feed pairs
base_map, base_mask = self._unique_baselines()
beam_map, beam_mask = self._unique_beams()
comb_map, comb_mask = _merge_keyarray(base_map, beam_map, mask1=base_mask, mask2=beam_mask)
# Take into account conjugation by identifying
comb_map = np.dstack((comb_map, comb_map.T)).min(axis=-1)
comb_map = _remap_keyarray(comb_map, comb_mask)
return comb_map, comb_mask
def _sort_pairs(self):
"""Re-order keys into a desired sort order.
By default the order is lexicographic in (baseline u, baselines v,
beamclass i, beamclass j).
"""
# Create mask of included pairs, that are not conjugated
tmask = np.logical_and(self._feedmask, np.logical_not(self._feedconj))
uniq = _get_indices(self._feedmap, mask=tmask)
fi, fj = uniq[:, 0], uniq[:, 1]
# Fetch keys by which to sort (lexicographically)
bx = self.feedpositions[fi, 0] - self.feedpositions[fj, 0]
by = self.feedpositions[fi, 1] - self.feedpositions[fj, 1]
ci = self.beamclass[fi]
cj = self.beamclass[fj]
## Sort by constructing a numpy array with the keys as fields, and use
## np.argsort to get the indices
# Create array of keys to sort
dt = np.dtype('f8,f8,i4,i4')
sort_arr = np.zeros(fi.size, dtype=dt)
sort_arr['f0'] = bx
sort_arr['f1'] = by
sort_arr['f2'] = cj
sort_arr['f3'] = ci
# Get map which sorts
sort_ind = np.argsort(sort_arr)
# Invert mapping
tmp_sort_ind = sort_ind.copy()
sort_ind[tmp_sort_ind] = np.arange(sort_ind.size)
# Remap feedmap entries
fm_copy = self._feedmap.copy()
wmask = np.where(self._feedmask)
fm_copy[wmask] = sort_ind[self._feedmap[wmask]]
self._feedmap = fm_copy
#===================================================
#==== Methods for calculating Transfer matrices ====
def transfer_matrices(self, bl_indices, f_indices, global_lmax = True):
"""Calculate the spherical harmonic transfer matrices for baseline and
frequency combinations.
Parameters
----------
bl_indices : array_like
Indices of baselines to calculate.
f_indices : array_like
Indices of frequencies to calculate. Must be broadcastable against
`bl_indices`.
global_lmax : boolean, optional
If set (default), the output size `lside` in (l,m) is big enough to
hold the maximum for the entire telescope. If not set it is only big
enough for the requested set.
Returns
-------
transfer : np.ndarray, dtype=np.complex128
An array containing the transfer functions. The shape is somewhat
complicated, the first indices correspond to the broadcast size of
`bl_indices` and `f_indices`, then there may be some polarisation
indices, then finally the (l,m) indices, range (lside, 2*lside-1).
"""
# Broadcast arrays against each other
bl_indices, f_indices = np.broadcast_arrays(bl_indices, f_indices)
## Check indices are all in range
if out_of_range(bl_indices, 0, self.npairs):
raise Exception("Baseline indices aren't valid")
if out_of_range(f_indices, 0, self.nfreq):
raise Exception("Frequency indices aren't valid")
# Fetch the set of lmax's for the baselines (in order to reduce time
# regenerating Healpix maps)
lmax, mmax = np.ceil(self.l_boost * np.array(max_lm(self.baselines[bl_indices], self.wavelengths[f_indices], self.u_width, self.v_width))).astype(np.int64)
#lmax, mmax = lmax * self.l_boost, mmax * self.l_boost
# Set the size of the (l,m) array to write into
lside = self.lmax if global_lmax else lmax.max()
# Generate the array for the Transfer functions
tshape = bl_indices.shape + (self.num_pol_sky, lside+1, 2*lside+1)
print "Size: %i elements. Memory %f GB." % (np.prod(tshape), 2*np.prod(tshape) * 8.0 / 2**30)
tarray = np.zeros(tshape, dtype=np.complex128)
# Sort the baselines by ascending lmax and iterate through in that
# order, calculating the transfer matrices
for iflat in np.argsort(lmax.flat):
ind = np.unravel_index(iflat, lmax.shape)
trans = self._transfer_single(bl_indices[ind], f_indices[ind], lmax[ind], lside)
## Iterate over pol combinations and copy into transfer array
for pi in range(self.num_pol_sky):
islice = (ind + (pi,) + (slice(None),slice(None)))
tarray[islice] = trans[pi]
return tarray
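    # Illustrative sketch (assumed call, not part of the original module):
    # requesting a single baseline/frequency pair yields a transfer array of
    # shape (1, num_pol_sky, lmax + 1, 2*lmax + 1), e.g.
    #
    #   tel.transfer_matrices(np.array([0]), np.array([0])).shape
    #   # -> (1, num_pol_sky, lmax + 1, 2*lmax + 1)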
def transfer_for_frequency(self, freq):
"""Fetch all transfer matrices for a given frequency.
Parameters
----------
freq : integer
The frequency index.
Returns
-------
transfer : np.ndarray
The transfer matrices. Packed as in `TransitTelescope.transfer_matrices`.
"""
bi = np.arange(self.npairs)
fi = freq * np.ones_like(bi)
return self.transfer_matrices(bi, fi)
def transfer_for_baseline(self, baseline):
"""Fetch all transfer matrices for a given baseline.
Parameters
----------
baseline : integer
The baseline index.
Returns
-------
transfer : np.ndarray
The transfer matrices. Packed as in `TransitTelescope.transfer_matrices`.
"""
fi = np.arange(self.nfreq)
bi = baseline * np.ones_like(fi)
return self.transfer_matrices(bi, fi)
#===================================================
#======== Noise properties of the telescope ========
def tsys(self, f_indices = None):
"""The system temperature.
Currenty has a flat T_sys across the whole bandwidth. Override for
anything more complicated.
Parameters
----------
f_indices : array_like
Indices of frequencies to get T_sys at.
Returns
-------
tsys : array_like
System temperature at requested frequencies.
"""
        if f_indices is None:
freq = self.frequencies
else:
freq = self.frequencies[f_indices]
return np.ones_like(freq) * self.tsys_flat
def noisepower(self, bl_indices, f_indices, ndays = None):
"""Calculate the instrumental noise power spectrum.
Assume we are still within the regime where the power spectrum is white
in `m` modes.
Parameters
----------
bl_indices : array_like
Indices of baselines to calculate.
f_indices : array_like
Indices of frequencies to calculate. Must be broadcastable against
`bl_indices`.
ndays : integer
The number of sidereal days observed.
Returns
-------
noise_ps : np.ndarray
The noise power spectrum.
"""
ndays = self.ndays if not ndays else ndays # Set to value if not set.
# Broadcast arrays against each other
bl_indices, f_indices = np.broadcast_arrays(bl_indices, f_indices)
#bw = np.abs(self.frequencies[1] - self.frequencies[0]) * 1e6
bw = 1.0e6 * (self.freq_upper - self.freq_lower) / self.num_freq
delnu = units.t_sidereal * bw / (2*np.pi)
noisepower = self.tsys(f_indices)**2 / (2 * np.pi * delnu * ndays)
noisebase = noisepower / self.redundancy[bl_indices]
return noisebase
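    # Illustrative sketch (assumed defaults, not part of the original module):
    # for T_sys = 50 K, 8 MHz wide bands and ndays = 733 the per-baseline noise
    # power before the redundancy division works out as
    #
    #   delnu = units.t_sidereal * 8.0e6 / (2 * np.pi)
    #   noisepower = 50.0 ** 2 / (2 * np.pi * delnu * 733)   # then / redundancy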
def noisepower_feedpairs(self, fi, fj, f_indices, m, ndays=None):
ndays = self.ndays if not ndays else ndays
bw = np.abs(self.frequencies[1] - self.frequencies[0]) * 1e6
delnu = units.t_sidereal * bw / (2*np.pi)
noisepower = self.tsys(f_indices)**2 / (2 * np.pi * delnu * ndays)
return np.ones_like(fi) * np.ones_like(fj) * np.ones_like(m) * noisepower / 2.0 # For unpolarised only at the moment.
#===================================================
_nside = None
def _init_trans(self, nside):
## Internal function for generating some common Healpix maps (position,
## horizon). These should need to be generated only when nside changes.
# Angular positions in healpix map of nside
self._nside = nside
self._angpos = hputil.ang_positions(nside)
# The horizon function
self._horizon = visibility.horizon(self._angpos, self.zenith)
#===================================================
#================ ABSTRACT METHODS =================
#===================================================
# Implement to specify feed positions in the telescope.
@abc.abstractproperty
def feedpositions(self):
"""An (nfeed,2) array of the feed positions relative to an arbitary point (in m)"""
return
# Implement to specify the beams of the telescope
@abc.abstractproperty
def beamclass(self):
"""An nfeed array of the class of each beam (identical labels are
considered to have identical beams)."""
return
# Implement to specify feed positions in the telescope.
@abc.abstractproperty
def u_width(self):
"""The approximate physical width (in the u-direction) of the dish/telescope etc, for
calculating the maximum (l,m)."""
return
# Implement to specify feed positions in the telescope.
@abc.abstractproperty
def v_width(self):
"""The approximate physical length (in the v-direction) of the dish/telescope etc, for
calculating the maximum (l,m)."""
return
# The work method which does the bulk of calculating all the transfer matrices.
@abc.abstractmethod
def _transfer_single(self, bl_index, f_index, lmax, lside):
"""Calculate transfer matrix for a single baseline+frequency.
**Abstract method** must be implemented.
Parameters
----------
bl_index : integer
The index of the baseline to calculate.
f_index : integer
The index of the frequency to calculate.
lmax : integer
The maximum *l* we are interested in. Determines accuracy of
spherical harmonic transforms.
lside : integer
The size of array to embed the transfer matrix within.
Returns
-------
transfer : np.ndarray
The transfer matrix, an array of shape (pol_indices, lside,
2*lside-1). Where the `pol_indices` are usually only present if
considering the polarised case.
"""
return
#===================================================
#============== END ABSTRACT METHODS ===============
#===================================================
class UnpolarisedTelescope(TransitTelescope):
"""A base for an unpolarised telescope.
Again, an abstract class, but the only things that require implementing are
the `feedpositions`, `_get_unique` and the `beam` function.
"""
__metaclass__ = abc.ABCMeta
_npol_sky_ = 1
@abc.abstractmethod
def beam(self, feed, freq):
"""Beam for a particular feed.
Parameters
----------
feed : integer
Index for the feed.
freq : integer
Index for the frequency.
Returns
-------
beam : np.ndarray
A Healpix map (of size self._nside) of the beam. Potentially
complex.
"""
return
#===== Implementations of abstract functions =======
def _beam_map_single(self, bl_index, f_index):
# Get beam maps for each feed.
feedi, feedj = self.uniquepairs[bl_index]
beami, beamj = self.beam(feedi, f_index), self.beam(feedj, f_index)
# Get baseline separation and fringe map.
uv = self.baselines[bl_index] / self.wavelengths[f_index]
fringe = visibility.fringe(self._angpos, self.zenith, uv)
# Beam solid angle (integrate over beam^2 - equal area pixels)
omega_A = (np.abs(beami) * np.abs(beamj) * self._horizon).sum() * (4*np.pi / beami.size)
# Calculate the complex visibility
cvis = self._horizon * fringe * beami * beamj / omega_A
return cvis
def _transfer_single(self, bl_index, f_index, lmax, lside):
if self._nside != hputil.nside_for_lmax(lmax, accuracy_boost=self.accuracy_boost):
self._init_trans(hputil.nside_for_lmax(lmax, accuracy_boost=self.accuracy_boost))
cvis = self._beam_map_single(bl_index, f_index)
# Perform the harmonic transform to get the transfer matrix (conj is correct - see paper)
btrans = hputil.sphtrans_complex(cvis.conj(), centered = False, lmax = lmax, lside=lside).conj()
return [ btrans ]
#===================================================
def noisepower(self, bl_indices, f_indices, ndays = None):
"""Calculate the instrumental noise power spectrum.
Assume we are still within the regime where the power spectrum is white
in `m` modes.
Parameters
----------
bl_indices : array_like
Indices of baselines to calculate.
f_indices : array_like
Indices of frequencies to calculate. Must be broadcastable against
`bl_indices`.
ndays : integer
The number of sidereal days observed.
Returns
-------
noise_ps : np.ndarray
The noise power spectrum.
"""
bnoise = TransitTelescope.noisepower(self, bl_indices, f_indices, ndays)
return bnoise[..., np.newaxis] * 0.5 # Correction for unpolarisedness
class PolarisedTelescope(TransitTelescope):
"""A base for a polarised telescope.
Again, an abstract class, but the only things that require implementing are
the `feedpositions`, `_get_unique` and the beam functions `beamx` and `beamy`.
Abstract Methods
----------------
beamx, beamy : methods
Routines giving the field pattern for the x and y feeds.
"""
__metaclass__ = abc.ABCMeta
_npol_sky_ = 4
def _beam_map_single(self, bl_index, f_index):
p_stokes = [ 0.5 * np.array([[1.0, 0.0], [0.0, 1.0]]),
0.5 * np.array([[1.0, 0.0], [0.0, -1.0]]),
0.5 * np.array([[0.0, 1.0], [1.0, 0.0]]),
0.5 * np.array([[0.0, -1.0J], [1.0J, 0.0]]) ]
# Get beam maps for each feed.
feedi, feedj = self.uniquepairs[bl_index]
beami, beamj = self.beam(feedi, f_index), self.beam(feedj, f_index)
# Get baseline separation and fringe map.
uv = self.baselines[bl_index] / self.wavelengths[f_index]
fringe = visibility.fringe(self._angpos, self.zenith, uv)
pow_stokes = [ np.sum(beami * np.dot(beamj, polproj), axis=1) * self._horizon for polproj in p_stokes]
pxarea = (4*np.pi / beami.shape[0])
om_i = np.sum(np.abs(beami)**2 * self._horizon[:, np.newaxis]) * pxarea
om_j = np.sum(np.abs(beamj)**2 * self._horizon[:, np.newaxis]) * pxarea
omega_A = (om_i * om_j)**0.5
cv_stokes = [ p * (2 * fringe / omega_A) for p in pow_stokes ]
return cv_stokes
#===== Implementations of abstract functions =======
def _transfer_single(self, bl_index, f_index, lmax, lside):
if self._nside != hputil.nside_for_lmax(lmax):
self._init_trans(hputil.nside_for_lmax(lmax))
bmap = self._beam_map_single(bl_index, f_index)
btrans = [ pb.conj() for pb in hputil.sphtrans_complex_pol([bm.conj() for bm in bmap], centered = False, lmax = int(lmax), lside=lside) ]
return btrans
#===================================================
class SimpleUnpolarisedTelescope(UnpolarisedTelescope):
"""A base for a unpolarised telescope.
Again, an abstract class, but the only things that require implementing are
the `feedpositions`, `_get_unique` and the beam functions `beamx` and `beamy`.
Abstract Methods
----------------
beam : method
Routines giving the field pattern for the feeds.
"""
__metaclass__ = abc.ABCMeta
@property
def beamclass(self):
"""Simple beam mode of single polarisation feeds."""
return np.zeros(self._single_feedpositions.shape[0], dtype=np.int)
@abc.abstractproperty
def _single_feedpositions(self):
"""An (nfeed,2) array of the feed positions relative to an arbitrary point (in m)"""
return
@property
def feedpositions(self):
return self._single_feedpositions
class SimplePolarisedTelescope(PolarisedTelescope):
"""A base for a polarised telescope.
Again, an abstract class, but the only things that require implementing are
the `feedpositions`, `_get_unique` and the beam functions `beamx` and `beamy`.
Abstract Methods
----------------
beamx, beamy : methods
Routines giving the field pattern for the x and y feeds.
"""
__metaclass__ = abc.ABCMeta
@property
def beamclass(self):
"""Simple beam mode of dual polarisation feeds."""
nsfeed = self._single_feedpositions.shape[0]
return np.concatenate((np.zeros(nsfeed), np.ones(nsfeed))).astype(np.int)
def beam(self, feed, freq):
if self.beamclass[feed] % 2 == 0:
return self.beamx(feed, freq)
else:
return self.beamy(feed, freq)
@abc.abstractproperty
def _single_feedpositions(self):
"""An (nfeed,2) array of the feed positions relative to an arbitrary point (in m)"""
return
@property
def feedpositions(self):
return np.concatenate((self._single_feedpositions, self._single_feedpositions))
@abc.abstractmethod
def beamx(self, feed, freq):
"""Beam for the X polarisation feed.
Parameters
----------
feed : integer
Index for the feed.
freq : integer
Index for the frequency.
Returns
-------
beam : np.ndarray
Healpix maps (of size [self._nside, 2]) of the field pattern in the
theta and phi directions.
"""
@abc.abstractmethod
def beamy(self, feed, freq):
"""Beam for the Y polarisation feed.
Parameters
----------
feed : integer
Index for the feed.
freq : integer
Index for the frequency.
Returns
-------
beam : np.ndarray
Healpix maps (of size [self._nside, 2]) of the field pattern in the
theta and phi directions.
"""
| gpl-2.0 | 1,637,115,405,818,776,000 | 30.564531 | 163 | 0.592911 | false |
you21979/phantomjs | src/breakpad/src/tools/gyp/test/sibling/gyptest-all.py | 151 | 1061 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('build/all.gyp', chdir='src')
test.build('build/all.gyp', test.ALL, chdir='src')
chdir = 'src/build'
# The top-level Makefile is in the directory where gyp was run.
# TODO(mmoss) Should the Makefile go in the directory of the passed in .gyp
# file? What about when passing in multiple .gyp files? Would sub-project
# Makefiles (see http://codereview.chromium.org/340008 comments) solve this?
if test.format == 'make':
chdir = 'src'
if test.format == 'xcode':
chdir = 'src/prog1'
test.run_built_executable('prog1',
chdir=chdir,
stdout="Hello from prog1.c\n")
if test.format == 'xcode':
chdir = 'src/prog2'
test.run_built_executable('prog2',
chdir=chdir,
stdout="Hello from prog2.c\n")
test.pass_test()
| bsd-3-clause | 9,062,123,375,338,552,000 | 26.205128 | 76 | 0.63902 | false |
ovresko/erpnext | erpnext/education/doctype/fee_schedule/fee_schedule.py | 7 | 5324 | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, erpnext
from frappe.model.document import Document
from frappe.model.mapper import get_mapped_doc
from frappe.utils import money_in_words
from frappe.utils import cint, flt, cstr
from frappe.utils.background_jobs import enqueue
from frappe import _
class FeeSchedule(Document):
def onload(self):
info = self.get_dashboard_info()
self.set_onload('dashboard_info', info)
def get_dashboard_info(self):
info = {
"total_paid": 0,
"total_unpaid": 0,
"currency": erpnext.get_company_currency(self.company)
}
fees_amount = frappe.db.sql("""select sum(grand_total), sum(outstanding_amount) from tabFees
where fee_schedule=%s and docstatus=1""", (self.name))
if fees_amount:
info["total_paid"] = flt(fees_amount[0][0]) - flt(fees_amount[0][1])
info["total_unpaid"] = flt(fees_amount[0][1])
return info
def validate(self):
self.calculate_total_and_program()
def calculate_total_and_program(self):
no_of_students = 0
for d in self.student_groups:
# if not d.total_students:
d.total_students = get_total_students(d.student_group, self.academic_year,
self.academic_term, self.student_category)
no_of_students += cint(d.total_students)
# validate the program of fee structure and student groups
student_group_program = frappe.db.get_value("Student Group", d.student_group, "program")
if self.program and student_group_program and self.program != student_group_program:
frappe.msgprint(_("Program in the Fee Structure and Student Group {0} are different.")
.format(d.student_group))
self.grand_total = no_of_students*self.total_amount
self.grand_total_in_words = money_in_words(self.grand_total)
def create_fees(self):
self.db_set("fee_creation_status", "In Process")
frappe.publish_realtime("fee_schedule_progress",
{"progress": "0", "reload": 1}, user=frappe.session.user)
total_records = sum([int(d.total_students) for d in self.student_groups])
if total_records > 10:
frappe.msgprint(_('''Fee records will be created in the background.
In case of any error the error message will be updated in the Schedule.'''))
enqueue(generate_fee, queue='default', timeout=6000, event='generate_fee',
fee_schedule=self.name)
else:
generate_fee(self.name)
def generate_fee(fee_schedule):
doc = frappe.get_doc("Fee Schedule", fee_schedule)
error = False
total_records = sum([int(d.total_students) for d in doc.student_groups])
created_records = 0
if not total_records:
frappe.throw(_("Please setup Students under Student Groups"))
for d in doc.student_groups:
students = get_students(d.student_group, doc.academic_year, doc.academic_term, doc.student_category)
for student in students:
try:
fees_doc = get_mapped_doc("Fee Schedule", fee_schedule, {
"Fee Schedule": {
"doctype": "Fees",
"field_map": {
"name": "Fee Schedule"
}
}
})
fees_doc.student = student.student
fees_doc.student_name = student.student_name
fees_doc.program = student.program
fees_doc.student_batch = student.student_batch_name
fees_doc.send_payment_request = doc.send_email
fees_doc.save()
fees_doc.submit()
created_records += 1
frappe.publish_realtime("fee_schedule_progress", {"progress": str(int(created_records * 100/total_records))}, user=frappe.session.user)
except Exception as e:
error = True
err_msg = frappe.local.message_log and "\n\n".join(frappe.local.message_log) or cstr(e)
if error:
frappe.db.rollback()
frappe.db.set_value("Fee Schedule", fee_schedule, "fee_creation_status", "Failed")
frappe.db.set_value("Fee Schedule", fee_schedule, "error_log", err_msg)
else:
frappe.db.set_value("Fee Schedule", fee_schedule, "fee_creation_status", "Successful")
frappe.db.set_value("Fee Schedule", fee_schedule, "error_log", None)
frappe.publish_realtime("fee_schedule_progress",
{"progress": "100", "reload": 1}, user=frappe.session.user)
def get_students(student_group, academic_year, academic_term=None, student_category=None):
conditions = ""
if student_category:
conditions = " and pe.student_category='{}'".format(frappe.db.escape(student_category))
if academic_term:
conditions = " and pe.academic_term='{}'".format(frappe.db.escape(academic_term))
students = frappe.db.sql("""
select pe.student, pe.student_name, pe.program, pe.student_batch_name
from `tabStudent Group Student` sgs, `tabProgram Enrollment` pe
where
pe.student = sgs.student and pe.academic_year = %s
and sgs.parent = %s and sgs.active = 1
{conditions}
""".format(conditions=conditions), (academic_year, student_group), as_dict=1)
return students
@frappe.whitelist()
def get_total_students(student_group, academic_year, academic_term=None, student_category=None):
total_students = get_students(student_group, academic_year, academic_term, student_category)
return len(total_students)
@frappe.whitelist()
def get_fee_structure(source_name,target_doc=None):
fee_request = get_mapped_doc("Fee Structure", source_name,
{"Fee Structure": {
"doctype": "Fee Schedule"
}}, ignore_permissions=True)
return fee_request
| gpl-3.0 | -6,626,682,885,636,041,000 | 35.217687 | 139 | 0.710368 | false |
iiisthu/sparkSdn | python/pyspark/conf.py | 4 | 5745 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
>>> from pyspark.conf import SparkConf
>>> from pyspark.context import SparkContext
>>> conf = SparkConf()
>>> conf.setMaster("local").setAppName("My app")
<pyspark.conf.SparkConf object at ...>
>>> conf.get("spark.master")
u'local'
>>> conf.get("spark.app.name")
u'My app'
>>> sc = SparkContext(conf=conf)
>>> sc.master
u'local'
>>> sc.appName
u'My app'
>>> sc.sparkHome == None
True
>>> conf = SparkConf()
>>> conf.setSparkHome("/path")
<pyspark.conf.SparkConf object at ...>
>>> conf.get("spark.home")
u'/path'
>>> conf.setExecutorEnv("VAR1", "value1")
<pyspark.conf.SparkConf object at ...>
>>> conf.setExecutorEnv(pairs = [("VAR3", "value3"), ("VAR4", "value4")])
<pyspark.conf.SparkConf object at ...>
>>> conf.get("spark.executorEnv.VAR1")
u'value1'
>>> print conf.toDebugString()
spark.executorEnv.VAR1=value1
spark.executorEnv.VAR3=value3
spark.executorEnv.VAR4=value4
spark.home=/path
>>> sorted(conf.getAll(), key=lambda p: p[0])
[(u'spark.executorEnv.VAR1', u'value1'), (u'spark.executorEnv.VAR3', u'value3'), (u'spark.executorEnv.VAR4', u'value4'), (u'spark.home', u'/path')]
"""
class SparkConf(object):
"""
Configuration for a Spark application. Used to set various Spark
parameters as key-value pairs.
Most of the time, you would create a SparkConf object with
C{SparkConf()}, which will load values from C{spark.*} Java system
properties as well. In this case, any parameters you set directly on
the C{SparkConf} object take priority over system properties.
For unit tests, you can also call C{SparkConf(false)} to skip
loading external settings and get the same configuration no matter
what the system properties are.
All setter methods in this class support chaining. For example,
you can write C{conf.setMaster("local").setAppName("My app")}.
Note that once a SparkConf object is passed to Spark, it is cloned
and can no longer be modified by the user.
"""
def __init__(self, loadDefaults=True, _jvm=None):
"""
Create a new Spark configuration.
@param loadDefaults: whether to load values from Java system
properties (True by default)
@param _jvm: internal parameter used to pass a handle to the
Java VM; does not need to be set by users
"""
from pyspark.context import SparkContext
SparkContext._ensure_initialized()
_jvm = _jvm or SparkContext._jvm
self._jconf = _jvm.SparkConf(loadDefaults)
def set(self, key, value):
"""Set a configuration property."""
self._jconf.set(key, unicode(value))
return self
def setMaster(self, value):
"""Set master URL to connect to."""
self._jconf.setMaster(value)
return self
def setAppName(self, value):
"""Set application name."""
self._jconf.setAppName(value)
return self
def setSparkHome(self, value):
"""Set path where Spark is installed on worker nodes."""
self._jconf.setSparkHome(value)
return self
def setExecutorEnv(self, key=None, value=None, pairs=None):
"""Set an environment variable to be passed to executors."""
if (key != None and pairs != None) or (key == None and pairs == None):
raise Exception("Either pass one key-value pair or a list of pairs")
elif key != None:
self._jconf.setExecutorEnv(key, value)
elif pairs != None:
for (k, v) in pairs:
self._jconf.setExecutorEnv(k, v)
return self
def setAll(self, pairs):
"""
Set multiple parameters, passed as a list of key-value pairs.
@param pairs: list of key-value pairs to set
"""
for (k, v) in pairs:
self._jconf.set(k, v)
return self
def get(self, key, defaultValue=None):
"""Get the configured value for some key, or return a default otherwise."""
if defaultValue == None: # Py4J doesn't call the right get() if we pass None
if not self._jconf.contains(key):
return None
return self._jconf.get(key)
else:
return self._jconf.get(key, defaultValue)
def getAll(self):
"""Get all values as a list of key-value pairs."""
pairs = []
for elem in self._jconf.getAll():
pairs.append((elem._1(), elem._2()))
return pairs
def contains(self, key):
"""Does this configuration contain a given key?"""
return self._jconf.contains(key)
def toDebugString(self):
"""
Returns a printable version of the configuration, as a list of
key=value pairs, one per line.
"""
return self._jconf.toDebugString()
def _test():
import doctest
(failure_count, test_count) = doctest.testmod(optionflags=doctest.ELLIPSIS)
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 | -854,066,612,910,973,700 | 32.994083 | 147 | 0.646127 | false |
mfjb/scikit-learn | sklearn/feature_selection/rfe.py | 137 | 17066 | # Authors: Alexandre Gramfort <[email protected]>
# Vincent Michel <[email protected]>
# Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import warnings
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import check_cv
from ..cross_validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
absolute weights are the smallest are pruned from the current set features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
The following example shows how to retrieve the 5 right informative
features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
estimator_params=None, verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.estimator_params = estimator_params
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features / 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.estimator_params:
estimator.set_params(**self.estimator_params)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
elif hasattr(estimator, 'feature_importances_'):
coefs = estimator.feature_importances_
else:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(coefs))
# for sparse case ranks is matrix
ranks = np.ravel(ranks)
# Eliminate the worse features
threshold = min(step, np.sum(support_) - n_features_to_select)
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
return self.estimator_.predict_log_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int or cross-validation generator, optional (default=None)
If int, it is the number of folds.
If None, 3-fold cross-validation is performed by default.
Specific cross-validation objects can also be passed, see
`sklearn.cross_validation module` for details.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
``grid_scores_[i]`` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
where step is the number of features removed at each iteration.
Examples
--------
    The following example shows how to retrieve the 5 informative features
    in the Friedman #1 dataset, which are not known a priori.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None,
estimator_params=None, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. "
"The parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
# Initialization
cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
n_features_to_select = 1
# Determine the number of subsets of features
scores = []
# Cross-validation
for n, (train, test) in enumerate(cv):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params,
verbose=self.verbose - 1)
rfe._fit(X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer))
scores.append(np.array(rfe.scores_[::-1]).reshape(1, -1))
scores = np.sum(np.concatenate(scores, 0), 0)
# The index in 'scores' when 'n_features' features are selected
n_feature_index = np.ceil((n_features - n_features_to_select) /
float(self.step))
n_features_to_select = max(n_features_to_select,
n_features - ((n_feature_index -
np.argmax(scores)) *
self.step))
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(self.transform(X), y)
        # Note that 'n' ends at len(cv) - 1 after the loop above, so the
        # scores summed over all folds are normalized by len(cv) here
self.grid_scores_ = scores / len(cv)
return self
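if __name__ == '__main__':
    # Illustrative sketch (not part of the original module): recursive feature
    # elimination on the Friedman #1 regression problem, mirroring the class
    # docstring examples above. The sample sizes and kernel choice are
    # assumptions made only for this demo.
    from sklearn.datasets import make_friedman1
    from sklearn.svm import SVR
    X_demo, y_demo = make_friedman1(n_samples=50, n_features=10, random_state=0)
    demo_selector = RFE(SVR(kernel="linear"), n_features_to_select=5, step=1)
    demo_selector.fit(X_demo, y_demo)
    print(demo_selector.support_)   # boolean mask of the 5 selected features
    print(demo_selector.ranking_)   # rank 1 marks the selected features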
| bsd-3-clause | 9,013,960,329,241,268,000 | 37.436937 | 79 | 0.607641 | false |
citrusleaf/dd-agent | utils/flare.py | 5 | 20834 | # stdlib
import atexit
import cStringIO as StringIO
from functools import partial
import glob
try:
import grp
except ImportError:
# The module only exists on Unix platforms
grp = None
import logging
import os
try:
import pwd
except ImportError:
# Same as above (exists on Unix platforms only)
pwd = None
import re
import stat
import subprocess
import sys
import tarfile
import tempfile
from time import strftime
import traceback
# 3p
import requests
# DD imports
from checks.check_status import CollectorStatus, DogstatsdStatus, ForwarderStatus
from config import (
check_yaml,
get_confd_path,
get_config,
get_config_path,
get_logging_config,
get_url_endpoint,
)
from jmxfetch import JMXFetch
from util import get_hostname
from utils.jmx import jmx_command, JMXFiles
from utils.platform import Platform
# Globals
log = logging.getLogger(__name__)
def configcheck():
all_valid = True
for conf_path in glob.glob(os.path.join(get_confd_path(), "*.yaml")):
basename = os.path.basename(conf_path)
try:
check_yaml(conf_path)
except Exception, e:
all_valid = False
print "%s contains errors:\n %s" % (basename, e)
else:
print "%s is valid" % basename
if all_valid:
print "All yaml files passed. You can now run the Datadog agent."
return 0
else:
print("Fix the invalid yaml files above in order to start the Datadog agent. "
"A useful external tool for yaml parsing can be found at "
"http://yaml-online-parser.appspot.com/")
return 1
class Flare(object):
"""
Compress all important logs and configuration files for debug,
and then send them to Datadog (which transfers them to Support)
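    A minimal usage sketch (illustrative only; it assumes a configured agent
    with a valid api_key in datadog.conf and network access to Datadog)::
        Flare.check_user_rights()
        flare = Flare(cmdline=True, case_id=None)
        flare.collect()
        flare.upload()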
"""
DATADOG_SUPPORT_URL = '/support/flare'
PASSWORD_REGEX = re.compile('( *(\w|_)*pass(word)?:).+')
COMMENT_REGEX = re.compile('^ *#.*')
APIKEY_REGEX = re.compile('^api_key: *\w+(\w{5})$')
REPLACE_APIKEY = r'api_key: *************************\1'
COMPRESSED_FILE = 'datadog-agent-{0}.tar.bz2'
# We limit to 10MB arbitrarily
MAX_UPLOAD_SIZE = 10485000
TIMEOUT = 60
def __init__(self, cmdline=False, case_id=None):
self._case_id = case_id
self._cmdline = cmdline
self._init_tarfile()
self._init_permissions_file()
self._save_logs_path()
self._config = get_config()
self._api_key = self._config.get('api_key')
self._url = "{0}{1}".format(
get_url_endpoint(self._config.get('dd_url'), endpoint_type='flare'),
self.DATADOG_SUPPORT_URL
)
self._hostname = get_hostname(self._config)
self._prefix = "datadog-{0}".format(self._hostname)
# On Unix system, check that the user is root (to call supervisorctl & status)
# Otherwise emit a warning, and ask for confirmation
@staticmethod
def check_user_rights():
if Platform.is_linux() and not os.geteuid() == 0:
log.warning("You are not root, some information won't be collected")
choice = raw_input('Are you sure you want to continue [y/N]? ')
if choice.strip().lower() not in ['yes', 'y']:
print 'Aborting'
sys.exit(1)
else:
log.warn('Your user has to have at least read access'
' to the logs and conf files of the agent')
# Collect all conf and logs files and compress them
def collect(self):
if not self._api_key:
raise Exception('No api_key found')
log.info("Collecting logs and configuration files:")
self._add_logs_tar()
self._add_conf_tar()
log.info(" * datadog-agent configcheck output")
self._add_command_output_tar('configcheck.log', configcheck)
log.info(" * datadog-agent status output")
self._add_command_output_tar('status.log', self._supervisor_status)
log.info(" * datadog-agent info output")
self._add_command_output_tar('info.log', self._info_all)
self._add_jmxinfo_tar()
log.info(" * pip freeze")
self._add_command_output_tar('freeze.log', self._pip_freeze,
command_desc="pip freeze --no-cache-dir")
log.info(" * log permissions on collected files")
self._permissions_file.close()
self._add_file_tar(self._permissions_file.name, 'permissions.log',
log_permissions=False)
log.info("Saving all files to {0}".format(self._tar_path))
self._tar.close()
# Upload the tar file
def upload(self):
self._check_size()
if self._cmdline:
self._ask_for_confirmation()
email = self._ask_for_email()
log.info("Uploading {0} to Datadog Support".format(self._tar_path))
url = self._url
if self._case_id:
url = '{0}/{1}'.format(self._url, str(self._case_id))
url = "{0}?api_key={1}".format(url, self._api_key)
files = {'flare_file': open(self._tar_path, 'rb')}
data = {
'case_id': self._case_id,
'hostname': self._hostname,
'email': email
}
self._resp = requests.post(url, files=files, data=data,
timeout=self.TIMEOUT)
self._analyse_result()
# Start by creating the tar file which will contain everything
def _init_tarfile(self):
# Default temp path
self._tar_path = os.path.join(
tempfile.gettempdir(),
self.COMPRESSED_FILE.format(strftime("%Y-%m-%d-%H-%M-%S"))
)
if os.path.exists(self._tar_path):
os.remove(self._tar_path)
self._tar = tarfile.open(self._tar_path, 'w:bz2')
# Create a file to log permissions on collected files and write header line
def _init_permissions_file(self):
self._permissions_file = tempfile.NamedTemporaryFile(mode='w', prefix='dd', delete=False)
if Platform.is_unix():
self._permissions_file_format = "{0:50} | {1:5} | {2:10} | {3:10}\n"
header = self._permissions_file_format.format("File path", "mode", "owner", "group")
self._permissions_file.write(header)
self._permissions_file.write('-'*len(header) + "\n")
else:
self._permissions_file.write("Not implemented: file permissions are only logged on Unix platforms")
# Save logs file paths
def _save_logs_path(self):
prefix = ''
if Platform.is_windows():
prefix = 'windows_'
config = get_logging_config()
self._collector_log = config.get('{0}collector_log_file'.format(prefix))
self._forwarder_log = config.get('{0}forwarder_log_file'.format(prefix))
self._dogstatsd_log = config.get('{0}dogstatsd_log_file'.format(prefix))
self._jmxfetch_log = config.get('jmxfetch_log_file')
# Add logs to the tarfile
def _add_logs_tar(self):
self._add_log_file_tar(self._collector_log)
self._add_log_file_tar(self._forwarder_log)
self._add_log_file_tar(self._dogstatsd_log)
self._add_log_file_tar(self._jmxfetch_log)
self._add_log_file_tar(
"{0}/*supervisord.log".format(os.path.dirname(self._collector_log))
)
def _add_log_file_tar(self, file_path):
for f in glob.glob('{0}*'.format(file_path)):
if self._can_read(f):
self._add_file_tar(
f,
os.path.join('log', os.path.basename(f))
)
# Collect all conf
def _add_conf_tar(self):
conf_path = get_config_path()
if self._can_read(conf_path):
self._add_file_tar(
self._strip_comment(conf_path),
os.path.join('etc', 'datadog.conf'),
original_file_path=conf_path
)
if not Platform.is_windows():
supervisor_path = os.path.join(
os.path.dirname(get_config_path()),
'supervisor.conf'
)
if self._can_read(supervisor_path):
self._add_file_tar(
self._strip_comment(supervisor_path),
os.path.join('etc', 'supervisor.conf'),
original_file_path=supervisor_path
)
for file_path in glob.glob(os.path.join(get_confd_path(), '*.yaml')) +\
glob.glob(os.path.join(get_confd_path(), '*.yaml.default')):
if self._can_read(file_path, output=False):
self._add_clean_confd(file_path)
# Collect JMXFetch-specific info and save to jmxinfo directory if jmx config
# files are present and valid
def _add_jmxinfo_tar(self):
_, _, should_run_jmx = self._capture_output(self._should_run_jmx)
if should_run_jmx:
# status files (before listing beans because executing jmxfetch overwrites status files)
for file_name, file_path in [
(JMXFiles._STATUS_FILE, JMXFiles.get_status_file_path()),
(JMXFiles._PYTHON_STATUS_FILE, JMXFiles.get_python_status_file_path())
]:
if self._can_read(file_path, warn=False):
self._add_file_tar(
file_path,
os.path.join('jmxinfo', file_name)
)
# beans lists
for command in ['list_matching_attributes', 'list_everything']:
log.info(" * datadog-agent jmx {0} output".format(command))
self._add_command_output_tar(
os.path.join('jmxinfo', '{0}.log'.format(command)),
partial(self._jmx_command_call, command)
)
# java version
log.info(" * java -version output")
_, _, java_bin_path = self._capture_output(
lambda: JMXFetch.get_configuration(get_confd_path())[2] or 'java')
self._add_command_output_tar(
os.path.join('jmxinfo', 'java_version.log'),
lambda: self._java_version(java_bin_path),
command_desc="{0} -version".format(java_bin_path)
)
# Add a file to the tar and append the file's rights to the permissions log (on Unix)
# If original_file_path is passed, the file_path will be added to the tar but the original file's
# permissions are logged
def _add_file_tar(self, file_path, target_path, log_permissions=True, original_file_path=None):
target_full_path = os.path.join(self._prefix, target_path)
if log_permissions and Platform.is_unix():
stat_file_path = original_file_path or file_path
file_stat = os.stat(stat_file_path)
# The file mode is returned in binary format, convert it to a more readable octal string
mode = oct(stat.S_IMODE(file_stat.st_mode))
try:
uname = pwd.getpwuid(file_stat.st_uid).pw_name
except KeyError:
uname = str(file_stat.st_uid)
try:
gname = grp.getgrgid(file_stat.st_gid).gr_name
except KeyError:
gname = str(file_stat.st_gid)
self._permissions_file.write(self._permissions_file_format.format(stat_file_path, mode, uname, gname))
self._tar.add(file_path, target_full_path)
# Returns whether JMXFetch should run or not
def _should_run_jmx(self):
jmx_process = JMXFetch(get_confd_path(), self._config)
jmx_process.configure(clean_status_file=False)
return jmx_process.should_run()
# Check if the file is readable (and log it)
@classmethod
def _can_read(cls, f, output=True, warn=True):
if os.access(f, os.R_OK):
if output:
log.info(" * {0}".format(f))
return True
else:
if warn:
log.warn(" * not readable - {0}".format(f))
return False
# Return path to a temp file without comment
def _strip_comment(self, file_path):
_, temp_path = tempfile.mkstemp(prefix='dd')
atexit.register(os.remove, temp_path)
with open(temp_path, 'w') as temp_file:
with open(file_path, 'r') as orig_file:
for line in orig_file.readlines():
if not self.COMMENT_REGEX.match(line):
temp_file.write(re.sub(self.APIKEY_REGEX, self.REPLACE_APIKEY, line))
return temp_path
# Remove password before collecting the file
def _add_clean_confd(self, file_path):
basename = os.path.basename(file_path)
temp_path, password_found = self._strip_password(file_path)
log.info(" * {0}{1}".format(file_path, password_found))
self._add_file_tar(
temp_path,
os.path.join('etc', 'conf.d', basename),
original_file_path=file_path
)
# Return path to a temp file without password and comment
def _strip_password(self, file_path):
_, temp_path = tempfile.mkstemp(prefix='dd')
atexit.register(os.remove, temp_path)
with open(temp_path, 'w') as temp_file:
with open(file_path, 'r') as orig_file:
password_found = ''
for line in orig_file.readlines():
if self.PASSWORD_REGEX.match(line):
line = re.sub(self.PASSWORD_REGEX, r'\1 ********', line)
password_found = ' - this file contains a password which '\
'has been removed in the version collected'
if not self.COMMENT_REGEX.match(line):
temp_file.write(line)
return temp_path, password_found
# Add output of the command to the tarfile
def _add_command_output_tar(self, name, command, command_desc=None):
out, err, _ = self._capture_output(command, print_exc_to_stderr=False)
_, temp_path = tempfile.mkstemp(prefix='dd')
with open(temp_path, 'w') as temp_file:
if command_desc:
temp_file.write(">>>> CMD <<<<\n")
temp_file.write(command_desc)
temp_file.write("\n")
temp_file.write(">>>> STDOUT <<<<\n")
temp_file.write(out.getvalue())
out.close()
temp_file.write(">>>> STDERR <<<<\n")
temp_file.write(err.getvalue())
err.close()
self._add_file_tar(temp_path, name, log_permissions=False)
os.remove(temp_path)
# Capture the output of a command (from both std streams and loggers) and the
# value returned by the command
def _capture_output(self, command, print_exc_to_stderr=True):
backup_out, backup_err = sys.stdout, sys.stderr
out, err = StringIO.StringIO(), StringIO.StringIO()
backup_handlers = logging.root.handlers[:]
logging.root.handlers = [logging.StreamHandler(out)]
sys.stdout, sys.stderr = out, err
return_value = None
try:
return_value = command()
except Exception:
# Print the exception to either stderr or `err`
traceback.print_exc(file=backup_err if print_exc_to_stderr else err)
finally:
# Stop capturing in a `finally` block to reset std streams' and loggers'
# behaviors no matter what
sys.stdout, sys.stderr = backup_out, backup_err
logging.root.handlers = backup_handlers
return out, err, return_value
# Print supervisor status (and nothing on windows)
def _supervisor_status(self):
if Platform.is_windows():
print 'Windows - status not implemented'
else:
agent_exec = self._get_path_agent_exec()
print '{0} status'.format(agent_exec)
self._print_output_command([agent_exec, 'status'])
supervisor_exec = self._get_path_supervisor_exec()
print '{0} status'.format(supervisor_exec)
self._print_output_command([supervisor_exec,
'-c', self._get_path_supervisor_conf(),
'status'])
# Find the agent exec (package or source)
def _get_path_agent_exec(self):
if Platform.is_mac():
agent_exec = '/opt/datadog-agent/bin/datadog-agent'
else:
agent_exec = '/etc/init.d/datadog-agent'
if not os.path.isfile(agent_exec):
agent_exec = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'../../bin/agent'
)
return agent_exec
# Find the supervisor exec (package or source)
def _get_path_supervisor_exec(self):
supervisor_exec = '/opt/datadog-agent/bin/supervisorctl'
if not os.path.isfile(supervisor_exec):
supervisor_exec = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'../../venv/bin/supervisorctl'
)
return supervisor_exec
# Find the supervisor conf (package or source)
def _get_path_supervisor_conf(self):
if Platform.is_mac():
supervisor_conf = '/opt/datadog-agent/etc/supervisor.conf'
else:
supervisor_conf = '/etc/dd-agent/supervisor.conf'
if not os.path.isfile(supervisor_conf):
supervisor_conf = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'../../supervisord/supervisord.conf'
)
return supervisor_conf
# Print output of command
def _print_output_command(self, command):
try:
status = subprocess.check_output(command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError, e:
status = 'Not able to get output, exit number {0}, exit output:\n'\
'{1}'.format(str(e.returncode), e.output)
print status
# Print info of all agent components
def _info_all(self):
CollectorStatus.print_latest_status(verbose=True)
DogstatsdStatus.print_latest_status(verbose=True)
ForwarderStatus.print_latest_status(verbose=True)
# Call jmx_command with std streams redirection
def _jmx_command_call(self, command):
try:
jmx_command([command], self._config, redirect_std_streams=True)
except Exception, e:
print "Unable to call jmx command {0}: {1}".format(command, e)
# Print java version
def _java_version(self, java_bin_path):
try:
self._print_output_command([java_bin_path, '-version'])
except OSError:
print 'Unable to execute java bin with command: {0}'.format(java_bin_path)
# Run a pip freeze
def _pip_freeze(self):
try:
import pip
pip.main(['freeze', '--no-cache-dir'])
except ImportError:
print 'Unable to import pip'
# Check if the file is not too big before upload
def _check_size(self):
if os.path.getsize(self._tar_path) > self.MAX_UPLOAD_SIZE:
            log.info("{0} won't be uploaded, its size is too large.\n"
                     "You can send it directly to support by mail."
                     .format(self._tar_path))
sys.exit(1)
# Function to ask for confirmation before upload
def _ask_for_confirmation(self):
print '{0} is going to be uploaded to Datadog.'.format(self._tar_path)
choice = raw_input('Do you want to continue [Y/n]? ')
if choice.strip().lower() not in ['yes', 'y', '']:
print 'Aborting (you can still use {0})'.format(self._tar_path)
sys.exit(1)
# Ask for email if needed
def _ask_for_email(self):
        # We ask every time now, as it is also the 'id' used to check
        # that the case is the right one if it exists
return raw_input('Please enter your email: ').lower()
# Print output (success/error) of the request
def _analyse_result(self):
# First catch our custom explicit 400
if self._resp.status_code == 400:
raise Exception('Your request is incorrect: {0}'.format(self._resp.json()['error']))
# Then raise potential 500 and 404
self._resp.raise_for_status()
try:
json_resp = self._resp.json()
# Failed parsing
except ValueError:
            raise Exception('An unknown error has occurred - '
'Please contact support by email')
# Finally, correct
log.info("Your logs were successfully uploaded. For future reference,"
" your internal case id is {0}".format(json_resp['case_id']))
| bsd-3-clause | 3,683,223,257,445,479,000 | 38.68381 | 114 | 0.575214 | false |
OdifYltsaeb/django-guardian | guardian/backends.py | 12 | 2073 | from django.db import models
from guardian.conf import settings
from guardian.exceptions import WrongAppError
from guardian.core import ObjectPermissionChecker
from guardian.models import User
class ObjectPermissionBackend(object):
supports_object_permissions = True
supports_anonymous_user = True
supports_inactive_user = True
def authenticate(self, username, password):
return None
def has_perm(self, user_obj, perm, obj=None):
"""
Returns ``True`` if given ``user_obj`` has ``perm`` for ``obj``. If no
``obj`` is given, ``False`` is returned.
.. note::
Remember, that if user is not *active*, all checks would return
``False``.
        The main difference from Django's ``ModelBackend`` is that we can pass
        an ``obj`` instance here, and ``perm`` doesn't have to contain the
        ``app_label``, as it can be retrieved from the given ``obj``.
        **Inactive user support**
        If the user is authenticated but inactive at the same time, all checks
        always return ``False``.
"""
# Backend checks only object permissions
if obj is None:
return False
# Backend checks only permissions for Django models
if not isinstance(obj, models.Model):
return False
# This is how we support anonymous users - simply try to retrieve User
# instance and perform checks for that predefined user
if not user_obj.is_authenticated():
user_obj = User.objects.get(pk=settings.ANONYMOUS_USER_ID)
# Do not check any further if user is not active
if not user_obj.is_active:
return False
if len(perm.split('.')) > 1:
app_label, perm = perm.split('.')
if app_label != obj._meta.app_label:
raise WrongAppError("Passed perm has app label of '%s' and "
"given obj has '%s'" % (app_label, obj._meta.app_label))
check = ObjectPermissionChecker(user_obj)
return check.has_perm(perm, obj)
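# Illustrative sketch (not part of the original module): how this backend is
# typically enabled and exercised. The model and permission names below are
# assumptions used only for the example.
#
#     # settings.py
#     AUTHENTICATION_BACKENDS = (
#         'django.contrib.auth.backends.ModelBackend',
#         'guardian.backends.ObjectPermissionBackend',
#     )
#
#     # after granting an object permission (e.g. with guardian.shortcuts):
#     joe.has_perm('change_article', article)  # resolved by this backend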
| bsd-2-clause | 4,688,309,511,736,784,000 | 33.55 | 78 | 0.621322 | false |
ChemiKhazi/Sprytile | rx/linq/observable/groupby.py | 3 | 1224 | from rx import Observable
from rx.internal import extensionmethod
@extensionmethod(Observable)
def group_by(self, key_selector, element_selector=None,
key_serializer=None):
"""Groups the elements of an observable sequence according to a
specified key selector function and comparer and selects the resulting
elements by using a specified function.
1 - observable.group_by(lambda x: x.id)
2 - observable.group_by(lambda x: x.id, lambda x: x.name)
3 - observable.group_by(
lambda x: x.id,
lambda x: x.name,
lambda x: str(x))
Keyword arguments:
key_selector -- A function to extract the key for each element.
element_selector -- [Optional] A function to map each source element to
an element in an observable group.
    key_serializer -- [Optional] A function used to serialize the key values
        so that they can be compared for equality.
Returns a sequence of observable groups, each of which corresponds to a
unique key value, containing all elements that share that same key
value.
"""
def duration_selector(x):
return Observable.never()
return self.group_by_until(key_selector, element_selector, duration_selector, key_serializer)
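# Illustrative sketch (not part of the original module): group a stream of
# words by their length and report each group's key. It assumes the standard
# RxPY creation operators (e.g. ``Observable.of``) have been loaded, which
# happens when the ``rx`` package is imported.
#
#     def on_group(group):
#         print("new group with key", group.key)
#
#     source = Observable.of("alpha", "beta", "gamma", "delta")
#     source.group_by(lambda word: len(word)).subscribe(on_group)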
| mit | -6,568,667,219,648,725,000 | 35 | 97 | 0.700163 | false |
codingvirtual/fullstack-p4-conference | utils.py | 384 | 1576 | import json
import os
import time
import uuid
from google.appengine.api import urlfetch
from models import Profile
def getUserId(user, id_type="email"):
if id_type == "email":
return user.email()
if id_type == "oauth":
"""A workaround implementation for getting userid."""
auth = os.getenv('HTTP_AUTHORIZATION')
bearer, token = auth.split()
token_type = 'id_token'
if 'OAUTH_USER_ID' in os.environ:
token_type = 'access_token'
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?%s=%s'
% (token_type, token))
user = {}
wait = 1
for i in range(3):
resp = urlfetch.fetch(url)
if resp.status_code == 200:
user = json.loads(resp.content)
break
elif resp.status_code == 400 and 'invalid_token' in resp.content:
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?%s=%s'
% ('access_token', token))
else:
time.sleep(wait)
wait = wait + i
return user.get('user_id', '')
if id_type == "custom":
# implement your own user_id creation and getting algorythm
# this is just a sample that queries datastore for an existing profile
# and generates an id if profile does not exist for an email
        profile = Profile.query(Profile.mainEmail == user.email()).get()
        if profile:
            return str(profile.key.id())
else:
return str(uuid.uuid1().get_hex())
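# Illustrative sketch (not part of the original module): typical calls from a
# protected endpoint method, where ``user`` is the value returned by
# ``endpoints.get_current_user()``.
#
#     user_id = getUserId(user)                    # email-based id (default)
#     user_id = getUserId(user, id_type="oauth")   # id taken from the OAuth token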
| apache-2.0 | -1,614,123,089,165,855,200 | 34.022222 | 78 | 0.560279 | false |
zzxuanyuan/root | documentation/doxygen/converttonotebook.py | 4 | 35547 | #!/usr/bin/env python
# Author: Pau Miquel i Mir <[email protected]> <[email protected]>>
# Date: July, 2016
#
# DISCLAIMER: This script is a prototype and a work in progress. Indeed, it is possible that
# it may not work for certain tutorials, and that it, or the tutorial, might need to be
# tweaked slightly to ensure full functionality. Please do not hesitate to email the author
# with any questions or with examples that do not work.
#
# HELP IT DOESN'T WORK: Two possible solutions:
# 1. Check that all the types returned by the tutorial are in the gTypesList. If they aren't,
# simply add them.
# 2. If the tutorial takes a long time to execute (more than 90 seconds), add the name of the
# tutorial to the list of long tutorials listLongTutorials, in the function findTimeout.
#
# REQUIREMENTS: This script needs jupyter to be properly installed, as it uses the python
# package nbformat and calls the shell commands `jupyter nbconvert` and `jupyter trust`. The
# rest of the packages used should be included in a standard installation of python. The script
# is intended to be run on a UNIX based system.
#
#
# FUNCTIONING:
# -----------
# The converttonotebook script creates Jupyter notebooks from raw C++ or python files.
# Particularly, it is indicated to convert the ROOT tutorials found in the ROOT
# repository.
#
# The script should be called from bash with the following format:
# python /path/to/script/converttonotebook.py /path/to/<macro>.C /path/to/outdir
#
# Indeed the script takes two arguments, the path to the macro and the path to the directory
# where the notebooks will be created
#
# The script's general functioning is as follows. The macro to be converted is imported as a string.
# A series of modifications are made to this string, for instance delimiting where markdown and
# code cells begin and end. Then, this string is converted into ipynb format using a function
# in the nbformat package. Finally, the notebook is executed and output.
#
# For converting python tutorials it is fairly straightforward. It extracts the description and
# author information from the header and then removes it. It also converts any comment at the
# beginning of a line into a Markdown cell.
#
# For C++ files the process is slightly more complex. The script separates the functions from the
# main code. The main function is identified as it has the same name as the macro file. The other
# functions are considered functions. The main function is "extracted" and presented as main code.
# The helper functions are placed in their own code cell with the %%cpp -d magic to enable function
# defintion. Finally, as with Python macros, relevant information is extracted from the header, and
# newline comments are converted into Markdown cells (unless they are in helper functions).
#
# The script creates an .ipynb version of the macro, with the full output included.
# The files are named:
# <macro>.<C or py>.nbconvert.ipynb
#
# It is called by filter.cxx, which in turn is called by doxygen when processing any file
# in the ROOT repository. filter.cxx only calls convertonotebook.py when the string \notebook
# is found in the header of the tutorial, but this script checks for its presence as well.
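#
# EXAMPLE CALL (illustrative; the macro and output paths below are placeholders):
#   python converttonotebook.py $ROOTSYS/tutorials/hsimple.C /tmp/notebooks
# For a macro whose header contains \notebook, this produces
# /tmp/notebooks/hsimple.C.nbconvert.ipynb with the executed output included.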
import re
import os
import sys
import json
import time
import doctest
import textwrap
import subprocess
from nbformat import v3, v4
from datetime import datetime, date
# List of types that will be considered when looking for a C++ function. If a macro returns a
# type not included on the list, the regular expression will not match it, and thus the function
# will not be properly defined. Thus, any other type returned by function must be added to this list
# for the script to work correctly.
gTypesList = ["void", "int", "Int_t", "TF1", "string", "bool", "double", "float", "char",
"TCanvas", "TTree", "TString", "TSeqCollection", "Double_t", "TFile", "Long64_t", "Bool_t", "TH1",
"RooDataSet", "RooWorkspace" , "HypoTestInverterResult" , "TVectorD" , "TArrayF", "UInt_t"]
# -------------------------------------
# -------- Function definitions--------
# -------------------------------------
def unindenter(string, spaces = 3):
"""
Returns string with each line unindented by 3 spaces. If line isn't indented, it stays the same.
>>> unindenter(" foobar")
'foobar\\n'
>>> unindenter("foobar")
'foobar\\n'
>>> unindenter('''foobar
... foobar
... foobar''')
'foobar\\nfoobar\\nfoobar\\n'
"""
newstring = ''
lines = string.splitlines()
for line in lines:
if line.startswith(spaces*' '):
newstring += (line[spaces:] + "\n")
else:
newstring += (line + "\n")
return newstring
def readHeaderPython(text):
"""
Extract author and description from header, eliminate header from text. Also returns
notebook boolean, which is True if the string \notebook is present in the header
Also determine options (-js, -nodraw, -header) passed in \notebook command, and
return their booleans
>>> readHeaderPython('''## \\file
... ## \\ingroup tutorials
... ## \\\\notebook
... ## This is the description of the tutorial
... ##
... ## \\macro_image
... ## \\macro_code
... ##
... ## \\\\author John Brown
... def tutorialfuncion()''')
('def tutorialfuncion()\\n', 'This is the description of the tutorial\\n\\n\\n', 'John Brown', True, False, False, False)
>>> readHeaderPython('''## \\file
... ## \\ingroup tutorials
... ## \\\\notebook -js
... ## This is the description of the tutorial
... ##
... ## \\macro_image
... ## \\macro_code
... ##
... ## \\\\author John Brown
... def tutorialfuncion()''')
('def tutorialfuncion()\\n', 'This is the description of the tutorial\\n\\n\\n', 'John Brown', True, True, False, False)
>>> readHeaderPython('''## \\file
... ## \\ingroup tutorials
... ## \\\\notebook -nodraw
... ## This is the description of the tutorial
... ##
... ## \\macro_image
... ## \\macro_code
... ##
... ## \\\\author John Brown
... def tutorialfuncion()''')
('def tutorialfuncion()\\n', 'This is the description of the tutorial\\n\\n\\n', 'John Brown', True, False, True, False)
"""
lines = text.splitlines()
description = ''
author = ''
isNotebook = False
isJsroot = False
nodraw = False
needsHeaderFile = False
for i, line in enumerate(lines):
if line.startswith("## \\aut"):
author = line[11:]
elif line.startswith("## \\note"):
isNotebook = True
if "-js" in line:
isJsroot = True
if "-nodraw" in line:
nodraw = True
if "-header" in line:
needsHeaderFile = True
elif line.startswith("##"):
if not line.startswith("## \\") and isNotebook:
description += (line[3:] + '\n')
else:
break
newtext = ''
for line in lines[i:]:
newtext += (line + "\n")
return newtext, description, author, isNotebook, isJsroot, nodraw, needsHeaderFile
def pythonComments(text):
"""
Converts comments delimited by # or ## and on a new line into a markdown cell.
For python files only
>>> pythonComments('''## This is a
... ## multiline comment
... def function()''')
'# <markdowncell>\\n## This is a\\n## multiline comment\\n# <codecell>\\ndef function()\\n'
>>> pythonComments('''def function():
... variable = 5 # Comment not in cell
... # Comment also not in cell''')
'def function():\\n variable = 5 # Comment not in cell\\n # Comment also not in cell\\n'
"""
text = text.splitlines()
newtext = ''
inComment = False
for i, line in enumerate(text):
if line.startswith("#") and not inComment: # True if first line of comment
inComment = True
newtext += "# <markdowncell>\n"
newtext += (line + "\n")
elif inComment and not line.startswith("#"): # True if first line after comment
inComment = False
newtext += "# <codecell>\n"
newtext += (line+"\n")
else:
newtext += (line+"\n")
return newtext
def pythonMainFunction(text):
lines = text.splitlines()
functionContentRe = re.compile('def %s\\(.*\\):' % tutName , flags = re.DOTALL | re.MULTILINE)
newtext = ''
inMainFunction = False
hasMainFunction = False
for line in lines:
if hasMainFunction:
if line.startswith("""if __name__ == "__main__":""") or line.startswith("""if __name__ == '__main__':"""):
break
match = functionContentRe.search(line)
if inMainFunction and not line.startswith(" ") and line != "":
inMainFunction = False
if match:
inMainFunction = True
hasMainFunction = True
else:
if inMainFunction:
newtext += (line[4:] + '\n')
else:
newtext += (line + '\n')
return newtext
def readHeaderCpp(text):
"""
Extract author and description from header, eliminate header from text. Also returns
notebook boolean, which is True if the string \notebook is present in the header
Also determine options (-js, -nodraw, -header) passed in \notebook command, and
return their booleans
>>> readHeaderCpp('''/// \\file
... /// \\ingroup tutorials
... /// \\\\notebook
... /// This is the description of the tutorial
... ///
... /// \\macro_image
... /// \\macro_code
... ///
... /// \\\\author John Brown
... void tutorialfuncion(){}''')
('void tutorialfuncion(){}\\n', '# This is the description of the tutorial\\n# \\n# \\n', 'John Brown', True, False, False, False)
>>> readHeaderCpp('''/// \\file
... /// \\ingroup tutorials
... /// \\\\notebook -js
... /// This is the description of the tutorial
... ///
... /// \\macro_image
... /// \\macro_code
... ///
... /// \\\\author John Brown
... void tutorialfuncion(){}''')
('void tutorialfuncion(){}\\n', '# This is the description of the tutorial\\n# \\n# \\n', 'John Brown', True, True, False, False)
>>> readHeaderCpp('''/// \\file
... /// \\ingroup tutorials
... /// \\\\notebook -nodraw
... /// This is the description of the tutorial
... ///
... /// \\macro_image
... /// \\macro_code
... ///
... /// \\\\author John Brown
... void tutorialfuncion(){}''')
('void tutorialfuncion(){}\\n', '# This is the description of the tutorial\\n# \\n# \\n', 'John Brown', True, False, True, False)
"""
lines = text.splitlines()
description = ''
author = ''
isNotebook = False
isJsroot = False
nodraw = False
needsHeaderFile = False
for i, line in enumerate(lines):
if line.startswith("/// \\aut"):
author = line[12:]
if line.startswith("/// \\note"):
isNotebook = True
if "-js" in line:
isJsroot = True
if "-nodraw" in line:
nodraw = True
if "-header" in line:
needsHeaderFile = True
if line.startswith("///"):
if not line.startswith("/// \\") and isNotebook:
description += ('# ' + line[4:] + '\n')
else:
break
newtext = ''
for line in lines[i:]:
newtext += (line + "\n")
description = description.replace("\\f$", "$")
description = description.replace("\\f[", "$$")
description = description.replace("\\f]", "$$")
return newtext, description, author, isNotebook, isJsroot, nodraw, needsHeaderFile
def cppFunction(text):
"""
Extracts main function for the function enclosure by means of regular expression
>>> cppFunction('''void mainfunction(arguments = values){
... content of function
... which spans
... several lines
... }''')
'\\n content of function\\n which spans\\n several lines\\n'
>>> cppFunction('''void mainfunction(arguments = values)
... {
... content of function
... which spans
... several lines
... }''')
'\\n content of function\\n which spans\\n several lines\\n'
>>> cppFunction('''void mainfunction(arguments = values
... morearguments = morevalues)
... {
... content of function
... which spans
... several lines
... }''')
'\\n content of function\\n which spans\\n several lines\\n'
"""
functionContentRe = re.compile(r'(?<=\{).*(?=^\})', flags = re.DOTALL | re.MULTILINE)
match = functionContentRe.search(text)
if match:
return match.group()
else:
return text
def cppComments(text):
"""
Converts comments delimited by // and on a new line into a markdown cell. For C++ files only.
>>> cppComments('''// This is a
... // multiline comment
... void function(){}''')
'# <markdowncell>\\n# This is a\\n# multiline comment\\n# <codecell>\\nvoid function(){}\\n'
>>> cppComments('''void function(){
... int variable = 5 // Comment not in cell
... // Comment also not in cell
... }''')
'void function(){\\n int variable = 5 // Comment not in cell\\n // Comment also not in cell\\n}\\n'
"""
text = text.splitlines()
newtext = ''
inComment = False
for line in text:
if line.startswith("//") and not inComment: # True if first line of comment
inComment = True
newtext += "# <markdowncell>\n"
if line[2:].lstrip().startswith("#"): # Don't use .capitalize() if line starts with hash, ie it is a header
newtext += ("# " + line[2:]+"\n")
else:
newtext += ("# " + line[2:].lstrip().capitalize()+"\n")
elif inComment and not line.startswith("//"): # True if first line after comment
inComment = False
newtext += "# <codecell>\n"
newtext += (line+"\n")
elif inComment and line.startswith("//"): # True if in the middle of a comment block
newtext += ("# " + line[2:] + "\n")
else:
newtext += (line+"\n")
return newtext
def split(text):
"""
Splits the text string into main, helpers, and rest. main is the main function,
    i.e. the function that has the same name as the macro file. Helpers is a list of
strings, each a helper function, i.e. any other function that is not the main function.
Finally, rest is a string containing any top-level code outside of any function.
Comments immediately prior to a helper cell are converted into markdown cell,
added to the helper, and removed from rest.
Intended for C++ files only.
>>> split('''void tutorial(){
... content of tutorial
... }''')
('void tutorial(){\\n content of tutorial\\n}', [], '')
>>> split('''void tutorial(){
... content of tutorial
... }
... void helper(arguments = values){
... helper function
... content spans lines
... }''')
('void tutorial(){\\n content of tutorial\\n}', ['\\n# <markdowncell>\\n A helper function is created: \\n# <codecell>\\n%%cpp -d\\nvoid helper(arguments = values){\\n helper function\\n content spans lines\\n}'], '')
>>> split('''#include <header.h>
... using namespace NAMESPACE
... void tutorial(){
... content of tutorial
... }
... void helper(arguments = values){
... helper function
... content spans lines
... }''')
('void tutorial(){\\n content of tutorial\\n}', ['\\n# <markdowncell>\\n A helper function is created: \\n# <codecell>\\n%%cpp -d\\nvoid helper(arguments = values){\\n helper function\\n content spans lines\\n}'], '#include <header.h>\\nusing namespace NAMESPACE')
>>> split('''void tutorial(){
... content of tutorial
... }
... // This is a multiline
... // description of the
... // helper function
... void helper(arguments = values){
... helper function
... content spans lines
... }''')
('void tutorial(){\\n content of tutorial\\n}', ['\\n# <markdowncell>\\n This is a multiline\\n description of the\\n helper function\\n \\n# <codecell>\\n%%cpp -d\\nvoid helper(arguments = values){\\n helper function\\n content spans lines\\n}'], '')
"""
functionReString="("
for cpptype in gTypesList:
functionReString += ("^%s|") % cpptype
functionReString = functionReString[:-1] + r")\s?\*?&?\s?[\w:]*?\s?\([^\)]*\)\s*\{.*?^\}"
functionRe = re.compile(functionReString, flags = re.DOTALL | re.MULTILINE)
#functionre = re.compile(r'(^void|^int|^Int_t|^TF1|^string|^bool|^double|^float|^char|^TCanvas|^TTree|^TString|^TSeqCollection|^Double_t|^TFile|^Long64_t|^Bool_t)\s?\*?\s?[\w:]*?\s?\([^\)]*\)\s*\{.*?^\}', flags = re.DOTALL | re.MULTILINE)
functionMatches = functionRe.finditer(text)
helpers = []
main = ""
for matchString in [match.group() for match in functionMatches]:
if tutName == findFunctionName(matchString): # if the name of the function is that of the macro
main = matchString
else:
helpers.append(matchString)
# Create rest by replacing the main and helper functions with blank strings
rest = text.replace(main, "")
for helper in helpers:
rest = rest.replace(helper, "")
newHelpers = []
lines = text.splitlines()
for helper in helpers: # For each helper function
for i, line in enumerate(lines): # Look through the lines until the
if line.startswith(helper[:helper.find("\n")]): # first line of the helper is found
j = 1
commentList = []
while lines[i-j].startswith("//"): # Add comment lines immediately prior to list
commentList.append(lines[i-j])
j += 1
if commentList: # Convert list to string
commentList.reverse()
helperDescription = ''
for comment in commentList:
if comment in ("//", "// "):
helperDescription += "\n\n" # Two newlines to create hard break in Markdown
else:
helperDescription += (comment[2:] + "\n")
rest = rest.replace(comment, "")
break
else: # If no comments are found create generic description
helperDescription = "A helper function is created:"
break
if findFunctionName(helper) != "main": # remove void main function
newHelpers.append("\n# <markdowncell>\n " + helperDescription + " \n# <codecell>\n%%cpp -d\n" + helper)
rest = rest.rstrip("\n /") # remove newlines and empty comments at the end of string
return main, newHelpers, rest
def findFunctionName(text):
"""
Takes a string representation of a C++ function as an input,
finds and returns the name of the function
>>> findFunctionName('void functionName(arguments = values){}')
'functionName'
>>> findFunctionName('void functionName (arguments = values){}')
'functionName'
>>> findFunctionName('void *functionName(arguments = values){}')
'functionName'
>>> findFunctionName('void* functionName(arguments = values){}')
'functionName'
>>> findFunctionName('void * functionName(arguments = values){}')
'functionName'
>>> findFunctionName('void class::functionName(arguments = values){}')
'class::functionName'
"""
functionNameReString="(?<="
for cpptype in gTypesList:
functionNameReString += ("(?<=%s)|") % cpptype
functionNameReString = functionNameReString[:-1] + r")\s?\*?\s?[^\s]*?(?=\s?\()"
functionNameRe = re.compile(functionNameReString, flags = re.DOTALL | re.MULTILINE)
#functionnamere = re.compile(r'(?<=(?<=int)|(?<=void)|(?<=TF1)|(?<=Int_t)|(?<=string)|(?<=double)|(?<=Double_t)|(?<=float)|(?<=char)|(?<=TString)|(?<=bool)|(?<=TSeqCollection)|(?<=TCanvas)|(?<=TTree)|(?<=TFile)|(?<=Long64_t)|(?<=Bool_t))\s?\*?\s?[^\s]*?(?=\s?\()', flags = re.DOTALL | re.MULTILINE)
match = functionNameRe.search(text)
functionname = match.group().strip(" *\n")
return functionname
def processmain(text):
"""
    Evaluates whether the main function returns a TCanvas or requires input. If it
    does, then the keepfunction flag is True, meaning the function won't be extracted
    by cppFunction. If the main function takes arguments, an extra cell defining
    those arguments is returned, to be added before the main code.
>>> processmain('''void function(){
... content of function
... spanning several
... lines
... }''')
('void function(){\\n content of function\\n spanning several\\n lines\\n}', '')
>>> processmain('''void function(arguments = values){
... content of function
... spanning several
... lines
... }''')
('void function(arguments = values){\\n content of function\\n spanning several\\n lines\\n}', '# <markdowncell> \\n Arguments are defined. \\n# <codecell>\\narguments = values;\\n# <codecell>\\n')
>>> processmain('''void function(argument1 = value1, //comment 1
... argument2 = value2 /*comment 2*/ ,
... argument3 = value3,
... argument4 = value4)
... {
... content of function
... spanning several
... lines
... }''')
('void function(argument1 = value1, //comment 1\\n argument2 = value2 /*comment 2*/ ,\\n argument3 = value3, \\n argument4 = value4)\\n{\\n content of function\\n spanning several\\n lines\\n}', '# <markdowncell> \\n Arguments are defined. \\n# <codecell>\\nargument1 = value1;\\nargument2 = value2;\\nargument3 = value3;\\nargument4 = value4;\\n# <codecell>\\n')
>>> processmain('''TCanvas function(){
... content of function
... spanning several
... lines
... return c1
... }''')
('TCanvas function(){\\n content of function\\n spanning several \\n lines\\n return c1\\n}', '')
"""
argumentsCell = ''
if text:
argumentsre = re.compile(r'(?<=\().*?(?=\))', flags = re.DOTALL | re.MULTILINE)
arguments = argumentsre.search(text)
if len(arguments.group()) > 3:
argumentsCell = "# <markdowncell> \n Arguments are defined. \n# <codecell>\n"
individualArgumentre = re.compile(r'[^/\n,]*?=[^/\n,]*') #, flags = re.DOTALL) #| re.MULTILINE)
argumentList=individualArgumentre.findall(arguments.group())
for argument in argumentList:
argumentsCell += argument.strip("\n ") + ";\n"
argumentsCell += "# <codecell>\n"
return text, argumentsCell
# now define text transformers
def removePaletteEditor(code):
code = code.replace("img->StartPaletteEditor();", "")
code = code.replace("Open the color editor", "")
return code
def runEventExe(code):
if "copytree" in tutName:
return "# <codecell> \n.! $ROOTSYS/test/eventexe 1000 1 1 1 \n" + code
return code
def getLibMathMore(code):
if "quasirandom" == tutName:
return "# <codecell> \ngSystem->Load(\"libMathMore\"); \n# <codecell> \n" + code
return code
def roofitRemoveSpacesComments(code):
def changeString(matchObject):
matchString = matchObject.group()
matchString = matchString[0] + " " + matchString[1:]
matchString = matchString.replace(" " , "THISISASPACE")
matchString = matchString.replace(" " , "")
matchString = matchString.replace("THISISASPACE" , " ")
return matchString
newcode = re.sub("#\s\s?\w\s[\w-]\s\w.*", changeString , code)
return newcode
def declareNamespace(code):
if "using namespace RooFit;\nusing namespace RooStats;" in code:
code = code.replace("using namespace RooFit;\nusing namespace RooStats;", "# <codecell>\n%%cpp -d\n// This is a workaround to make sure the namespace is used inside functions\nusing namespace RooFit;\nusing namespace RooStats;\n# <codecell>\n")
else:
code = code.replace("using namespace RooFit;", "# <codecell>\n%%cpp -d\n// This is a workaround to make sure the namespace is used inside functions\nusing namespace RooFit;\n# <codecell>\n")
code = code.replace("using namespace RooStats;", "# <codecell>\n%%cpp -d\n// This is a workaround to make sure the namespace is used inside functions\nusing namespace RooStats;\n# <codecell>\n")
code = code.replace("using namespace ROOT::Math;", "# <codecell>\n%%cpp -d\n// This is a workaround to make sure the namespace is used inside functions\nusing namespace ROOT::Math;\n# <codecell>\n")
return code
def rs401dGetFiles(code):
if tutName == "rs401d_FeldmanCousins":
code = code.replace(
"""#if !defined(__CINT__) || defined(__MAKECINT__)\n#include "../tutorials/roostats/NuMuToNuE_Oscillation.h"\n#include "../tutorials/roostats/NuMuToNuE_Oscillation.cxx" // so that it can be executed directly\n#else\n#include "../tutorials/roostats/NuMuToNuE_Oscillation.cxx+" // so that it can be executed directly\n#endif""" , """TString tutDir = gROOT->GetTutorialDir();\nTString headerDir = TString::Format("#include \\\"%s/roostats/NuMuToNuE_Oscillation.h\\\"", tutDir.Data());\nTString impDir = TString::Format("#include \\\"%s/roostats/NuMuToNuE_Oscillation.cxx\\\"", tutDir.Data());\ngROOT->ProcessLine(headerDir);\ngROOT->ProcessLine(impDir);""")
return code
def declareIncludes(code):
if tutName != "fitcont":
code = re.sub(r"# <codecell>\s*#include", "# <codecell>\n%%cpp -d\n#include" , code)
return code
def tree4GetFiles(code):
if tutName == "tree4":
code = code.replace(
"""#include \"../test/Event.h\"""" , """# <codecell>\nTString dir = "$ROOTSYS/test/Event.h";\ngSystem->ExpandPathName(dir);\nTString includeCommand = TString::Format("#include \\\"%s\\\"" , dir.Data());\ngROOT->ProcessLine(includeCommand);""")
return code
def disableDrawProgressBar(code):
code = code.replace(":DrawProgressBar",":!DrawProgressBar")
return code
def fixes(code):
codeTransformers=[removePaletteEditor, runEventExe, getLibMathMore,
roofitRemoveSpacesComments, declareNamespace, rs401dGetFiles ,
declareIncludes, tree4GetFiles, disableDrawProgressBar]
for transformer in codeTransformers:
code = transformer(code)
return code
def changeMarkdown(code):
code = code.replace("~~~" , "```")
code = code.replace("{.cpp}", "cpp")
code = code.replace("{.bash}", "bash")
return code
def isCpp():
"""
Return True if extension is a C++ file
"""
return extension in ("C", "c", "cpp", "C++", "cxx")
def findTimeout():
listLongTutorials = ["OneSidedFrequentistUpperLimitWithBands", "StandardBayesianNumericalDemo",
"TwoSidedFrequentistUpperLimitWithBands" , "HybridStandardForm", "rs401d_FeldmanCousins",
"TMVAMultipleBackgroundExample", "TMVARegression", "TMVAClassification", "StandardHypoTestDemo"]
if tutName in listLongTutorials:
return 300
else:
return 90
# -------------------------------------
# ------------ Main Program------------
# -------------------------------------
def mainfunction(text):
"""
Main function. Calls all other functions, depending on whether the macro input is in python or c++.
It adds the header information. Also, it adds a cell that draws all canvases. The working text is
then converted to a version 3 jupyter notebook, subsequently updated to a version 4. Then, metadata
    associated with the language the macro is written in is attached to the notebook. Finally the
notebook is executed and output as a Jupyter notebook.
"""
# Modify text from macros to suit a notebook
if isCpp():
main, helpers, rest = split(text)
main, argumentsCell = processmain(main)
main = cppComments(unindenter(cppFunction(main))) # Remove function, Unindent, and convert comments to Markdown cells
if argumentsCell:
main = argumentsCell + main
rest = cppComments(rest) # Convert top level code comments to Markdown cells
# Construct text by starting with top level code, then the helper functions, and finally the main function.
# Also add cells for headerfile, or keepfunction
if needsHeaderFile:
text = "# <markdowncell>\n# The header file must be copied to the current directory\n# <codecell>\n.!cp %s%s.h .\n# <codecell>\n" % (tutRelativePath, tutName)
text += rest
else:
text = "# <codecell>\n" + rest
for helper in helpers:
text += helper
text += ("\n# <codecell>\n" + main)
if extension == "py":
text = pythonMainFunction(text)
text = pythonComments(text) # Convert comments into Markdown cells
# Perform last minute fixes to the notebook, used for specific fixes needed by some tutorials
text = fixes(text)
# Change to standard Markdown
newDescription = changeMarkdown(description)
# Add the title and header of the notebook
text = "# <markdowncell> \n# # %s\n%s# \n# \n# **Author:** %s \n# <i><small>This notebook tutorial was automatically generated " \
"with <a href= \"https://github.com/root-project/root/blob/master/documentation/doxygen/converttonotebook.py\">ROOTBOOK-izer (Beta)</a> " \
"from the macro found in the ROOT repository on %s.</small></i>\n# <codecell>\n%s" % (tutTitle, newDescription, author, date, text)
# Add cell at the end of the notebook that draws all the canvasses. Add a Markdown cell before explaining it.
if isJsroot and not nodraw:
if isCpp():
text += "\n# <markdowncell> \n# Draw all canvases \n# <codecell>\n%jsroot on\ngROOT->GetListOfCanvases()->Draw()"
if extension == "py":
text += "\n# <markdowncell> \n# Draw all canvases \n# <codecell>\n%jsroot on\nfrom ROOT import gROOT \ngROOT.GetListOfCanvases().Draw()"
elif not nodraw:
if isCpp():
text += "\n# <markdowncell> \n# Draw all canvases \n# <codecell>\ngROOT->GetListOfCanvases()->Draw()"
if extension == "py":
text += "\n# <markdowncell> \n# Draw all canvases \n# <codecell>\nfrom ROOT import gROOT \ngROOT.GetListOfCanvases().Draw()"
# Create a notebook from the working text
nbook = v3.reads_py(text)
nbook = v4.upgrade(nbook) # Upgrade v3 to v4
# Load notebook string into json format, essentially creating a dictionary
json_data = json.loads(v4.writes(nbook))
# add the corresponding metadata
if extension == "py":
json_data[u'metadata'] = {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.10"
}
}
elif isCpp():
json_data[u'metadata'] = {
"kernelspec": {
"display_name": "ROOT C++",
"language": "c++",
"name": "root"
},
"language_info": {
"codemirror_mode": "text/x-c++src",
"file_extension": ".C",
"mimetype": " text/x-c++src",
"name": "c++"
}
}
# write the json file with the metadata
with open(outPathName, 'w') as fout:
json.dump(json_data, fout, indent=1, sort_keys=True)
print(time.time() - starttime)
timeout = findTimeout()
    # Call command that executes the notebook and creates a new notebook with the output
r = subprocess.call(["jupyter", "nbconvert", "--ExecutePreprocessor.timeout=%d" % timeout, "--to=notebook", "--execute", outPathName])
if r != 0:
sys.stderr.write("NOTEBOOK_CONVERSION_WARNING: Nbconvert failed for notebook %s with return code %s\n" %(outname,r))
# If notebook conversion did not work, try again without the option --execute
subprocess.call(["jupyter", "nbconvert", "--ExecutePreprocessor.timeout=%d" % timeout, "--to=notebook", outPathName])
else:
if isJsroot:
subprocess.call(["jupyter", "trust", os.path.join(outdir, outnameconverted)])
# Only remove notebook without output if nbconvert succeeds
os.remove(outPathName)
if __name__ == "__main__":
if str(sys.argv[1]) == "-test":
tutName = "tutorial"
doctest.testmod(verbose=True)
else:
# -------------------------------------
# ----- Preliminary definitions--------
# -------------------------------------
# Extract and define the name of the file as well as its derived names
tutPathName = str(sys.argv[1])
tutPath = os.path.dirname(tutPathName)
if tutPath.split("/")[-2] == "tutorials":
tutRelativePath = "$ROOTSYS/tutorials/%s/" % tutPath.split("/")[-1]
tutFileName = os.path.basename(tutPathName)
tutName, extension = tutFileName.split(".")
tutTitle = re.sub( r"([A-Z\d])", r" \1", tutName).title()
outname = tutFileName + ".ipynb"
outnameconverted = tutFileName + ".nbconvert.ipynb"
# Extract output directory
try:
outdir = str(sys.argv[2])
        except IndexError:
outdir = tutPath
outPathName = os.path.join(outdir, outname)
# Find and define the time and date this script is run
date = datetime.now().strftime("%A, %B %d, %Y at %I:%M %p")
# -------------------------------------
# -------------------------------------
# -------------------------------------
        # Set DYLD_LIBRARY_PATH. When the script runs without root access or as a
        # different user, notably on Mac systems, the environment may omit this
        # variable for security reasons, so it is defined manually here.
os.environ["DYLD_LIBRARY_PATH"] = os.environ["ROOTSYS"] + "/lib"
# Open the file to be converted
with open(tutPathName) as fin:
text = fin.read()
# Extract information from header and remove header from text
if extension == "py":
text, description, author, isNotebook, isJsroot, nodraw, needsHeaderFile = readHeaderPython(text)
elif isCpp():
text, description, author, isNotebook, isJsroot, nodraw, needsHeaderFile = readHeaderCpp(text)
if isNotebook:
starttime = time.time()
mainfunction(text)
print(time.time() - starttime)
else:
pass
| lgpl-2.1 | -6,022,820,875,227,234,000 | 41.368296 | 663 | 0.594565 | false |
angelman/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/reftests/extract_reference_link_unittest.py | 122 | 3414 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.layout_tests.reftests import extract_reference_link
class ExtractLinkMatchTest(unittest.TestCase):
def test_getExtractMatch(self):
html_1 = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>CSS Test: DESCRIPTION OF TEST</title>
<link rel="author" title="NAME_OF_AUTHOR"
href="mailto:EMAIL OR http://CONTACT_PAGE"/>
<link rel="help" href="RELEVANT_SPEC_SECTION"/>
<link rel="match" href="green-box-ref.xht" />
<link rel="match" href="blue-box-ref.xht" />
<link rel="mismatch" href="red-box-notref.xht" />
<link rel="mismatch" href="red-box-notref.xht" />
<meta name="flags" content="TOKENS" />
<meta name="assert" content="TEST ASSERTION"/>
<style type="text/css"><![CDATA[
CSS FOR TEST
]]></style>
</head>
<body>
CONTENT OF TEST
</body>
</html>
"""
matches, mismatches = extract_reference_link.get_reference_link(html_1)
self.assertItemsEqual(matches,
["green-box-ref.xht", "blue-box-ref.xht"])
self.assertItemsEqual(mismatches,
["red-box-notref.xht", "red-box-notref.xht"])
html_2 = ""
empty_tuple_1 = extract_reference_link.get_reference_link(html_2)
self.assertEqual(empty_tuple_1, ([], []))
        # Link does not have a "rel" attribute.
html_3 = """<link href="RELEVANT_SPEC_SECTION"/>"""
empty_tuple_2 = extract_reference_link.get_reference_link(html_3)
self.assertEqual(empty_tuple_2, ([], []))
        # Link does not have an "href" attribute.
html_4 = """<link rel="match"/>"""
empty_tuple_3 = extract_reference_link.get_reference_link(html_4)
self.assertEqual(empty_tuple_3, ([], []))
# Link does not have a "/" at the end.
html_5 = """<link rel="help" href="RELEVANT_SPEC_SECTION">"""
empty_tuple_4 = extract_reference_link.get_reference_link(html_5)
self.assertEqual(empty_tuple_4, ([], []))
| bsd-3-clause | 1,892,203,215,410,281,500 | 41.675 | 79 | 0.689221 | false |
untrustbank/litecoin | test/functional/feature_logging.py | 13 | 2462 | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test debug logging."""
import os
from test_framework.test_framework import BitcoinTestFramework
class LoggingTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
# test default log file name
assert os.path.isfile(os.path.join(self.nodes[0].datadir, "regtest", "debug.log"))
# test alternative log file name in datadir
self.restart_node(0, ["-debuglogfile=foo.log"])
assert os.path.isfile(os.path.join(self.nodes[0].datadir, "regtest", "foo.log"))
# test alternative log file name outside datadir
tempname = os.path.join(self.options.tmpdir, "foo.log")
self.restart_node(0, ["-debuglogfile=%s" % tempname])
assert os.path.isfile(tempname)
# check that invalid log (relative) will cause error
invdir = os.path.join(self.nodes[0].datadir, "regtest", "foo")
invalidname = os.path.join("foo", "foo.log")
self.stop_node(0)
self.assert_start_raises_init_error(0, ["-debuglogfile=%s" % (invalidname)],
"Error: Could not open debug log file")
assert not os.path.isfile(os.path.join(invdir, "foo.log"))
# check that invalid log (relative) works after path exists
self.stop_node(0)
os.mkdir(invdir)
self.start_node(0, ["-debuglogfile=%s" % (invalidname)])
assert os.path.isfile(os.path.join(invdir, "foo.log"))
# check that invalid log (absolute) will cause error
self.stop_node(0)
invdir = os.path.join(self.options.tmpdir, "foo")
invalidname = os.path.join(invdir, "foo.log")
self.assert_start_raises_init_error(0, ["-debuglogfile=%s" % invalidname],
"Error: Could not open debug log file")
assert not os.path.isfile(os.path.join(invdir, "foo.log"))
# check that invalid log (absolute) works after path exists
self.stop_node(0)
os.mkdir(invdir)
self.start_node(0, ["-debuglogfile=%s" % (invalidname)])
assert os.path.isfile(os.path.join(invdir, "foo.log"))
if __name__ == '__main__':
LoggingTest().main()
| mit | -1,613,068,283,968,426,000 | 40.728814 | 90 | 0.622665 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_11_01/operations/load_balancer_backend_address_pools_operations.py | 1 | 8115 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class LoadBalancerBackendAddressPoolsOperations(object):
"""LoadBalancerBackendAddressPoolsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-11-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-11-01"
self.config = config
def list(
self, resource_group_name, load_balancer_name, custom_headers=None, raw=False, **operation_config):
"""Gets all the load balancer backed address pools.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of BackendAddressPool
:rtype:
~azure.mgmt.network.v2017_11_01.models.BackendAddressPoolPaged[~azure.mgmt.network.v2017_11_01.models.BackendAddressPool]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.BackendAddressPoolPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.BackendAddressPoolPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools'}
def get(
self, resource_group_name, load_balancer_name, backend_address_pool_name, custom_headers=None, raw=False, **operation_config):
"""Gets load balancer backend address pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param backend_address_pool_name: The name of the backend address
pool.
:type backend_address_pool_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: BackendAddressPool or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2017_11_01.models.BackendAddressPool or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('BackendAddressPool', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'}
| mit | 1,469,604,475,419,676,000 | 45.371429 | 202 | 0.64313 | false |
ingadhoc/odoo | addons/l10n_be_coda/wizard/__init__.py | 439 | 1098 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_coda_import
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 7,707,345,081,814,983,000 | 42.92 | 78 | 0.619308 | false |
40223234/w16b_test | static/Brython3.1.1-20150328-091302/Lib/unittest/test/testmock/testwith.py | 739 | 5806 | import unittest
from warnings import catch_warnings
from unittest.test.testmock.support import is_instance
from unittest.mock import MagicMock, Mock, patch, sentinel, mock_open, call
something = sentinel.Something
something_else = sentinel.SomethingElse
class WithTest(unittest.TestCase):
def test_with_statement(self):
with patch('%s.something' % __name__, sentinel.Something2):
self.assertEqual(something, sentinel.Something2, "unpatched")
self.assertEqual(something, sentinel.Something)
def test_with_statement_exception(self):
try:
with patch('%s.something' % __name__, sentinel.Something2):
self.assertEqual(something, sentinel.Something2, "unpatched")
raise Exception('pow')
except Exception:
pass
else:
self.fail("patch swallowed exception")
self.assertEqual(something, sentinel.Something)
def test_with_statement_as(self):
with patch('%s.something' % __name__) as mock_something:
self.assertEqual(something, mock_something, "unpatched")
self.assertTrue(is_instance(mock_something, MagicMock),
"patching wrong type")
self.assertEqual(something, sentinel.Something)
def test_patch_object_with_statement(self):
class Foo(object):
something = 'foo'
original = Foo.something
with patch.object(Foo, 'something'):
self.assertNotEqual(Foo.something, original, "unpatched")
self.assertEqual(Foo.something, original)
def test_with_statement_nested(self):
with catch_warnings(record=True):
with patch('%s.something' % __name__) as mock_something, patch('%s.something_else' % __name__) as mock_something_else:
self.assertEqual(something, mock_something, "unpatched")
self.assertEqual(something_else, mock_something_else,
"unpatched")
self.assertEqual(something, sentinel.Something)
self.assertEqual(something_else, sentinel.SomethingElse)
def test_with_statement_specified(self):
with patch('%s.something' % __name__, sentinel.Patched) as mock_something:
self.assertEqual(something, mock_something, "unpatched")
self.assertEqual(mock_something, sentinel.Patched, "wrong patch")
self.assertEqual(something, sentinel.Something)
def testContextManagerMocking(self):
mock = Mock()
mock.__enter__ = Mock()
mock.__exit__ = Mock()
mock.__exit__.return_value = False
with mock as m:
self.assertEqual(m, mock.__enter__.return_value)
mock.__enter__.assert_called_with()
mock.__exit__.assert_called_with(None, None, None)
def test_context_manager_with_magic_mock(self):
mock = MagicMock()
with self.assertRaises(TypeError):
with mock:
'foo' + 3
mock.__enter__.assert_called_with()
self.assertTrue(mock.__exit__.called)
def test_with_statement_same_attribute(self):
with patch('%s.something' % __name__, sentinel.Patched) as mock_something:
self.assertEqual(something, mock_something, "unpatched")
with patch('%s.something' % __name__) as mock_again:
self.assertEqual(something, mock_again, "unpatched")
self.assertEqual(something, mock_something,
"restored with wrong instance")
self.assertEqual(something, sentinel.Something, "not restored")
def test_with_statement_imbricated(self):
with patch('%s.something' % __name__) as mock_something:
self.assertEqual(something, mock_something, "unpatched")
with patch('%s.something_else' % __name__) as mock_something_else:
self.assertEqual(something_else, mock_something_else,
"unpatched")
self.assertEqual(something, sentinel.Something)
self.assertEqual(something_else, sentinel.SomethingElse)
def test_dict_context_manager(self):
foo = {}
with patch.dict(foo, {'a': 'b'}):
self.assertEqual(foo, {'a': 'b'})
self.assertEqual(foo, {})
with self.assertRaises(NameError):
with patch.dict(foo, {'a': 'b'}):
self.assertEqual(foo, {'a': 'b'})
raise NameError('Konrad')
self.assertEqual(foo, {})
class TestMockOpen(unittest.TestCase):
def test_mock_open(self):
mock = mock_open()
with patch('%s.open' % __name__, mock, create=True) as patched:
self.assertIs(patched, mock)
open('foo')
mock.assert_called_once_with('foo')
def test_mock_open_context_manager(self):
mock = mock_open()
handle = mock.return_value
with patch('%s.open' % __name__, mock, create=True):
with open('foo') as f:
f.read()
expected_calls = [call('foo'), call().__enter__(), call().read(),
call().__exit__(None, None, None)]
self.assertEqual(mock.mock_calls, expected_calls)
self.assertIs(f, handle)
def test_explicit_mock(self):
mock = MagicMock()
mock_open(mock)
with patch('%s.open' % __name__, mock, create=True) as patched:
self.assertIs(patched, mock)
open('foo')
mock.assert_called_once_with('foo')
def test_read_data(self):
mock = mock_open(read_data='foo')
with patch('%s.open' % __name__, mock, create=True):
h = open('bar')
result = h.read()
self.assertEqual(result, 'foo')
if __name__ == '__main__':
unittest.main()
| agpl-3.0 | 2,412,066,780,545,380,000 | 31.988636 | 130 | 0.593352 | false |
zeimusu/D1 | nummeth.py | 1 | 1770 | def sample_cubic(x):
return x**3 - 2*x - 1
def Dsample_cubic(x):
return 3*x**2 - 2
def sign(x):
if x<0:
return -1
else:
return 1
def bisection(f,a,b,fa,fb):
    """Given a function f and an interval [a, b] on which f changes sign,
    return a new half-width interval [x, y] on which f changes sign, together
    with the values of f at its endpoints."""
midpoint = (a + b)/2
fm = f(midpoint)
if sign(fa) == sign(fm):
return midpoint,b,fm,fb
else:
return a,midpoint,fa,fm
def interpolation(f,a,b,fa,fb):
"""given a function and an interval [a, b] in which f changes sign,
return a new interval [x,y] in which one endpoint is found by
interpolation and f changes sign"""
x = (a - b) * fa / (fb - fa) + a
fx = f(x)
if sign(fx) == sign(fa):
return x,b,fx,fb,x
else:
return a,x,fa,fx,x
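# Added note (comment only, not part of the original module): the update used in
# interpolation() is the false-position (regula falsi) step, i.e. the x-intercept
# of the chord through (a, fa) and (b, fb):
#   x = a - fa * (b - a) / (fb - fa)
# which is algebraically identical to the expression written above.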
def NR(f,df,x):
    """
    Newton-Raphson method: given a function, its derivative, and an initial
    estimate, return an improved estimate of the root.
"""
return x - f(x)/df(x)
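# Added note (comment only, not part of the original module): NR() performs one
# Newton-Raphson step x_new = x - f(x)/f'(x); iterating it, as test() does below,
# converges rapidly near a simple root, e.g. (illustrative):
#   x = 1.0
#   for _ in range(5):
#       x = NR(sample_cubic, Dsample_cubic, x)   # tends to 1.618..., the golden ratio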
def test():
f = sample_cubic
df = Dsample_cubic
A, B = 1, 2
loops = 10
solution = 1.618033988749895
print("Bisection")
a, b= A, B
fa = f(a)
fb = f(b)
for i in range(loops):
a, b, fa, fb = bisection(f,a,b,fa,fb)
print( a, b, 100*abs(a - solution)/solution )
print()
print("interpolation")
a, b =A, B
fa, fb = f(a), f(b)
for i in range(loops):
a, b,fa,fb,x = interpolation(f,a,b,fa,fb)
print(x, 100*abs(x-solution)/solution)
print()
print("Newton Raphson")
x = A
for i in range(loops):
x = NR(f,df,x)
print(x, 100*abs(x-solution)/solution)
test()
| gpl-3.0 | -8,754,877,294,183,066,000 | 21.987013 | 75 | 0.554802 | false |
HPPTECH/hpp_IOSTressTest | Refer/IOST_OLD_SRC/IOST_0.17/Libs/IOST_AboutDialog.py | 2 | 2723 | #!/usr/bin/python
#======================================================================
#
# Project : hpp_IOStressTest
# File : IOST_AboutDialog.py
# Date : Sep 21, 2016
# Author : HuuHoang Nguyen
# Contact : [email protected]
# : [email protected]
# License : MIT License
# Copyright : 2016
# Description: The hpp_IOStressTest is under the MIT License, a copy of license which may be found in LICENSE
#
#======================================================================
import io
import os
import re
import operator
import sys
import base64
import time
from IOST_Prepare import IOST_Prepare
from IOST_Config import *
import gtk
import gtk.glade
class IOST_AboutDialog():
    def __init__(self, glade_filename, window_name, object_name, main_builder):
        "Get the Help -> About dialog window."
self.IOST_AboutDialog_window_name = window_name
self.IOST_AboutDialog_object_name = object_name
if not main_builder:
self.IOST_AboutDialog_Builder = gtk.Builder()
self.IOST_AboutDialog_Builder.add_from_file(glade_filename)
self.IOST_AboutDialog_Builder.connect_signals(self)
else:
self.IOST_AboutDialog_Builder = main_builder
self.IOST_Objs[window_name][window_name+ object_name] = self.IOST_AboutDialog_Builder.get_object(window_name+object_name)
self.IOST_Objs[window_name][window_name+ object_name].set_version(self.IOST_Data["ProjectVersion"])
def Run(self, window_name, object_name):
self.IOST_Objs[window_name][window_name+object_name].run()
self.IOST_Objs[window_name][window_name+object_name].hide()
def ActiveLink(self, object_name):
self.IOST_Objs[self.IOST_AboutDialog_window_name][self.IOST_AboutDialog_window_name+ self.IOST_AboutDialog_object_name].hide()
def on_IOST_WHelpAbout_destroy(self, object, data=None):
""
self.IOST_Objs[self.IOST_AboutDialog_window_name][self.IOST_AboutDialog_window_name+self.IOST_AboutDialog_object_name].hide()
def on_IOST_WHelpAbout_DialogActionArea_destroy(self, object, data=None):
""
self.IOST_Objs[self.IOST_AboutDialog_window_name][self.IOST_AboutDialog_window_name+self.IOST_AboutDialog_object_name].hide()
def on_IOST_WHelpAbout_button_press_event(self, widget, event, data=None):
""
self.IOST_Objs[self.IOST_AboutDialog_window_name][self.IOST_AboutDialog_object_name].hide()
def on_IOST_WHelpAbout_DialogVB_button_press_event(self, widget, event, data=None):
""
        self.IOST_Objs[self.IOST_AboutDialog_window_name][self.IOST_AboutDialog_window_name + self.IOST_AboutDialog_object_name].hide()
| mit | -4,751,123,583,145,951,000 | 40.257576 | 135 | 0.663974 | false |
pietern/caffe2 | caffe2/python/helpers/conv.py | 3 | 11062 | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package conv
# Module caffe2.python.helpers.conv
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from caffe2.python.modeling import initializers
from caffe2.python.modeling.parameter_info import ParameterTags
def _ConvBase(
model,
is_nd,
blob_in,
blob_out,
dim_in,
dim_out,
kernel,
weight_init=None,
bias_init=None,
WeightInitializer=None,
BiasInitializer=None,
group=1,
transform_inputs=None,
use_cudnn=False,
order="NCHW",
cudnn_exhaustive_search=False,
ws_nbytes_limit=None,
**kwargs
):
kernels = []
if is_nd:
if not isinstance(kernel, list):
kernels = [kernel]
else:
kernels = kernel
else:
if isinstance(kernel, list):
assert len(kernel) == 2, "Conv support only a 2D kernel."
kernels = kernel
else:
kernels = [kernel] * 2
requested_engine = kwargs.get('engine')
if requested_engine is not None:
if use_cudnn and requested_engine != 'CUDNN':
raise ValueError(
'When use_cudnn=True, the only engine you can specify is '
'"CUDNN"')
elif not use_cudnn and requested_engine == 'CUDNN':
raise ValueError(
'When use_cudnn=False, the only engine you can specify is '
'""')
if use_cudnn:
kwargs['engine'] = 'CUDNN'
kwargs['exhaustive_search'] = cudnn_exhaustive_search
if ws_nbytes_limit:
kwargs['ws_nbytes_limit'] = ws_nbytes_limit
use_bias =\
False if ("no_bias" in kwargs and kwargs["no_bias"]) else True
blob_out = blob_out or model.net.NextName()
weight_shape = [dim_out]
if order == "NCHW":
weight_shape.append(int(dim_in / group))
weight_shape.extend(kernels)
else:
weight_shape.extend(kernels)
weight_shape.append(int(dim_in / group))
WeightInitializer = initializers.update_initializer(
WeightInitializer, weight_init, ("XavierFill", {})
)
BiasInitializer = initializers.update_initializer(
BiasInitializer, bias_init, ("ConstantFill", {})
)
if not model.init_params:
WeightInitializer = initializers.ExternalInitializer()
BiasInitializer = initializers.ExternalInitializer()
weight = model.create_param(
param_name=blob_out + '_w',
shape=weight_shape,
initializer=WeightInitializer,
tags=ParameterTags.WEIGHT
)
if use_bias:
bias = model.create_param(
param_name=blob_out + '_b',
shape=[dim_out, ],
initializer=BiasInitializer,
tags=ParameterTags.BIAS
)
if use_bias:
inputs = [blob_in, weight, bias]
else:
inputs = [blob_in, weight]
if transform_inputs is not None:
transform_inputs(model, blob_out, inputs)
# For the operator, we no longer need to provide the no_bias field
# because it can automatically figure this out from the number of
# inputs.
if 'no_bias' in kwargs:
del kwargs['no_bias']
if group != 1:
kwargs['group'] = group
if is_nd:
return model.net.Conv(
inputs,
blob_out,
kernels=kernels,
order=order,
**kwargs)
else:
if isinstance(kernel, list):
return model.net.Conv(
inputs,
blob_out,
kernel_h=kernel[0],
kernel_w=kernel[1],
order=order,
**kwargs)
else:
return model.net.Conv(
inputs,
blob_out,
kernel=kernel,
order=order,
**kwargs)
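# Added note (comment only, not part of the original helper): as a concrete,
# illustrative case, a 3x3 convolution with dim_in=64, dim_out=128 and group=1
# creates a weight of shape [128, 64, 3, 3] under NCHW order and
# [128, 3, 3, 64] under NHWC order, following the weight_shape logic above.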
def conv_nd(
model,
blob_in,
blob_out,
dim_in,
dim_out,
kernel,
weight_init=None,
bias_init=None,
WeightInitializer=None,
BiasInitializer=None,
group=1,
transform_inputs=None,
order="NCHW",
**kwargs
):
"""N-dimensional convolution for inputs with NCHW storage order.
"""
assert order == "NCHW", "ConvNd only supported for NCHW storage."
return _ConvBase(model, True, blob_in, blob_out, dim_in, dim_out, kernel,
weight_init, bias_init, WeightInitializer, BiasInitializer,
group, transform_inputs, order=order, **kwargs)
def conv(
model,
blob_in,
blob_out,
dim_in,
dim_out,
kernel,
weight_init=None,
bias_init=None,
WeightInitializer=None,
BiasInitializer=None,
group=1,
transform_inputs=None,
**kwargs
):
"""2-dimensional convolution.
"""
return _ConvBase(model, False, blob_in, blob_out, dim_in, dim_out, kernel,
weight_init, bias_init, WeightInitializer, BiasInitializer,
group, transform_inputs, **kwargs)
def conv_transpose(
model,
blob_in,
blob_out,
dim_in,
dim_out,
kernel,
weight_init=None,
bias_init=None,
use_cudnn=False,
order="NCHW",
cudnn_exhaustive_search=False,
ws_nbytes_limit=None,
**kwargs
):
"""ConvTranspose.
"""
weight_init = weight_init if weight_init else ('XavierFill', {})
bias_init = bias_init if bias_init else ('ConstantFill', {})
blob_out = blob_out or model.net.NextName()
weight_shape = (
[dim_in, dim_out, kernel, kernel]
if order == "NCHW" else [dim_in, kernel, kernel, dim_out]
)
if model.init_params:
weight = model.param_init_net.__getattr__(weight_init[0])(
[],
blob_out + '_w',
shape=weight_shape,
**weight_init[1]
)
bias = model.param_init_net.__getattr__(bias_init[0])(
[],
blob_out + '_b',
shape=[dim_out, ],
**bias_init[1]
)
else:
weight = core.ScopedBlobReference(
blob_out + '_w', model.param_init_net)
bias = core.ScopedBlobReference(
blob_out + '_b', model.param_init_net)
model.AddParameter(weight, ParameterTags.WEIGHT)
model.AddParameter(bias, ParameterTags.BIAS)
if use_cudnn:
kwargs['engine'] = 'CUDNN'
kwargs['exhaustive_search'] = cudnn_exhaustive_search
if ws_nbytes_limit:
kwargs['ws_nbytes_limit'] = ws_nbytes_limit
return model.net.ConvTranspose(
[blob_in, weight, bias],
blob_out,
kernel=kernel,
order=order,
**kwargs
)
def group_conv(
model,
blob_in,
blob_out,
dim_in,
dim_out,
kernel,
weight_init=None,
bias_init=None,
group=1,
**kwargs
):
"""Group Convolution.
This is essentially the same as Conv with a group argument passed in.
We specialize this for backward interface compatibility.
"""
return conv(model, blob_in, blob_out, dim_in, dim_out, kernel,
weight_init=weight_init, bias_init=bias_init,
group=group, **kwargs)
def group_conv_deprecated(
model,
blob_in,
blob_out,
dim_in,
dim_out,
kernel,
weight_init=None,
bias_init=None,
group=1,
use_cudnn=False,
order="NCHW",
cudnn_exhaustive_search=False,
ws_nbytes_limit=None,
**kwargs
):
"""GroupConvolution's deprecated interface.
This is used to simulate a group convolution via split and concat. You
should always use the new group convolution in your new code.
"""
weight_init = weight_init if weight_init else ('XavierFill', {})
bias_init = bias_init if bias_init else ('ConstantFill', {})
use_bias = False if ("no_bias" in kwargs and kwargs["no_bias"]) else True
if use_cudnn:
kwargs['engine'] = 'CUDNN'
kwargs['exhaustive_search'] = cudnn_exhaustive_search
if ws_nbytes_limit:
kwargs['ws_nbytes_limit'] = ws_nbytes_limit
if dim_in % group:
raise ValueError("dim_in should be divisible by group.")
if dim_out % group:
raise ValueError("dim_out should be divisible by group.")
splitted_blobs = model.net.DepthSplit(
blob_in,
['_' + blob_out + '_gconv_split_' + str(i) for i in range(group)],
dimensions=[int(dim_in / group) for i in range(group)],
order=order
)
weight_shape = (
[dim_out / group, dim_in / group, kernel, kernel]
if order == "NCHW" else
[dim_out / group, kernel, kernel, dim_in / group]
)
# Make sure that the shapes are of int format. Especially for py3 where
# int division gives float output.
weight_shape = [int(v) for v in weight_shape]
conv_blobs = []
for i in range(group):
if model.init_params:
weight = model.param_init_net.__getattr__(weight_init[0])(
[],
blob_out + '_gconv_%d_w' % i,
shape=weight_shape,
**weight_init[1]
)
if use_bias:
bias = model.param_init_net.__getattr__(bias_init[0])(
[],
blob_out + '_gconv_%d_b' % i,
shape=[int(dim_out / group)],
**bias_init[1]
)
else:
weight = core.ScopedBlobReference(
blob_out + '_gconv_%d_w' % i, model.param_init_net)
if use_bias:
bias = core.ScopedBlobReference(
blob_out + '_gconv_%d_b' % i, model.param_init_net)
model.AddParameter(weight, ParameterTags.WEIGHT)
if use_bias:
model.AddParameter(bias, ParameterTags.BIAS)
if use_bias:
inputs = [weight, bias]
else:
inputs = [weight]
if 'no_bias' in kwargs:
del kwargs['no_bias']
conv_blobs.append(
splitted_blobs[i].Conv(
inputs,
blob_out + '_gconv_%d' % i,
kernel=kernel,
order=order,
**kwargs
)
)
concat, concat_dims = model.net.Concat(
conv_blobs,
[blob_out,
"_" + blob_out + "_concat_dims"],
order=order
)
return concat
| apache-2.0 | 3,940,835,675,468,680,700 | 28.736559 | 80 | 0.564545 | false |
diofeher/django-nfa | django/core/management/commands/runserver.py | 16 | 3339 | from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
import os
import sys
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--noreload', action='store_false', dest='use_reloader', default=True,
help='Tells Django to NOT use the auto-reloader.'),
make_option('--adminmedia', dest='admin_media_path', default='',
help='Specifies the directory from which to serve admin media.'),
)
help = "Starts a lightweight Web server for development."
args = '[optional port number, or ipaddr:port]'
# Validation is called explicitly each time the server is reloaded.
requires_model_validation = False
def handle(self, addrport='', *args, **options):
import django
from django.core.servers.basehttp import run, AdminMediaHandler, WSGIServerException
from django.core.handlers.wsgi import WSGIHandler
if args:
raise CommandError('Usage is runserver %s' % self.args)
if not addrport:
addr = ''
port = '8000'
else:
try:
addr, port = addrport.split(':')
except ValueError:
addr, port = '', addrport
if not addr:
addr = '127.0.0.1'
if not port.isdigit():
raise CommandError("%r is not a valid port number." % port)
use_reloader = options.get('use_reloader', True)
admin_media_path = options.get('admin_media_path', '')
shutdown_message = options.get('shutdown_message', '')
quit_command = (sys.platform == 'win32') and 'CTRL-BREAK' or 'CONTROL-C'
def inner_run():
from django.conf import settings
print "Validating models..."
self.validate(display_num_errors=True)
print "\nDjango version %s, using settings %r" % (django.get_version(), settings.SETTINGS_MODULE)
print "Development server is running at http://%s:%s/" % (addr, port)
print "Quit the server with %s." % quit_command
try:
path = admin_media_path or django.__path__[0] + '/contrib/admin/media'
handler = AdminMediaHandler(WSGIHandler(), path)
run(addr, int(port), handler)
except WSGIServerException, e:
# Use helpful error messages instead of ugly tracebacks.
ERRORS = {
13: "You don't have permission to access that port.",
98: "That port is already in use.",
99: "That IP address can't be assigned-to.",
}
try:
error_text = ERRORS[e.args[0].args[0]]
except (AttributeError, KeyError):
error_text = str(e)
sys.stderr.write(self.style.ERROR("Error: %s" % error_text) + '\n')
# Need to use an OS exit because sys.exit doesn't work in a thread
os._exit(1)
except KeyboardInterrupt:
if shutdown_message:
print shutdown_message
sys.exit(0)
if use_reloader:
from django.utils import autoreload
autoreload.main(inner_run)
else:
inner_run()
| bsd-3-clause | 5,220,049,861,294,354,000 | 42.363636 | 109 | 0.566637 | false |
yufengg/tensorflow | tensorflow/contrib/boosted_trees/python/ops/model_ops.py | 17 | 5122 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model ops python wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.boosted_trees.python.ops import gen_model_ops
from tensorflow.contrib.boosted_trees.python.ops.gen_model_ops import tree_ensemble_deserialize
from tensorflow.contrib.boosted_trees.python.ops.gen_model_ops import tree_ensemble_serialize
# pylint: disable=unused-import
from tensorflow.contrib.boosted_trees.python.ops.gen_model_ops import tree_ensemble_stamp_token
# pylint: enable=unused-import
from tensorflow.contrib.util import loader
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import resource_loader
from tensorflow.python.training import saver
ops.NotDifferentiable("TreeEnsembleVariable")
ops.NotDifferentiable("TreeEnsembleSerialize")
ops.NotDifferentiable("TreeEnsembleDeserialize")
class TreeEnsembleVariableSavable(saver.BaseSaverBuilder.SaveableObject):
"""SaveableObject implementation for TreeEnsembleVariable."""
def __init__(self, tree_ensemble_handle, create_op, name):
"""Creates a TreeEnsembleVariableSavable object.
Args:
tree_ensemble_handle: handle to the tree ensemble variable.
create_op: the op to initialize the variable.
name: the name to save the tree ensemble variable under.
"""
stamp_token, ensemble_config = tree_ensemble_serialize(tree_ensemble_handle)
# slice_spec is useful for saving a slice from a variable.
# It's not meaningful the tree ensemble variable. So we just pass an empty
# value.
slice_spec = ""
specs = [
saver.BaseSaverBuilder.SaveSpec(stamp_token, slice_spec,
name + "_stamp"),
saver.BaseSaverBuilder.SaveSpec(ensemble_config, slice_spec,
name + "_config"),
]
super(TreeEnsembleVariableSavable,
self).__init__(tree_ensemble_handle, specs, name)
self._tree_ensemble_handle = tree_ensemble_handle
self._create_op = create_op
def restore(self, restored_tensors, unused_restored_shapes):
"""Restores the associated tree ensemble from 'restored_tensors'.
Args:
restored_tensors: the tensors that were loaded from a checkpoint.
unused_restored_shapes: the shapes this object should conform to after
restore. Not meaningful for trees.
Returns:
The operation that restores the state of the tree ensemble variable.
"""
with ops.control_dependencies([self._create_op]):
return tree_ensemble_deserialize(
self._tree_ensemble_handle,
stamp_token=restored_tensors[0],
tree_ensemble_config=restored_tensors[1])
def tree_ensemble_variable(stamp_token,
tree_ensemble_config,
name,
container=None):
r"""Creates a tree ensemble model and returns a handle to it.
Args:
stamp_token: The initial stamp token value for the ensemble resource.
tree_ensemble_config: A `Tensor` of type `string`.
Serialized proto of the tree ensemble.
name: A name for the ensemble variable.
container: An optional `string`. Defaults to `""`.
Returns:
A `Tensor` of type mutable `string`. The handle to the tree ensemble.
"""
with ops.name_scope(name, "TreeEnsembleVariable") as name:
resource_handle = gen_model_ops.decision_tree_ensemble_resource_handle_op(
container, shared_name=name, name=name)
create_op = gen_model_ops.create_tree_ensemble_variable(
resource_handle, stamp_token, tree_ensemble_config)
is_initialized_op = gen_model_ops.tree_ensemble_is_initialized_op(
resource_handle)
# Adds the variable to the savable list.
saveable = TreeEnsembleVariableSavable(resource_handle, create_op,
resource_handle.name)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
resources.register_resource(resource_handle, create_op, is_initialized_op)
return resource_handle
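# Added sketch (comment only, not part of the original wrapper): a typical call is
#   handle = tree_ensemble_variable(
#       stamp_token=0, tree_ensemble_config='', name='ensemble')
# (here with an empty serialized config, purely for illustration), which creates
# the resource, registers its initializer, and adds a saveable object so the
# ensemble is written to and restored from checkpoints.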
# Conditionally load ops, they might already be statically linked in.
try:
_model_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("_model_ops.so"))
except (errors.NotFoundError, IOError):
print("Error loading _model_ops.so")
| apache-2.0 | -388,517,911,334,306,400 | 42.042017 | 95 | 0.70285 | false |
aavanian/bokeh | examples/app/crossfilter/main.py | 5 | 2462 | import pandas as pd
from bokeh.layouts import row, widgetbox
from bokeh.models import Select
from bokeh.palettes import Spectral5
from bokeh.plotting import curdoc, figure
from bokeh.sampledata.autompg import autompg_clean as df
df = df.copy()
SIZES = list(range(6, 22, 3))
COLORS = Spectral5
N_SIZES = len(SIZES)
N_COLORS = len(COLORS)
# data cleanup
df.cyl = df.cyl.astype(str)
df.yr = df.yr.astype(str)
del df['name']
columns = sorted(df.columns)
discrete = [x for x in columns if df[x].dtype == object]
continuous = [x for x in columns if x not in discrete]
def create_figure():
xs = df[x.value].values
ys = df[y.value].values
x_title = x.value.title()
y_title = y.value.title()
kw = dict()
if x.value in discrete:
kw['x_range'] = sorted(set(xs))
if y.value in discrete:
kw['y_range'] = sorted(set(ys))
kw['title'] = "%s vs %s" % (x_title, y_title)
p = figure(plot_height=600, plot_width=800, tools='pan,box_zoom,hover,reset', **kw)
p.xaxis.axis_label = x_title
p.yaxis.axis_label = y_title
if x.value in discrete:
p.xaxis.major_label_orientation = pd.np.pi / 4
sz = 9
if size.value != 'None':
if len(set(df[size.value])) > N_SIZES:
groups = pd.qcut(df[size.value].values, N_SIZES, duplicates='drop')
else:
groups = pd.Categorical(df[size.value])
sz = [SIZES[xx] for xx in groups.codes]
c = "#31AADE"
if color.value != 'None':
if len(set(df[color.value])) > N_SIZES:
groups = pd.qcut(df[color.value].values, N_COLORS, duplicates='drop')
else:
groups = pd.Categorical(df[color.value])
c = [COLORS[xx] for xx in groups.codes]
p.circle(x=xs, y=ys, color=c, size=sz, line_color="white", alpha=0.6, hover_color='white', hover_alpha=0.5)
return p
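# Added note (comment only, not part of the original example): in create_figure(),
# a continuous Size/Color column with more distinct values than available buckets
# is binned into quantile groups with pd.qcut; otherwise it is treated as
# categorical, and each point receives the SIZES/COLORS entry for its group code.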
def update(attr, old, new):
layout.children[1] = create_figure()
x = Select(title='X-Axis', value='mpg', options=columns)
x.on_change('value', update)
y = Select(title='Y-Axis', value='hp', options=columns)
y.on_change('value', update)
size = Select(title='Size', value='None', options=['None'] + continuous)
size.on_change('value', update)
color = Select(title='Color', value='None', options=['None'] + continuous)
color.on_change('value', update)
controls = widgetbox([x, y, color, size], width=200)
layout = row(controls, create_figure())
curdoc().add_root(layout)
curdoc().title = "Crossfilter"
| bsd-3-clause | -6,240,720,743,522,132,000 | 27.627907 | 111 | 0.636474 | false |
espadrine/opera | chromium/src/tools/swarm_client/isolateserver_archive.py | 2 | 14494 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Archives a set of files to a server."""
import binascii
import cStringIO
import hashlib
import itertools
import logging
import optparse
import os
import sys
import time
import urllib
import zlib
import run_isolated
import run_test_cases
# The minimum size of files to upload directly to the blobstore.
MIN_SIZE_FOR_DIRECT_BLOBSTORE = 20 * 1024
# The number of files to check the isolate server per /contains query. The
# number here is a trade-off; the more per request, the lower the effect of HTTP
# round trip latency and TCP-level chattiness. On the other hand, larger values
# cause longer lookups, increasing the initial latency to start uploading, which
# is especially an issue for large files. This value is optimized for the "few
# thousands files to look up with minimal number of large files missing" case.
ITEMS_PER_CONTAINS_QUERY = 100
# A list of already compressed extension types that should not receive any
# compression before being uploaded.
ALREADY_COMPRESSED_TYPES = [
'7z', 'avi', 'cur', 'gif', 'h264', 'jar', 'jpeg', 'jpg', 'pdf', 'png',
'wav', 'zip'
]
def randomness():
"""Generates low-entropy randomness for MIME encoding.
Exists so it can be mocked out in unit tests.
"""
return str(time.time())
def encode_multipart_formdata(fields, files,
mime_mapper=lambda _: 'application/octet-stream'):
"""Encodes a Multipart form data object.
Args:
fields: a sequence (name, value) elements for
regular form fields.
files: a sequence of (name, filename, value) elements for data to be
uploaded as files.
mime_mapper: function to return the mime type from the filename.
Returns:
content_type: for httplib.HTTP instance
body: for httplib.HTTP instance
"""
boundary = hashlib.md5(randomness()).hexdigest()
body_list = []
for (key, value) in fields:
if isinstance(key, unicode):
      key = key.encode('utf-8')
if isinstance(value, unicode):
value = value.encode('utf-8')
body_list.append('--' + boundary)
body_list.append('Content-Disposition: form-data; name="%s"' % key)
body_list.append('')
body_list.append(value)
body_list.append('--' + boundary)
body_list.append('')
for (key, filename, value) in files:
if isinstance(key, unicode):
      key = key.encode('utf-8')
if isinstance(filename, unicode):
      filename = filename.encode('utf-8')
if isinstance(value, unicode):
value = value.encode('utf-8')
body_list.append('--' + boundary)
body_list.append('Content-Disposition: form-data; name="%s"; '
'filename="%s"' % (key, filename))
body_list.append('Content-Type: %s' % mime_mapper(filename))
body_list.append('')
body_list.append(value)
body_list.append('--' + boundary)
body_list.append('')
if body_list:
body_list[-2] += '--'
body = '\r\n'.join(body_list)
content_type = 'multipart/form-data; boundary=%s' % boundary
return content_type, body
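# Added illustration (comment only, not part of the original tool): a typical call,
# mirroring upload_hash_content_to_blobstore() below, is
#   content_type, body = encode_multipart_formdata(
#       [('token', 'abc123')],
#       [('content', '3f786850e387550fdab836ed7e6dc881de23001b', 'file bytes')])
# and the returned body is then POSTed with the returned multipart content_type.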
def sha1_file(filepath):
"""Calculates the SHA-1 of a file without reading it all in memory at once."""
digest = hashlib.sha1()
with open(filepath, 'rb') as f:
while True:
# Read in 1mb chunks.
chunk = f.read(1024*1024)
if not chunk:
break
digest.update(chunk)
return digest.hexdigest()
def url_open(url, **kwargs):
result = run_isolated.url_open(url, **kwargs)
if not result:
# If we get no response from the server, assume it is down and raise an
# exception.
raise run_isolated.MappingError('Unable to connect to server %s' % url)
return result
def upload_hash_content_to_blobstore(
generate_upload_url, data, hash_key, content):
"""Uploads the given hash contents directly to the blobsotre via a generated
url.
Arguments:
generate_upload_url: The url to get the new upload url from.
data: extra POST data.
hash_key: sha1 of the uncompressed version of content.
content: The contents to upload. Must fit in memory for now.
"""
logging.debug('Generating url to directly upload file to blobstore')
assert isinstance(hash_key, str), hash_key
assert isinstance(content, str), (hash_key, content)
# TODO(maruel): Support large files. This would require streaming support.
content_type, body = encode_multipart_formdata(
data, [('content', hash_key, content)])
for attempt in xrange(run_isolated.URL_OPEN_MAX_ATTEMPTS):
# Retry HTTP 50x here.
response = run_isolated.url_open(generate_upload_url, data=data)
if not response:
raise run_isolated.MappingError(
'Unable to connect to server %s' % generate_upload_url)
upload_url = response.read()
# Do not retry this request on HTTP 50x. Regenerate an upload url each time
# since uploading "consumes" the upload url.
result = run_isolated.url_open(
upload_url, data=body, content_type=content_type, retry_50x=False)
if result:
return result.read()
if attempt != run_isolated.URL_OPEN_MAX_ATTEMPTS - 1:
run_isolated.HttpService.sleep_before_retry(attempt, None)
raise run_isolated.MappingError(
'Unable to connect to server %s' % generate_upload_url)
class UploadRemote(run_isolated.Remote):
def __init__(self, namespace, base_url, token):
self.namespace = str(namespace)
self._token = token
super(UploadRemote, self).__init__(base_url)
def get_file_handler(self, base_url):
base_url = str(base_url)
def upload_file(content, hash_key):
# TODO(maruel): Detect failures.
hash_key = str(hash_key)
content_url = base_url.rstrip('/') + '/content/'
if len(content) > MIN_SIZE_FOR_DIRECT_BLOBSTORE:
url = '%sgenerate_blobstore_url/%s/%s' % (
content_url, self.namespace, hash_key)
        # self._token is stored already quoted, but quoting is unnecessary for
        # this request (and only this one), so unquote it before posting.
data = [('token', urllib.unquote(self._token))]
upload_hash_content_to_blobstore(url, data, hash_key, content)
else:
url = '%sstore/%s/%s?token=%s' % (
content_url, self.namespace, hash_key, self._token)
url_open(url, data=content, content_type='application/octet-stream')
return upload_file
def check_files_exist_on_server(query_url, queries):
"""Queries the server to see which files from this batch already exist there.
Arguments:
queries: The hash files to potential upload to the server.
Returns:
missing_files: list of files that are missing on the server.
"""
logging.info('Checking existence of %d files...', len(queries))
body = ''.join(
(binascii.unhexlify(meta_data['h']) for (_, meta_data) in queries))
assert (len(body) % 20) == 0, repr(body)
response = url_open(
query_url, data=body, content_type='application/octet-stream').read()
if len(queries) != len(response):
raise run_isolated.MappingError(
'Got an incorrect number of responses from the server. Expected %d, '
'but got %d' % (len(queries), len(response)))
missing_files = [
queries[i] for i, flag in enumerate(response) if flag == chr(0)
]
logging.info('Queried %d files, %d cache hit',
len(queries), len(queries) - len(missing_files))
return missing_files
def compression_level(filename):
"""Given a filename calculates the ideal compression level to use."""
file_ext = os.path.splitext(filename)[1].lower()
# TODO(csharp): Profile to find what compression level works best.
return 0 if file_ext in ALREADY_COMPRESSED_TYPES else 7
def read_and_compress(filepath, level):
"""Reads a file and returns its content gzip compressed."""
compressor = zlib.compressobj(level)
compressed_data = cStringIO.StringIO()
with open(filepath, 'rb') as f:
while True:
chunk = f.read(run_isolated.ZIPPED_FILE_CHUNK)
if not chunk:
break
compressed_data.write(compressor.compress(chunk))
compressed_data.write(compressor.flush(zlib.Z_FINISH))
value = compressed_data.getvalue()
compressed_data.close()
return value
def zip_and_trigger_upload(infile, metadata, upload_function):
# TODO(csharp): Fix crbug.com/150823 and enable the touched logic again.
# if not metadata['T']:
compressed_data = read_and_compress(infile, compression_level(infile))
priority = (
run_isolated.Remote.HIGH if metadata.get('priority', '1') == '0'
else run_isolated.Remote.MED)
return upload_function(priority, compressed_data, metadata['h'], None)
def batch_files_for_check(infiles):
"""Splits list of files to check for existence on the server into batches.
Each batch corresponds to a single 'exists?' query to the server.
Yields:
batches: list of batches, each batch is a list of files.
"""
# TODO(maruel): Make this adaptative, e.g. only query a few, like 10 in one
# request, for the largest files, since they are the ones most likely to be
# missing, then batch larger requests (up to 500) for the tail since they are
# likely to be present.
next_queries = []
items = ((k, v) for k, v in infiles.iteritems() if 's' in v)
for relfile, metadata in sorted(items, key=lambda x: -x[1]['s']):
next_queries.append((relfile, metadata))
if len(next_queries) == ITEMS_PER_CONTAINS_QUERY:
yield next_queries
next_queries = []
if next_queries:
yield next_queries
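# Added note (comment only, not part of the original script): files that carry an
# 's' (size) entry are sorted largest-first and grouped ITEMS_PER_CONTAINS_QUERY
# at a time, so e.g. 250 eligible files yield batches of 100, 100 and 50
# (relfile, metadata) pairs.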
def get_files_to_upload(contains_hash_url, infiles):
"""Yields files that are missing on the server."""
with run_isolated.ThreadPool(1, 16, 0, prefix='get_files_to_upload') as pool:
for files in batch_files_for_check(infiles):
pool.add_task(0, check_files_exist_on_server, contains_hash_url, files)
for missing_file in itertools.chain.from_iterable(pool.iter_results()):
yield missing_file
def upload_sha1_tree(base_url, indir, infiles, namespace):
"""Uploads the given tree to the given url.
Arguments:
base_url: The base url, it is assume that |base_url|/has/ can be used to
query if an element was already uploaded, and |base_url|/store/
can be used to upload a new element.
indir: Root directory the infiles are based in.
infiles: dict of files to upload files from |indir| to |base_url|.
namespace: The namespace to use on the server.
"""
logging.info('upload tree(base_url=%s, indir=%s, files=%d)' %
(base_url, indir, len(infiles)))
assert base_url.startswith('http'), base_url
base_url = base_url.rstrip('/')
# TODO(maruel): Make this request much earlier asynchronously while the files
# are being enumerated.
token = urllib.quote(url_open(base_url + '/content/get_token').read())
# Create a pool of workers to zip and upload any files missing from
# the server.
num_threads = run_test_cases.num_processors()
zipping_pool = run_isolated.ThreadPool(min(2, num_threads),
num_threads, 0, 'zip')
remote_uploader = UploadRemote(namespace, base_url, token)
# Starts the zip and upload process for files that are missing
# from the server.
contains_hash_url = '%s/content/contains/%s?token=%s' % (
base_url, namespace, token)
uploaded = []
for relfile, metadata in get_files_to_upload(contains_hash_url, infiles):
infile = os.path.join(indir, relfile)
zipping_pool.add_task(0, zip_and_trigger_upload, infile, metadata,
remote_uploader.add_item)
uploaded.append((relfile, metadata))
logging.info('Waiting for all files to finish zipping')
zipping_pool.join()
zipping_pool.close()
logging.info('All files zipped.')
logging.info('Waiting for all files to finish uploading')
# Will raise if any exception occurred.
remote_uploader.join()
remote_uploader.close()
logging.info('All files are uploaded')
total = len(infiles)
total_size = sum(metadata.get('s', 0) for metadata in infiles.itervalues())
logging.info(
'Total: %6d, %9.1fkb',
total,
sum(m.get('s', 0) for m in infiles.itervalues()) / 1024.)
cache_hit = set(infiles.iterkeys()) - set(x[0] for x in uploaded)
cache_hit_size = sum(infiles[i].get('s', 0) for i in cache_hit)
logging.info(
'cache hit: %6d, %9.1fkb, %6.2f%% files, %6.2f%% size',
len(cache_hit),
cache_hit_size / 1024.,
len(cache_hit) * 100. / total,
cache_hit_size * 100. / total_size if total_size else 0)
cache_miss = uploaded
cache_miss_size = sum(infiles[i[0]].get('s', 0) for i in cache_miss)
logging.info(
'cache miss: %6d, %9.1fkb, %6.2f%% files, %6.2f%% size',
len(cache_miss),
cache_miss_size / 1024.,
len(cache_miss) * 100. / total,
cache_miss_size * 100. / total_size if total_size else 0)
return 0
def main(args):
run_isolated.disable_buffering()
parser = optparse.OptionParser(
usage='%prog [options] <file1..fileN> or - to read from stdin',
description=sys.modules[__name__].__doc__)
parser.add_option('-r', '--remote', help='Remote server to archive to')
parser.add_option(
'-v', '--verbose',
action='count', default=0,
help='Use multiple times to increase verbosity')
parser.add_option('--namespace', default='default-gzip',
help='The namespace to use on the server.')
options, files = parser.parse_args(args)
levels = [logging.ERROR, logging.INFO, logging.DEBUG]
logging.basicConfig(
level=levels[min(len(levels)-1, options.verbose)],
format='[%(threadName)s] %(asctime)s,%(msecs)03d %(levelname)5s'
' %(module)15s(%(lineno)3d): %(message)s',
datefmt='%H:%M:%S')
if files == ['-']:
files = sys.stdin.readlines()
if not files:
parser.error('Nothing to upload')
if not options.remote:
parser.error('Nowhere to send. Please specify --remote')
# Load the necessary metadata. This is going to be rewritten eventually to be
# more efficient.
infiles = dict(
(
f,
{
's': os.stat(f).st_size,
'h': sha1_file(f),
}
)
for f in files)
with run_isolated.Profiler('Archive'):
return upload_sha1_tree(
base_url=options.remote,
indir=os.getcwd(),
infiles=infiles,
namespace=options.namespace)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause | 7,453,980,111,897,373,000 | 35.054726 | 80 | 0.664068 | false |
dobbymoodge/origin | vendor/github.com/getsentry/raven-go/docs/_sentryext/sentryext.py | 36 | 25388 | import re
import os
import sys
import json
import posixpath
from itertools import chain
from urlparse import urljoin
from docutils import nodes
from docutils.io import StringOutput
from docutils.nodes import document, section
from docutils.statemachine import ViewList
from docutils.parsers.rst import directives
from sphinx import addnodes
from sphinx.environment import url_re
from sphinx.domains import Domain, ObjType
from sphinx.directives import ObjectDescription
from sphinx.util.osutil import relative_uri
from sphinx.util.compat import Directive
from sphinx.util.docfields import Field, TypedField
from sphinx.builders.html import StandaloneHTMLBuilder, DirectoryHTMLBuilder
_http_method_re = re.compile(r'^\s*:http-method:\s+(.*?)$(?m)')
_http_path_re = re.compile(r'^\s*:http-path:\s+(.*?)$(?m)')
_edition_re = re.compile(r'^(\s*)..\s+sentry:edition::\s*(.*?)$')
_docedition_re = re.compile(r'^..\s+sentry:docedition::\s*(.*?)$')
_url_var_re = re.compile(r'\{(.*?)\}')
EXTERNAL_DOCS_URL = 'https://docs.getsentry.com/hosted/'
API_BASE_URL = 'https://api.getsentry.com/'
def iter_url_parts(path):
last = 0
for match in _url_var_re.finditer(path):
before = path[last:match.start()]
if before:
yield False, before
yield True, match.group(1)
last = match.end()
after = path[last:]
if after:
yield False, after
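# Illustrative behaviour of the helper above (hypothetical path):
#   list(iter_url_parts('/projects/{org}/{team}/'))
#   -> [(False, '/projects/'), (True, 'org'), (False, '/'), (True, 'team'), (False, '/')]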
def resolve_toctree(env, docname, builder, toctree, collapse=False):
def _toctree_add_classes(node):
for subnode in node.children:
if isinstance(subnode, (addnodes.compact_paragraph,
nodes.list_item,
nodes.bullet_list)):
_toctree_add_classes(subnode)
elif isinstance(subnode, nodes.reference):
# for <a>, identify which entries point to the current
# document and therefore may not be collapsed
if subnode['refuri'] == docname:
list_item = subnode.parent.parent
if not subnode['anchorname']:
# give the whole branch a 'current' class
# (useful for styling it differently)
branchnode = subnode
while branchnode:
branchnode['classes'].append('current')
branchnode = branchnode.parent
# mark the list_item as "on current page"
if subnode.parent.parent.get('iscurrent'):
# but only if it's not already done
return
while subnode:
subnode['iscurrent'] = True
subnode = subnode.parent
# Now mark all siblings as well and also give the
# innermost expansion an extra class.
list_item['classes'].append('active')
for node in list_item.parent.children:
node['classes'].append('relevant')
def _entries_from_toctree(toctreenode, parents, subtree=False):
refs = [(e[0], e[1]) for e in toctreenode['entries']]
entries = []
for (title, ref) in refs:
refdoc = None
if url_re.match(ref):
raise NotImplementedError('Not going to implement this (url)')
elif ref == 'env':
raise NotImplementedError('Not going to implement this (env)')
else:
if ref in parents:
env.warn(ref, 'circular toctree references '
'detected, ignoring: %s <- %s' %
(ref, ' <- '.join(parents)))
continue
refdoc = ref
toc = env.tocs[ref].deepcopy()
env.process_only_nodes(toc, builder, ref)
if title and toc.children and len(toc.children) == 1:
child = toc.children[0]
for refnode in child.traverse(nodes.reference):
if refnode['refuri'] == ref and \
not refnode['anchorname']:
refnode.children = [nodes.Text(title)]
if not toc.children:
# empty toc means: no titles will show up in the toctree
env.warn_node(
'toctree contains reference to document %r that '
'doesn\'t have a title: no link will be generated'
% ref, toctreenode)
# delete everything but the toplevel title(s)
# and toctrees
for toplevel in toc:
# nodes with length 1 don't have any children anyway
if len(toplevel) > 1:
subtrees = toplevel.traverse(addnodes.toctree)
toplevel[1][:] = subtrees
# resolve all sub-toctrees
for subtocnode in toc.traverse(addnodes.toctree):
i = subtocnode.parent.index(subtocnode) + 1
for item in _entries_from_toctree(subtocnode, [refdoc] +
parents, subtree=True):
subtocnode.parent.insert(i, item)
i += 1
subtocnode.parent.remove(subtocnode)
entries.extend(toc.children)
if not subtree:
ret = nodes.bullet_list()
ret += entries
return [ret]
return entries
tocentries = _entries_from_toctree(toctree, [])
if not tocentries:
return None
newnode = addnodes.compact_paragraph('', '')
newnode.extend(tocentries)
newnode['toctree'] = True
_toctree_add_classes(newnode)
for refnode in newnode.traverse(nodes.reference):
if not url_re.match(refnode['refuri']):
refnode.parent.parent['classes'].append('ref-' + refnode['refuri'])
refnode['refuri'] = builder.get_relative_uri(
docname, refnode['refuri']) + refnode['anchorname']
return newnode
def make_link_builder(app, base_page):
def link_builder(edition, to_current=False):
here = app.builder.get_target_uri(base_page)
if to_current:
uri = relative_uri(here, '../' + edition + '/' +
here.lstrip('/')) or './'
else:
root = app.builder.get_target_uri(app.env.config.master_doc) or './'
uri = relative_uri(here, root) or ''
if app.builder.name in ('sentryhtml', 'html'):
uri = (posixpath.dirname(uri or '.') or '.').rstrip('/') + \
'/../' + edition + '/index.html'
else:
uri = uri.rstrip('/') + '/../' + edition + '/'
return uri
return link_builder
def html_page_context(app, pagename, templatename, context, doctree):
# toc_parts = get_rendered_toctree(app.builder, pagename)
# context['full_toc'] = toc_parts['main']
def build_toc(split_toc=None):
return get_rendered_toctree(app.builder, pagename, collapse=False,
split_toc=split_toc)
context['build_toc'] = build_toc
context['link_to_edition'] = make_link_builder(app, pagename)
def render_sitemap():
return get_rendered_toctree(app.builder, 'sitemap',
collapse=False)['main']
context['render_sitemap'] = render_sitemap
context['sentry_doc_variant'] = app.env.config.sentry_doc_variant
def extract_toc(fulltoc, selectors):
entries = []
for refnode in fulltoc.traverse(nodes.reference):
container = refnode.parent.parent
if any(cls[:4] == 'ref-' and cls[4:] in selectors
for cls in container['classes']):
parent = container.parent
new_parent = parent.deepcopy()
del new_parent.children[:]
new_parent += container
entries.append(new_parent)
parent.remove(container)
if not parent.children:
parent.parent.remove(parent)
newnode = addnodes.compact_paragraph('', '')
newnode.extend(entries)
newnode['toctree'] = True
return newnode
def get_rendered_toctree(builder, docname, collapse=True, split_toc=None):
fulltoc = build_full_toctree(builder, docname, collapse=collapse)
rv = {}
def _render_toc(node):
return builder.render_partial(node)['fragment']
if split_toc:
for key, selectors in split_toc.iteritems():
rv[key] = _render_toc(extract_toc(fulltoc, selectors))
rv['main'] = _render_toc(fulltoc)
return rv
def build_full_toctree(builder, docname, collapse=True):
env = builder.env
doctree = env.get_doctree(env.config.master_doc)
toctrees = []
for toctreenode in doctree.traverse(addnodes.toctree):
toctrees.append(resolve_toctree(env, docname, builder, toctreenode,
collapse=collapse))
if not toctrees:
return None
result = toctrees[0]
for toctree in toctrees[1:]:
if toctree:
result.extend(toctree.children)
env.resolve_references(result, docname, builder)
return result
def parse_rst(state, content_offset, doc):
node = nodes.section()
# hack around title style bookkeeping
surrounding_title_styles = state.memo.title_styles
surrounding_section_level = state.memo.section_level
state.memo.title_styles = []
state.memo.section_level = 0
state.nested_parse(doc, content_offset, node, match_titles=1)
state.memo.title_styles = surrounding_title_styles
state.memo.section_level = surrounding_section_level
return node.children
def find_cached_api_json(env, filename):
return os.path.join(env.srcdir, '_apicache', filename)
def api_url_rule(text):
def add_url_thing(rv, value):
for is_var, part in iter_url_parts(value):
if is_var:
part = '{%s}' % part
node = nodes.emphasis(part, part)
else:
node = nodes.inline(part, part)
rv.append(node)
container = nodes.inline(classes=['url'])
domain_part = nodes.inline(classes=['domain', 'skip-latex'])
# add_url_thing(domain_part, API_BASE_URL.rstrip('/'))
container += domain_part
add_url_thing(container, text)
rv = nodes.inline(classes=['urlwrapper'])
rv += container
return rv
class URLPathField(Field):
def make_entry(self, fieldarg, content):
text = u''.join(x.rawsource for x in content)
return fieldarg, api_url_rule(text)
class AuthField(Field):
def make_entry(self, fieldarg, content):
rv = []
flags = set(x.strip() for x in
u''.join(x.rawsource for x in content).split(',')
if x.strip())
if 'required' in flags:
rv.append('required')
elif 'optional' in flags:
rv.append('optional')
else:
rv.append('unauthenticated')
if 'user-context-needed' in flags:
rv.append('user context needed')
text = ', '.join(rv)
node = nodes.inline(text, text)
return fieldarg, node
class ApiEndpointDirective(ObjectDescription):
option_spec = {
'noindex': directives.flag
}
doc_field_types = [
Field('http_method', label='Method', has_arg=False,
names=('http-method',)),
URLPathField('http_path', label='Path', has_arg=False,
names=('http-path',)),
TypedField('query_parameter', label='Query Parameters',
names=('qparam', 'query-parameter'),
typerolename='obj', typenames=('qparamtype',),
can_collapse=True),
TypedField('path_parameter', label='Path Parameters',
names=('pparam', 'path-parameter'),
typerolename='obj', typenames=('pparamtype',),
can_collapse=True),
TypedField('body_parameter', label='Parameters',
names=('param', 'parameter'),
typerolename='obj', typenames=('paramtype',),
can_collapse=True),
Field('returnvalue', label='Returns', has_arg=False,
names=('returns', 'return')),
Field('returntype', label='Return type', has_arg=False,
names=('rtype',)),
AuthField('auth', label='Authentication', has_arg=False,
names=('auth',)),
]
def needs_arglist(self):
return False
def handle_signature(self, sig, signode):
name = sig.strip()
fullname = name
content = '\n'.join(self.content)
method = _http_method_re.search(content)
path = _http_path_re.search(content)
if method and path:
prefix = method.group(1)
signode += addnodes.desc_type(prefix + ' ', prefix + ' ')
signode += api_url_rule(path.group(1))
return fullname
class ApiScenarioDirective(Directive):
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
def get_scenario_info(self):
ident = self.arguments[0].encode('ascii', 'replace')
with open(find_cached_api_json(self.state.document.settings.env,
'scenarios/%s.json' % ident)) as f:
return json.load(f)
def iter_body(self, data, is_json=True):
if data is None:
return
if is_json:
data = json.dumps(data, indent=2)
for line in data.splitlines():
yield line.rstrip()
def write_request(self, doc, request_info):
doc.append('.. class:: api-request', '')
doc.append('', '')
doc.append('.. sourcecode:: http', '')
doc.append('', '')
doc.append(' %s %s HTTP/1.1' % (
request_info['method'],
request_info['path'],
), '')
special_headers = [
('Authorization', 'Basic ___ENCODED_API_KEY___'),
('Host', 'app.getsentry.com'),
]
for key, value in chain(special_headers,
sorted(request_info['headers'].items())):
doc.append(' %s: %s' % (key, value), '')
doc.append('', '')
for item in self.iter_body(request_info['data'],
request_info['is_json']):
doc.append(' ' + item, '')
def write_response(self, doc, response_info):
doc.append('.. class:: api-response', '')
doc.append('', '')
doc.append('.. sourcecode:: http', '')
doc.append('', '')
doc.append(' HTTP/1.1 %s %s' % (
response_info['status'],
response_info['reason'],
), '')
for key, value in sorted(response_info['headers'].items()):
doc.append(' %s: %s' % (key.title(), value), '')
doc.append('', '')
for item in self.iter_body(response_info['data'],
response_info['is_json']):
doc.append(' ' + item, '')
def run(self):
doc = ViewList()
info = self.get_scenario_info()
for request in info['requests']:
self.write_request(doc, request['request'])
doc.append('', '')
self.write_response(doc, request['response'])
doc.append('', '')
return parse_rst(self.state, self.content_offset, doc)
class SentryDomain(Domain):
name = 'sentry'
label = 'Sentry'
object_types = {
'api-endpoint': ObjType('api-endpoint', 'api-endpoint', 'obj'),
'type': ObjType('type', 'type', 'obj'),
}
directives = {
'api-endpoint': ApiEndpointDirective,
'api-scenario': ApiScenarioDirective,
}
def preprocess_source(app, docname, source):
source_lines = source[0].splitlines()
def _find_block(indent, lineno):
block_indent = len(indent.expandtabs())
rv = []
actual_indent = None
while lineno < end:
line = source_lines[lineno]
if not line.strip():
rv.append(u'')
else:
expanded_line = line.expandtabs()
indent = len(expanded_line) - len(expanded_line.lstrip())
if indent > block_indent:
if actual_indent is None or indent < actual_indent:
actual_indent = indent
rv.append(line)
else:
break
lineno += 1
if rv:
rv.append(u'')
if actual_indent:
rv = [x[actual_indent:] for x in rv]
return rv, lineno
result = []
lineno = 0
end = len(source_lines)
while lineno < end:
line = source_lines[lineno]
match = _edition_re.match(line)
if match is None:
# Skip sentry:docedition. We don't want those.
match = _docedition_re.match(line)
if match is None:
result.append(line)
lineno += 1
continue
lineno += 1
indent, tags = match.groups()
tags = set(x.strip() for x in tags.split(',') if x.strip())
should_include = app.env.config.sentry_doc_variant in tags
block_lines, lineno = _find_block(indent, lineno)
if should_include:
result.extend(block_lines)
source[:] = [u'\n'.join(result)]
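# Illustrative input handled by preprocess_source (assumed directive format):
#   .. sentry:edition:: hosted, on-premise
#       indented block kept only when sentry_doc_variant matches one of the tags,
#       dropped from the source otherwise before Sphinx parses it.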
def builder_inited(app):
    # XXX: this currently means things only stay referenced after a
# deletion of a link after a clean build :(
if not hasattr(app.env, 'sentry_referenced_docs'):
app.env.sentry_referenced_docs = {}
def track_references(app, doctree):
docname = app.env.temp_data['docname']
rd = app.env.sentry_referenced_docs
for toctreenode in doctree.traverse(addnodes.toctree):
for e in toctreenode['entries']:
rd.setdefault(str(e[1]), set()).add(docname)
def is_referenced(docname, references):
if docname == 'index':
return True
seen = set([docname])
to_process = set(references.get(docname) or ())
while to_process:
if 'index' in to_process:
return True
next = to_process.pop()
seen.add(next)
for backlink in references.get(next) or ():
if backlink in seen:
continue
else:
to_process.add(backlink)
return False
class SphinxBuilderMixin(object):
build_wizard_fragment = False
@property
def add_permalinks(self):
return not self.build_wizard_fragment
def get_target_uri(self, *args, **kwargs):
rv = super(SphinxBuilderMixin, self).get_target_uri(*args, **kwargs)
if self.build_wizard_fragment:
rv = urljoin(EXTERNAL_DOCS_URL, rv)
return rv
def get_relative_uri(self, from_, to, typ=None):
if self.build_wizard_fragment:
return self.get_target_uri(to, typ)
return super(SphinxBuilderMixin, self).get_relative_uri(
from_, to, typ)
def write_doc(self, docname, doctree):
original_field_limit = self.docsettings.field_name_limit
try:
self.docsettings.field_name_limit = 120
if is_referenced(docname, self.app.env.sentry_referenced_docs):
return super(SphinxBuilderMixin, self).write_doc(docname, doctree)
else:
print 'skipping because unreferenced'
finally:
self.docsettings.field_name_limit = original_field_limit
def __iter_wizard_files(self):
for dirpath, dirnames, filenames in os.walk(self.srcdir,
followlinks=True):
dirnames[:] = [x for x in dirnames if x[:1] not in '_.']
for filename in filenames:
if filename == 'sentry-doc-config.json':
full_path = os.path.join(self.srcdir, dirpath)
base_path = full_path[len(self.srcdir):].strip('/\\') \
.replace(os.path.sep, '/')
yield os.path.join(full_path, filename), base_path
def __build_wizard_section(self, base_path, snippets):
trees = {}
rv = []
def _build_node(node):
original_header_level = self.docsettings.initial_header_level
# bump initial header level to two
self.docsettings.initial_header_level = 2
            # indicate that we're building for the wizard fragments.
# This changes url generation and more.
self.build_wizard_fragment = True
# Embed pygments colors as inline styles
original_args = self.highlighter.formatter_args
self.highlighter.formatter_args = original_args.copy()
self.highlighter.formatter_args['noclasses'] = True
try:
sub_doc = document(self.docsettings,
doctree.reporter)
sub_doc += node
destination = StringOutput(encoding='utf-8')
self.current_docname = docname
self.docwriter.write(sub_doc, destination)
self.docwriter.assemble_parts()
rv.append(self.docwriter.parts['fragment'])
finally:
self.build_wizard_fragment = False
self.highlighter.formatter_args = original_args
self.docsettings.initial_header_level = original_header_level
for snippet in snippets:
if '#' not in snippet:
snippet_path = snippet
section_name = None
else:
snippet_path, section_name = snippet.split('#', 1)
docname = posixpath.join(base_path, snippet_path)
if docname in trees:
doctree = trees.get(docname)
else:
doctree = self.env.get_and_resolve_doctree(docname, self)
trees[docname] = doctree
if section_name is None:
_build_node(next(iter(doctree.traverse(section))))
else:
for sect in doctree.traverse(section):
if section_name in sect['ids']:
_build_node(sect)
return u'\n\n'.join(rv)
def __write_wizard(self, data, base_path):
for uid, framework_data in data.get('wizards', {}).iteritems():
try:
body = self.__build_wizard_section(base_path,
framework_data['snippets'])
except IOError as e:
print >> sys.stderr, 'Failed to build wizard "%s" (%s)' % (uid, e)
continue
fn = os.path.join(self.outdir, '_wizards', '%s.json' % uid)
try:
os.makedirs(os.path.dirname(fn))
except OSError:
pass
doc_link = framework_data.get('doc_link')
if doc_link is not None:
doc_link = urljoin(EXTERNAL_DOCS_URL,
posixpath.join(base_path, doc_link))
with open(fn, 'w') as f:
json.dump({
'name': framework_data.get('name') or uid.title(),
'is_framework': framework_data.get('is_framework', False),
'doc_link': doc_link,
'client_lib': framework_data.get('client_lib'),
'body': body
}, f)
f.write('\n')
def __write_wizards(self):
for filename, base_path in self.__iter_wizard_files():
with open(filename) as f:
data = json.load(f)
self.__write_wizard(data, base_path)
def finish(self):
super(SphinxBuilderMixin, self).finish()
self.__write_wizards()
class SentryStandaloneHTMLBuilder(SphinxBuilderMixin, StandaloneHTMLBuilder):
name = 'sentryhtml'
class SentryDirectoryHTMLBuilder(SphinxBuilderMixin, DirectoryHTMLBuilder):
name = 'sentrydirhtml'
def setup(app):
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True)
app.add_domain(SentryDomain)
app.connect('builder-inited', builder_inited)
app.connect('html-page-context', html_page_context)
app.connect('source-read', preprocess_source)
app.connect('doctree-read', track_references)
app.add_builder(SentryStandaloneHTMLBuilder)
app.add_builder(SentryDirectoryHTMLBuilder)
app.add_config_value('sentry_doc_variant', None, 'env')
def activate():
"""Changes the config to something that the sentry doc infrastructure
expects.
"""
frm = sys._getframe(1)
globs = frm.f_globals
globs.setdefault('sentry_doc_variant',
os.environ.get('SENTRY_DOC_VARIANT', 'self'))
globs['extensions'] = list(globs.get('extensions') or ()) + ['sentryext']
globs['primary_domain'] = 'std'
globs['exclude_patterns'] = list(globs.get('exclude_patterns')
or ()) + ['_sentryext']
| apache-2.0 | 5,946,610,400,902,372,000 | 34.858757 | 82 | 0.553175 | false |
pythonchelle/opencomparison | apps/apiv1/tests/test_resources.py | 3 | 1333 | import json
from django.test import TestCase
from django.core.urlresolvers import reverse
from apiv1.tests import data
class ResourcesV1Tests(TestCase):
base_kwargs = {'api_name': 'v1'}
def setUp(self):
data.load()
def test_01_category(self):
kwargs = {'resource_name': 'category'}
kwargs.update(self.base_kwargs)
# check 200's
list_url = reverse('api_dispatch_list', kwargs=kwargs)
response = self.client.get(list_url)
self.assertEqual(response.status_code, 200)
kwargs['pk'] = 'apps'
cat_url = reverse('api_dispatch_detail', kwargs=kwargs)
self.assertTrue(cat_url in response.content)
response = self.client.get(cat_url)
self.assertEqual(response.status_code, 200)
def test_02_grid(self):
kwargs = {'resource_name': 'grid'}
kwargs.update(self.base_kwargs)
# check 200's
list_url = reverse('api_dispatch_list', kwargs=kwargs)
response = self.client.get(list_url)
self.assertEqual(response.status_code, 200)
kwargs['pk'] = 'testing'
grid_url = reverse('api_dispatch_detail', kwargs=kwargs)
self.assertTrue(grid_url in response.content)
response = self.client.get(grid_url)
self.assertEqual(response.status_code, 200)
| mit | -2,551,496,421,112,930,300 | 36.027778 | 64 | 0.636909 | false |
martinblech/pyfpm | pyfpm/pattern.py | 2 | 10947 | """
This module holds the actual pattern implementations.
End users should not normally have to deal with it, except for constructing
patterns programatically without making use of the pattern syntax parser.
"""
import re
try:
# python 2.x base string
_basestring = basestring
except NameError:
# python 3.x base string
_basestring = str
class Match(object):
"""
Represents the result of matching successfully a pattern against an
object. The `ctx` attribute is a :class:`dict` that contains the value for
each bound name in the pattern, if any.
"""
def __init__(self, ctx=None, value=None):
if ctx is None:
ctx = {}
self.ctx = ctx
self.value = value
def __eq__(self, other):
return (isinstance(other, Match) and
self.__dict__ == other.__dict__)
def __repr__(self):
return 'Match(%s)' % self.ctx
class Pattern(object):
"""
Base Pattern class. Abstracts the behavior common to all pattern types,
such as name bindings, conditionals and operator overloading for combining
several patterns.
"""
def __init__(self):
self.bound_name = None
self.condition = None
def match(self, other, ctx=None):
"""
Match this pattern against an object. Operator: `<<`.
:param other: the object this pattern should be matched against.
:param ctx: optional context. If none, an empty one will be
automatically created.
:type ctx: dict
:returns: a :class:`Match` if successful, `None` otherwise.
"""
match = self._does_match(other, ctx)
if match:
ctx = match.ctx
value = match.value or other
if self.bound_name:
if ctx is None:
ctx = {}
try:
previous = ctx[self.bound_name]
if previous != value:
return None
except KeyError:
ctx[self.bound_name] = value
if self.condition is None or self.condition(**ctx):
return Match(ctx)
return None
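    # Illustrative results (assumed values, doctest-style):
    #   EqualsPattern(1).match(2)                 -> None
    #   (InstanceOfPattern(int) % 'x').match(42)  -> Match({'x': 42})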
def __lshift__(self, other):
return self.match(other)
def bind(self, name):
"""Bind this pattern to the given name. Operator: `%`."""
self.bound_name = name
return self
def __mod__(self, name):
return self.bind(name)
def if_(self, condition):
"""
Add a boolean condition to this pattern. Operator: `/`.
:param condition: must accept the match context as keyword
arguments and return a boolean-ish value.
:type condition: callable
"""
self.condition = condition
return self
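    # Illustrative condition (assumed): bind a name, then filter on the context.
    #   p = (InstanceOfPattern(int) % 'n').if_(lambda n: n > 0)
    #   p.match(3)   -> Match({'n': 3})
    #   p.match(-3)  -> None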
def __div__(self, condition):
return self.if_(condition)
def __truediv__(self, condition):
return self.if_(condition)
def multiply(self, n):
"""
Build a :class:`ListPattern` that matches `n` instances of this pattern.
Operator: `*`.
Example:
>>> p = EqualsPattern(1).multiply(3)
>>> p.match((1, 1, 1))
Match({})
"""
return build(*([self]*n))
def __mul__(self, length):
return self.multiply(length)
def __rmul__(self, length):
return self.multiply(length)
def or_with(self, other):
"""
Build a new :class:`OrPattern` with this or the other pattern.
Operator: `|`.
Example:
>>> p = EqualsPattern(1).or_with(InstanceOfPattern(str))
>>> p.match('hello')
Match({})
>>> p.match(1)
Match({})
>>> p.match(2)
"""
patterns = []
for pattern in (self, other):
if isinstance(pattern, OrPattern):
patterns.extend(pattern.patterns)
else:
patterns.append(pattern)
return OrPattern(*patterns)
def __or__(self, other):
return self.or_with(other)
def head_tail_with(self, other):
"""
Head-tail concatenate this pattern with the other. The lhs pattern will
be the head and the other will be the tail. Operator: `+`.
Example:
>>> p = InstanceOfPattern(int).head_tail_with(ListPattern())
>>> p.match([1])
Match({})
>>> p.match([1, 2])
"""
return ListPattern(self, other)
def __add__(self, other):
return self.head_tail_with(other)
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__,
', '.join('='.join((str(k), repr(v))) for (k, v) in
self.__dict__.items() if v))
class AnyPattern(Pattern):
"""Pattern that matches anything."""
def _does_match(self, other, ctx):
return Match(ctx)
class EqualsPattern(Pattern):
"""Pattern that only matches objects that equal the given object."""
def __init__(self, obj):
super(EqualsPattern, self).__init__()
self.obj = obj
def _does_match(self, other, ctx):
if self.obj == other:
return Match(ctx)
else:
return None
class InstanceOfPattern(Pattern):
"""Pattern that only matches instances of the given class."""
def __init__(self, cls):
super(InstanceOfPattern, self).__init__()
self.cls = cls
def _does_match(self, other, ctx):
if isinstance(other, self.cls):
return Match(ctx)
else:
return None
_CompiledRegex = type(re.compile(''))
class RegexPattern(Pattern):
"""Pattern that only matches strings that match the given regex."""
def __init__(self, regex):
super(RegexPattern, self).__init__()
if not isinstance(regex, _CompiledRegex):
regex = re.compile(regex)
self.regex = regex
def _does_match(self, other, ctx):
re_match = self.regex.match(other)
if re_match:
return Match(ctx, re_match.groups())
return None
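# Illustrative use of RegexPattern (hypothetical regex): the regex groups become
# the value bound to the name.
#   (RegexPattern(r'(\d+)\.(\d+)') % 'parts').match('3.14') -> Match({'parts': ('3', '14')})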
class ListPattern(Pattern):
"""Pattern that only matches iterables whose head matches `head_pattern` and
whose tail matches `tail_pattern`"""
def __init__(self, head_pattern=None, tail_pattern=None):
super(ListPattern, self).__init__()
if head_pattern is not None and tail_pattern is None:
tail_pattern = ListPattern()
self.head_pattern = head_pattern
self.tail_pattern = tail_pattern
def head_tail_with(self, other):
return ListPattern(self.head_pattern,
self.tail_pattern.head_tail_with(other))
def _does_match(self, other, ctx):
try:
if (self.head_pattern is None and
self.tail_pattern is None and
len(other) == 0):
return Match(ctx)
except TypeError:
return None
if isinstance(other, _basestring):
return None
try:
head, tail = other[0], other[1:]
except (IndexError, TypeError):
return None
if self.head_pattern is not None:
match = self.head_pattern.match(head, ctx)
if match:
ctx = match.ctx
match = self.tail_pattern.match(tail, ctx)
if match:
ctx = match.ctx
else:
return None
else:
return None
else:
if len(other):
return None
return Match(ctx)
class NamedTuplePattern(Pattern):
"""Pattern that only matches named tuples of the given class and whose
contents match the given patterns."""
def __init__(self, casecls, *initpatterns):
super(NamedTuplePattern, self).__init__()
self.casecls_pattern = InstanceOfPattern(casecls)
if (len(initpatterns) == 1 and
isinstance(initpatterns[0], ListPattern)):
self.initargs_pattern = initpatterns[0]
else:
self.initargs_pattern = build(*initpatterns, **dict(is_list=True))
def _does_match(self, other, ctx):
match = self.casecls_pattern.match(other, ctx)
if not match:
return None
ctx = match.ctx
return self.initargs_pattern.match(other, ctx)
class OrPattern(Pattern):
"""Pattern that matches whenever any of the inner patterns match."""
def __init__(self, *patterns):
if len(patterns) < 2:
raise ValueError('need at least two patterns')
super(OrPattern, self).__init__()
self.patterns = patterns
def _does_match(self, other, ctx):
for pattern in self.patterns:
if ctx is not None:
ctx_ = ctx.copy()
else:
ctx_ = None
match = pattern.match(other, ctx_)
if match:
return match
return None
def build(*args, **kwargs):
"""
Shorthand pattern factory.
Examples:
>>> build() == AnyPattern()
True
>>> build(1) == EqualsPattern(1)
True
>>> build('abc') == EqualsPattern('abc')
True
>>> build(str) == InstanceOfPattern(str)
True
>>> build(re.compile('.*')) == RegexPattern('.*')
True
>>> build(()) == build([]) == ListPattern()
True
>>> build([1]) == build((1,)) == ListPattern(EqualsPattern(1),
... ListPattern())
True
>>> build(int, str, 'a') == ListPattern(InstanceOfPattern(int),
... ListPattern(InstanceOfPattern(str),
... ListPattern(EqualsPattern('a'))))
True
>>> try:
... from collections import namedtuple
... MyTuple = namedtuple('MyTuple', 'a b c')
... build(MyTuple(1, 2, 3)) == NamedTuplePattern(MyTuple, 1, 2, 3)
... except ImportError:
... True
True
"""
arglen = len(args)
if arglen > 1:
head, tail = args[0], args[1:]
return ListPattern(build(head), build(*tail, **(dict(is_list=True))))
if arglen == 0:
return AnyPattern()
(arg,) = args
if kwargs.get('is_list', False):
return ListPattern(build(arg))
if isinstance(arg, Pattern):
return arg
if isinstance(arg, _CompiledRegex):
return RegexPattern(arg)
if isinstance(arg, tuple) and hasattr(arg, '_fields'):
return NamedTuplePattern(arg.__class__, *map(build, arg))
if isinstance(arg, type):
return InstanceOfPattern(arg)
if isinstance(arg, (tuple, list)):
if len(arg) == 0:
return ListPattern()
return build(*arg, **(dict(is_list=True)))
return EqualsPattern(arg)
| mit | -5,982,177,463,813,209,000 | 30.011331 | 80 | 0.54636 | false |
MarcosCommunity/odoo | comunity_modules/product_prices_update/wizard/wizard_update_prices.py | 3 | 7297 | # -*- coding: utf-8 -*-
from openerp import fields, models, api, _
from openerp.exceptions import Warning
from openerp import tools
class prices_update_wizard(models.TransientModel):
_name = 'product.prices_update_wizard'
price_type = fields.Selection(
[('list_price', 'Sale Price'), ('standard_price', 'Cost Price')],
required=True,
string='Price Type')
    price_discount = fields.Float('Price Discount')
price_surcharge = fields.Float(
        'Price Surcharge', help='Specify the fixed amount to add or subtract (if negative) to the amount calculated with the discount.')
price_round = fields.Float('Price Rounding', help="Sets the price so that it is a multiple of this value.\n"
"Rounding is applied after the discount and before the surcharge.\n"
"To have prices that end in 9.99, set rounding 10, surcharge -0.01"
)
check = fields.Boolean('Check before changing')
@api.multi
def change_prices(self, context=None):
active_ids = context.get('active_ids', [])
products_vals = []
if not active_ids:
raise Warning(_('You must select at least one product'))
if self.check is True:
actions = self.env.ref(
'product_prices_update.action_prices_update_wizard_result')
if actions:
action_read = actions.read()[0]
action_read['context'] = {
'product_tmpl_ids': active_ids,
'price_type': self.price_type,
'price_discount': self.price_discount,
'price_surcharge': self.price_surcharge,
'price_round': self.price_round,
}
return action_read
else:
for prodct in self.env['product.template'].browse(active_ids):
if self.price_type == 'list_price':
old_price = prodct.list_price
elif self.price_type == 'standard_price':
old_price = prodct.standard_price
else:
raise Warning(
_('Price type "%s" is not implemented') % (self.price_type))
new_price = self.calc_new_price(
old_price, self.price_discount,
self.price_surcharge, self.price_round)
vals = {
'product_tmpl': prodct,
'new_price': new_price,
}
products_vals.append(vals)
return self.update_prices(products_vals, self.price_type)
@api.model
def update_prices(self, products_vals, price_type):
product_ids = []
change_price_obj = self.pool.get("stock.change.standard.price")
for line in products_vals:
            if line['product_tmpl'].cost_method == u'average' and price_type == 'standard_price':
new_change = change_price_obj.create(self.env.cr, self.env.uid, {"new_price": line['new_price']}, context=self.env.context)
context = {'active_id': line['product_tmpl'].id,
'active_ids': [line['product_tmpl'].id],
'active_model': 'product.template',
'lang': 'es_DO',
'params': {'_push_me': False,
'action': 176,
'limit': 80,
'model': 'product.template',
'page': 0,
'view_type': 'list'},
'search_disable_custom_filters': True,
'tz': 'America/Santo_Domingo',
'uid': 1}
change_price_obj.change_price(self.env.cr, self.env.uid, new_change, context=context)
else:
line['product_tmpl'].write({price_type: line['new_price']})
product_ids.append(line['product_tmpl'].id)
return {
'type': 'ir.actions.act_window',
'name': _('Products'),
'res_model': 'product.template',
'view_type': 'form',
'view_mode': 'tree,form',
'domain': [('id', 'in', product_ids)],
'target': 'current',
'nodestroy': True,
}
@api.model
def calc_new_price(
self, old_price, price_discount, price_surcharge, price_round):
new_price = old_price * \
(1.0 + (price_discount or 0.0))
if price_round:
new_price = tools.float_round(
new_price, precision_rounding=price_round)
if price_surcharge:
new_price += price_surcharge
return new_price
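    # Worked example of the rounding/surcharge help text above (hypothetical
    # numbers): old_price=100.0, price_discount=0.0, price_round=10.0, price_surcharge=-0.01
    #   100.0 * 1.0 = 100.0 -> rounded to a multiple of 10 = 100.0 -> + (-0.01) = 99.99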
class prices_update_wizard_result_detail(models.TransientModel):
_name = 'product.prices_update_wizard_result_detail'
result_id = fields.Many2one(
'product.prices_update_wizard_result', 'Result')
product_tmpl_id = fields.Many2one(
'product.template', 'Product Template',
readonly=True)
old_price = fields.Float(
'Old Price',
readonly=True)
new_price = fields.Float(
'New Price',
required=True
)
class prices_update_wizard_result(models.TransientModel):
_name = 'product.prices_update_wizard_result'
@api.model
def _get_details(self):
ret = []
price_discount = self._context.get('price_discount', 0.0)
price_surcharge = self._context.get('price_surcharge', 0.0)
price_round = self._context.get('price_round', 0.0)
product_tmpl_ids = self._context.get('product_tmpl_ids', [])
price_type = self._context.get('price_type', False)
for product_tmpl in self.env['product.template'].browse(
product_tmpl_ids):
if price_type == 'list_price':
old_price = product_tmpl.list_price
elif price_type == 'standard_price':
old_price = product_tmpl.standard_price
else:
raise Warning(
_('Price type "%s" is not implemented') % (price_type))
vals = {
'product_tmpl_id': product_tmpl.id,
'old_price': old_price,
'new_price': self.env[
'product.prices_update_wizard'].calc_new_price(
old_price, price_discount,
price_surcharge, price_round),
}
ret.append(vals)
return ret
detail_ids = fields.One2many(
'product.prices_update_wizard_result_detail',
'result_id',
string='Products Detail',
default=_get_details,
)
@api.multi
def confirm(self):
products_vals = []
price_type = self._context.get('price_type', False)
for line in self.detail_ids:
vals = {
'product_tmpl': line.product_tmpl_id,
'new_price': line.new_price,
}
products_vals.append(vals)
return self.env['product.prices_update_wizard'].update_prices(
products_vals, price_type)
| agpl-3.0 | 643,656,491,633,140,200 | 40.697143 | 139 | 0.521036 | false |
orospakr/peephole | peephole/peephole_client.py | 1 | 5019 | #!/usr/bin/env python
# Peephole - a D-Bus service providing access to small LCD panels
# Copyright (C) 2007-2008 Andrew Clunis
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gobject
from gettext import gettext as _
import dbus
import dbus.service
import dbus.mainloop.glib
from dbus.exceptions import DBusException
import logging
import struct
import sys
import os
from optparse import OptionParser
from peephole.peepholed import PEEPHOLE_WELL_KNOWN_NAME
from peephole.dbus_settings import *
def getButtons(selected_lcd, bus):
    buttons = {}
    button_paths = selected_lcd.GetButtons()
for path in button_paths:
button_proxy = bus.get_object(PEEPHOLE_WELL_KNOWN_NAME, path)
button = dbus.Interface(button_proxy, dbus_interface=BUTTON_INTERFACE)
button_name = button.GetName()
buttons[button_name] = button
return buttons
def main():
usage = "%prog: [--lcd=LCD], needs one of [--list] [--print-buttons]"
parser = OptionParser(usage)
parser.add_option("-L", "--lcd", dest="lcd",
help="LCD to interact with")
parser.add_option("-l", "--list", action="store_true",
dest="list",
help="Print list of LCDs in the system")
parser.add_option("-b", "--print-buttons", action="store_true",
dest="print_buttons",
help="Print button events on stdout as they occur")
parser.add_option("-B", "--button", dest="button",
help="Button to interact with, used with --set-button-backlight")
parser.add_option("-O", "--button-backlight-on", dest="button_backlight", action="store_true",
help="Turn on button's (specified by --button) backlight")
parser.add_option("-o", "--button-backlight-off", dest="button_backlight", action="store_false",
help="Turn off button's (specified by --button) backlight")
(options, args) = parser.parse_args()
if not (options.list or options.print_buttons or (options.button_backlight is not None)):
parser.error("You must specify an option.")
mainloop = gobject.MainLoop()
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
bus = dbus.SystemBus()
peep_proxy = bus.get_object(PEEPHOLE_WELL_KNOWN_NAME,
PEEPHOLE_PATH)
peep = dbus.Interface(peep_proxy, dbus_interface=PEEPHOLE_INTERFACE)
try:
lcd_paths = peep.GetLCDs()
except DBusException, e:
print "\nPeephole D-Bus service is unavailable. Possible causes: \n\
1. Missing D-Bus activation configuration -- alternatively, the daemon may \n\
also be invoked manually. \n\
2. Missing security policy (see README) \n\
3. Daemon was started, but no LCDs were detected."
sys.exit(-1)
lcds = {}
for path in lcd_paths:
lcd_proxy = bus.get_object(PEEPHOLE_WELL_KNOWN_NAME, path)
lcd = dbus.Interface(lcd_proxy, dbus_interface=LCD_INTERFACE)
lcd_name = lcd.GetName()
lcds[lcd_name] = lcd
if options.list:
for name, lcd in lcds.items():
print name
sys.exit(0)
selected_lcd = None
if options.lcd is not None:
if options.lcd not in lcds:
parser.error("That LCD does not exist.")
selected_lcd = lcds[options.lcd]
print "Selected: '%s'" % options.lcd
else:
for name, l in lcds.items():
print "Fell back to default LCD: '%s'" % name
selected_lcd = l
break
buttons = getButtons(selected_lcd, bus)
if options.button_backlight is not None:
if options.button is None:
parser.error("You must specify --button")
if options.button not in buttons:
parser.error("That button does not exist.")
button = buttons[options.button]
button.SetBacklight(options.button_backlight)
if options.print_buttons:
class PressReceiver(object):
def __init__(self, button, name):
self.button = button
self.name = name
def pressed(self):
print self.name
for name, btn in buttons.items():
receiver = PressReceiver(btn, name)
btn.connect_to_signal("Pressed", receiver.pressed)
mainloop.run()
sys.exit(0)
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 | -3,722,204,027,374,416,400 | 34.595745 | 100 | 0.635784 | false |
puchake/market-teller | src/data_handling/set_assembly.py | 1 | 7628 | import numpy as np
import datetime as dt
from os import listdir, path
def gather_mats(
split_mat, avg_5_mat, avg_25_mat, avg_50_mat, dates_mat, min_year
):
"""
Collects chosen columns from split and avg matrices and adds dates_mat
indicator data for each row (each day).
:param split_mat: original company data matrix
:param avg_5_mat: matrix with EMA of length 5 of closing prices
:param avg_25_mat: matrix with EMA of length 25 of closing prices
:param avg_50_mat: matrix with EMA of length 50 of closing prices
    :param dates_mat: matrix of profit indicators for each date
    :param min_year: earliest year present in the data, used to index dates_mat
:return: matrix of gathered data
"""
# Gather matrix columns indices.
gather_split_i = 0
gather_avg_5_i = 1
gather_avg_25_i = 2
gather_avg_50_i = 3
gather_volume_i = 4
gather_dates_indicator_i = 5
# Indices of date fragment columns in split matrix.
dates_indices = [1, 2, 3]
# Indices of elements in dates matrix.
all_i = 0
profit_i = 1
# Index of close price column and volume column.
close_i = 5
volume_i = 6
# Number of gathered values. Original close price + 3 averages profit
# indicator and volume will be collected.
gathered_row_len = 6
# Create gathered mat with row count of avg_50_mat as it is the shortest
# of all input matrices.
gathered_mat = np.zeros([avg_50_mat.shape[0], gathered_row_len])
for i in range(avg_50_mat.shape[0]):
# Gather split, avg_5, avg_25, avg_50 and volume columns.
gathered_mat[-(i + 1), gather_split_i] = split_mat[-(i + 1), close_i]
gathered_mat[-(i + 1), gather_avg_5_i] = avg_5_mat[-(i + 1), close_i]
gathered_mat[-(i + 1), gather_avg_25_i] = avg_25_mat[-(i + 1), close_i]
gathered_mat[-(i + 1), gather_avg_50_i] = avg_50_mat[-(i + 1), close_i]
gathered_mat[-(i + 1), gather_volume_i] = split_mat[-(i + 1), volume_i]
# Construct the date of current row and access dates matrix indicator.
date = dt.date(*(split_mat[-(i + 1), dates_indices].astype(np.int32)))
all_count = dates_mat[
date.year - min_year, date.month - 1,
date.day - 1, all_i
]
profit_count = dates_mat[
date.year - min_year, date.month - 1,
date.day - 1, profit_i
]
# Set indicator column element of current row to calculated indicator.
gathered_mat[-(i + 1), gather_dates_indicator_i] = profit_count / \
all_count
return gathered_mat
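# Column layout of each gathered row (indices defined above):
# [close, EMA-5 close, EMA-25 close, EMA-50 close, volume, dates profit indicator]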
def label_mat(mat):
"""
Assign labels to each row of gathered matrix.
:param mat: previously gathered matrix
:return: labels for gathered matrix rows
"""
# Index and range of average used for labeling.
gather_avg_25_i = 2
avg_range = 25
# Labels for rising and falling price.
rising_i = 1
falling_i = 0
num_classes = 2
labels = np.zeros([mat.shape[0] - avg_range + 1, num_classes])
for i in range(mat.shape[0] - avg_range + 1):
# If average 25 day price rises after 24 days assign rising label, else
# assign falling label.
if mat[i, gather_avg_25_i] < mat[i + avg_range - 1, gather_avg_25_i]:
labels[i, rising_i] = 1.0
else:
labels[i, falling_i] = 1.0
return labels
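# Note: only the first (mat.shape[0] - 24) rows receive a label, since each label
# compares row i with row i + 24; assemble_set() trims the feature rows to match.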
def normalize_mat(mat):
"""
Bring all values in matrix to around -1, 1 range with mean 0.
:param mat: matrix of gathered data
:return: normalized matrix
"""
# Gather matrix columns indices.
gather_split_i = 0
gather_avg_5_i = 1
gather_avg_25_i = 2
gather_avg_50_i = 3
gather_volume_i = 4
gather_dates_indicator_i = 5
# Normalize prices. We want to keep relationship between prices
# (eg. avg_5 > split) untouched, so we use single set of max and mean for
# split and all averages.
prices_indices = [
gather_split_i, gather_avg_5_i, gather_avg_25_i, gather_avg_50_i
]
mat[:, prices_indices] /= np.max(mat[:, prices_indices])
mat[:, prices_indices] *= 2
mat[:, prices_indices] -= np.mean(mat[:, prices_indices])
# Normalize volume.
mat[:, gather_volume_i] /= np.max(mat[:, gather_volume_i])
mat[:, gather_volume_i] *= 2
mat[:, gather_volume_i] -= np.mean(mat[:, gather_volume_i])
# Subtract 1.0 from dates indicator multiplied by 2.0 as it is already in
# range 0.0, 1.0 and we don't want characteristic values to vary between
# matrices as it is data outside of one company scope.
dates_indicator_mean = 1.0
mat[:, gather_dates_indicator_i] *= 2
mat[:, gather_dates_indicator_i] -= dates_indicator_mean
return mat
def assemble_set(
split_in_dir_path, avg_5_in_dir_path, avg_25_in_dir_path,
avg_50_in_dir_path, dates_mat_path, min_year,
data_out_dir_path, labels_out_dir_path
):
"""
Gathers companies data, labels and normalizes it.
:param split_in_dir_path: path to dir containing split matrices
:param avg_5_in_dir_path: path to avg_5 matrices in dir
:param avg_25_in_dir_path: path to avg_25 matrices in dir
:param avg_50_in_dir_path: path to avg_50 matrices in dir
:param dates_mat_path: path to dates matrix
:param min_year: min year contained in companies data
:param data_out_dir_path: path to data output dir
:param labels_out_dir_path: path to labels output dir
:return: -
"""
# Minimal size of the gathered matrix.
labeling_range = 25
# Load dates matrix.
dates_mat = np.load(dates_mat_path)
for filename in listdir(split_in_dir_path):
# If company matrix exists in all variants.
if path.isfile(avg_5_in_dir_path + "/" + filename) and \
path.isfile(avg_25_in_dir_path + "/" + filename) and \
path.isfile(avg_50_in_dir_path + "/" + filename):
# Load all matrices.
split_mat = np.load(split_in_dir_path + "/" + filename)
avg_5_mat = np.load(avg_5_in_dir_path + "/" + filename)
avg_25_mat = np.load(avg_25_in_dir_path + "/" + filename)
avg_50_mat = np.load(avg_50_in_dir_path + "/" + filename)
# Gather data from them, label it and normalize if we have
# enough data to label it.
if avg_50_mat.shape[0] >= labeling_range:
gathered_mat = gather_mats(
split_mat, avg_5_mat, avg_25_mat,
avg_50_mat, dates_mat, min_year
)
labels = label_mat(gathered_mat)
labeled_rows = labels.shape[0]
normalized_mat = normalize_mat(gathered_mat[:labeled_rows])
# Save results.
np.save(data_out_dir_path + "/" + filename, normalized_mat)
np.save(labels_out_dir_path + "/" + filename, labels)
def main():
"""
Main function of this script.
:return: -
"""
# Path used in assembly and previously discovered min year value.
split_in_dir_path = "../../data/split"
avg_5_in_dir_path = "../../data/averaged_5"
avg_25_in_dir_path = "../../data/averaged_25"
avg_50_in_dir_path = "../../data/averaged_50"
dates_mat_path = "../../data/dates_matrix/dates_matrix.npy"
min_year = 1962
data_out_dir_path = "../../data/rnn_set/data"
labels_out_dir_path = "../../data/rnn_set/labels"
assemble_set(
split_in_dir_path, avg_5_in_dir_path, avg_25_in_dir_path,
avg_50_in_dir_path, dates_mat_path, min_year,
data_out_dir_path, labels_out_dir_path
)
if __name__ == "__main__":
main() | mit | 4,783,202,378,305,829,000 | 39.152632 | 79 | 0.601599 | false |
FlaPer87/django-nonrel | django/dispatch/dispatcher.py | 19 | 8313 | import weakref
from django.dispatch import saferef
WEAKREF_TYPES = (weakref.ReferenceType, saferef.BoundMethodWeakref)
def _make_id(target):
if hasattr(target, 'im_func'):
return (id(target.im_self), id(target.im_func))
return id(target)
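# For a bound method the key is the (instance id, function id) pair, so connecting
# the same method twice maps to one receiver entry; any other object (including
# None, meaning "any sender") falls back to a plain id().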
class Signal(object):
"""
Base class for all signals
Internal attributes:
receivers
            { receiverkey (id) : weakref(receiver) }
"""
def __init__(self, providing_args=None):
"""
Create a new signal.
providing_args
A list of the arguments this signal can pass along in a send() call.
"""
self.receivers = []
if providing_args is None:
providing_args = []
self.providing_args = set(providing_args)
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
"""
Connect receiver to sender for signal.
Arguments:
receiver
A function or an instance method which is to receive signals.
Receivers must be hashable objects.
if weak is True, then receiver must be weak-referencable (more
precisely saferef.safeRef() must be able to create a reference
to the receiver).
Receivers must be able to accept keyword arguments.
If receivers have a dispatch_uid attribute, the receiver will
not be added if another receiver already exists with that
dispatch_uid.
sender
                The sender to which the receiver should respond. Must either be
of type Signal, or None to receive events from any sender.
weak
Whether to use weak references to the receiver By default, the
module will attempt to use weak references to the receiver
objects. If this parameter is false, then strong references will
be used.
dispatch_uid
An identifier used to uniquely identify a particular instance of
a receiver. This will usually be a string, though it may be
anything hashable.
"""
from django.conf import settings
# If DEBUG is on, check that we got a good receiver
if settings.DEBUG:
import inspect
assert callable(receiver), "Signal receivers must be callable."
# Check for **kwargs
# Not all callables are inspectable with getargspec, so we'll
# try a couple different ways but in the end fall back on assuming
# it is -- we don't want to prevent registration of valid but weird
# callables.
try:
argspec = inspect.getargspec(receiver)
except TypeError:
try:
argspec = inspect.getargspec(receiver.__call__)
except (TypeError, AttributeError):
argspec = None
if argspec:
assert argspec[2] is not None, \
"Signal receivers must accept keyword arguments (**kwargs)."
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
if weak:
receiver = saferef.safeRef(receiver, onDelete=self._remove_receiver)
for r_key, _ in self.receivers:
if r_key == lookup_key:
break
else:
self.receivers.append((lookup_key, receiver))
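    # Illustrative usage (assumed, minimal):
    #   sig = Signal(providing_args=["value"])
    #   def on_event(sender, **kwargs): return kwargs["value"]
    #   sig.connect(on_event)            # weak reference kept by default
    #   sig.send(sender=None, value=42)  # -> [(on_event, 42)]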
def disconnect(self, receiver=None, sender=None, weak=True, dispatch_uid=None):
"""
Disconnect receiver from sender for signal.
If weak references are used, disconnect need not be called. The receiver
        will be removed from dispatch automatically.
Arguments:
receiver
The registered receiver to disconnect. May be none if
dispatch_uid is specified.
sender
The registered sender to disconnect
weak
The weakref state to disconnect
dispatch_uid
the unique identifier of the receiver to disconnect
"""
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
for index in xrange(len(self.receivers)):
(r_key, _) = self.receivers[index]
if r_key == lookup_key:
del self.receivers[index]
break
def send(self, sender, **named):
"""
Send signal from sender to all connected receivers.
If any receiver raises an error, the error propagates back through send,
terminating the dispatch loop, so it is quite possible to not have all
        receivers called if a receiver raises an error.
Arguments:
sender
                The sender of the signal. Either a specific object or None.
named
Named arguments which will be passed to receivers.
Returns a list of tuple pairs [(receiver, response), ... ].
"""
responses = []
if not self.receivers:
return responses
for receiver in self._live_receivers(_make_id(sender)):
response = receiver(signal=self, sender=sender, **named)
responses.append((receiver, response))
return responses
def send_robust(self, sender, **named):
"""
Send signal from sender to all connected receivers catching errors.
Arguments:
sender
                The sender of the signal. Can be any Python object (normally one
registered with a connect if you actually want something to
occur).
named
Named arguments which will be passed to receivers. These
arguments must be a subset of the argument names defined in
providing_args.
Return a list of tuple pairs [(receiver, response), ... ]. May raise
DispatcherKeyError.
        If any receiver raises an error (specifically any subclass of
Exception), the error instance is returned as the result for that
receiver.
"""
responses = []
if not self.receivers:
return responses
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
for receiver in self._live_receivers(_make_id(sender)):
try:
response = receiver(signal=self, sender=sender, **named)
except Exception, err:
responses.append((receiver, err))
else:
responses.append((receiver, response))
return responses
def _live_receivers(self, senderkey):
"""
Filter sequence of receivers to get resolved, live receivers.
        This checks for weak references, resolves them, and returns only the
        live receivers.
"""
none_senderkey = _make_id(None)
receivers = []
for (receiverkey, r_senderkey), receiver in self.receivers:
if r_senderkey == none_senderkey or r_senderkey == senderkey:
if isinstance(receiver, WEAKREF_TYPES):
# Dereference the weak reference.
receiver = receiver()
if receiver is not None:
receivers.append(receiver)
else:
receivers.append(receiver)
return receivers
def _remove_receiver(self, receiver):
"""
Remove dead receivers from connections.
"""
to_remove = []
for key, connected_receiver in self.receivers:
if connected_receiver == receiver:
to_remove.append(key)
for key in to_remove:
for idx, (r_key, _) in enumerate(self.receivers):
if r_key == key:
del self.receivers[idx]
| bsd-3-clause | 48,644,300,632,171,970 | 34.075949 | 83 | 0.560447 | false |
margaret/python-datamuse | datamuse/test_api.py | 1 | 1445 | import unittest
import datamuse
from datamuse import Datamuse
class DatamuseTestCase(unittest.TestCase):
def setUp(self):
self.api = Datamuse()
self.max = 5
# words endpoint
def test_sounds_like(self):
args = {'sl': 'orange', 'max': self.max}
data = self.api.words(**args)
        self.assertIsInstance(data, list)
print("sounds like", data)
def test_rhymes(self):
args = {'rel_rhy': 'orange', 'max': self.max}
data = self.api.words(**args)
self.assertTrue(len(data) <= self.max)
print("rhyme", data)
def test_near_rhymes(self):
args = {'rel_nry': 'orange', 'max': self.max}
data = self.api.words(**args)
self.assertTrue(len(data) <= self.max)
print("near rhyme", data)
def test_bad_request(self):
args = {'foo':42}
with self.assertRaises(ValueError):
data = self.api.words(**args)
def test_set_max(self):
        self.assertEqual(self.api.max, 100)
self.api.set_max_default(10)
self.assertEquals(self.api.max, 10)
data = self.api.words(ml='ringing in the ears')
self.assertEquals(len(data), 10)
def test_set_max_error(self):
        for bad_value in (-2, 0, 1001):
            with self.assertRaises(ValueError):
                self.api.set_max_default(bad_value)
if __name__ == "__main__":
unittest.main()
| mit | -978,083,975,301,407,400 | 28.489796 | 55 | 0.579931 | false |
jetskijoe/headphones | lib/unidecode/x0d5.py | 253 | 4680 | data = (
'pyuk', # 0x00
'pyut', # 0x01
'pyup', # 0x02
'pyuh', # 0x03
'peu', # 0x04
'peug', # 0x05
'peugg', # 0x06
'peugs', # 0x07
'peun', # 0x08
'peunj', # 0x09
'peunh', # 0x0a
'peud', # 0x0b
'peul', # 0x0c
'peulg', # 0x0d
'peulm', # 0x0e
'peulb', # 0x0f
'peuls', # 0x10
'peult', # 0x11
'peulp', # 0x12
'peulh', # 0x13
'peum', # 0x14
'peub', # 0x15
'peubs', # 0x16
'peus', # 0x17
'peuss', # 0x18
'peung', # 0x19
'peuj', # 0x1a
'peuc', # 0x1b
'peuk', # 0x1c
'peut', # 0x1d
'peup', # 0x1e
'peuh', # 0x1f
'pyi', # 0x20
'pyig', # 0x21
'pyigg', # 0x22
'pyigs', # 0x23
'pyin', # 0x24
'pyinj', # 0x25
'pyinh', # 0x26
'pyid', # 0x27
'pyil', # 0x28
'pyilg', # 0x29
'pyilm', # 0x2a
'pyilb', # 0x2b
'pyils', # 0x2c
'pyilt', # 0x2d
'pyilp', # 0x2e
'pyilh', # 0x2f
'pyim', # 0x30
'pyib', # 0x31
'pyibs', # 0x32
'pyis', # 0x33
'pyiss', # 0x34
'pying', # 0x35
'pyij', # 0x36
'pyic', # 0x37
'pyik', # 0x38
'pyit', # 0x39
'pyip', # 0x3a
'pyih', # 0x3b
'pi', # 0x3c
'pig', # 0x3d
'pigg', # 0x3e
'pigs', # 0x3f
'pin', # 0x40
'pinj', # 0x41
'pinh', # 0x42
'pid', # 0x43
'pil', # 0x44
'pilg', # 0x45
'pilm', # 0x46
'pilb', # 0x47
'pils', # 0x48
'pilt', # 0x49
'pilp', # 0x4a
'pilh', # 0x4b
'pim', # 0x4c
'pib', # 0x4d
'pibs', # 0x4e
'pis', # 0x4f
'piss', # 0x50
'ping', # 0x51
'pij', # 0x52
'pic', # 0x53
'pik', # 0x54
'pit', # 0x55
'pip', # 0x56
'pih', # 0x57
'ha', # 0x58
'hag', # 0x59
'hagg', # 0x5a
'hags', # 0x5b
'han', # 0x5c
'hanj', # 0x5d
'hanh', # 0x5e
'had', # 0x5f
'hal', # 0x60
'halg', # 0x61
'halm', # 0x62
'halb', # 0x63
'hals', # 0x64
'halt', # 0x65
'halp', # 0x66
'halh', # 0x67
'ham', # 0x68
'hab', # 0x69
'habs', # 0x6a
'has', # 0x6b
'hass', # 0x6c
'hang', # 0x6d
'haj', # 0x6e
'hac', # 0x6f
'hak', # 0x70
'hat', # 0x71
'hap', # 0x72
'hah', # 0x73
'hae', # 0x74
'haeg', # 0x75
'haegg', # 0x76
'haegs', # 0x77
'haen', # 0x78
'haenj', # 0x79
'haenh', # 0x7a
'haed', # 0x7b
'hael', # 0x7c
'haelg', # 0x7d
'haelm', # 0x7e
'haelb', # 0x7f
'haels', # 0x80
'haelt', # 0x81
'haelp', # 0x82
'haelh', # 0x83
'haem', # 0x84
'haeb', # 0x85
'haebs', # 0x86
'haes', # 0x87
'haess', # 0x88
'haeng', # 0x89
'haej', # 0x8a
'haec', # 0x8b
'haek', # 0x8c
'haet', # 0x8d
'haep', # 0x8e
'haeh', # 0x8f
'hya', # 0x90
'hyag', # 0x91
'hyagg', # 0x92
'hyags', # 0x93
'hyan', # 0x94
'hyanj', # 0x95
'hyanh', # 0x96
'hyad', # 0x97
'hyal', # 0x98
'hyalg', # 0x99
'hyalm', # 0x9a
'hyalb', # 0x9b
'hyals', # 0x9c
'hyalt', # 0x9d
'hyalp', # 0x9e
'hyalh', # 0x9f
'hyam', # 0xa0
'hyab', # 0xa1
'hyabs', # 0xa2
'hyas', # 0xa3
'hyass', # 0xa4
'hyang', # 0xa5
'hyaj', # 0xa6
'hyac', # 0xa7
'hyak', # 0xa8
'hyat', # 0xa9
'hyap', # 0xaa
'hyah', # 0xab
'hyae', # 0xac
'hyaeg', # 0xad
'hyaegg', # 0xae
'hyaegs', # 0xaf
'hyaen', # 0xb0
'hyaenj', # 0xb1
'hyaenh', # 0xb2
'hyaed', # 0xb3
'hyael', # 0xb4
'hyaelg', # 0xb5
'hyaelm', # 0xb6
'hyaelb', # 0xb7
'hyaels', # 0xb8
'hyaelt', # 0xb9
'hyaelp', # 0xba
'hyaelh', # 0xbb
'hyaem', # 0xbc
'hyaeb', # 0xbd
'hyaebs', # 0xbe
'hyaes', # 0xbf
'hyaess', # 0xc0
'hyaeng', # 0xc1
'hyaej', # 0xc2
'hyaec', # 0xc3
'hyaek', # 0xc4
'hyaet', # 0xc5
'hyaep', # 0xc6
'hyaeh', # 0xc7
'heo', # 0xc8
'heog', # 0xc9
'heogg', # 0xca
'heogs', # 0xcb
'heon', # 0xcc
'heonj', # 0xcd
'heonh', # 0xce
'heod', # 0xcf
'heol', # 0xd0
'heolg', # 0xd1
'heolm', # 0xd2
'heolb', # 0xd3
'heols', # 0xd4
'heolt', # 0xd5
'heolp', # 0xd6
'heolh', # 0xd7
'heom', # 0xd8
'heob', # 0xd9
'heobs', # 0xda
'heos', # 0xdb
'heoss', # 0xdc
'heong', # 0xdd
'heoj', # 0xde
'heoc', # 0xdf
'heok', # 0xe0
'heot', # 0xe1
'heop', # 0xe2
'heoh', # 0xe3
'he', # 0xe4
'heg', # 0xe5
'hegg', # 0xe6
'hegs', # 0xe7
'hen', # 0xe8
'henj', # 0xe9
'henh', # 0xea
'hed', # 0xeb
'hel', # 0xec
'helg', # 0xed
'helm', # 0xee
'helb', # 0xef
'hels', # 0xf0
'helt', # 0xf1
'help', # 0xf2
'helh', # 0xf3
'hem', # 0xf4
'heb', # 0xf5
'hebs', # 0xf6
'hes', # 0xf7
'hess', # 0xf8
'heng', # 0xf9
'hej', # 0xfa
'hec', # 0xfb
'hek', # 0xfc
'het', # 0xfd
'hep', # 0xfe
'heh', # 0xff
)
| gpl-3.0 | 6,608,159,422,335,880,000 | 17.139535 | 19 | 0.451496 | false |
KiChjang/servo | tests/wpt/web-platform-tests/tools/third_party/h2/examples/fragments/client_https_setup_fragment.py | 14 | 3887 | # -*- coding: utf-8 -*-
"""
Client HTTPS Setup
~~~~~~~~~~~~~~~~~~
This example code fragment demonstrates how to set up a HTTP/2 client that
negotiates HTTP/2 using NPN and ALPN. For the sake of maximum explanatory value
this code uses the synchronous, low-level sockets API: however, if you're not
using sockets directly (e.g. because you're using asyncio), you should focus on
the set up required for the SSLContext object. For other concurrency libraries
you may need to use other setup (e.g. for Twisted you'll need to use
IProtocolNegotiationFactory).
This code requires Python 3.5 or later.
"""
import h2.connection
import socket
import ssl
def establish_tcp_connection():
"""
This function establishes a client-side TCP connection. How it works isn't
very important to this example. For the purpose of this example we connect
to localhost.
"""
return socket.create_connection(('localhost', 443))
def get_http2_ssl_context():
"""
This function creates an SSLContext object that is suitably configured for
HTTP/2. If you're working with Python TLS directly, you'll want to do the
exact same setup as this function does.
"""
# Get the basic context from the standard library.
ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)
# RFC 7540 Section 9.2: Implementations of HTTP/2 MUST use TLS version 1.2
# or higher. Disable TLS 1.1 and lower.
ctx.options |= (
ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
)
# RFC 7540 Section 9.2.1: A deployment of HTTP/2 over TLS 1.2 MUST disable
# compression.
ctx.options |= ssl.OP_NO_COMPRESSION
# RFC 7540 Section 9.2.2: "deployments of HTTP/2 that use TLS 1.2 MUST
# support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256". In practice, the
# blocklist defined in this section allows only the AES GCM and ChaCha20
# cipher suites with ephemeral key negotiation.
ctx.set_ciphers("ECDHE+AESGCM:ECDHE+CHACHA20:DHE+AESGCM:DHE+CHACHA20")
# We want to negotiate using NPN and ALPN. ALPN is mandatory, but NPN may
# be absent, so allow that. This setup allows for negotiation of HTTP/1.1.
ctx.set_alpn_protocols(["h2", "http/1.1"])
try:
ctx.set_npn_protocols(["h2", "http/1.1"])
except NotImplementedError:
pass
return ctx
def negotiate_tls(tcp_conn, context):
"""
Given an established TCP connection and a HTTP/2-appropriate TLS context,
this function:
1. wraps TLS around the TCP connection.
2. confirms that HTTP/2 was negotiated and, if it was not, throws an error.
"""
# Note that SNI is mandatory for HTTP/2, so you *must* pass the
# server_hostname argument.
tls_conn = context.wrap_socket(tcp_conn, server_hostname='localhost')
# Always prefer the result from ALPN to that from NPN.
# You can only check what protocol was negotiated once the handshake is
# complete.
negotiated_protocol = tls_conn.selected_alpn_protocol()
if negotiated_protocol is None:
negotiated_protocol = tls_conn.selected_npn_protocol()
if negotiated_protocol != "h2":
raise RuntimeError("Didn't negotiate HTTP/2!")
return tls_conn
def main():
# Step 1: Set up your TLS context.
context = get_http2_ssl_context()
# Step 2: Create a TCP connection.
connection = establish_tcp_connection()
# Step 3: Wrap the connection in TLS and validate that we negotiated HTTP/2
tls_connection = negotiate_tls(connection, context)
# Step 4: Create a client-side H2 connection.
http2_connection = h2.connection.H2Connection()
# Step 5: Initiate the connection
http2_connection.initiate_connection()
tls_connection.sendall(http2_connection.data_to_send())
# The TCP, TLS, and HTTP/2 handshakes are now complete. You can enter your
# main loop now.
| mpl-2.0 | -4,884,118,786,317,630,000 | 34.327273 | 79 | 0.699177 | false |
farhaanbukhsh/sympy | sympy/plotting/pygletplot/color_scheme.py | 85 | 12579 | from __future__ import print_function, division
from sympy import Basic, Symbol, symbols, lambdify
from util import interpolate, rinterpolate, create_bounds, update_bounds
from sympy.core.compatibility import range
class ColorGradient(object):
colors = [0.4, 0.4, 0.4], [0.9, 0.9, 0.9]
intervals = 0.0, 1.0
def __init__(self, *args):
if len(args) == 2:
self.colors = list(args)
self.intervals = [0.0, 1.0]
elif len(args) > 0:
if len(args) % 2 != 0:
raise ValueError("len(args) should be even")
self.colors = [args[i] for i in range(1, len(args), 2)]
self.intervals = [args[i] for i in range(0, len(args), 2)]
assert len(self.colors) == len(self.intervals)
def copy(self):
c = ColorGradient()
c.colors = [e[::] for e in self.colors]
c.intervals = self.intervals[::]
return c
def _find_interval(self, v):
m = len(self.intervals)
i = 0
while i < m - 1 and self.intervals[i] <= v:
i += 1
return i
def _interpolate_axis(self, axis, v):
i = self._find_interval(v)
v = rinterpolate(self.intervals[i - 1], self.intervals[i], v)
return interpolate(self.colors[i - 1][axis], self.colors[i][axis], v)
def __call__(self, r, g, b):
c = self._interpolate_axis
return c(0, r), c(1, g), c(2, b)
default_color_schemes = {} # defined at the bottom of this file
class ColorScheme(object):
def __init__(self, *args, **kwargs):
self.args = args
self.f, self.gradient = None, ColorGradient()
if len(args) == 1 and not isinstance(args[0], Basic) and callable(args[0]):
self.f = args[0]
elif len(args) == 1 and isinstance(args[0], str):
if args[0] in default_color_schemes:
cs = default_color_schemes[args[0]]
self.f, self.gradient = cs.f, cs.gradient.copy()
else:
self.f = lambdify('x,y,z,u,v', args[0])
else:
self.f, self.gradient = self._interpret_args(args, kwargs)
self._test_color_function()
if not isinstance(self.gradient, ColorGradient):
raise ValueError("Color gradient not properly initialized. "
"(Not a ColorGradient instance.)")
def _interpret_args(self, args, kwargs):
f, gradient = None, self.gradient
atoms, lists = self._sort_args(args)
s = self._pop_symbol_list(lists)
s = self._fill_in_vars(s)
# prepare the error message for lambdification failure
f_str = ', '.join(str(fa) for fa in atoms)
s_str = (str(sa) for sa in s)
s_str = ', '.join(sa for sa in s_str if sa.find('unbound') < 0)
f_error = ValueError("Could not interpret arguments "
"%s as functions of %s." % (f_str, s_str))
# try to lambdify args
if len(atoms) == 1:
fv = atoms[0]
try:
f = lambdify(s, [fv, fv, fv])
except TypeError:
raise f_error
elif len(atoms) == 3:
fr, fg, fb = atoms
try:
f = lambdify(s, [fr, fg, fb])
except TypeError:
raise f_error
else:
raise ValueError("A ColorScheme must provide 1 or 3 "
"functions in x, y, z, u, and/or v.")
        # try to interpret any given color information
if len(lists) == 0:
gargs = []
elif len(lists) == 1:
gargs = lists[0]
elif len(lists) == 2:
try:
(r1, g1, b1), (r2, g2, b2) = lists
except TypeError:
raise ValueError("If two color arguments are given, "
"they must be given in the format "
"(r1, g1, b1), (r2, g2, b2).")
gargs = lists
elif len(lists) == 3:
try:
(r1, r2), (g1, g2), (b1, b2) = lists
except Exception:
raise ValueError("If three color arguments are given, "
"they must be given in the format "
"(r1, r2), (g1, g2), (b1, b2). To create "
"a multi-step gradient, use the syntax "
"[0, colorStart, step1, color1, ..., 1, "
"colorEnd].")
gargs = [[r1, g1, b1], [r2, g2, b2]]
else:
raise ValueError("Don't know what to do with collection "
"arguments %s." % (', '.join(str(l) for l in lists)))
if gargs:
try:
gradient = ColorGradient(*gargs)
except Exception as ex:
raise ValueError(("Could not initialize a gradient "
"with arguments %s. Inner "
"exception: %s") % (gargs, str(ex)))
return f, gradient
def _pop_symbol_list(self, lists):
symbol_lists = []
for l in lists:
mark = True
for s in l:
if s is not None and not isinstance(s, Symbol):
mark = False
break
if mark:
lists.remove(l)
symbol_lists.append(l)
if len(symbol_lists) == 1:
return symbol_lists[0]
elif len(symbol_lists) == 0:
return []
else:
raise ValueError("Only one list of Symbols "
"can be given for a color scheme.")
def _fill_in_vars(self, args):
        defaults = symbols('x,y,z,u,v')
        v_error = ValueError("Could not find what to plot.")
if len(args) == 0:
return defaults
if not isinstance(args, (tuple, list)):
raise v_error
if len(args) == 0:
return defaults
for s in args:
if s is not None and not isinstance(s, Symbol):
raise v_error
# when vars are given explicitly, any vars
# not given are marked 'unbound' as to not
# be accidentally used in an expression
vars = [Symbol('unbound%i' % (i)) for i in range(1, 6)]
# interpret as t
if len(args) == 1:
vars[3] = args[0]
# interpret as u,v
elif len(args) == 2:
if args[0] is not None:
vars[3] = args[0]
if args[1] is not None:
vars[4] = args[1]
# interpret as x,y,z
elif len(args) >= 3:
# allow some of x,y,z to be
# left unbound if not given
if args[0] is not None:
vars[0] = args[0]
if args[1] is not None:
vars[1] = args[1]
if args[2] is not None:
vars[2] = args[2]
# interpret the rest as t
if len(args) >= 4:
vars[3] = args[3]
# ...or u,v
if len(args) >= 5:
vars[4] = args[4]
return vars
def _sort_args(self, args):
atoms, lists = [], []
for a in args:
if isinstance(a, (tuple, list)):
lists.append(a)
else:
atoms.append(a)
return atoms, lists
def _test_color_function(self):
if not callable(self.f):
raise ValueError("Color function is not callable.")
try:
result = self.f(0, 0, 0, 0, 0)
if len(result) != 3:
raise ValueError("length should be equal to 3")
except TypeError as te:
raise ValueError("Color function needs to accept x,y,z,u,v, "
"as arguments even if it doesn't use all of them.")
except AssertionError as ae:
raise ValueError("Color function needs to return 3-tuple r,g,b.")
except Exception as ie:
pass # color function probably not valid at 0,0,0,0,0
def __call__(self, x, y, z, u, v):
try:
return self.f(x, y, z, u, v)
except Exception as e:
return None
def apply_to_curve(self, verts, u_set, set_len=None, inc_pos=None):
"""
Apply this color scheme to a
set of vertices over a single
independent variable u.
"""
bounds = create_bounds()
cverts = list()
if callable(set_len):
set_len(len(u_set)*2)
# calculate f() = r,g,b for each vert
# and find the min and max for r,g,b
for _u in range(len(u_set)):
if verts[_u] is None:
cverts.append(None)
else:
x, y, z = verts[_u]
u, v = u_set[_u], None
c = self(x, y, z, u, v)
if c is not None:
c = list(c)
update_bounds(bounds, c)
cverts.append(c)
if callable(inc_pos):
inc_pos()
# scale and apply gradient
for _u in range(len(u_set)):
if cverts[_u] is not None:
for _c in range(3):
# scale from [f_min, f_max] to [0,1]
cverts[_u][_c] = rinterpolate(bounds[_c][0], bounds[_c][1],
cverts[_u][_c])
# apply gradient
cverts[_u] = self.gradient(*cverts[_u])
if callable(inc_pos):
inc_pos()
return cverts
def apply_to_surface(self, verts, u_set, v_set, set_len=None, inc_pos=None):
"""
Apply this color scheme to a
set of vertices over two
independent variables u and v.
"""
bounds = create_bounds()
cverts = list()
if callable(set_len):
set_len(len(u_set)*len(v_set)*2)
# calculate f() = r,g,b for each vert
# and find the min and max for r,g,b
for _u in range(len(u_set)):
column = list()
for _v in range(len(v_set)):
if verts[_u][_v] is None:
column.append(None)
else:
x, y, z = verts[_u][_v]
u, v = u_set[_u], v_set[_v]
c = self(x, y, z, u, v)
if c is not None:
c = list(c)
update_bounds(bounds, c)
column.append(c)
if callable(inc_pos):
inc_pos()
cverts.append(column)
# scale and apply gradient
for _u in range(len(u_set)):
for _v in range(len(v_set)):
if cverts[_u][_v] is not None:
# scale from [f_min, f_max] to [0,1]
for _c in range(3):
cverts[_u][_v][_c] = rinterpolate(bounds[_c][0],
bounds[_c][1], cverts[_u][_v][_c])
# apply gradient
cverts[_u][_v] = self.gradient(*cverts[_u][_v])
if callable(inc_pos):
inc_pos()
return cverts
def str_base(self):
return ", ".join(str(a) for a in self.args)
def __repr__(self):
return "%s" % (self.str_base())
x, y, z, t, u, v = symbols('x,y,z,t,u,v')
default_color_schemes['rainbow'] = ColorScheme(z, y, x)
default_color_schemes['zfade'] = ColorScheme(z, (0.4, 0.4, 0.97),
(0.97, 0.4, 0.4), (None, None, z))
default_color_schemes['zfade3'] = ColorScheme(z, (None, None, z),
[0.00, (0.2, 0.2, 1.0),
0.35, (0.2, 0.8, 0.4),
0.50, (0.3, 0.9, 0.3),
0.65, (0.4, 0.8, 0.2),
1.00, (1.0, 0.2, 0.2)])
default_color_schemes['zfade4'] = ColorScheme(z, (None, None, z),
[0.0, (0.3, 0.3, 1.0),
0.30, (0.3, 1.0, 0.3),
0.55, (0.95, 1.0, 0.2),
0.65, (1.0, 0.95, 0.2),
0.85, (1.0, 0.7, 0.2),
1.0, (1.0, 0.3, 0.2)])
| bsd-3-clause | 5,139,992,811,954,619,000 | 36.106195 | 83 | 0.445743 | false |
martinribelotta/micropython | drivers/onewire/onewire.py | 66 | 11789 | """
OneWire library ported to MicroPython by Jason Hildebrand.
TODO:
* implement and test parasite-power mode (as an init option)
* port the crc checks
The original upstream copyright and terms follow.
------------------------------------------------------------------------------
Copyright (c) 2007, Jim Studt (original old version - many contributors since)
OneWire has been maintained by Paul Stoffregen ([email protected]) since
January 2010.
26 Sept 2008 -- Robin James
Jim Studt's original library was modified by Josh Larios.
Tom Pollard, [email protected], contributed around May 20, 2008
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Much of the code was inspired by Derek Yerger's code, though I don't
think much of that remains. In any event that was..
(copyleft) 2006 by Derek Yerger - Free to distribute freely.
"""
import pyb
from pyb import disable_irq
from pyb import enable_irq
class OneWire:
def __init__(self, pin):
"""
Pass the data pin connected to your one-wire device(s), for example Pin('X1').
The one-wire protocol allows for multiple devices to be attached.
"""
self.data_pin = pin
self.write_delays = (1, 40, 40, 1)
self.read_delays = (1, 1, 40)
# cache a bunch of methods and attributes. This is necessary in _write_bit and
# _read_bit to achieve the timing required by the OneWire protocol.
self.cache = (pin.init, pin.value, pin.OUT_PP, pin.IN, pin.PULL_NONE)
pin.init(pin.IN, pin.PULL_UP)
def reset(self):
"""
Perform the onewire reset function.
Returns 1 if a device asserted a presence pulse, 0 otherwise.
If you receive 0, then check your wiring and make sure you are providing
power and ground to your devices.
"""
retries = 25
self.data_pin.init(self.data_pin.IN, self.data_pin.PULL_UP)
# We will wait up to 250uS for
# the bus to come high, if it doesn't then it is broken or shorted
# and we return a 0;
# wait until the wire is high... just in case
while True:
if self.data_pin.value():
break
retries -= 1
if retries == 0:
raise OSError("OneWire pin didn't go high")
pyb.udelay(10)
# pull the bus low for at least 480us
self.data_pin.low()
self.data_pin.init(self.data_pin.OUT_PP)
pyb.udelay(480)
# If there is a slave present, it should pull the bus low within 60us
i = pyb.disable_irq()
self.data_pin.init(self.data_pin.IN, self.data_pin.PULL_UP)
pyb.udelay(70)
presence = not self.data_pin.value()
pyb.enable_irq(i)
pyb.udelay(410)
return presence
def write_bit(self, value):
"""
Write a single bit.
"""
pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP = self.cache
self._write_bit(value, pin_init, pin_value, Pin_OUT_PP)
def _write_bit(self, value, pin_init, pin_value, Pin_OUT_PP):
"""
Write a single bit - requires cached methods/attributes be passed as arguments.
See also write_bit()
"""
d0, d1, d2, d3 = self.write_delays
udelay = pyb.udelay
if value:
# write 1
i = disable_irq()
pin_value(0)
pin_init(Pin_OUT_PP)
udelay(d0)
pin_value(1)
enable_irq(i)
udelay(d1)
else:
# write 0
i = disable_irq()
pin_value(0)
pin_init(Pin_OUT_PP)
udelay(d2)
pin_value(1)
enable_irq(i)
udelay(d3)
def write_byte(self, value):
"""
Write a byte. The pin will go tri-state at the end of the write to avoid
heating in a short or other mishap.
"""
pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP = self.cache
for i in range(8):
self._write_bit(value & 1, pin_init, pin_value, Pin_OUT_PP)
value >>= 1
pin_init(Pin_IN, Pin_PULL_UP)
def write_bytes(self, bytestring):
"""
Write a sequence of bytes.
"""
for byte in bytestring:
self.write_byte(byte)
def _read_bit(self, pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP):
"""
Read a single bit - requires cached methods/attributes be passed as arguments.
See also read_bit()
"""
d0, d1, d2 = self.read_delays
udelay = pyb.udelay
pin_init(Pin_IN, Pin_PULL_UP) # TODO why do we need this?
i = disable_irq()
pin_value(0)
pin_init(Pin_OUT_PP)
udelay(d0)
pin_init(Pin_IN, Pin_PULL_UP)
udelay(d1)
value = pin_value()
enable_irq(i)
udelay(d2)
return value
def read_bit(self):
"""
Read a single bit.
"""
pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP = self.cache
return self._read_bit(pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP)
def read_byte(self):
"""
Read a single byte and return the value as an integer.
See also read_bytes()
"""
pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP = self.cache
value = 0
for i in range(8):
bit = self._read_bit(pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP)
value |= bit << i
return value
def read_bytes(self, count):
"""
Read a sequence of N bytes.
The bytes are returned as a bytearray.
"""
s = bytearray(count)
for i in range(count):
s[i] = self.read_byte()
return s
def select_rom(self, rom):
"""
Select a specific device to talk to. Pass in rom as a bytearray (8 bytes).
"""
assert len(rom) == 8, "ROM must be 8 bytes"
self.reset()
self.write_byte(0x55) # ROM MATCH
self.write_bytes(rom)
def read_rom(self):
"""
Read the ROM - this works if there is only a single device attached.
"""
self.reset()
self.write_byte(0x33) # READ ROM
rom = self.read_bytes(8)
# TODO: check CRC of the ROM
return rom
def skip_rom(self):
"""
Send skip-rom command - this works if there is only one device attached.
"""
self.write_byte(0xCC) # SKIP ROM
def depower(self):
self.data_pin.init(self.data_pin.IN, self.data_pin.PULL_NONE)
def scan(self):
"""
Return a list of ROMs for all attached devices.
Each ROM is returned as a bytes object of 8 bytes.
"""
devices = []
self._reset_search()
while True:
rom = self._search()
if not rom:
return devices
devices.append(rom)
def _reset_search(self):
self.last_discrepancy = 0
self.last_device_flag = False
self.last_family_discrepancy = 0
self.rom = bytearray(8)
def _search(self):
# initialize for search
id_bit_number = 1
last_zero = 0
rom_byte_number = 0
rom_byte_mask = 1
search_result = 0
pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP = self.cache
# if the last call was not the last one
if not self.last_device_flag:
# 1-Wire reset
if not self.reset():
self._reset_search()
return None
# issue the search command
self.write_byte(0xF0)
# loop to do the search
while rom_byte_number < 8: # loop until through all ROM bytes 0-7
# read a bit and its complement
id_bit = self._read_bit(pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP)
cmp_id_bit = self._read_bit(pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP)
# check for no devices on 1-wire
if (id_bit == 1) and (cmp_id_bit == 1):
break
else:
# all devices coupled have 0 or 1
if (id_bit != cmp_id_bit):
search_direction = id_bit # bit write value for search
else:
                        # if this discrepancy is before the Last Discrepancy
# on a previous next then pick the same as last time
if (id_bit_number < self.last_discrepancy):
search_direction = (self.rom[rom_byte_number] & rom_byte_mask) > 0
else:
# if equal to last pick 1, if not then pick 0
search_direction = (id_bit_number == self.last_discrepancy)
# if 0 was picked then record its position in LastZero
if search_direction == 0:
last_zero = id_bit_number
# check for Last discrepancy in family
if last_zero < 9:
self.last_family_discrepancy = last_zero
# set or clear the bit in the ROM byte rom_byte_number
# with mask rom_byte_mask
if search_direction == 1:
self.rom[rom_byte_number] |= rom_byte_mask
else:
self.rom[rom_byte_number] &= ~rom_byte_mask
# serial number search direction write bit
#print('sd', search_direction)
self.write_bit(search_direction)
# increment the byte counter id_bit_number
# and shift the mask rom_byte_mask
id_bit_number += 1
rom_byte_mask <<= 1
# if the mask is 0 then go to new SerialNum byte rom_byte_number and reset mask
if rom_byte_mask == 0x100:
rom_byte_number += 1
rom_byte_mask = 1
# if the search was successful then
if not (id_bit_number < 65):
# search successful so set last_discrepancy,last_device_flag,search_result
self.last_discrepancy = last_zero
# check for last device
if self.last_discrepancy == 0:
self.last_device_flag = True
search_result = True
# if no device found then reset counters so next 'search' will be like a first
if not search_result or not self.rom[0]:
self._reset_search()
return None
else:
return bytes(self.rom)
| mit | 4,351,900,132,377,236,500 | 34.08631 | 97 | 0.556536 | false |
jashug/Lightning | serverutil.py | 1 | 2368 | """Utility functions for the server.
This includes the interface from the server implementation to the
payment channel and lightning network APIs.
requires_auth -- decorator which makes a view function require authentication
authenticate_before_request -- a before_request callback for auth
api_factory -- returns a flask Blueprint or equivalent, along with a decorator
making functions availiable as RPCs, and a base class for
SQLAlchemy Declarative database models.
Signals:
WALLET_NOTIFY: sent when bitcoind tells us it has a transaction.
- tx = txid
BLOCK_NOTIFY: send when bitcoind tells us it has a block
- block = block hash
"""
import os.path
from functools import wraps
from flask import Flask, current_app, Response, request, Blueprint
from blinker import Namespace
from jsonrpc.backend.flask import JSONRPCAPI
import bitcoin.core.serialize
from jsonrpcproxy import SmartDispatcher
app = Flask('lightningd')
SIGNALS = Namespace()
WALLET_NOTIFY = SIGNALS.signal('WALLET_NOTIFY')
BLOCK_NOTIFY = SIGNALS.signal('BLOCK_NOTIFY')
# Copied from http://flask.pocoo.org/snippets/8/
def check_auth(username, password):
"""This function is called to check if a username /
password combination is valid.
"""
return (username == current_app.config['rpcuser'] and
password == current_app.config['rpcpassword'])
def authenticate():
"""Sends a 401 response that enables basic auth"""
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
def requires_auth(view):
"""Require basic authentication on requests to this view.
Also only accept requests from localhost.
"""
@wraps(view)
def decorated(*args, **kwargs):
"""Decorated version of view that checks authentication."""
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
if request.remote_addr != "127.0.0.1":
return Response("Access outside 127.0.0.1 forbidden", 403)
return view(*args, **kwargs)
return decorated
def authenticate_before_request():
"""before_request callback to perform authentication."""
return requires_auth(lambda: None)()
| mit | -6,842,408,270,781,522,000 | 34.878788 | 78 | 0.71326 | false |
jolevq/odoopub | addons/report_webkit/wizard/report_webkit_actions.py | 382 | 6537 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010 Camptocamp SA (http://www.camptocamp.com)
# All Right Reserved
#
# Author : Vincent Renaville
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
from openerp.tools.translate import _
from openerp.osv import fields, osv
class report_webkit_actions(osv.osv_memory):
_name = "report.webkit.actions"
_description = "Webkit Actions"
_columns = {
'print_button':fields.boolean('Add print button', help="Check this to add a Print action for this Report in the sidebar of the corresponding document types"),
'open_action':fields.boolean('Open added action', help="Check this to view the newly added internal print action after creating it (technical view) "),
}
_defaults = {
'print_button': lambda *a: True,
'open_action': lambda *a: False,
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
""" Changes the view dynamically
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
@return: New arch of view.
"""
if not context: context = {}
res = super(report_webkit_actions, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar,submenu=False)
record_id = context and context.get('active_id', False) or False
active_model = context.get('active_model')
if not record_id or (active_model and active_model != 'ir.actions.report.xml'):
return res
report = self.pool['ir.actions.report.xml'].browse(
cr,
uid,
context.get('active_id'),
context=context
)
ir_values_obj = self.pool['ir.values']
ids = ir_values_obj.search(
cr,
uid,
[('value','=',report.type+','+str(context.get('active_id')))]
)
if ids:
res['arch'] = '''<form string="Add Print Buttons">
<label string="Report Action already exist for this report."/>
</form>
'''
return res
def do_action(self, cr, uid, ids, context=None):
""" This Function Open added Action.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of report.webkit.actions's ID
@param context: A standard dictionary
@return: Dictionary of ir.values form.
"""
if context is None:
context = {}
report_obj = self.pool['ir.actions.report.xml']
for current in self.browse(cr, uid, ids, context=context):
report = report_obj.browse(
cr,
uid,
context.get('active_id'),
context=context
)
if current.print_button:
ir_values_obj = self.pool['ir.values']
res = ir_values_obj.set(
cr,
uid,
'action',
'client_print_multi',
report.report_name,
[report.model],
'ir.actions.report.xml,%d' % context.get('active_id', False),
isobject=True
)
else:
ir_values_obj = self.pool['ir.values']
res = ir_values_obj.set(
cr,
uid,
'action',
'client_print_multi',
report.report_name,
[report.model,0],
'ir.actions.report.xml,%d' % context.get('active_id', False),
isobject=True
)
if res[0]:
if not current.open_action:
return {'type': 'ir.actions.act_window_close'}
return {
'name': _('Client Actions Connections'),
'view_type': 'form',
'view_mode': 'form',
'res_id' : res[0],
'res_model': 'ir.values',
'view_id': False,
'type': 'ir.actions.act_window',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,401,475,155,584,591,400 | 45.035211 | 165 | 0.47866 | false |
qaugustijn/stk-code | tools/update_characteristics.py | 16 | 2103 | #!/usr/bin/env python3
#
# SuperTuxKart - a fun racing game with go-kart
# Copyright (C) 2006-2015 SuperTuxKart-Team
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# This script uses create_kart_properties.py to create code and then replaces
# the code in the source files. The parts in the source are marked with tags, that
# contain the argument that has to be passed to create_kart_properties.py.
# The script has to be run from the root directory of this project.
import os
import re
import subprocess
from create_kart_properties import functions
def main():
# Check, if it runs in the root directory
if not os.path.isfile("tools/update_characteristics.py"):
print("Please run this script in the root directory of the project.")
exit(1)
for operation, function in functions.items():
result = subprocess.Popen("tools/create_kart_properties.py " +
operation, shell = True,
stdout = subprocess.PIPE).stdout.read().decode('UTF-8')
with open("src/" + function[2], "r") as f:
text = f.read()
# Replace the text by using look behinds and look forwards
text = re.sub("(?<=/\* \<characteristics-start " + operation +
"\> \*/\\n)(.|\n)*(?=\\n\s*/\* <characteristics-end " + operation + "> \*/)", result, text)
with open("src/" + function[2], "w") as f:
f.write(text)
if __name__ == '__main__':
main()
| gpl-3.0 | -1,202,708,781,186,728,000 | 41.06 | 103 | 0.676652 | false |
mkuron/espresso | src/python/espressomd/lbboundaries.py | 1 | 2672 | # Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .script_interface import ScriptObjectRegistry, ScriptInterfaceHelper, script_interface_register
import espressomd.code_info
if any(i in espressomd.code_info.features() for i in ["LB_BOUNDARIES", "LB_BOUNDARIES_GPU"]):
@script_interface_register
class LBBoundaries(ScriptObjectRegistry):
"""
Creates a set of lattice Boltzmann boundaries.
"""
_so_name = "LBBoundaries::LBBoundaries"
def add(self, *args, **kwargs):
"""
Adds a boundary to the set.
Either a valid boundary is an argument,
or a valid set of parameters to create a boundary.
"""
if len(args) == 1:
if isinstance(args[0], LBBoundary):
lbboundary = args[0]
else:
raise TypeError(
"Either a LBBoundary object or key-value pairs for the parameters of a LBBoundary object need to be passed.")
else:
lbboundary = LBBoundary(**kwargs)
self.call_method("add", object=lbboundary)
return lbboundary
def remove(self, lbboundary):
"""
Removes a boundary from the set.
Parameters
----------
lbboundary : :obj:`LBBoundary`
The boundary to be removed from the set.
"""
self.call_method("remove", object=lbboundary)
def clear(self):
"""
Removes all boundaries.
"""
self.call_method("clear")
def size(self):
return self.call_method("size")
def empty(self):
return self.call_method("empty")
@script_interface_register
class LBBoundary(ScriptInterfaceHelper):
"""
Creates a LB boundary.
"""
_so_name = "LBBoundaries::LBBoundary"
_so_bind_methods = ("get_force",)
| gpl-3.0 | 1,940,086,444,955,502,600 | 29.363636 | 133 | 0.59768 | false |
jeasoft/odoo | comunity_modules/house_booking/__openerp__.py | 4 | 1098 | # -*- coding: utf-8 -*-
{
'name' : 'Booking management',
'version' : '1.2',
'author' : 'Alicia FLOREZ & Sébastien CHAZALLET',
'category': 'Sales Management',
'summary': 'Management of house, guestroom or hotel bookings.',
'description' : """
Manage your bookings
====================
This module is used by :
- hotels
- guest houses
- guest rooms
Manage rental schedule, bookings, arrivals and departure.
Use it with its WebSite App and allow your customers to rent online !
In further versions, will manage quotations, invoices, and seasons.
""",
'website': 'http://www.inspyration.fr',
'images' : [],
'depends' : ['base', 'mail', 'crm'],
'data': [
'security/booking_security.xml',
'security/ir.model.access.csv',
'views/res_config_view.xml',
'views/booking_view.xml',
'report/voucher.xml',
'views/email.xml',
],
'js': [
],
'qweb' : [
],
'css':[
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': True,
}
| agpl-3.0 | 3,958,434,852,969,881,600 | 21.387755 | 71 | 0.561531 | false |
arokem/scipy | scipy/sparse/linalg/tests/test_norm.py | 1 | 5427 | """Test functions for the sparse.linalg.norm module
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.linalg import norm as npnorm
from numpy.testing import assert_allclose
from pytest import raises as assert_raises
import scipy.sparse
from scipy.sparse.linalg import norm as spnorm
class TestNorm(object):
def setup_method(self):
a = np.arange(9) - 4
b = a.reshape((3, 3))
self.b = scipy.sparse.csr_matrix(b)
def test_matrix_norm(self):
# Frobenius norm is the default
assert_allclose(spnorm(self.b), 7.745966692414834)
assert_allclose(spnorm(self.b, 'fro'), 7.745966692414834)
assert_allclose(spnorm(self.b, np.inf), 9)
assert_allclose(spnorm(self.b, -np.inf), 2)
assert_allclose(spnorm(self.b, 1), 7)
assert_allclose(spnorm(self.b, -1), 6)
# _multi_svd_norm is not implemented for sparse matrix
assert_raises(NotImplementedError, spnorm, self.b, 2)
assert_raises(NotImplementedError, spnorm, self.b, -2)
def test_matrix_norm_axis(self):
for m, axis in ((self.b, None), (self.b, (0, 1)), (self.b.T, (1, 0))):
assert_allclose(spnorm(m, axis=axis), 7.745966692414834)
assert_allclose(spnorm(m, 'fro', axis=axis), 7.745966692414834)
assert_allclose(spnorm(m, np.inf, axis=axis), 9)
assert_allclose(spnorm(m, -np.inf, axis=axis), 2)
assert_allclose(spnorm(m, 1, axis=axis), 7)
assert_allclose(spnorm(m, -1, axis=axis), 6)
def test_vector_norm(self):
v = [4.5825756949558398, 4.2426406871192848, 4.5825756949558398]
for m, a in (self.b, 0), (self.b.T, 1):
for axis in a, (a, ), a-2, (a-2, ):
assert_allclose(spnorm(m, 1, axis=axis), [7, 6, 7])
assert_allclose(spnorm(m, np.inf, axis=axis), [4, 3, 4])
assert_allclose(spnorm(m, axis=axis), v)
assert_allclose(spnorm(m, ord=2, axis=axis), v)
assert_allclose(spnorm(m, ord=None, axis=axis), v)
def test_norm_exceptions(self):
m = self.b
assert_raises(TypeError, spnorm, m, None, 1.5)
assert_raises(TypeError, spnorm, m, None, [2])
assert_raises(ValueError, spnorm, m, None, ())
assert_raises(ValueError, spnorm, m, None, (0, 1, 2))
assert_raises(ValueError, spnorm, m, None, (0, 0))
assert_raises(ValueError, spnorm, m, None, (0, 2))
assert_raises(ValueError, spnorm, m, None, (-3, 0))
assert_raises(ValueError, spnorm, m, None, 2)
assert_raises(ValueError, spnorm, m, None, -3)
assert_raises(ValueError, spnorm, m, 'plate_of_shrimp', 0)
assert_raises(ValueError, spnorm, m, 'plate_of_shrimp', (0, 1))
class TestVsNumpyNorm(object):
_sparse_types = (
scipy.sparse.bsr_matrix,
scipy.sparse.coo_matrix,
scipy.sparse.csc_matrix,
scipy.sparse.csr_matrix,
scipy.sparse.dia_matrix,
scipy.sparse.dok_matrix,
scipy.sparse.lil_matrix,
)
_test_matrices = (
(np.arange(9) - 4).reshape((3, 3)),
[
[1, 2, 3],
[-1, 1, 4]],
[
[1, 0, 3],
[-1, 1, 4j]],
)
def test_sparse_matrix_norms(self):
for sparse_type in self._sparse_types:
for M in self._test_matrices:
S = sparse_type(M)
assert_allclose(spnorm(S), npnorm(M))
assert_allclose(spnorm(S, 'fro'), npnorm(M, 'fro'))
assert_allclose(spnorm(S, np.inf), npnorm(M, np.inf))
assert_allclose(spnorm(S, -np.inf), npnorm(M, -np.inf))
assert_allclose(spnorm(S, 1), npnorm(M, 1))
assert_allclose(spnorm(S, -1), npnorm(M, -1))
def test_sparse_matrix_norms_with_axis(self):
for sparse_type in self._sparse_types:
for M in self._test_matrices:
S = sparse_type(M)
for axis in None, (0, 1), (1, 0):
assert_allclose(spnorm(S, axis=axis), npnorm(M, axis=axis))
for ord in 'fro', np.inf, -np.inf, 1, -1:
assert_allclose(spnorm(S, ord, axis=axis),
npnorm(M, ord, axis=axis))
# Some numpy matrix norms are allergic to negative axes.
for axis in (-2, -1), (-1, -2), (1, -2):
assert_allclose(spnorm(S, axis=axis), npnorm(M, axis=axis))
assert_allclose(spnorm(S, 'f', axis=axis),
npnorm(M, 'f', axis=axis))
assert_allclose(spnorm(S, 'fro', axis=axis),
npnorm(M, 'fro', axis=axis))
def test_sparse_vector_norms(self):
for sparse_type in self._sparse_types:
for M in self._test_matrices:
S = sparse_type(M)
for axis in (0, 1, -1, -2, (0, ), (1, ), (-1, ), (-2, )):
assert_allclose(spnorm(S, axis=axis), npnorm(M, axis=axis))
for ord in None, 2, np.inf, -np.inf, 1, 0.5, 0.42:
assert_allclose(spnorm(S, ord, axis=axis),
npnorm(M, ord, axis=axis))
| bsd-3-clause | -6,420,942,241,783,860,000 | 42.071429 | 79 | 0.533628 | false |
dafei2015/hugula | Client/tools/site-packages/PIL/MicImagePlugin.py | 13 | 2228 | #
# The Python Imaging Library.
# $Id: MicImagePlugin.py 2134 2004-10-06 08:55:20Z fredrik $
#
# Microsoft Image Composer support for PIL
#
# Notes:
# uses TiffImagePlugin.py to read the actual image streams
#
# History:
# 97-01-20 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.1"
import string
import Image, TiffImagePlugin
from OleFileIO import *
#
# --------------------------------------------------------------------
def _accept(prefix):
return prefix[:8] == MAGIC
##
# Image plugin for Microsoft's Image Composer file format.
class MicImageFile(TiffImagePlugin.TiffImageFile):
format = "MIC"
format_description = "Microsoft Image Composer"
def _open(self):
# read the OLE directory and see if this is a likely
# to be a Microsoft Image Composer file
try:
self.ole = OleFileIO(self.fp)
except IOError:
raise SyntaxError, "not an MIC file; invalid OLE file"
# find ACI subfiles with Image members (maybe not the
# best way to identify MIC files, but what the... ;-)
self.images = []
for file in self.ole.listdir():
if file[1:] and file[0][-4:] == ".ACI" and file[1] == "Image":
self.images.append(file)
# if we didn't find any images, this is probably not
# an MIC file.
if not self.images:
raise SyntaxError, "not an MIC file; no image entries"
self.__fp = self.fp
self.frame = 0
if len(self.images) > 1:
self.category = Image.CONTAINER
self.seek(0)
def seek(self, frame):
try:
filename = self.images[frame]
except IndexError:
raise EOFError, "no such frame"
self.fp = self.ole.openstream(filename)
TiffImagePlugin.TiffImageFile._open(self)
self.frame = frame
def tell(self):
return self.frame
#
# --------------------------------------------------------------------
Image.register_open("MIC", MicImageFile, _accept)
Image.register_extension("MIC", ".mic")
| mit | 8,036,012,872,591,752,000 | 22.208333 | 74 | 0.574057 | false |
pascalguru/florincoin | qa/rpc-tests/getblocktemplate_longpoll.py | 163 | 3683 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def check_array_result(object_array, to_match, expected):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
"""
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0:
raise AssertionError("No objects matched %s"%(str(to_match)))
import threading
class LongpollThread(threading.Thread):
def __init__(self, node):
threading.Thread.__init__(self)
# query current longpollid
templat = node.getblocktemplate()
self.longpollid = templat['longpollid']
# create a new connection to the node, we can't use the same
# connection from two threads
self.node = AuthServiceProxy(node.url, timeout=600)
def run(self):
self.node.getblocktemplate({'longpollid':self.longpollid})
class GetBlockTemplateLPTest(BitcoinTestFramework):
'''
Test longpolling with getblocktemplate.
'''
def run_test(self):
print "Warning: this test will take about 70 seconds in the best case. Be patient."
self.nodes[0].setgenerate(True, 10)
templat = self.nodes[0].getblocktemplate()
longpollid = templat['longpollid']
# longpollid should not change between successive invocations if nothing else happens
templat2 = self.nodes[0].getblocktemplate()
assert(templat2['longpollid'] == longpollid)
# Test 1: test that the longpolling wait if we do nothing
thr = LongpollThread(self.nodes[0])
thr.start()
# check that thread still lives
thr.join(5) # wait 5 seconds or until thread exits
assert(thr.is_alive())
# Test 2: test that longpoll will terminate if another node generates a block
self.nodes[1].setgenerate(True, 1) # generate a block on another node
# check that thread will exit now that new transaction entered mempool
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 3: test that longpoll will terminate if we generate a block ourselves
thr = LongpollThread(self.nodes[0])
thr.start()
self.nodes[0].setgenerate(True, 1) # generate a block on another node
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
thr = LongpollThread(self.nodes[0])
thr.start()
# generate a random transaction and submit it
(txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), Decimal("0.0"), Decimal("0.001"), 20)
# after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
thr.join(60 + 20)
assert(not thr.is_alive())
if __name__ == '__main__':
GetBlockTemplateLPTest().main()
| mit | 1,128,812,502,512,409,200 | 39.032609 | 113 | 0.652729 | false |
liavkoren/djangoDev | tests/serializers/models.py | 29 | 3090 | # -*- coding: utf-8 -*-
"""
42. Serialization
``django.core.serializers`` provides interfaces to converting Django
``QuerySet`` objects to and from "flat" data (i.e. strings).
"""
from __future__ import unicode_literals
from decimal import Decimal
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Category(models.Model):
name = models.CharField(max_length=20)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Author(models.Model):
name = models.CharField(max_length=20)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Article(models.Model):
author = models.ForeignKey(Author)
headline = models.CharField(max_length=50)
pub_date = models.DateTimeField()
categories = models.ManyToManyField(Category)
class Meta:
ordering = ('pub_date',)
def __str__(self):
return self.headline
@python_2_unicode_compatible
class AuthorProfile(models.Model):
author = models.OneToOneField(Author, primary_key=True)
date_of_birth = models.DateField()
def __str__(self):
return "Profile of %s" % self.author
@python_2_unicode_compatible
class Actor(models.Model):
name = models.CharField(max_length=20, primary_key=True)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Movie(models.Model):
actor = models.ForeignKey(Actor)
title = models.CharField(max_length=50)
price = models.DecimalField(max_digits=6, decimal_places=2, default=Decimal('0.00'))
class Meta:
ordering = ('title',)
def __str__(self):
return self.title
class Score(models.Model):
score = models.FloatField()
@python_2_unicode_compatible
class Team(object):
def __init__(self, title):
self.title = title
def __str__(self):
raise NotImplementedError("Not so simple")
def to_string(self):
return "%s" % self.title
class TeamField(six.with_metaclass(models.SubfieldBase, models.CharField)):
def __init__(self):
super(TeamField, self).__init__(max_length=100)
def get_db_prep_save(self, value, connection):
return six.text_type(value.title)
def to_python(self, value):
if isinstance(value, Team):
return value
return Team(value)
def value_to_string(self, obj):
return self._get_val_from_obj(obj).to_string()
def deconstruct(self):
name, path, args, kwargs = super(TeamField, self).deconstruct()
del kwargs['max_length']
return name, path, args, kwargs
@python_2_unicode_compatible
class Player(models.Model):
name = models.CharField(max_length=50)
rank = models.IntegerField()
team = TeamField()
def __str__(self):
return '%s (%d) playing for %s' % (self.name, self.rank, self.team.to_string())
| bsd-3-clause | 1,495,655,246,450,982,700 | 22.587786 | 88 | 0.652427 | false |
klebercode/lionsclub | eventi/subscriptions/tests/test_views_subscribe.py | 1 | 3025 | # coding: utf-8
from django.test import TestCase
from django.core.urlresolvers import reverse as r
from eventex.subscriptions.forms import SubscriptionForm
from eventex.subscriptions.models import Subscription
class SubscribeTest(TestCase):
def setUp(self):
self.resp = self.client.get(r('subscriptions:subscribe'))
def test_get(self):
"""
Get /inscricao/ must return status code 200.
"""
self.assertEqual(200, self.resp.status_code)
def test_template(self):
"""
Response should be rendered by template.
"""
self.assertTemplateUsed(self.resp, 'subscriptions/subscription_form.html')
def test_html(self):
"""
Html must contain input controls.
"""
self.assertContains(self.resp, '<form')
self.assertContains(self.resp, '<input', 7)
self.assertContains(self.resp, 'type="text"', 4)
self.assertContains(self.resp, 'type="email"')
self.assertContains(self.resp, 'type="submit"')
def test_csrf(self):
"""
Html must contain csrf token.
"""
self.assertContains(self.resp, 'csrfmiddlewaretoken')
def test_has_form(self):
"""
Context must have the subscription form.
"""
form = self.resp.context['form']
self.assertIsInstance(form, SubscriptionForm)
class SubscribePostTest(TestCase):
def setUp(self):
data = dict(name='Henrique Bastos', cpf='12345678901',
email='[email protected]', phone='21-96186180')
self.resp = self.client.post(r('subscriptions:subscribe'), data)
def test_post(self):
"""
Valid POST should redirect to /inscricao/1/.
"""
self.assertEqual(302, self.resp.status_code)
def test_save(self):
"""
Valid POST must be saved.
"""
self.assertTrue(Subscription.objects.exists())
class SubscribeInvalidPost(TestCase):
def setUp(self):
data = dict(name='Henrique Bastos', cpf='000000000012',
email='[email protected]', phone='21-96186180')
self.resp = self.client.post(r('subscriptions:subscribe'), data)
def test_post(self):
"""
Invalid POST should not redirect.
"""
self.assertEqual(200, self.resp.status_code)
def test_form_errors(self):
"""
Form must contain errors.
"""
self.assertTrue(self.resp.context['form'].errors)
def test_dont_save(self):
"""
Do not save data.
"""
self.assertFalse(Subscription.objects.exists())
class TemplateRegressionTest(TestCase):
def test_template_has_non_field_errors(self):
"""
Check if non_field_errors are shown in template.
"""
invalid_data = dict(name='Henrique Bastos', cpf='12345678901')
response = self.client.post(r('subscriptions:subscribe'), invalid_data)
self.assertContains(response, '<ul class="errorlist">') | mit | 2,913,360,455,850,260,000 | 29.26 | 82 | 0.614545 | false |