repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
sc3/cookcountyjail | scraper/inmates_scraper.py | 1 | 1938 | from monitor import MONITOR_VERBOSE_DMSG_LEVEL
from concurrent_base import ConcurrentBase

WORKERS_TO_START = 25

CCJ_INMATE_DETAILS_URL = 'http://www2.cookcountysheriff.org/search2/details.asp?jailnumber='


class InmatesScraper(ConcurrentBase):

    def __init__(self, http, inmates, inmate_details_class, monitor, workers_to_start=WORKERS_TO_START):
        super(InmatesScraper, self).__init__(monitor, workers_to_start)
        self._http = http
        self._inmates = inmates
        self._inmate_details_class = inmate_details_class

    def create_if_exists(self, arg):
        self._put(self._create_if_exists, arg)

    def _create_if_exists(self, inmate_id):
        self._debug('check for inmate - %s' % inmate_id, MONITOR_VERBOSE_DMSG_LEVEL)
        worked, inmate_details_in_html = self._http.get(CCJ_INMATE_DETAILS_URL + inmate_id)
        if worked:
            self._inmates.add(inmate_id, self._inmate_details_class(inmate_details_in_html))

    def resurrect_if_found(self, inmate_id):
        self._put(self._resurrect_if_found, inmate_id)

    def _resurrect_if_found(self, inmate_id):
        self._debug('check if really discharged inmate %s' % inmate_id, MONITOR_VERBOSE_DMSG_LEVEL)
        worked, inmate_details_in_html = self._http.get(CCJ_INMATE_DETAILS_URL + inmate_id)
        if worked:
            self._debug('resurrected discharged inmate %s' % inmate_id, MONITOR_VERBOSE_DMSG_LEVEL)
            self._inmates.update(inmate_id, self._inmate_details_class(inmate_details_in_html))

    def update_inmate_status(self, inmate_id):
        self._put(self._update_inmate_status, inmate_id)

    def _update_inmate_status(self, inmate_id):
        worked, inmate_details_in_html = self._http.get(CCJ_INMATE_DETAILS_URL + inmate_id)
        if worked:
            self._inmates.update(inmate_id, self._inmate_details_class(inmate_details_in_html))
        else:
            self._inmates.discharge(inmate_id)
| gpl-3.0 | -4,363,237,560,352,292,400 | 42.066667 | 104 | 0.674923 | false |
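# Wiring sketch for the scraper above (added for illustration; not part of the
# original repo). The public methods only enqueue work via self._put and return
# immediately -- worker threads from ConcurrentBase perform the HTTP fetches.
# Every collaborator named below (http, inmates, InmateDetails, monitor) is a
# hypothetical stand-in for the real cookcountyjail objects:
#
#     scraper = InmatesScraper(http, inmates, InmateDetails, monitor)
#     scraper.create_if_exists('2013-0123456')      # enqueue a check for a new inmate
#     scraper.update_inmate_status('2013-0123456')  # enqueue a refresh/discharge check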
jcchoiling/learningPython | s13/Day12/rabbit_rpc_server.py | 1 | 1296 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Janice Cheng
"""
TODO: refactor this server side into a single class.
"""
import pika
import subprocess

credentials = pika.PlainCredentials('janice', 'janice123')
parameters = pika.ConnectionParameters('172.16.201.134', 5672, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()

channel.queue_declare(queue='rpc_queue')


# Use subprocess here to execute the command
def cmd_func(cmd):
    cmd_data = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    return cmd_data.stdout.read()


def on_request(ch, method, props, body):
    n = body.decode()
    print(" [.] Calling (%s)" % n)
    response = cmd_func(n)
    response = str(response, encoding='utf-8')
    ch.basic_publish(exchange='',
                     routing_key=props.reply_to,
                     properties=pika.BasicProperties(correlation_id=props.correlation_id),
                     body=response)
    ch.basic_ack(delivery_tag=method.delivery_tag)


channel.basic_qos(prefetch_count=1)
channel.basic_consume(on_request, queue='rpc_queue')  # fetch requests from the queue
print(" [x] Awaiting RPC requests")
channel.start_consuming() | gpl-3.0 | 7,917,972,075,697,951,000 | 25.717391 | 95 | 0.648208 | false |
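# A matching RPC client sketch (added for illustration; not in the original
# repo). It follows the standard RabbitMQ RPC pattern the server above expects:
# publish a shell command to 'rpc_queue' with reply_to/correlation_id set, then
# wait for the reply on an exclusive callback queue. Host and credentials are
# copied from the server; pika's pre-1.0 API is assumed, as above.
import uuid

import pika

credentials = pika.PlainCredentials('janice', 'janice123')
parameters = pika.ConnectionParameters('172.16.201.134', 5672, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()

result = channel.queue_declare(exclusive=True)
callback_queue = result.method.queue
corr_id = str(uuid.uuid4())
response = None


def on_response(ch, method, props, body):
    global response
    if props.correlation_id == corr_id:  # ignore replies meant for other calls
        response = body


channel.basic_consume(on_response, no_ack=True, queue=callback_queue)
channel.basic_publish(exchange='',
                      routing_key='rpc_queue',
                      properties=pika.BasicProperties(reply_to=callback_queue,
                                                      correlation_id=corr_id),
                      body='uname -a')
while response is None:
    connection.process_data_events()  # pump I/O until the reply arrives
print(response.decode())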
Roel/Gyrid | gyrid/core.py | 1 | 1416 | #-*- coding: utf-8 -*-
#
# This file belongs to Gyrid.
#
# Gyrid is a mobile device scanner.
# Copyright (C) 2013 Roel Huybrechts
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import threading


def threaded(f):
    """
    Wrapper to start a function within a new thread.

    @param f The function to run inside the thread.
    """
    def wrapper(*args):
        t = threading.Thread(target=f, args=args)
        t.start()
    return wrapper


class ScanProtocol(object):
    def __init__(self, mgr):
        self.mgr = mgr

    def hardware_added(self):
        pass

    def hardware_removed(self):
        pass


class Scanner(object):
    def __init__(self, mgr, protocol):
        self.mgr = mgr
        self.protocol = protocol

    def start_scanning(self):
        pass

    def stop_scanning(self):
        pass
| gpl-3.0 | -5,951,183,939,177,816,000 | 25.716981 | 71 | 0.673729 | false |
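# Usage sketch for the threaded decorator above (added for illustration; not
# part of the original module). The decorated call returns immediately and the
# body runs on its own thread; note the wrapper discards the return value.
@threaded
def demo_scan(duration):
    print("scanning for %d seconds in a background thread" % duration)

demo_scan(30)  # returns at once; the print happens on the worker thread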
Hossein-Noroozpour/PyHGEE | core/HGEMesh.py | 1 | 1774 | # coding=utf-8
"""
Module for handling OpenGL buffers.
"""
__author__ = "Hossein Noroozpour"

from OpenGL import GL
import ctypes


class Mesh():
    """
    A class that holds mesh information about an actor.
    """
    def __init__(self, elements, indices):
        temp_list = [0]
        # noinspection PyCallingNonCallable
        self.vbo = (ctypes.c_uint32 * 1)(*temp_list)
        # noinspection PyCallingNonCallable
        self.ibo = (ctypes.c_uint32 * 1)(*temp_list)
        GL.glGenBuffers(1, self.vbo)
        GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo)
        # noinspection PyCallingNonCallable
        GL.glBufferData(
            GL.GL_ARRAY_BUFFER,
            len(elements) * 4,
            (ctypes.c_float * len(elements))(*elements),
            GL.GL_STATIC_DRAW
        )
        GL.glGenBuffers(1, self.ibo)
        GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.ibo)
        # noinspection PyCallingNonCallable
        GL.glBufferData(
            GL.GL_ELEMENT_ARRAY_BUFFER,
            len(indices) * 4,
            (ctypes.c_uint32 * len(indices))(*indices),
            GL.GL_STATIC_DRAW
        )
        self.indices_number = ctypes.c_uint32(len(indices))

    def __del__(self):
        GL.glBindBuffer(GL.GL_ARRAY_BUFFER, ctypes.c_uint32(0))
        GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, ctypes.c_uint32(0))
        GL.glDeleteBuffers(1, self.vbo)
        GL.glDeleteBuffers(1, self.ibo)

    def bind(self):
        """
        Bind itself.
        """
        GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo)
        GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.ibo)

    def draw(self):
        """
        Draw.
        """
        GL.glDrawElements(GL.GL_TRIANGLES, self.indices_number, GL.GL_UNSIGNED_INT, ctypes.c_uint32(0)) | mit | -570,652,836,376,594,700 | 30.140351 | 103 | 0.590192 | false |
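# Usage sketch for the Mesh class above (added for illustration). Assumptions:
# a current OpenGL context exists and a shader program with a matching vertex
# layout is bound; 'elements' holds interleaved per-vertex floats and 'indices'
# the triangle corner indices.
elements = [0.0, 0.0, 0.0,
            1.0, 0.0, 0.0,
            0.0, 1.0, 0.0]
indices = [0, 1, 2]
mesh = Mesh(elements, indices)
mesh.bind()   # re-bind the VBO/IBO before issuing the draw call
mesh.draw()   # draws one triangle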
MatthewWilkes/reportlab | tests/test_platypus_pleaseturnover.py | 1 | 9102 | #Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
"""Tests pleaseTurnOver, pageBreakBefore, frameBreakBefore, keepWithNext...
"""
__version__='''$Id$'''
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
import sys
import unittest
from reportlab.platypus.flowables import Flowable, PTOContainer, KeepInFrame
from reportlab.lib.units import cm
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.colors import toColor, black
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus.tables import Table
from reportlab.platypus.frames import Frame
from reportlab.lib.randomtext import randomText
from reportlab.platypus.doctemplate import PageTemplate, BaseDocTemplate, FrameBreak


def myMainPageFrame(canvas, doc):
    "The page frame used for all PDF documents."
    canvas.saveState()
    canvas.setFont('Times-Roman', 12)
    pageNumber = canvas.getPageNumber()
    canvas.drawString(10*cm, cm, str(pageNumber))
    canvas.restoreState()


def _showDoc(fn,story):
    pageTemplate = PageTemplate('normal', [Frame(72, 440, 170, 284, id='F1'),
                                           Frame(326, 440, 170, 284, id='F2'),
                                           Frame(72, 72, 170, 284, id='F3'),
                                           Frame(326, 72, 170, 284, id='F4'),
                                           ], myMainPageFrame)
    doc = BaseDocTemplate(outputfile(fn),
                          pageTemplates = pageTemplate,
                          showBoundary = 1,
                          )
    doc.multiBuild(story)


text2 ='''We have already seen that the natural general principle that will
subsume this case cannot be arbitrary in the requirement that branching
is not tolerated within the dominance scope of a complex symbol.
Notice, incidentally, that the speaker-hearer's linguistic intuition is
to be regarded as the strong generative capacity of the theory. A
consequence of the approach just outlined is that the descriptive power
of the base component does not affect the structure of the levels of
acceptability from fairly high (e.g. (99a)) to virtual gibberish (e.g.
(98d)). By combining adjunctions and certain deformations, a
descriptively adequate grammar cannot be arbitrary in the strong
generative capacity of the theory.'''

text1='''
On our assumptions, a descriptively adequate grammar delimits the strong
generative capacity of the theory. For one thing, the fundamental error
of regarding functional notions as categorial is to be regarded as a
corpus of utterance tokens upon which conformity has been defined by the
paired utterance test. A majority of informed linguistic specialists
agree that the appearance of parasitic gaps in domains relatively
inaccessible to ordinary extraction is necessary to impose an
interpretation on the requirement that branching is not tolerated within
the dominance scope of a complex symbol. It may be, then, that the
speaker-hearer's linguistic intuition appears to correlate rather
closely with the ultimate standard that determines the accuracy of any
proposed grammar. Analogously, the notion of level of grammaticalness
may remedy and, at the same time, eliminate a general convention
regarding the forms of the grammar.'''

text0 = '''To characterize a linguistic level L,
this selectionally introduced contextual
feature delimits the requirement that
branching is not tolerated within the
dominance scope of a complex
symbol. Notice, incidentally, that the
notion of level of grammaticalness
does not affect the structure of the
levels of acceptability from fairly high
(e.g. (99a)) to virtual gibberish (e.g.
(98d)). Suppose, for instance, that a
subset of English sentences interesting
on quite independent grounds appears
to correlate rather closely with an
important distinction in language use.
Presumably, this analysis of a
formative as a pair of sets of features is
not quite equivalent to the system of
base rules exclusive of the lexicon. We
have already seen that the appearance
of parasitic gaps in domains relatively
inaccessible to ordinary extraction
does not readily tolerate the strong
generative capacity of the theory.'''


def _ptoTestCase(self):
    """PTO stands for Please Turn Over and is a means for
    specifying content to be inserted when stuff goes over a page.
    This makes one long multi-page paragraph."""

    # Build story.
    story = []
    def fbreak(story=story):
        story.append(FrameBreak())

    styleSheet = getSampleStyleSheet()
    H1 = styleSheet['Heading1']
    H1.pageBreakBefore = 0
    H1.keepWithNext = 0

    bt = styleSheet['BodyText']
    pto = ParagraphStyle('pto',parent=bt)
    pto.alignment = TA_RIGHT
    pto.fontSize -= 1

    def ColorParagraph(c,text,style):
        return Paragraph('<para color="%s">%s</para>' % (c,text),style)

    def ptoblob(blurb,content,trailer=None,header=None, story=story, H1=H1):
        if type(content) not in (type([]),type(())): content = [content]
        story.append(PTOContainer([Paragraph(blurb,H1)]+list(content),trailer,header))

    t0 = [ColorParagraph('blue','Please turn over', pto )]
    h0 = [ColorParagraph('blue','continued from previous page', pto )]
    t1 = [ColorParagraph('red','Please turn over(inner)', pto )]
    h1 = [ColorParagraph('red','continued from previous page(inner)', pto )]
    ptoblob('First Try at a PTO',[Paragraph(text0,bt)],t0,h0)
    fbreak()
    c1 = Table([('alignment', 'align\012alignment'),
                ('bulletColor', 'bulletcolor\012bcolor'),
                ('bulletFontName', 'bfont\012bulletfontname'),
                ('bulletFontSize', 'bfontsize\012bulletfontsize'),
                ('bulletIndent', 'bindent\012bulletindent'),
                ('firstLineIndent', 'findent\012firstlineindent'),
                ('fontName', 'face\012fontname\012font'),
                ('fontSize', 'size\012fontsize'),
                ('leading', 'leading'),
                ('leftIndent', 'leftindent\012lindent'),
                ('rightIndent', 'rightindent\012rindent'),
                ('spaceAfter', 'spaceafter\012spacea'),
                ('spaceBefore', 'spacebefore\012spaceb'),
                ('textColor', 'fg\012textcolor\012color')],
               style = [
                   ('VALIGN',(0,0),(-1,-1),'TOP'),
                   ('INNERGRID', (0,0), (-1,-1), 0.25, black),
                   ('BOX', (0,0), (-1,-1), 0.25, black),
                   ],
               )
    ptoblob('PTO with a table inside',c1,t0,h0)
    fbreak()
    ptoblob('A long PTO',[Paragraph(text0+' '+text1,bt)],t0,h0)
    fbreak()
    ptoblob('2 PTO (inner split)',[ColorParagraph('pink',text0,bt),PTOContainer([ColorParagraph(black,'Inner Starts',H1),ColorParagraph('yellow',text2,bt),ColorParagraph('black','Inner Ends',H1)],t1,h1),ColorParagraph('magenta',text1,bt)],t0,h0)
    _showDoc('test_platypus_pto.pdf',story)


def _KeepInFrameTestCase(self,mode,offset=0):
    story = []
    def fbreak(story=story):
        story.append(FrameBreak())

    styleSheet = getSampleStyleSheet()
    H1 = styleSheet['Heading1']
    H1.pageBreakBefore = 0
    H1.keepWithNext = 0
    bt = styleSheet['BodyText']

    def subStory(texts):
        style = [
            ('VALIGN',(0,0),(-1,-1),'TOP'),
            ('INNERGRID', (0,0), (-1,-1), 0.25, black),
            ('BOX', (0,0), (-1,-1), 0.25, black),
            ]
        return ([Paragraph(t,bt) for t in texts]
                +[Table([('alignment', a.lower())],style = style,hAlign=a)
                  for a in ('LEFT','RIGHT','CENTER')])

    def allModesKIF(just,ifb=True,width=170):
        if ifb: fbreak()
        story.append(KeepInFrame(width-offset,284-offset,subStory(texts=(text0,)),mode=mode,hAlign=just))
        fbreak()
        story.append(KeepInFrame(width-offset,284-offset,subStory(texts=(text0,text1)),mode=mode,hAlign=just))
        fbreak()
        story.append(KeepInFrame(width-offset,284-offset,subStory(texts=(text0,text1,text2)),mode=mode,hAlign=just))

    allModesKIF('LEFT',False)
    allModesKIF('LEFT',width=100)
    allModesKIF('CENTRE',width=100)
    allModesKIF('RIGHT',width=100)
    _showDoc('test_platypus_KeepInFrame%s.pdf'%mode,story)


class TestCases(unittest.TestCase):
    "Test multi-page splitting of paragraphs (eyeball-test)."
    def test0(self):
        _ptoTestCase(self)

    def test1(self):
        _KeepInFrameTestCase(self,mode="shrink")

    def test2(self):
        _KeepInFrameTestCase(self,mode="overflow")

    def test3(self):
        _KeepInFrameTestCase(self,mode="truncate")

    def test4(self):
        from reportlab.platypus.doctemplate import LayoutError
        self.assertRaises(LayoutError, _KeepInFrameTestCase,*(self,"error"))

    def test5(self):
        _KeepInFrameTestCase(self,"shrink",0)


def makeSuite():
    return makeSuiteForClasses(TestCases)


#noruntests
if __name__ == "__main__": #NORUNTESTS
    if 'debug' in sys.argv:
        _KeepInFrameTestCase(None)
    else:
        unittest.TextTestRunner().run(makeSuite())
        printLocation()
| bsd-3-clause | -1,114,615,133,043,673,600 | 42.342857 | 245 | 0.683366 | false |
ajbouh/tfi | src/tfi/driver/tf/doc.py | 1 | 12857 | import tfi.json
import tensorflow as tf

import os.path

import tfi.data
import tfi.doc

from google.protobuf.json_format import ParseDict

from tfi.parse.docstring import GoogleDocstring


def _detect_method_documentation(*, bibliographer, model, method_name, method, signature_def):
    # NOTE(adamb) Since we don't want to be parsing rst here, we'll just rewrite
    #     it to include detected citations. Expect that this rst will be parsed
    #     for real when rendering HTML.
    docstr = GoogleDocstring(obj=method).result()
    docstr_sections = docstr['sections']

    text_sections = [v for k, v in docstr_sections if k == 'text']
    overview = "\n".join([l for t in text_sections for l in t])

    docstr['args'] = _enrich_docs_with_tensor_info(docstr['args'], signature_def.inputs)
    docstr['returns'] = _enrich_docs_with_tensor_info(docstr['returns'], signature_def.outputs)

    return tfi.doc.MethodDocumentation(
        name=method_name,
        overview=bibliographer.rewrite(overview),
        inputs=docstr['args'],
        outputs=docstr['returns'],
        examples=[
            tfi.doc.MethodDataDocumentation.generate(
                method=getattr(model, method_name),
                inputs={
                    input_name: eval("\n".join(input_val_lines), {}, {'m': model, 'tfi': tfi})
                    for input_name, _, input_val_lines in docstr['example args']
                },
            ),
        ],
    )


def detect_model_documentation(model):
    source = tfi.doc.ModelSource.detect(model)
    bibliographer = tfi.doc.Bibliographer()

    def maybeattr(o, attr, default=None):
        return getattr(o, attr) if o and hasattr(o, attr) else default

    # NOTE(adamb) Since we don't want to be parsing rst here, we'll just rewrite
    #     it to include detected citations. Expect that this rst will be parsed
    #     for real when rendering HTML.
    model_docstr = GoogleDocstring(obj=model).result()
    model_docstr_sections = model_docstr['sections']

    text_sections = [v for k, v in model_docstr_sections if k == 'text']
    overview = "\n".join([l for t in text_sections for l in t])

    return tfi.doc.ModelDocumentation(
        name=maybeattr(model, '__name__', type(model).__name__),
        hyperparameters=maybeattr(model, '__tfi_hyperparameters__', []),
        overview=bibliographer.rewrite(overview),
        implementation_notes=[],
        authors=[
            *[
                {
                    "name": author['name'],
                    "url": author['url'],
                    "role_noun": "Commits",
                    "role_url": author['commits_url'],
                }
                for author in maybeattr(source, 'authors', [])
            ],
        ],
        source=source,
        facets_overview_proto=maybeattr(model, '__tfi_facets_overview_proto__'),
        methods=[
            _detect_method_documentation(
                model=model,
                bibliographer=bibliographer,
                method_name=method_name,
                method=getattr(model, method_name),
                signature_def=signature_def,
            )
            for method_name, signature_def in maybeattr(model, '__tfi_signature_defs__').items()
        ],
        references=bibliographer.references(),
    )


def _tensor_info_str(tensor):
    if tensor.shape.ndims is None:
        return '%s ?' % tensor.dtype.name

    return '%s <%s>' % (
        tensor.dtype.name,
        ', '.join(['?' if n is None else str(n) for n in tensor.shape.as_list()]),
    )


def _enrich_docs_with_tensor_info(doc_fields, tensor_dict):
    existing = {k: v for k, _, v in doc_fields}
    return [
        (name, _tensor_info_str(tensor), existing.get(name, ''))
        for name, tensor in tensor_dict.items()
    ]


class MethodDocumentationLayout(object):
    def __init__(self, base_path, assets_extra_path):
        self.assets_extra_path = assets_extra_path
        self.metadata_path = os.path.join(base_path, 'metadata.json')
        self._base_path = base_path

    def file(self, subpath):
        return os.path.join(self._base_path, subpath)


class ModelDocumentationLayout(object):
    def __init__(self, model_dir):
        self.basename = os.path.basename(model_dir)
        self.assets_extra_path = os.path.join(model_dir, 'assets.extra')
        self.doc_path = os.path.join(self.assets_extra_path, 'doc')
        self.metadata_path = os.path.join(self.doc_path, 'metadata.json')
        self.methods_path = os.path.join(self.doc_path, 'methods')

    def method(self, method_name):
        return MethodDocumentationLayout(
            os.path.join(self.methods_path, method_name),
            self.assets_extra_path,
        )


def _read_json_else(path, default):
    if not os.path.exists(path):
        return default

    with open(path) as f:
        return tfi.json.load(f)


def _write_json(path, obj):
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, 'w') as f:
        tfi.json.dump(obj, f)


class MethodDataDocumentationCodec(object):
    def __init__(self, method_layout):
        self._layout = method_layout

    def write(self, method_example):
        _write_json(
            self._layout.file('inputs.json'),
            {
                name: value
                for name, value in method_example.inputs().items()
            }
        )
        if method_example.outputs() is not None:
            _write_json(
                self._layout.file('outputs.json'),
                {
                    name: value
                    for name, value in method_example.outputs().items()
                }
            )

    def read(self, signature_def):
        return tfi.doc.MethodDataDocumentation(
            inputs=self._detect(
                lambda: self._read_json_tf_example_from(
                    signature_def.inputs,
                    'inputs.pb.json',
                ),
                lambda: self._read_json_example_from(
                    signature_def.inputs,
                    'inputs.json',
                ),
            ),
            outputs=self._detect(
                lambda: self._read_json_tf_example_from(
                    signature_def.outputs,
                    'outputs.pb.json',
                ),
                lambda: self._read_json_example_from(
                    signature_def.outputs,
                    'outputs.json',
                ),
            ),
            # 'example result': _detect(
            #     lambda: _read_json_tf_example_from(
            #         signature_def.outputs,
            #         'outputs.pb.json',
            #     ),
            #     lambda: _read_json_example_from(
            #         signature_def.outputs,
            #         'outputs.json',
            #     ),
            # ),
        )

    def _detect(self, *fns):
        for fn in fns:
            result = fn()
            if result is not None:
                return result

    def _feature_for_tensor_info(self, tensor_info):
        tensor_shape = tensor_info.tensor_shape.dim[1:]
        dtype = tf.DType(tensor_info.dtype)
        if tensor_shape[-1].size != -1:
            return tf.FixedLenFeature(dtype=dtype, shape=[dim.size for dim in tensor_shape])
        return tf.VarLenFeature(dtype=dtype)

    def _read_json_tf_example_from(self, tensor_infos, subpath):
        path = self._layout.file(subpath)
        if not os.path.exists(path):
            return None

        with open(path) as f:
            example_dict = tfi.json.load(f)

        with tf.Session(graph=tf.Graph()) as session:
            example_features = {
                name: self._feature_for_tensor_info(tensor_info)
                for name, tensor_info in tensor_infos.items()
            }
            return session.run(
                tf.parse_single_example(
                    ParseDict(example_dict, tf.train.Example()).SerializeToString(),
                    features=example_features))

    def _read_json_example_from(self, tensor_infos, subpath):
        path = self._layout.file(subpath)
        if not os.path.exists(path):
            return None

        with open(path) as f:
            return tfi.data.json(
                f.read(),
                assets_extra_root=self._layout.assets_extra_path)


class MethodDocumentationCodec(object):
    def __init__(self, method_name, method_layout):
        self._name = method_name
        self._layout = method_layout

    def write(self, method_doc):
        metadata = {
            'documentation': {
                'inputs': {
                    name: doc
                    for name, tensor_info, doc in method_doc.inputs()
                },
                'outputs': {
                    name: doc
                    for name, tensor_info, doc in method_doc.outputs()
                },
            },
        }
        MethodDataDocumentationCodec(self._layout).write(method_doc.examples()[0])
        _write_json(self._layout.metadata_path, metadata)

    def read(self, signature_def):
        metadata = _read_json_else(self._layout.metadata_path, {})
        doc = metadata.get('documentation', {})
        doc_inputs = doc.get('inputs', {})
        doc_outputs = doc.get('outputs', {})

        return tfi.doc.MethodDocumentation(
            name=self._name,
            overview=metadata.get('overview', None),
            inputs=[
                (name, self._tensor_info_str(ti), doc_inputs.get(name, ''))
                for name, ti in signature_def.inputs.items()
            ],
            outputs=[
                (name, self._tensor_info_str(ti), doc_outputs.get(name, ''))
                for name, ti in signature_def.outputs.items()
            ],
            examples=[
                MethodDataDocumentationCodec(self._layout).read(signature_def),
            ],
        )

    def _tensor_info_str(self, tensor_info):
        if tensor_info.tensor_shape.unknown_rank:
            return '%s ?' % tf.as_dtype(tensor_info.dtype).name

        return '%s <%s>' % (
            tf.as_dtype(tensor_info.dtype).name,
            ', '.join([
                '?' if dim.size == -1 else str(dim.size)
                for dim in tensor_info.tensor_shape.dim
            ]),
        )


class ModelDocumentationCodec(object):
    def __init__(self, path):
        self._layout = ModelDocumentationLayout(path)

    def _method_codecs(self, method_names):
        return [
            (
                method_name,
                MethodDocumentationCodec(
                    method_name,
                    self._layout.method(method_name),
                )
            )
            for method_name in method_names
        ]

    def write(self, model_doc):
        metadata = {
            'name': model_doc.name(),
            'overview': model_doc.overview(),
            'hyperparameters': [
                (name, str(val_type), val, docs)
                for name, val_type, val, docs in model_doc.hyperparameters()
            ],
            'authors': model_doc.authors(),
            'references': model_doc.references(),
            'implementation_notes': model_doc.implementation_notes(),
            'source': model_doc.source(),
            'facets_overview_proto': None,  # model_doc.facets_overview_proto(),
        }
        methods = model_doc.methods()
        for method_name, method_codec in self._method_codecs(methods.keys()):
            method_codec.write(methods[method_name])
        _write_json(self._layout.metadata_path, metadata)

    def read(self, signature_defs):
        metadata = _read_json_else(self._layout.metadata_path, {})
        return tfi.doc.ModelDocumentation(
            # TODO(adamb) Should be transformed to the below structure, with val_type_str -> val_type
            #     (name, val_type, val, docs)
            hyperparameters=metadata.get('hyperparameters', []),
            name=metadata.get('name', self._layout.basename),
            overview=metadata.get('overview', None),
            methods=[
                method_codec.read(signature_defs[method_name])
                for method_name, method_codec in self._method_codecs(signature_defs.keys())
            ],
            authors=metadata.get('authors', []),
            references=metadata.get('references', {}),
            implementation_notes=metadata.get('implementation_notes', []),
            source=metadata.get('source', []),
            facets_overview_proto=None,
        )


def read(path, signature_defs):
    return ModelDocumentationCodec(path).read(signature_defs)


def write(path, model_doc):
    return ModelDocumentationCodec(path).write(model_doc) | mit | 4,932,963,218,364,086,000 | 35.842407 | 101 | 0.542584 | false |
commaai/openpilot | selfdrive/manager/helpers.py | 1 | 1058 | import os
import sys
import fcntl
import errno
import signal


def unblock_stdout():
  # get a non-blocking stdout
  child_pid, child_pty = os.forkpty()
  if child_pid != 0:  # parent
    # child is in its own process group, manually pass kill signals
    signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
    signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))

    fcntl.fcntl(sys.stdout, fcntl.F_SETFL, fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)

    while True:
      try:
        dat = os.read(child_pty, 4096)
      except OSError as e:
        if e.errno == errno.EIO:
          break
        continue

      if not dat:
        break

      try:
        sys.stdout.write(dat.decode('utf8'))
      except (OSError, IOError, UnicodeDecodeError):
        pass

    # os.wait() returns a tuple with the pid and a 16 bit value
    # whose low byte is the signal number and whose high byte is the exit status
    exit_status = os.wait()[1] >> 8
    os._exit(exit_status)
| mit | -6,387,018,691,051,045,000 | 26.842105 | 98 | 0.648393 | false |
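# Call-site sketch (an assumption -- in openpilot this runs near the top of the
# manager's entry point, before anything is written to stdout). The parent
# process stays inside unblock_stdout() relaying the child's pty; only the
# child returns and continues with the program.
if __name__ == "__main__":
  unblock_stdout()
  print("stdout is now non-blocking")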
ONSdigital/ras-frontstage | frontstage/error_handlers.py | 1 | 2963 | import logging

from flask import render_template, request, url_for
from flask_wtf.csrf import CSRFError
from requests.exceptions import ConnectionError
from structlog import wrap_logger
from werkzeug.utils import redirect

from frontstage import app
from frontstage.common.session import Session
from frontstage.exceptions.exceptions import (
    ApiError,
    IncorrectAccountAccessError,
    InvalidEqPayLoad,
    JWTValidationError,
)

logger = wrap_logger(logging.getLogger(__name__))


@app.errorhandler(400)
def client_error(error):
    logger.info("Client error", url=request.url, status_code=error.code)
    return render_template("errors/400-error.html"), 400


@app.errorhandler(404)
def not_found_error(error):
    logger.info("Not found error", url=request.url, status_code=error.code)
    return render_template("errors/404-error.html"), 404


@app.errorhandler(CSRFError)
def handle_csrf_error(error):
    logger.warning("CSRF token has expired", error_message=error.description, status_code=error.code)
    session_key = request.cookies.get("authorization")
    session_handler = Session.from_session_key(session_key)
    encoded_jwt = session_handler.get_encoded_jwt()
    if not encoded_jwt:
        return render_template("errors/400-error.html"), 400
    else:
        return redirect(url_for("sign_in_bp.logout", csrf_error=True, next=request.url))


@app.errorhandler(ApiError)
def api_error(error):
    logger.error(
        error.message or "Api failed to retrieve required data",
        url=request.url,
        status_code=500,
        api_url=error.url,
        api_status_code=error.status_code,
        **error.kwargs
    )
    return render_template("errors/500-error.html"), 500


@app.errorhandler(ConnectionError)
def connection_error(error):
    logger.error("Failed to connect to external service", url=request.url, status_code=500, api_url=error.request.url)
    return render_template("errors/500-error.html"), 500


@app.errorhandler(JWTValidationError)
def jwt_validation_error(error):
    logger.error("JWT validation error", url=request.url, status_code=403)
    return render_template("errors/403-error.html"), 403


@app.errorhandler(Exception)
def server_error(error):
    logger.error("Generic exception generated", exc_info=error, url=request.url, status_code=500)
    return render_template("errors/500-error.html"), getattr(error, "code", 500)


@app.errorhandler(InvalidEqPayLoad)
def eq_error(error):
    logger.error("Failed to generate EQ URL", error=error.message, url=request.url, status_code=500)
    return render_template("errors/500-error.html"), 500


@app.errorhandler(IncorrectAccountAccessError)
def secure_message_forbidden_error(error):
    logger.info(
        "Attempt to access secure message without correct session permission",
        url=request.url,
        message=error.message,
        thread_id=error.thread,
    )
    return render_template("errors/403-incorrect-account-error.html")
| mit | 3,384,516,966,222,632,400 | 31.56044 | 118 | 0.733378 | false |
manmedia/PythonToggleCharacterCases | switchLetterCase.py | 1 | 4097 | #
# A module capable of changing alphabet letter cases.
#
# It uses very generic Python functionality to ensure
# backward compatibility.
#
#
# The programme processes a set of characters by default.
# If no character is entered for processing, the programme
# simply exits. This can be turned off by setting 'a' to 1
# (for all vowels) or 2 (for all consonants).
#
#

import os;
import sys;
import re;
import string;

from re import sub;


#
#! Get parsed arguments
#
def get_parsed_args():

    # Pre-allocate
    parser = "";
    args = "";

    if sys.version_info < (2,7):
        from optparse import OptionParser
        parser = OptionParser();
        parser.add_option("-i", "--input_path", type=str, help="Input file path with extension");
        parser.add_option("-o", "--output_path", type=str, help="Output file path with extension");
        parser.add_option("-a", "--all_chars", type=int, help="Switch a type of characters (all vowels or cons.), disable=0, vowel=1, cons=2", default=0);
        parser.add_option("-c", "--c", type=str, help="Characters to process (comma-separated list, no whitespace)", default="");
    else:
        from argparse import ArgumentParser
        parser = ArgumentParser();
        parser.add_argument("-i", "--input_path", type=str, help="Input file path with extension");
        parser.add_argument("-o", "--output_path", type=str, help="Output file path with extension");
        parser.add_argument("-a", "--all_chars", type=int, help="Switch a type of characters (all vowels or cons.), disable=0, vowel=1, cons=2", default=0);
        parser.add_argument("-c", "--c", type=str, help="Characters to process (comma-separated list, no whitespace)", default="");

    args = parser.parse_args();
    args = vars(args);

    ##print(option)
    ##print(args)
    ##print(type(option))
    ##print(option.c)
    ##print(option.all_chars)
    ##print(option.input_path)
    ##print(option.output_path)

    # Safety assertions
    assert (args['all_chars'] >= 0 and args['all_chars'] <= 2), \
        "Invalid value! programme exiting!\n type python switchLetterCase.py -h for information on arguments"

    # If nothing to process, programme will exit
    if (args['all_chars'] == 0) and \
       ((args['c'] == "") or \
        (args['c'] == " ") or \
        args['all_chars'] is None or \
        all([x is ',' for x in args['c']])):
        print(".....Nothing to process, programme exiting.\n\n");
        sys.exit(0);

    return args;


#
#! Main processor function
#
def process_files(args):

    try:
        # Get handlers
        f1 = open(args['input_path'], 'r');
        f2 = open(args['output_path'], 'w');

        # Initial setup
        line_to_write = ""

        if (args['all_chars'] == 0): # process characters in the list
            gg = "".join(args['c'])
            for line in f1:
                g = [y.upper() if y in gg else y.lower() if y.upper() in gg else y for y in line];
                line_to_write = "".join(g);
                f2.write(line_to_write);
        elif (args['all_chars'] == 1): # process vowels only
            vowels = sub('[^aeiou]+','',string.ascii_lowercase)
            for line in f1:
                g = [y.upper() if y in vowels else y.lower() if y.upper() in vowels else y for y in line];
                line_to_write = "".join(g);
                f2.write(line_to_write);
        elif (args['all_chars'] == 2): # process consonants only
            consonants = sub('[aeiou]+','',string.ascii_lowercase)
            for line in f1:
                g = [y.upper() if y in consonants else y.lower() if y.upper() in consonants else y for y in line];
                line_to_write = "".join(g);
                f2.write(line_to_write);

        # Print some INFO
        print("All characters toggled! Terminating programme......\n\n");

        f1.close();
        f2.close();
    except Exception as e:
        print(e);
    finally:
        del f1, f2
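#
#! Entry point (an assumption -- the original file defines the functions
#! above but never calls them; this wiring makes the script runnable)
#
if __name__ == "__main__":
    parsed_args = get_parsed_args();
    process_files(parsed_args);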
| apache-2.0 | -4,100,419,698,377,441,000 | 31.259843 | 156 | 0.569685 | false |
rajul/tvb-framework | tvb/tests/framework/adapters/visualizers/sensorsviewer_test.py | 1 | 6778 | # -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Framework Package. This package holds all Data Management, and
# Web-UI helpful to run brain-simulations. To use it, you also need to download
# TheVirtualBrain-Scientific Package (for simulators). See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
#   Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
#   Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
#       The Virtual Brain: a simulator of primate brain network dynamics.
#   Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
.. moduleauthor:: Lia Domide <[email protected]>
"""

import os
import unittest
import tvb_data.obj
import tvb_data.sensors
from tvb.adapters.uploaders.sensors_importer import Sensors_Importer
from tvb.adapters.visualizers.sensors import SensorsViewer
from tvb.core.entities.file.files_helper import FilesHelper
from tvb.datatypes.sensors import SensorsEEG, SensorsMEG, SensorsInternal
from tvb.datatypes.surfaces import EEGCap, EEG_CAP, FACE
from tvb.tests.framework.core.test_factory import TestFactory
from tvb.tests.framework.datatypes.datatypes_factory import DatatypesFactory
from tvb.tests.framework.core.base_testcase import TransactionalTestCase


class SensorViewersTest(TransactionalTestCase):
    """
    Unit-tests for Sensors viewers.
    """

    EXPECTED_KEYS_INTERNAL = {'urlMeasurePoints': None, 'urlMeasurePointsLabels': None, 'noOfMeasurePoints': 103,
                              'minMeasure': 0, 'maxMeasure': 103, 'urlMeasure': None, 'shelfObject': None}

    EXPECTED_KEYS_EEG = EXPECTED_KEYS_INTERNAL.copy()
    EXPECTED_KEYS_EEG.update({'urlVertices': None, 'urlTriangles': None, 'urlLines': None, 'urlNormals': None,
                              'noOfMeasurePoints': 62, 'maxMeasure': 62})

    EXPECTED_KEYS_MEG = EXPECTED_KEYS_EEG.copy()
    EXPECTED_KEYS_MEG.update({'noOfMeasurePoints': 151, 'maxMeasure': 151})

    def setUp(self):
        """
        Sets up the environment for running the tests;
        creates a test user, a test project, a connectivity and a surface;
        imports a CFF data-set
        """
        self.datatypeFactory = DatatypesFactory()
        self.test_project = self.datatypeFactory.get_project()
        self.test_user = self.datatypeFactory.get_user()

        ## Import Shelf Face Object
        face_path = os.path.join(os.path.dirname(tvb_data.obj.__file__), 'face_surface.obj')
        TestFactory.import_surface_obj(self.test_user, self.test_project, face_path, FACE)

    def tearDown(self):
        """
        Clean-up tests data
        """
        FilesHelper().remove_project_structure(self.test_project.name)

    def test_launch_EEG(self):
        """
        Check that all required keys are present in output from EegSensorViewer launch.
        """
        ## Import Sensors
        zip_path = os.path.join(os.path.dirname(tvb_data.sensors.__file__), 'EEG_unit_vectors_BrainProducts_62.txt.bz2')
        TestFactory.import_sensors(self.test_user, self.test_project, zip_path, Sensors_Importer.EEG_SENSORS)
        sensors = TestFactory.get_entity(self.test_project, SensorsEEG())

        ## Import EEGCap
        cap_path = os.path.join(os.path.dirname(tvb_data.obj.__file__), 'eeg_cap.obj')
        TestFactory.import_surface_obj(self.test_user, self.test_project, cap_path, EEG_CAP)
        eeg_cap_surface = TestFactory.get_entity(self.test_project, EEGCap())

        viewer = SensorsViewer()
        viewer.current_project_id = self.test_project.id

        ## Launch with EEG Cap selected
        result = viewer.launch(sensors, eeg_cap_surface)
        self.assert_compliant_dictionary(self.EXPECTED_KEYS_EEG, result)
        for key in ['urlVertices', 'urlTriangles', 'urlLines', 'urlNormals']:
            self.assertIsNotNone(result[key], "Value at key %s should not be None" % key)

        ## Launch without EEG Cap
        result = viewer.launch(sensors)
        self.assert_compliant_dictionary(self.EXPECTED_KEYS_EEG, result)
        for key in ['urlVertices', 'urlTriangles', 'urlLines', 'urlNormals']:
            self.assertTrue(not result[key] or result[key] == "[]",
                            "Value at key %s should be None or empty, but is %s" % (key, result[key]))

    def test_launch_MEG(self):
        """
        Check that all required keys are present in output from MEGSensorViewer launch.
        """
        zip_path = os.path.join(os.path.dirname(tvb_data.sensors.__file__), 'meg_channels_reg13.txt.bz2')
        TestFactory.import_sensors(self.test_user, self.test_project, zip_path, Sensors_Importer.MEG_SENSORS)
        sensors = TestFactory.get_entity(self.test_project, SensorsMEG())

        viewer = SensorsViewer()
        viewer.current_project_id = self.test_project.id

        result = viewer.launch(sensors)
        self.assert_compliant_dictionary(self.EXPECTED_KEYS_MEG, result)

    def test_launch_internal(self):
        """
        Check that all required keys are present in output from InternalSensorViewer launch.
        """
        zip_path = os.path.join(os.path.dirname(tvb_data.sensors.__file__), 'internal_39.txt.bz2')
        TestFactory.import_sensors(self.test_user, self.test_project, zip_path, Sensors_Importer.INTERNAL_SENSORS)
        sensors = TestFactory.get_entity(self.test_project, SensorsInternal())

        viewer = SensorsViewer()
        viewer.current_project_id = self.test_project.id

        result = viewer.launch(sensors)
        self.assert_compliant_dictionary(self.EXPECTED_KEYS_INTERNAL, result)


def suite():
    """
    Gather all the tests in a test suite.
    """
    test_suite = unittest.TestSuite()
    test_suite.addTest(unittest.makeSuite(SensorViewersTest))
    return test_suite


if __name__ == "__main__":
    #So you can run tests from this package individually.
    TEST_RUNNER = unittest.TextTestRunner()
    TEST_SUITE = suite()
    TEST_RUNNER.run(TEST_SUITE) | gpl-2.0 | 5,474,133,759,831,826,000 | 40.084848 | 120 | 0.689141 | false |
sio2project/oioioi | oioioi/programs/migrations/0001_initial.py | 1 | 12159 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals

import django.db.models.deletion
from django.db import migrations, models

import oioioi.base.fields
import oioioi.contests.fields
import oioioi.filetracker.fields
import oioioi.problems.models
import oioioi.programs.models


class Migration(migrations.Migration):

    dependencies = [
        ('contests', '0001_initial'),
        ('problems', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='CompilationReport',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('status', oioioi.base.fields.EnumField(max_length=64, choices=[(b'?', 'Pending'), (b'OK', 'OK'), (b'ERR', 'Error'), (b'CE', 'Compilation failed'), (b'RE', 'Runtime error'), (b'WA', 'Wrong answer'), (b'TLE', 'Time limit exceeded'), (b'MLE', 'Memory limit exceeded'), (b'OLE', 'Output limit exceeded'), (b'SE', 'System error'), (b'RV', 'Rule violation'), (b'INI_OK', 'Initial tests: OK'), (b'INI_ERR', 'Initial tests: failed'), (b'TESTRUN_OK', 'No error'), (b'MSE', 'Outgoing message size limit exceeded'), (b'MCE', 'Outgoing message count limit exceeded'), (b'IGN', 'Ignored')])),
                ('compiler_output', models.TextField()),
                ('submission_report', models.ForeignKey(to='contests.SubmissionReport', on_delete=models.CASCADE)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='GroupReport',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('group', models.CharField(max_length=30)),
                ('score', oioioi.contests.fields.ScoreField(max_length=255, null=True, blank=True)),
                ('max_score', oioioi.contests.fields.ScoreField(max_length=255, null=True, blank=True)),
                ('status', oioioi.base.fields.EnumField(max_length=64, choices=[(b'?', 'Pending'), (b'OK', 'OK'), (b'ERR', 'Error'), (b'CE', 'Compilation failed'), (b'RE', 'Runtime error'), (b'WA', 'Wrong answer'), (b'TLE', 'Time limit exceeded'), (b'MLE', 'Memory limit exceeded'), (b'OLE', 'Output limit exceeded'), (b'SE', 'System error'), (b'RV', 'Rule violation'), (b'INI_OK', 'Initial tests: OK'), (b'INI_ERR', 'Initial tests: failed'), (b'TESTRUN_OK', 'No error'), (b'MSE', 'Outgoing message size limit exceeded'), (b'MCE', 'Outgoing message count limit exceeded'), (b'IGN', 'Ignored')])),
                ('submission_report', models.ForeignKey(to='contests.SubmissionReport', on_delete=models.CASCADE)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='LibraryProblemData',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('libname', models.CharField(help_text='Filename library should be given during compilation', max_length=30, verbose_name='libname')),
            ],
            options={
                'verbose_name': 'library problem data',
                'verbose_name_plural': 'library problem data',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='ModelSolution',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=30, verbose_name='name')),
                ('source_file', oioioi.filetracker.fields.FileField(upload_to=oioioi.problems.models.make_problem_filename, verbose_name='source')),
                ('kind', oioioi.base.fields.EnumField(max_length=64, verbose_name='kind', choices=[(b'NORMAL', 'Model solution'), (b'SLOW', 'Slow solution'), (b'INCORRECT', 'Incorrect solution')])),
                ('order_key', models.IntegerField(default=0)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='OutputChecker',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('exe_file', oioioi.filetracker.fields.FileField(upload_to=oioioi.problems.models.make_problem_filename, null=True, verbose_name='checker executable file', blank=True)),
            ],
            options={
                'verbose_name': 'output checker',
                'verbose_name_plural': 'output checkers',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='ProgramSubmission',
            fields=[
                ('submission_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='contests.Submission', on_delete=models.CASCADE)),
                ('source_file', oioioi.filetracker.fields.FileField(upload_to=oioioi.programs.models.make_submission_filename)),
                ('source_length', models.IntegerField(null=True, verbose_name='Source code length', blank=True)),
            ],
            options={
            },
            bases=('contests.submission',),
        ),
        migrations.CreateModel(
            name='ModelProgramSubmission',
            fields=[
                ('programsubmission_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='programs.ProgramSubmission', on_delete=models.CASCADE)),
            ],
            options={
            },
            bases=('programs.programsubmission',),
        ),
        migrations.CreateModel(
            name='ReportActionsConfig',
            fields=[
                ('problem', models.OneToOneField(related_name='report_actions_config', primary_key=True, serialize=False, to='problems.Problem', verbose_name='problem instance', on_delete=models.CASCADE)),
                ('can_user_generate_outs', models.BooleanField(default=False, verbose_name='Allow users to generate their outs on tests from visible reports.')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Test',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=30, verbose_name='name')),
                ('input_file', oioioi.filetracker.fields.FileField(upload_to=oioioi.problems.models.make_problem_filename, null=True, verbose_name='input', blank=True)),
                ('output_file', oioioi.filetracker.fields.FileField(upload_to=oioioi.problems.models.make_problem_filename, null=True, verbose_name='output/hint', blank=True)),
                ('kind', oioioi.base.fields.EnumField(max_length=64, verbose_name='kind', choices=[(b'NORMAL', 'Normal test'), (b'EXAMPLE', 'Example test')])),
                ('group', models.CharField(max_length=30, verbose_name='group')),
                ('time_limit', models.IntegerField(null=True, verbose_name='time limit (ms)', validators=[oioioi.programs.models.validate_time_limit])),
                ('memory_limit', models.IntegerField(null=True, verbose_name='memory limit (KiB)', blank=True)),
                ('max_score', models.IntegerField(default=10, verbose_name='score')),
                ('order', models.IntegerField(default=0)),
                ('problem', models.ForeignKey(to='problems.Problem', on_delete=models.CASCADE)),
            ],
            options={
                'ordering': ['order'],
                'verbose_name': 'test',
                'verbose_name_plural': 'tests',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='TestReport',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('status', oioioi.base.fields.EnumField(max_length=64, choices=[(b'?', 'Pending'), (b'OK', 'OK'), (b'ERR', 'Error'), (b'CE', 'Compilation failed'), (b'RE', 'Runtime error'), (b'WA', 'Wrong answer'), (b'TLE', 'Time limit exceeded'), (b'MLE', 'Memory limit exceeded'), (b'OLE', 'Output limit exceeded'), (b'SE', 'System error'), (b'RV', 'Rule violation'), (b'INI_OK', 'Initial tests: OK'), (b'INI_ERR', 'Initial tests: failed'), (b'TESTRUN_OK', 'No error'), (b'MSE', 'Outgoing message size limit exceeded'), (b'MCE', 'Outgoing message count limit exceeded'), (b'IGN', 'Ignored')])),
                ('comment', models.CharField(max_length=255, blank=True)),
                ('score', oioioi.contests.fields.ScoreField(max_length=255, null=True, blank=True)),
                ('time_used', models.IntegerField(blank=True)),
                ('output_file', oioioi.filetracker.fields.FileField(null=True, upload_to=oioioi.programs.models.make_output_filename, blank=True)),
                ('test_name', models.CharField(max_length=30)),
                ('test_group', models.CharField(max_length=30)),
                ('test_time_limit', models.IntegerField(null=True, blank=True)),
                ('test_max_score', models.IntegerField(null=True, blank=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='UserOutGenStatus',
            fields=[
                ('testreport', models.OneToOneField(related_name='userout_status', primary_key=True, serialize=False, to='programs.TestReport', on_delete=models.CASCADE)),
                ('status', oioioi.base.fields.EnumField(default=b'?', max_length=64, choices=[(b'?', 'Pending'), (b'OK', 'OK'), (b'ERR', 'Error'), (b'CE', 'Compilation failed'), (b'RE', 'Runtime error'), (b'WA', 'Wrong answer'), (b'TLE', 'Time limit exceeded'), (b'MLE', 'Memory limit exceeded'), (b'OLE', 'Output limit exceeded'), (b'SE', 'System error'), (b'RV', 'Rule violation'), (b'INI_OK', 'Initial tests: OK'), (b'INI_ERR', 'Initial tests: failed'), (b'TESTRUN_OK', 'No error'), (b'MSE', 'Outgoing message size limit exceeded'), (b'MCE', 'Outgoing message count limit exceeded'), (b'IGN', 'Ignored')])),
                ('visible_for_user', models.BooleanField(default=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='testreport',
            name='submission_report',
            field=models.ForeignKey(to='contests.SubmissionReport', on_delete=models.CASCADE),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='testreport',
            name='test',
            field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='programs.Test', null=True),
            preserve_default=True,
        ),
        migrations.AlterUniqueTogether(
            name='test',
            unique_together=set([('problem', 'name')]),
        ),
        migrations.AddField(
            model_name='outputchecker',
            name='problem',
            field=models.OneToOneField(to='problems.Problem', on_delete=models.CASCADE),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='modelsolution',
            name='problem',
            field=models.ForeignKey(to='problems.Problem', on_delete=models.CASCADE),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='modelprogramsubmission',
            name='model_solution',
            field=models.ForeignKey(to='programs.ModelSolution', on_delete=models.CASCADE),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='libraryproblemdata',
            name='problem',
            field=models.OneToOneField(to='problems.Problem', on_delete=models.CASCADE),
            preserve_default=True,
        ),
    ]
| gpl-3.0 | 440,767,678,400,920,770 | 58.024272 | 610 | 0.582531 | false |
Azure/azure-storage-python | azure-storage-common/azure/storage/common/_error.py | 1 | 9025 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from sys import version_info

if version_info < (3,):
    def _str(value):
        if isinstance(value, unicode):
            return value.encode('utf-8')

        return str(value)
else:
    _str = str


def _to_str(value):
    return _str(value) if value is not None else None


from azure.common import (
    AzureHttpError,
    AzureConflictHttpError,
    AzureMissingResourceHttpError,
    AzureException,
)
from ._constants import (
    _ENCRYPTION_PROTOCOL_V1,
)

_ERROR_CONFLICT = 'Conflict ({0})'
_ERROR_NOT_FOUND = 'Not found ({0})'
_ERROR_UNKNOWN = 'Unknown error ({0})'
_ERROR_STORAGE_MISSING_INFO = \
    'You need to provide an account name and either an account_key or sas_token when creating a storage service.'
_ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES = \
    'The emulator does not support the file service.'
_ERROR_ACCESS_POLICY = \
    'share_access_policy must be either SignedIdentifier or AccessPolicy ' + \
    'instance'
_ERROR_PARALLEL_NOT_SEEKABLE = 'Parallel operations require a seekable stream.'
_ERROR_VALUE_SHOULD_BE_BYTES = '{0} should be of type bytes.'
_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM = '{0} should be of type bytes or a readable file-like/io.IOBase stream object.'
_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.'
_ERROR_VALUE_SHOULD_BE_STREAM = '{0} should be a file-like/io.IOBase type stream object with a read method.'
_ERROR_VALUE_NONE = '{0} should not be None.'
_ERROR_VALUE_NONE_OR_EMPTY = '{0} should not be None or empty.'
_ERROR_VALUE_NEGATIVE = '{0} should not be negative.'
_ERROR_START_END_NEEDED_FOR_MD5 = \
    'Both end_range and start_range need to be specified ' + \
    'for getting content MD5.'
_ERROR_RANGE_TOO_LARGE_FOR_MD5 = \
    'Getting content MD5 for a range greater than 4MB ' + \
    'is not supported.'
_ERROR_MD5_MISMATCH = \
    'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'
_ERROR_TOO_MANY_ACCESS_POLICIES = \
    'Too many access policies provided. The server does not support setting more than 5 access policies on a single resource.'
_ERROR_OBJECT_INVALID = \
    '{0} does not define a complete interface. Value of {1} is either missing or invalid.'
_ERROR_UNSUPPORTED_ENCRYPTION_VERSION = \
    'Encryption version is not supported.'
_ERROR_DECRYPTION_FAILURE = \
    'Decryption failed'
_ERROR_ENCRYPTION_REQUIRED = \
    'Encryption required but no key was provided.'
_ERROR_DECRYPTION_REQUIRED = \
    'Decryption required but neither key nor resolver was provided.' + \
    ' If you do not want to decrypt, please do not set the require encryption flag.'
_ERROR_INVALID_KID = \
    'Provided or resolved key-encryption-key does not match the id of key used to encrypt.'
_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM = \
    'Specified encryption algorithm is not supported.'
_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = 'The require_encryption flag is set, but encryption is not supported' + \
                                           ' for this method.'
_ERROR_UNKNOWN_KEY_WRAP_ALGORITHM = 'Unknown key wrap algorithm.'
_ERROR_DATA_NOT_ENCRYPTED = 'Encryption required, but received data does not contain appropriate metadata.' + \
                            'Data was either not encrypted or metadata has been lost.'


def _dont_fail_on_exist(error):
    ''' don't throw exception if the resource exists.
    This is called by create_* APIs with fail_on_exist=False'''
    if isinstance(error, AzureConflictHttpError):
        return False
    else:
        raise error


def _dont_fail_not_exist(error):
    ''' don't throw exception if the resource doesn't exist.
    This is called by delete_* APIs with fail_not_exist=False'''
    if isinstance(error, AzureMissingResourceHttpError):
        return False
    else:
        raise error


def _http_error_handler(http_error):
    ''' Simple error handler for azure.'''
    message = str(http_error)
    error_code = None

    if 'x-ms-error-code' in http_error.respheader:
        error_code = http_error.respheader['x-ms-error-code']
        message += ' ErrorCode: ' + error_code

    if http_error.respbody is not None:
        message += '\n' + http_error.respbody.decode('utf-8-sig')

    ex = AzureHttpError(message, http_error.status)
    ex.error_code = error_code

    raise ex


def _validate_type_bytes(param_name, param):
    if not isinstance(param, bytes):
        raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))


def _validate_type_bytes_or_stream(param_name, param):
    if not (isinstance(param, bytes) or hasattr(param, 'read')):
        raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format(param_name))


def _validate_not_none(param_name, param):
    if param is None:
        raise ValueError(_ERROR_VALUE_NONE.format(param_name))


def _validate_content_match(server_md5, computed_md5):
    if server_md5 != computed_md5:
        raise AzureException(_ERROR_MD5_MISMATCH.format(server_md5, computed_md5))


def _validate_access_policies(identifiers):
    if identifiers and len(identifiers) > 5:
        raise AzureException(_ERROR_TOO_MANY_ACCESS_POLICIES)


def _validate_key_encryption_key_wrap(kek):
    # Note that None is not callable and so will fail the second clause of each check.
    if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key):
        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key'))
    if not hasattr(kek, 'get_kid') or not callable(kek.get_kid):
        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
    if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm):
        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm'))


def _validate_key_encryption_key_unwrap(kek):
    if not hasattr(kek, 'get_kid') or not callable(kek.get_kid):
        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
    if not hasattr(kek, 'unwrap_key') or not callable(kek.unwrap_key):
        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key'))


def _validate_encryption_required(require_encryption, kek):
    if require_encryption and (kek is None):
        raise ValueError(_ERROR_ENCRYPTION_REQUIRED)


def _validate_decryption_required(require_encryption, kek, resolver):
    if (require_encryption and (kek is None) and
            (resolver is None)):
        raise ValueError(_ERROR_DECRYPTION_REQUIRED)


def _validate_encryption_protocol_version(encryption_protocol):
    if not (_ENCRYPTION_PROTOCOL_V1 == encryption_protocol):
        raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION)


def _validate_kek_id(kid, resolved_id):
    if not (kid == resolved_id):
        raise ValueError(_ERROR_INVALID_KID)


def _validate_encryption_unsupported(require_encryption, key_encryption_key):
    if require_encryption or (key_encryption_key is not None):
        raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)


def _validate_user_delegation_key(user_delegation_key):
    _validate_not_none('user_delegation_key.signed_oid', user_delegation_key.signed_oid)
    _validate_not_none('user_delegation_key.signed_tid', user_delegation_key.signed_tid)
    _validate_not_none('user_delegation_key.signed_start', user_delegation_key.signed_start)
    _validate_not_none('user_delegation_key.signed_expiry', user_delegation_key.signed_expiry)
    _validate_not_none('user_delegation_key.signed_version', user_delegation_key.signed_version)
    _validate_not_none('user_delegation_key.signed_service', user_delegation_key.signed_service)
    _validate_not_none('user_delegation_key.value', user_delegation_key.value)


# wraps a given exception with the desired exception type
def _wrap_exception(ex, desired_type):
    msg = ""
    if len(ex.args) > 0:
        msg = ex.args[0]
    if version_info >= (3,):
        # Automatic chaining in Python 3 means we keep the trace
        return desired_type(msg)
    else:
        # There isn't a good solution in 2 for keeping the stack trace
        # in general, or that will not result in an error in 3
        # However, we can keep the previous error type and message
        # TODO: In the future we will log the trace
        return desired_type('{}: {}'.format(ex.__class__.__name__, msg))


class AzureSigningError(AzureException):
    """
    Represents a fatal error when attempting to sign a request.
    In general, the cause of this exception is user error. For example, the given account key is not valid.
    Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info.
    """
    pass
| mit | 5,352,687,308,642,604,000 | 40.399083 | 126 | 0.688753 | false |
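# Illustrative caller for the helpers above (a sketch, not part of the module):
# low-level signing failures get re-raised as AzureSigningError so callers see
# a stable exception type while the original class and message are preserved.
def _example_sign(account_key_error):
    try:
        raise account_key_error
    except Exception as ex:
        raise _wrap_exception(ex, AzureSigningError)

# _example_sign(ValueError('Incorrect padding'))
# -> AzureSigningError('ValueError: Incorrect padding') on Python 2,
#    AzureSigningError('Incorrect padding') with chaining on Python 3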
Nonse/Feel_Like | reservations/models.py | 1 | 3171 | from django.db import models
class Reservation(models.Model):
start_time = models.DateTimeField()
end_time = models.DateTimeField()
customer = models.ForeignKey('Customer')
coach = models.ForeignKey('Coach')
product = models.ForeignKey('Product')
location = models.CharField(max_length=200)
location_price = models.DecimalField(max_digits=10, decimal_places=2)
participants = models.IntegerField()
amount = models.DecimalField(max_digits=10, decimal_places=2)
invoice = models.ForeignKey('Invoice', null=True, blank=True, on_delete=models.SET_NULL)
def __unicode__(self):
return u'%s-%s' % (self.start_time.strftime('%Y-%m-%d, %H:%M'), self.end_time.strftime('%H:%M'))
class Customer(models.Model):
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
email = models.EmailField(max_length=254)
street_address = models.CharField(max_length=200)
postcode = models.CharField(max_length=5) #Finnish postal code length
city = models.CharField(max_length=100)
phone = models.CharField(max_length=100, null=True, blank=True)
    discount = models.DecimalField(max_digits=5, decimal_places=2)  # 999.99 max
class Meta:
ordering = ['last_name', 'first_name']
    def __unicode__(self):  # unicode for (Finnish) letters
return u'%s, %s' % (self.last_name, self.first_name)
class Coach(models.Model):
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
phone = models.CharField(max_length=100)
class Meta:
verbose_name_plural = 'Coaches'
def __unicode__(self):
return u'%s, %s' % (self.last_name, self.first_name)
class Product(models.Model):
name = models.CharField(max_length=100)
price = models.DecimalField(max_digits=10, decimal_places=2)
vat = models.DecimalField(max_digits=10, decimal_places=2)
def __unicode__(self):
return u'%s' % (self.name)
class Invoice(models.Model):
date = models.DateField()
total = models.DecimalField(max_digits=10, decimal_places=2)
customer = models.ForeignKey('Customer', null=True, blank=True)
ref_number = models.CharField(max_length=20, null=True, blank=True)
due_date = models.DateField(null=True, blank=True)
company = models.ForeignKey('Company')
def __unicode__(self):
return u'{}: {}'.format(
self.date.strftime('%Y-%m-%d'), self.customer
)
class Company(models.Model):
name = models.CharField(max_length=100)
street_address = models.CharField(max_length=200)
postcode = models.CharField(max_length=5) #Finnish postal code length
city = models.CharField(max_length=100)
contact_person = models.CharField(max_length=100)
phone = models.CharField(max_length=100)
business_id = models.CharField(max_length=100, null=True, blank=True)
iban = models.CharField(max_length=100, null=True, blank=True)
location_vat = models.DecimalField(max_digits=10, decimal_places=2)
class Meta:
verbose_name_plural = 'Company'
def __unicode__(self):
return u'%s' % (self.name)
| apache-2.0 | 4,771,702,382,574,460,000 | 35.448276 | 104 | 0.675497 | false |
shoaibkamil/asp | asp/jit/asp_module.py | 1 | 19070 | import codepy, codepy.jit, codepy.toolchain, codepy.bpl, codepy.cuda
from asp.util import *
import asp.codegen.cpp_ast as cpp_ast
import pickle
from variant_history import *
import sqlite3
import asp
import scala_module
class ASPDB(object):
def __init__(self, specializer, persistent=False):
"""
specializer must be specified so we avoid namespace collisions.
"""
self.specializer = specializer
if persistent:
# create db file or load db
# create a per-user cache directory
import tempfile, os
if os.name == 'nt':
username = os.environ['USERNAME']
else:
username = os.environ['LOGNAME']
self.cache_dir = tempfile.gettempdir() + "/asp_cache_" + username
if not os.access(self.cache_dir, os.F_OK):
os.mkdir(self.cache_dir)
self.db_file = self.cache_dir + "/aspdb.sqlite3"
self.connection = sqlite3.connect(self.db_file)
self.connection.execute("PRAGMA temp_store = MEMORY;")
self.connection.execute("PRAGMA synchronous = OFF;")
else:
self.db_file = None
self.connection = sqlite3.connect(":memory:")
def create_specializer_table(self):
self.connection.execute('create table '+self.specializer+' (fname text, variant text, key text, perf real)')
self.connection.commit()
def close(self):
self.connection.close()
def table_exists(self):
"""
Test if a table corresponding to this specializer exists.
"""
cursor = self.connection.cursor()
cursor.execute('select name from sqlite_master where name="%s"' % self.specializer)
result = cursor.fetchall()
return len(result) > 0
def insert(self, fname, variant, key, value):
if (not self.table_exists()):
self.create_specializer_table()
self.connection.execute('insert into '+self.specializer+' values (?,?,?,?)',
(fname, variant, key, value))
self.connection.commit()
def get(self, fname, variant=None, key=None):
"""
        Return a list of entries. If key and variant are not specified, all
        entries for fname are returned.
"""
if (not self.table_exists()):
self.create_specializer_table()
return []
cursor = self.connection.cursor()
query = "select * from %s where fname=?" % (self.specializer,)
params = (fname,)
if variant:
query += " and variant=?"
params += (variant,)
if key:
query += " and key=?"
params += (key,)
cursor.execute(query, params)
return cursor.fetchall()
def update(self, fname, variant, key, value):
"""
Updates an entry in the db. Overwrites the timing information with value.
If the entry does not exist, does an insert.
"""
if (not self.table_exists()):
self.create_specializer_table()
self.insert(fname, variant, key, value)
return
# check if the entry exists
query = "select count(*) from "+self.specializer+" where fname=? and variant=? and key=?;"
cursor = self.connection.cursor()
cursor.execute(query, (fname, variant, key))
count = cursor.fetchone()[0]
# if it exists, do an update, otherwise do an insert
if count > 0:
query = "update "+self.specializer+" set perf=? where fname=? and variant=? and key=?"
self.connection.execute(query, (value, fname, variant, key))
self.connection.commit()
else:
self.insert(fname, variant, key, value)
def delete(self, fname, variant, key):
"""
Deletes an entry from the db.
"""
if (not self.table_exists()):
return
query = "delete from "+self.specializer+" where fname=? and variant=? and key=?"
self.connection.execute(query, (fname, variant, key))
self.connection.commit()
def destroy_db(self):
"""
Delete the database.
"""
if not self.db_file:
return True
import os
try:
self.close()
os.remove(self.db_file)
        except Exception:
return False
else:
return True
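# Illustrative ASPDB round-trip (the specializer name, function/variant names,
# and timing value below are assumed):
#   db = ASPDB('my_specializer'); db.update('f', 'v0', 'k', 0.12)
#   db.get('f')  # -> [(u'f', u'v0', u'k', 0.12)]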
class SpecializedFunction(object):
"""
Class that encapsulates a function that is specialized. It keeps track of variants,
their timing information, which backend, functions to determine if a variant
can run, as well as a function to generate keys from parameters.
The signature for any run_check function is run(*args, **kwargs).
The signature for the key function is key(self, *args, **kwargs), where the args/kwargs are
what are passed to the specialized function.
"""
def __init__(self, name, backend, db, variant_names=[], variant_funcs=[], run_check_funcs=[],
key_function=None, call_policy=None):
self.name = name
self.backend = backend
self.db = db
self.variant_names = []
self.variant_funcs = []
self.run_check_funcs = []
self.call_policy = call_policy
if variant_names != [] and run_check_funcs == []:
run_check_funcs = [lambda *args,**kwargs: True]*len(variant_names)
for x in xrange(len(variant_names)):
self.add_variant(variant_names[x], variant_funcs[x], run_check_funcs[x])
if key_function:
self.key = key_function
def key(self, *args, **kwargs):
"""
Function to generate keys. This should almost always be overridden by a specializer, to make
sure the information stored in the key is actually useful.
"""
import hashlib
return hashlib.md5(str(args)+str(kwargs)).hexdigest()
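    # e.g. self.key(1, n=2) hashes the string "(1,){'n': 2}", giving a stable
    # hex digest for that argument combination.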
def add_variant(self, variant_name, variant_func, run_check_func=lambda *args,**kwargs: True):
"""
Add a variant of this function. Must have same call signature. Variant names must be unique.
The variant_func parameter should be a CodePy Function object or a string defining the function.
The run_check_func parameter should be a lambda function with signature run(*args,**kwargs).
"""
if variant_name in self.variant_names:
raise Exception("Attempting to add a variant with an already existing name %s to %s" %
(variant_name, self.name))
self.variant_names.append(variant_name)
self.variant_funcs.append(variant_func)
self.run_check_funcs.append(run_check_func)
if isinstance(self.backend.module, scala_module.ScalaModule):
self.backend.module.add_to_module(variant_func)
self.backend.module.add_to_init(variant_name)
elif isinstance(variant_func, basestring):
if isinstance(self.backend.module, codepy.cuda.CudaModule):#HACK because codepy's CudaModule doesn't have add_to_init()
self.backend.module.boost_module.add_to_module([cpp_ast.Line(variant_func)])
self.backend.module.boost_module.add_to_init([cpp_ast.Statement("boost::python::def(\"%s\", &%s)" % (variant_name, variant_name))])
else:
self.backend.module.add_to_module([cpp_ast.Line(variant_func)])
if self.call_policy == "python_gc":
self.backend.module.add_to_init([cpp_ast.Statement("boost::python::def(\"%s\", &%s, boost::python::return_value_policy<boost::python::manage_new_object>())" % (variant_name, variant_name))])
else:
self.backend.module.add_to_init([cpp_ast.Statement("boost::python::def(\"%s\", &%s)" % (variant_name, variant_name))])
else:
self.backend.module.add_function(variant_func)
self.backend.dirty = True
def pick_next_variant(self, *args, **kwargs):
"""
Logic to pick the next variant to run. If all variants have been run, then this should return the
fastest variant.
"""
# get variants that have run
already_run = self.db.get(self.name, key=self.key(*args, **kwargs))
if already_run == []:
already_run_variant_names = []
else:
already_run_variant_names = map(lambda x: x[1], already_run)
# which variants haven't yet run
candidates = set(self.variant_names) - set(already_run_variant_names)
# of these candidates, which variants *can* run
for x in candidates:
if self.run_check_funcs[self.variant_names.index(x)](*args, **kwargs):
return x
# if none left, pick fastest from those that have already run
return sorted(already_run, lambda x,y: cmp(x[3],y[3]))[0][1]
def __call__(self, *args, **kwargs):
"""
Calling an instance of SpecializedFunction will actually call either the next variant to test,
or the already-determined best variant.
"""
if self.backend.dirty:
self.backend.compile()
which = self.pick_next_variant(*args, **kwargs)
import time
start = time.time()
ret_val = self.backend.get_compiled_function(which).__call__(*args, **kwargs)
elapsed = time.time() - start
#FIXME: where should key function live?
#print "doing update with %s, %s, %s, %s" % (self.name, which, self.key(args, kwargs), elapsed)
self.db.update(self.name, which, self.key(*args, **kwargs), elapsed)
#TODO: Should we use db.update instead of db.insert to avoid O(N) ops on already_run_variant_names = map(lambda x: x[1], already_run)?
return ret_val
class HelperFunction(SpecializedFunction):
"""
HelperFunction defines a SpecializedFunction that is not timed, and usually not called directly
(although it can be).
"""
def __init__(self, name, func, backend):
self.name = name
self.backend = backend
self.variant_names, self.variant_funcs, self.run_check_funcs = [], [], []
self.call_policy = None
self.add_variant(name, func)
def __call__(self, *args, **kwargs):
if self.backend.dirty:
self.backend.compile()
return self.backend.get_compiled_function(self.name).__call__(*args, **kwargs)
class ASPBackend(object):
"""
Class to encapsulate a backend for Asp. A backend is the combination of a CodePy module
(which contains the actual functions) and a CodePy compiler toolchain.
"""
def __init__(self, module, toolchain, cache_dir, host_toolchain=None):
self.module = module
self.toolchain = toolchain
self.host_toolchain = host_toolchain
self.compiled_module = None
self.cache_dir = cache_dir
self.dirty = True
self.compilable = True
def compile(self):
"""
Trigger a compile of this backend. Note that CUDA needs to know about the C++
backend as well.
"""
if not self.compilable: return
if isinstance(self.module, codepy.cuda.CudaModule):
self.compiled_module = self.module.compile(self.host_toolchain,
self.toolchain,
debug=True, cache_dir=self.cache_dir)
else:
self.compiled_module = self.module.compile(self.toolchain,
debug=True, cache_dir=self.cache_dir)
self.dirty = False
def get_compiled_function(self, name):
"""
Return a callable for a raw compiled function (that is, this must be a variant name rather than
a function name).
"""
try:
func = getattr(self.compiled_module, name)
        except AttributeError:
raise AttributeError("Function %s not found in compiled module." % (name,))
return func
class ASPModule(object):
"""
ASPModule is the main coordination class for specializers. A specializer creates an ASPModule to contain
all of its specialized functions, and adds functions/libraries/etc to the ASPModule.
ASPModule uses ASPBackend instances for each backend, ASPDB for its backing db for recording timing info,
and instances of SpecializedFunction and HelperFunction for specialized and helper functions, respectively.
"""
#FIXME: specializer should be required.
def __init__(self, specializer="default_specializer", cache_dir=None, use_cuda=False, use_cilk=False, use_tbb=False, use_pthreads=False, use_scala=False):
self.specialized_functions= {}
self.helper_method_names = []
self.db = ASPDB(specializer)
if cache_dir:
self.cache_dir = cache_dir
else:
# create a per-user cache directory
import tempfile, os
if os.name == 'nt':
username = os.environ['USERNAME']
else:
username = os.environ['LOGNAME']
self.cache_dir = tempfile.gettempdir() + "/asp_cache_" + username
if not os.access(self.cache_dir, os.F_OK):
os.mkdir(self.cache_dir)
self.backends = {}
self.backends["c++"] = ASPBackend(codepy.bpl.BoostPythonModule(),
codepy.toolchain.guess_toolchain(),
self.cache_dir)
if use_cuda:
self.backends["cuda"] = ASPBackend(codepy.cuda.CudaModule(self.backends["c++"].module),
codepy.toolchain.guess_nvcc_toolchain(),
self.cache_dir,
self.backends["c++"].toolchain)
self.backends['cuda'].module.add_to_preamble([cpp_ast.Include('cuda.h', True)]) # codepy.CudaModule doesn't do this automatically for some reason
self.backends['cuda'].module.add_to_preamble([cpp_ast.Include('cuda_runtime.h', True)]) # codepy.CudaModule doesn't do this automatically for some reason
self.backends['c++'].module.add_to_preamble([cpp_ast.Include('cuda_runtime.h', True)]) # codepy.CudaModule doesn't do this automatically for some reason
self.backends["cuda"].toolchain.cflags += ["-shared"]
if use_cilk:
self.backends["cilk"] = self.backends["c++"]
self.backends["cilk"].toolchain.cc = "icc"
if use_tbb:
self.backends["tbb"] = self.backends["c++"]
self.backends["tbb"].toolchain.cflags += ["-ltbb"]
if use_pthreads:
self.backends["pthreads"] = self.backends["c++"]
self.backends["pthreads"].toolchain.cflags += ["-pthread"]
if use_scala:
self.backends["scala"] = ASPBackend(scala_module.ScalaModule(),
scala_module.ScalaToolchain(),
self.cache_dir)
def add_library(self, feature, include_dirs, library_dirs=[], libraries=[], backend="c++"):
self.backends[backend].toolchain.add_library(feature, include_dirs, library_dirs, libraries)
def add_cuda_arch_spec(self, arch):
archflag = '-arch='
if 'sm_' not in arch: archflag += 'sm_'
archflag += arch
self.backends["cuda"].toolchain.cflags += [archflag]
def add_header(self, include_file, brackets=False, backend="c++"):
"""
Add a header (e.g. #include "foo.h") to the module source file.
With brackets=True, it will be C++-style #include <foo> instead.
"""
self.backends[backend].module.add_to_preamble([cpp_ast.Include(include_file, brackets)])
def add_to_preamble(self, pa, backend="c++"):
if isinstance(pa, basestring):
pa = [cpp_ast.Line(pa)]
self.backends[backend].module.add_to_preamble(pa)
def add_to_init(self, stmt, backend="c++"):
if isinstance(stmt, str):
stmt = [cpp_ast.Line(stmt)]
if backend == "cuda":
self.backends[backend].module.boost_module.add_to_init(stmt) #HACK because codepy's CudaModule doesn't have add_to_init()
else:
self.backends[backend].module.add_to_init(stmt)
def add_to_module(self, block, backend="c++"):
if isinstance(block, basestring):
block = [cpp_ast.Line(block)]
self.backends[backend].module.add_to_module(block)
def add_function(self, fname, funcs, variant_names=[], run_check_funcs=[], key_function=None,
backend="c++", call_policy=None):
"""
Add a specialized function to the Asp module. funcs can be a list of variants, but then
variant_names is required (also a list). Each item in funcs should be a string function or
a cpp_ast FunctionDef.
"""
if not isinstance(funcs, list):
funcs = [funcs]
variant_names = [fname]
self.specialized_functions[fname] = SpecializedFunction(fname, self.backends[backend], self.db, variant_names,
variant_funcs=funcs,
run_check_funcs=run_check_funcs,
key_function=key_function,
call_policy=call_policy)
def add_helper_function(self, fname, func, backend="c++"):
"""
    Add a helper function, which is a specialized function that is not timed and has a single variant.
"""
self.specialized_functions[fname] = HelperFunction(fname, func, self.backends[backend])
def expose_class(self, classname, backend="c++"):
"""
Expose a class or struct from C++ to Python, letting us pass instances back and forth
between Python and C++.
TODO: allow exposing *functions* within the class
"""
self.backends[backend].module.add_to_init([cpp_ast.Line("boost::python::class_<%s>(\"%s\");\n" % (classname, classname))])
def __getattr__(self, name):
if name in self.specialized_functions:
return self.specialized_functions[name]
else:
raise AttributeError("No method %s found; did you add it to this ASPModule?" % name)
def generate(self):
"""
Utility function for, during development, dumping out the generated
source from all the underlying backends.
"""
src = ""
for x in self.backends.keys():
src += "\nSource code for backend '" + x + "':\n"
src += str(self.backends[x].module.generate())
return src
| bsd-3-clause | 484,791,479,455,267,650 | 40.187905 | 210 | 0.581122 | false |
aptivate/ihpresultsweb | ihp/submissions/indicator_funcs.py | 1 | 4263 | from models import AgencyCountries, Country8DPFix, CountryExclusion, NotApplicable, Rating
from consts import NA_STR
base_selector = lambda q : q.baseline_value
cur_selector = lambda q : q.latest_value
def float_or_none(x):
if NotApplicable.objects.is_not_applicable(x):
return NA_STR
try:
return float(x)
    except (TypeError, ValueError):
return None
def _sum_values(qs, selector):
nas = [q for q in qs if float_or_none(selector(q)) == NA_STR]
nones = [q for q in qs if float_or_none(selector(q)) == None]
qs = [q for q in qs if float_or_none(selector(q)) not in [None, NA_STR]]
if len(qs) == 0:
return None if len(nones) > 0 else NA_STR
return sum([float(selector(q)) for q in qs])
def func_8dpfix(qs, agency, selector, q):
qs_countries = [q.submission.country for q in qs]
countries = Country8DPFix.objects.filter(agency=agency, country__in=qs_countries)
denom = float(len(agency.countries.filter(country__in=qs_countries)))
if selector == base_selector:
num = len([country for country in countries if country.baseline_progress == Rating.TICK])
elif selector == cur_selector:
num = len([country for country in countries if country.latest_progress == Rating.TICK])
if denom > 0:
return num / denom * 100
else:
return None
def count_factory(value):
def count_value(qs, agency_or_country, selector, q):
qs = [qq for qq in qs if qq.question_number==q]
if len(qs) == 0:
return 0
if selector == base_selector:
return len([q for q in qs if q.baseline_value.lower() == value.lower()])
elif selector == cur_selector:
return len([q for q in qs if q.latest_value.lower() == value.lower()])
return count_value
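# Illustrative use of the factory (the queryset, agency, and question number
# are assumed): count_yes = count_factory("yes") then
# count_yes(qs, agency, cur_selector, "1q") counts entries of question "1q"
# whose latest value equals "yes".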
def country_perc_factory(value):
def perc_value(qs, agency, selector, q):
        # In some countries certain processes do not exist;
        # the watchlist reduces the denominator if the agency
        # is active in such a country for a particular question.
count_value = count_factory(value)
num_countries = float(len(qs))
count = count_value(qs, agency, selector, q)
return count / num_countries * 100 if num_countries > 0 else NA_STR
return perc_value
def equals_or_zero(val):
def test(qs, agency_or_country, selector, q):
value = val.lower()
qs = [qq for qq in qs if qq.question_number==q]
try:
assert len(qs) == 1
if selector(qs[0]) == None:
_val = 0
else:
_val = 100 if selector(qs[0]).lower() == value else 0
return _val
except AssertionError:
return None
return test
def equals_yes_or_no(val):
def test(qs, agency_or_country, selector, q):
value = val.lower()
qs = [qq for qq in qs if qq.question_number==q]
assert len(qs) == 1
if selector(qs[0]) == None:
_val = ""
else:
_val = "y" if selector(qs[0]).lower() == value else "n"
return _val
return test
def combine_yesnos(qs, agency_or_country, selector, *args):
values = []
for arg in args:
qs1 = [q for q in qs if q.question_number==arg]
if selector(qs1[0]) == None:
val = " "
else:
val = "y" if selector(qs1[0]).lower() == "yes" else "n"
values.append(val)
return "".join(values)
def calc_numdenom(qs, agency_or_country, selector, numq, denomq):
den = _sum_values([q for q in qs if q.question_number==denomq], selector)
num = _sum_values([q for q in qs if q.question_number==numq], selector)
if den in [NA_STR, None] or num in [NA_STR, None]:
return den
ratio = NA_STR
if den > 0: ratio = num / den * 100
return ratio
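# e.g. with a summed numerator of 3 and denominator of 4 the ratio is 75.0;
# an NA or None numerator or denominator returns the denominator unchanged.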
def calc_one_minus_numdenom(qs, agency_or_country, selector, numq, denomq):
ratio = calc_numdenom(qs, agency_or_country, selector, numq, denomq)
ratio = 100 - ratio if ratio not in [NA_STR, None] else ratio
return ratio
def sum_values(qs, agency_or_country, selector, *args):
qs = [q for q in qs if q.question_number in args]
return _sum_values(qs, selector)
| gpl-3.0 | -8,658,099,590,199,005,000 | 31.295455 | 97 | 0.600516 | false |
SSJohns/osf.io | website/project/views/node.py | 1 | 44261 | # -*- coding: utf-8 -*-
import logging
import httplib as http
import math
from itertools import islice
from flask import request
from modularodm import Q
from modularodm.exceptions import ModularOdmException, ValidationValueError
from framework import status
from framework.utils import iso8601format
from framework.mongo import StoredObject
from framework.flask import redirect
from framework.auth.decorators import must_be_logged_in, collect_auth
from framework.exceptions import HTTPError, PermissionsError
from website import language
from website.util import paths
from website.util import rubeus
from website.exceptions import NodeStateError
from website.project import new_node, new_private_link
from website.project.decorators import (
must_be_contributor_or_public_but_not_anonymized,
must_be_contributor_or_public,
must_be_valid_project,
must_have_permission,
must_not_be_registration,
http_error_if_disk_saving_mode
)
from website.tokens import process_token_or_pass
from website.util.permissions import ADMIN, READ, WRITE, CREATOR_PERMISSIONS
from website.util.rubeus import collect_addon_js
from website.project.model import has_anonymous_link, get_pointer_parent, NodeUpdateError, validate_title
from website.project.forms import NewNodeForm
from website.project.metadata.utils import serialize_meta_schemas
from website.models import Node, Pointer, WatchConfig, PrivateLink, Comment
from website import settings
from website.views import _render_nodes, find_bookmark_collection, validate_page_num
from website.profile import utils
from website.project.licenses import serialize_node_license_record
from website.util.sanitize import strip_html
from website.util import rapply
r_strip_html = lambda collection: rapply(collection, strip_html)
logger = logging.getLogger(__name__)
@must_be_valid_project
@must_have_permission(WRITE)
@must_not_be_registration
def edit_node(auth, node, **kwargs):
post_data = request.json
edited_field = post_data.get('name')
value = post_data.get('value', '')
new_val = None
if edited_field == 'title':
try:
node.set_title(value, auth=auth)
except ValidationValueError as e:
raise HTTPError(
http.BAD_REQUEST,
data=dict(message_long=e.message)
)
new_val = node.title
elif edited_field == 'description':
node.set_description(value, auth=auth)
new_val = node.description
elif edited_field == 'category':
node.category = new_val = value
try:
node.save()
except ValidationValueError as e:
raise HTTPError(
http.BAD_REQUEST,
data=dict(message_long=e.message)
)
return {
'status': 'success',
'newValue': new_val # Used by x-editable widget to reflect changes made by sanitizer
}
##############################################################################
# New Project
##############################################################################
@must_be_logged_in
def project_new(**kwargs):
return {}
@must_be_logged_in
def project_new_post(auth, **kwargs):
user = auth.user
data = request.get_json()
title = strip_html(data.get('title'))
title = title.strip()
category = data.get('category', 'project')
template = data.get('template')
description = strip_html(data.get('description'))
new_project = {}
if template:
original_node = Node.load(template)
changes = {
'title': title,
'category': category,
'template_node': original_node,
}
if description:
changes['description'] = description
project = original_node.use_as_template(
auth=auth,
changes={
template: changes,
}
)
else:
try:
project = new_node(category, title, user, description)
except ValidationValueError as e:
raise HTTPError(
http.BAD_REQUEST,
data=dict(message_long=e.message)
)
new_project = _view_project(project, auth)
return {
'projectUrl': project.url,
'newNode': new_project['node'] if new_project else None
}, http.CREATED
@must_be_logged_in
@must_be_valid_project
def project_new_from_template(auth, node, **kwargs):
new_node = node.use_as_template(
auth=auth,
changes=dict(),
)
return {'url': new_node.url}, http.CREATED, None
##############################################################################
# New Node
##############################################################################
@must_be_valid_project
@must_have_permission(WRITE)
@must_not_be_registration
def project_new_node(auth, node, **kwargs):
form = NewNodeForm(request.form)
user = auth.user
if form.validate():
try:
new_component = new_node(
title=strip_html(form.title.data),
user=user,
category=form.category.data,
parent=node,
)
except ValidationValueError as e:
raise HTTPError(
http.BAD_REQUEST,
data=dict(message_long=e.message)
)
redirect_url = node.url
message = (
'Your component was created successfully. You can keep working on the project page below, '
'or go to the new <u><a href={component_url}>component</a></u>.'
).format(component_url=new_component.url)
if form.inherit_contributors.data and node.has_permission(user, WRITE):
for contributor in node.contributors:
perm = CREATOR_PERMISSIONS if contributor is user else node.get_permissions(contributor)
new_component.add_contributor(contributor, permissions=perm, auth=auth)
new_component.save()
redirect_url = new_component.url + 'contributors/'
message = (
'Your component was created successfully. You can edit the contributor permissions below, '
'work on your <u><a href={component_url}>component</a></u> or return to the <u> '
'<a href="{project_url}">project page</a></u>.'
).format(component_url=new_component.url, project_url=node.url)
status.push_status_message(message, kind='info', trust=True)
return {
'status': 'success',
}, 201, None, redirect_url
else:
# TODO: This function doesn't seem to exist anymore?
status.push_errors_to_status(form.errors)
raise HTTPError(http.BAD_REQUEST, redirect_url=node.url)
@must_be_logged_in
@must_be_valid_project
def project_before_fork(auth, node, **kwargs):
user = auth.user
prompts = node.callback('before_fork', user=user)
if node.has_pointers_recursive:
prompts.append(
language.BEFORE_FORK_HAS_POINTERS.format(
category=node.project_or_component
)
)
return {'prompts': prompts}
@must_be_logged_in
@must_be_valid_project
def project_before_template(auth, node, **kwargs):
prompts = []
for addon in node.get_addons():
if 'node' in addon.config.configs:
if addon.to_json(auth.user)['addon_full_name']:
prompts.append(addon.to_json(auth.user)['addon_full_name'])
return {'prompts': prompts}
@must_be_logged_in
@must_be_valid_project
@http_error_if_disk_saving_mode
def node_fork_page(auth, node, **kwargs):
try:
fork = node.fork_node(auth)
except PermissionsError:
raise HTTPError(
http.FORBIDDEN,
redirect_url=node.url
)
message = '{} has been successfully forked.'.format(
node.project_or_component.capitalize()
)
status.push_status_message(message, kind='success', trust=False)
return fork.url
@must_be_valid_project
@must_be_contributor_or_public_but_not_anonymized
def node_registrations(auth, node, **kwargs):
return _view_project(node, auth, primary=True)
@must_be_valid_project
@must_be_contributor_or_public_but_not_anonymized
def node_forks(auth, node, **kwargs):
return _view_project(node, auth, primary=True)
@must_be_valid_project
@must_be_logged_in
@must_have_permission(READ)
def node_setting(auth, node, **kwargs):
auth.user.update_affiliated_institutions_by_email_domain()
auth.user.save()
ret = _view_project(node, auth, primary=True)
addons_enabled = []
addon_enabled_settings = []
for addon in node.get_addons():
addons_enabled.append(addon.config.short_name)
if 'node' in addon.config.configs:
config = addon.to_json(auth.user)
# inject the MakoTemplateLookup into the template context
# TODO inject only short_name and render fully client side
config['template_lookup'] = addon.config.template_lookup
config['addon_icon_url'] = addon.config.icon_url
addon_enabled_settings.append(config)
addon_enabled_settings = sorted(addon_enabled_settings, key=lambda addon: addon['addon_full_name'].lower())
ret['addon_categories'] = settings.ADDON_CATEGORIES
ret['addons_available'] = sorted([
addon
for addon in settings.ADDONS_AVAILABLE
if 'node' in addon.owners
and addon.short_name not in settings.SYSTEM_ADDED_ADDONS['node'] and addon.short_name != 'wiki'
], key=lambda addon: addon.full_name.lower())
for addon in settings.ADDONS_AVAILABLE:
if 'node' in addon.owners and addon.short_name not in settings.SYSTEM_ADDED_ADDONS['node'] and addon.short_name == 'wiki':
ret['wiki'] = addon
break
ret['addons_enabled'] = addons_enabled
ret['addon_enabled_settings'] = addon_enabled_settings
ret['addon_capabilities'] = settings.ADDON_CAPABILITIES
ret['addon_js'] = collect_node_config_js(node.get_addons())
ret['include_wiki_settings'] = node.include_wiki_settings(auth.user)
ret['comments'] = {
'level': node.comment_level,
}
ret['categories'] = settings.NODE_CATEGORY_MAP
ret['categories'].update({
'project': 'Project'
})
return ret
def collect_node_config_js(addons):
"""Collect webpack bundles for each of the addons' node-cfg.js modules. Return
the URLs for each of the JS modules to be included on the node addons config page.
:param list addons: List of node's addon config records.
"""
js_modules = []
for addon in addons:
js_path = paths.resolve_addon_path(addon.config, 'node-cfg.js')
if js_path:
js_modules.append(js_path)
return js_modules
@must_have_permission(WRITE)
@must_not_be_registration
def node_choose_addons(auth, node, **kwargs):
node.config_addons(request.json, auth)
@must_be_valid_project
@must_have_permission(READ)
def node_contributors(auth, node, **kwargs):
ret = _view_project(node, auth, primary=True)
ret['contributors'] = utils.serialize_contributors(node.contributors, node)
ret['adminContributors'] = utils.serialize_contributors(node.admin_contributors, node, admin=True)
return ret
@must_have_permission(ADMIN)
def configure_comments(node, **kwargs):
comment_level = request.json.get('commentLevel')
if not comment_level:
node.comment_level = None
elif comment_level in ['public', 'private']:
node.comment_level = comment_level
else:
raise HTTPError(http.BAD_REQUEST)
node.save()
##############################################################################
# View Project
##############################################################################
@must_be_valid_project(retractions_valid=True)
@must_be_contributor_or_public
@process_token_or_pass
def view_project(auth, node, **kwargs):
primary = '/api/v1' not in request.path
ret = _view_project(node, auth, primary=primary)
ret['addon_capabilities'] = settings.ADDON_CAPABILITIES
# Collect the URIs to the static assets for addons that have widgets
ret['addon_widget_js'] = list(collect_addon_js(
node,
filename='widget-cfg.js',
config_entry='widget'
))
ret.update(rubeus.collect_addon_assets(node))
return ret
# Reorder components
@must_be_valid_project
@must_not_be_registration
@must_have_permission(WRITE)
def project_reorder_components(node, **kwargs):
"""Reorders the components in a project's component list.
:param-json list new_list: List of strings that include node IDs and
node type delimited by ':'.
"""
# TODO(sloria): Change new_list parameter to be an array of objects
# {
# 'newList': {
# {'key': 'abc123', 'type': 'node'}
# }
# }
new_list = [
tuple(n.split(':'))
for n in request.json.get('new_list', [])
]
nodes_new = [
StoredObject.get_collection(schema).load(key)
for key, schema in new_list
]
valid_nodes = [
n for n in node.nodes
if not n.is_deleted
]
deleted_nodes = [
n for n in node.nodes
if n.is_deleted
]
if len(valid_nodes) == len(nodes_new) and set(valid_nodes) == set(nodes_new):
node.nodes = nodes_new + deleted_nodes
node.save()
return {}
logger.error('Got invalid node list in reorder components')
raise HTTPError(http.BAD_REQUEST)
##############################################################################
@must_be_valid_project
@must_be_contributor_or_public
def project_statistics(auth, node, **kwargs):
ret = _view_project(node, auth, primary=True)
ret['node']['keenio_read_key'] = node.keenio_read_key
return ret
@must_be_valid_project
@must_be_contributor_or_public
def project_statistics_redirect(auth, node, **kwargs):
return redirect(node.web_url_for('project_statistics', _guid=True))
###############################################################################
# Make Private/Public
###############################################################################
@must_be_valid_project
@must_have_permission(ADMIN)
def project_before_set_public(node, **kwargs):
prompt = node.callback('before_make_public')
return {
'prompts': prompt
}
@must_be_valid_project
@must_have_permission(ADMIN)
def project_set_privacy(auth, node, **kwargs):
permissions = kwargs.get('permissions')
if permissions is None:
raise HTTPError(http.BAD_REQUEST)
try:
node.set_privacy(permissions, auth)
except NodeStateError as e:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_short="Can't change privacy",
message_long=e.message
))
return {
'status': 'success',
'permissions': permissions,
}
@must_be_valid_project
@must_be_contributor_or_public
@must_not_be_registration
def watch_post(auth, node, **kwargs):
user = auth.user
watch_config = WatchConfig(node=node,
digest=request.json.get('digest', False),
immediate=request.json.get('immediate', False))
try:
user.watch(watch_config)
except ValueError: # Node is already being watched
raise HTTPError(http.BAD_REQUEST)
user.save()
return {
'status': 'success',
'watchCount': node.watches.count()
}
@must_be_valid_project
@must_be_contributor_or_public
@must_not_be_registration
def unwatch_post(auth, node, **kwargs):
user = auth.user
watch_config = WatchConfig(node=node,
digest=request.json.get('digest', False),
immediate=request.json.get('immediate', False))
try:
user.unwatch(watch_config)
except ValueError: # Node isn't being watched
raise HTTPError(http.BAD_REQUEST)
return {
'status': 'success',
'watchCount': node.watches.count()
}
@must_be_valid_project
@must_be_contributor_or_public
@must_not_be_registration
def togglewatch_post(auth, node, **kwargs):
'''View for toggling watch mode for a node.'''
# TODO: refactor this, watch_post, unwatch_post (@mambocab)
user = auth.user
watch_config = WatchConfig(
node=node,
digest=request.json.get('digest', False),
immediate=request.json.get('immediate', False)
)
try:
if user.is_watching(node):
user.unwatch(watch_config)
else:
user.watch(watch_config)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
user.save()
return {
'status': 'success',
'watchCount': node.watches.count(),
'watched': user.is_watching(node)
}
@must_be_valid_project
@must_not_be_registration
@must_have_permission(WRITE)
def update_node(auth, node, **kwargs):
    # node.update() consults the node.WRITABLE_WHITELIST key list, which only
    # allows contributors with write permission to modify the category, title,
    # and description.
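    # Illustrative request body (field names assumed from the whitelist):
    #   {"title": "New title", "category": "project", "description": "..."}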
data = r_strip_html(request.get_json())
try:
updated_field_names = node.update(data, auth=auth)
except NodeUpdateError as e:
raise HTTPError(400, data=dict(
message_short="Failed to update attribute '{0}'".format(e.key),
message_long=e.reason
))
    # Need to cast tags to a string to make them JSON-serializable
updated_fields_dict = {
key: getattr(node, key) if key != 'tags' else [str(tag) for tag in node.tags]
for key in updated_field_names
if key != 'logs' and key != 'date_modified'
}
node.save()
return {'updated_fields': updated_fields_dict}
@must_be_valid_project
@must_have_permission(ADMIN)
@must_not_be_registration
def component_remove(auth, node, **kwargs):
"""Remove component, and recursively remove its children. If node has a
parent, add log and redirect to parent; else redirect to user dashboard.
"""
try:
node.remove_node(auth)
except NodeStateError as e:
raise HTTPError(
http.BAD_REQUEST,
data={
'message_short': 'Error',
'message_long': 'Could not delete component: ' + e.message
},
)
node.save()
message = '{} has been successfully deleted.'.format(
node.project_or_component.capitalize()
)
status.push_status_message(message, kind='success', trust=False)
parent = node.parent_node
if parent and parent.can_view(auth):
redirect_url = node.node__parent[0].url
else:
redirect_url = '/dashboard/'
return {
'url': redirect_url,
}
@must_be_valid_project
@must_have_permission(ADMIN)
def remove_private_link(*args, **kwargs):
link_id = request.json['private_link_id']
try:
link = PrivateLink.load(link_id)
link.is_deleted = True
link.save()
except ModularOdmException:
raise HTTPError(http.NOT_FOUND)
# TODO: Split into separate functions
def _render_addon(node):
widgets = {}
configs = {}
js = []
css = []
for addon in node.get_addons():
configs[addon.config.short_name] = addon.config.to_json()
js.extend(addon.config.include_js.get('widget', []))
css.extend(addon.config.include_css.get('widget', []))
js.extend(addon.config.include_js.get('files', []))
css.extend(addon.config.include_css.get('files', []))
return widgets, configs, js, css
def _should_show_wiki_widget(node, user):
has_wiki = bool(node.get_addon('wiki'))
wiki_page = node.get_wiki_page('home', None)
if not node.has_permission(user, 'write'):
return has_wiki and wiki_page and wiki_page.html(node)
else:
return has_wiki
def _view_project(node, auth, primary=False):
"""Build a JSON object containing everything needed to render
project.view.mako.
"""
user = auth.user
parent = node.find_readable_antecedent(auth)
if user:
bookmark_collection = find_bookmark_collection(user)
bookmark_collection_id = bookmark_collection._id
in_bookmark_collection = bookmark_collection.pointing_at(node._primary_key) is not None
else:
in_bookmark_collection = False
bookmark_collection_id = ''
view_only_link = auth.private_key or request.args.get('view_only', '').strip('/')
anonymous = has_anonymous_link(node, auth)
widgets, configs, js, css = _render_addon(node)
redirect_url = node.url + '?view_only=None'
disapproval_link = ''
if (node.is_pending_registration and node.has_permission(user, ADMIN)):
disapproval_link = node.root.registration_approval.stashed_urls.get(user._id, {}).get('reject', '')
if (node.is_pending_embargo and node.has_permission(user, ADMIN)):
disapproval_link = node.root.embargo.stashed_urls.get(user._id, {}).get('reject', '')
# Before page load callback; skip if not primary call
if primary:
for addon in node.get_addons():
messages = addon.before_page_load(node, user) or []
for message in messages:
status.push_status_message(message, kind='info', dismissible=False, trust=True)
data = {
'node': {
'disapproval_link': disapproval_link,
'id': node._primary_key,
'title': node.title,
'category': node.category_display,
'category_short': node.category,
'node_type': node.project_or_component,
'description': node.description or '',
'license': serialize_node_license_record(node.license),
'url': node.url,
'api_url': node.api_url,
'absolute_url': node.absolute_url,
'redirect_url': redirect_url,
'display_absolute_url': node.display_absolute_url,
'update_url': node.api_url_for('update_node'),
'in_dashboard': in_bookmark_collection,
'is_public': node.is_public,
'is_archiving': node.archiving,
'date_created': iso8601format(node.date_created),
'date_modified': iso8601format(node.logs[-1].date) if node.logs else '',
'tags': [tag._primary_key for tag in node.tags],
'children': bool(node.nodes_active),
'is_registration': node.is_registration,
'is_pending_registration': node.is_pending_registration,
'is_retracted': node.is_retracted,
'is_pending_retraction': node.is_pending_retraction,
'retracted_justification': getattr(node.retraction, 'justification', None),
'embargo_end_date': node.embargo_end_date.strftime('%A, %b. %d, %Y') if node.embargo_end_date else False,
'is_pending_embargo': node.is_pending_embargo,
'is_embargoed': node.is_embargoed,
'is_pending_embargo_termination': node.is_embargoed and (
node.embargo_termination_approval and
node.embargo_termination_approval.is_pending_approval
),
'registered_from_url': node.registered_from.url if node.is_registration else '',
'registered_date': iso8601format(node.registered_date) if node.is_registration else '',
'root_id': node.root._id if node.root else None,
'registered_meta': node.registered_meta,
'registered_schemas': serialize_meta_schemas(node.registered_schema),
'registration_count': node.registrations_all.count(),
'is_fork': node.is_fork,
'forked_from_id': node.forked_from._primary_key if node.is_fork else '',
'forked_from_display_absolute_url': node.forked_from.display_absolute_url if node.is_fork else '',
'forked_date': iso8601format(node.forked_date) if node.is_fork else '',
'fork_count': node.forks.count(),
'templated_count': node.templated_list.count(),
'watched_count': node.watches.count(),
'private_links': [x.to_json() for x in node.private_links_active],
'link': view_only_link,
'anonymous': anonymous,
'points': len(node.get_points(deleted=False, folders=False)),
'comment_level': node.comment_level,
'has_comments': bool(Comment.find(Q('node', 'eq', node))),
'has_children': bool(Comment.find(Q('node', 'eq', node))),
'identifiers': {
'doi': node.get_identifier_value('doi'),
'ark': node.get_identifier_value('ark'),
},
'institutions': get_affiliated_institutions(node) if node else [],
'alternative_citations': [citation.to_json() for citation in node.alternative_citations],
'has_draft_registrations': node.has_active_draft_registrations,
'contributors': [contributor._id for contributor in node.contributors],
'is_public_files_collection': node.is_public_files_collection
},
'parent_node': {
'exists': parent is not None,
'id': parent._primary_key if parent else '',
'title': parent.title if parent else '',
'category': parent.category_display if parent else '',
'url': parent.url if parent else '',
'api_url': parent.api_url if parent else '',
'absolute_url': parent.absolute_url if parent else '',
'registrations_url': parent.web_url_for('node_registrations') if parent else '',
'is_public': parent.is_public if parent else '',
'is_contributor': parent.is_contributor(user) if parent else '',
'can_view': parent.can_view(auth) if parent else False
},
'user': {
'is_contributor': node.is_contributor(user),
'is_admin': node.has_permission(user, ADMIN),
'is_admin_parent': parent.is_admin_parent(user) if parent else False,
'can_edit': (node.can_edit(auth)
and not node.is_registration),
'has_read_permissions': node.has_permission(user, READ),
'permissions': node.get_permissions(user) if user else [],
'is_watching': user.is_watching(node) if user else False,
'id': user._id if user else None,
'username': user.username if user else None,
'fullname': user.fullname if user else '',
'can_comment': node.can_comment(auth),
'show_wiki_widget': _should_show_wiki_widget(node, user),
'dashboard_id': bookmark_collection_id,
'institutions': get_affiliated_institutions(user) if user else [],
},
'badges': _get_badge(user),
# TODO: Namespace with nested dicts
'addons_enabled': node.get_addon_names(),
'addons': configs,
'addon_widgets': widgets,
'addon_widget_js': js,
'addon_widget_css': css,
'node_categories': [
{'value': key, 'display_name': value}
for key, value in settings.NODE_CATEGORY_MAP.iteritems()
]
}
return data
def get_affiliated_institutions(obj):
ret = []
for institution in obj.affiliated_institutions:
ret.append({
'name': institution.name,
'logo_path': institution.logo_path,
'id': institution._id,
})
return ret
def _get_badge(user):
if user:
badger = user.get_addon('badges')
if badger:
return {
'can_award': badger.can_award,
'badges': badger.get_badges_json()
}
return {}
def _get_children(node, auth, indent=0):
children = []
for child in node.nodes_primary:
if not child.is_deleted and child.has_permission(auth.user, ADMIN):
children.append({
'id': child._primary_key,
'title': child.title,
'indent': indent,
'is_public': child.is_public,
'parent_id': child.parent_id,
})
children.extend(_get_children(child, auth, indent + 1))
return children
@must_be_valid_project
@must_have_permission(ADMIN)
def private_link_table(node, **kwargs):
data = {
'node': {
'absolute_url': node.absolute_url,
'private_links': [x.to_json() for x in node.private_links_active],
}
}
return data
@collect_auth
@must_be_valid_project
@must_have_permission(ADMIN)
def get_editable_children(auth, node, **kwargs):
children = _get_children(node, auth)
return {
'node': {'id': node._id, 'title': node.title, 'is_public': node.is_public},
'children': children,
}
@must_be_valid_project
def get_recent_logs(node, **kwargs):
logs = list(reversed(node.logs._to_primary_keys()))[:3]
return {'logs': logs}
def _get_summary(node, auth, primary=True, link_id=None, show_path=False):
# TODO(sloria): Refactor this or remove (lots of duplication with _view_project)
summary = {
'id': link_id if link_id else node._id,
'primary': primary,
'is_registration': node.is_registration,
'is_fork': node.is_fork,
'is_pending_registration': node.is_pending_registration,
'is_retracted': node.is_retracted,
'is_pending_retraction': node.is_pending_retraction,
'embargo_end_date': node.embargo_end_date.strftime('%A, %b. %d, %Y') if node.embargo_end_date else False,
'is_pending_embargo': node.is_pending_embargo,
'is_embargoed': node.is_embargoed,
'archiving': node.archiving,
}
if node.can_view(auth):
summary.update({
'can_view': True,
'can_edit': node.can_edit(auth),
'primary_id': node._id,
'url': node.url,
'primary': primary,
'api_url': node.api_url,
'title': node.title,
'category': node.category,
'node_type': node.project_or_component,
'is_fork': node.is_fork,
'is_registration': node.is_registration,
'anonymous': has_anonymous_link(node, auth),
'registered_date': node.registered_date.strftime('%Y-%m-%d %H:%M UTC')
if node.is_registration
else None,
'forked_date': node.forked_date.strftime('%Y-%m-%d %H:%M UTC')
if node.is_fork
else None,
'ua_count': None,
'ua': None,
'non_ua': None,
'addons_enabled': node.get_addon_names(),
'is_public': node.is_public,
'parent_title': node.parent_node.title if node.parent_node else None,
'parent_is_public': node.parent_node.is_public if node.parent_node else False,
'show_path': show_path,
'nlogs': len(node.logs),
})
else:
summary['can_view'] = False
# TODO: Make output format consistent with _view_project
return {
'summary': summary,
}
@collect_auth
@must_be_valid_project(retractions_valid=True)
def get_summary(auth, node, **kwargs):
primary = kwargs.get('primary')
link_id = kwargs.get('link_id')
show_path = kwargs.get('show_path', False)
return _get_summary(
node, auth, primary=primary, link_id=link_id, show_path=show_path
)
@must_be_contributor_or_public
def get_readable_descendants(auth, node, **kwargs):
descendants = []
for child in node.nodes:
if request.args.get('permissions'):
perm = request.args['permissions'].lower().strip()
if perm not in child.get_permissions(auth.user):
continue
if child.is_deleted:
continue
elif child.can_view(auth):
descendants.append(child)
elif not child.primary:
if node.has_permission(auth.user, 'write'):
descendants.append(child)
continue
else:
for descendant in child.find_readable_descendants(auth):
descendants.append(descendant)
return _render_nodes(descendants, auth)
def node_child_tree(user, node_ids):
""" Format data to test for node privacy settings for use in treebeard.
"""
items = []
for node_id in node_ids:
node = Node.load(node_id)
assert node, '{} is not a valid Node.'.format(node_id)
can_read = node.has_permission(user, READ)
can_read_children = node.has_permission_on_children(user, 'read')
if not can_read and not can_read_children:
continue
contributors = []
for contributor in node.contributors:
contributors.append({
'id': contributor._id,
'is_admin': node.has_permission(contributor, ADMIN),
'is_confirmed': contributor.is_confirmed
})
affiliated_institutions = [{
'id': affiliated_institution.pk,
'name': affiliated_institution.name
} for affiliated_institution in node.affiliated_institutions]
children = []
# List project/node if user has at least 'read' permissions (contributor or admin viewer) or if
# user is contributor on a component of the project/node
children.extend(node_child_tree(
user,
[
n._id
for n in node.nodes
if n.primary and
not n.is_deleted
]
))
item = {
'node': {
'id': node_id,
'url': node.url if can_read else '',
'title': node.title if can_read else 'Private Project',
'is_public': node.is_public,
'contributors': contributors,
'visible_contributors': node.visible_contributor_ids,
'is_admin': node.has_permission(user, ADMIN),
'affiliated_institutions': affiliated_institutions
},
'user_id': user._id,
'children': children,
'kind': 'folder' if not node.node__parent or not node.parent_node.has_permission(user, 'read') else 'node',
'nodeType': node.project_or_component,
'category': node.category,
'permissions': {
'view': can_read,
'is_admin': node.has_permission(user, 'read')
}
}
items.append(item)
return items
@must_be_logged_in
@must_be_valid_project
def get_node_tree(auth, **kwargs):
node = kwargs.get('node') or kwargs['project']
tree = node_child_tree(auth.user, [node._id])
return tree
@must_be_contributor_or_public
def get_forks(auth, node, **kwargs):
fork_list = node.forks.sort('-forked_date')
return _render_nodes(nodes=fork_list, auth=auth)
@must_be_contributor_or_public
def get_registrations(auth, node, **kwargs):
# get all undeleted registrations, including archiving
sorted_registrations = node.registrations_all.sort('-registered_date')
undeleted_registrations = [n for n in sorted_registrations if not n.is_deleted]
return _render_nodes(undeleted_registrations, auth)
@must_be_valid_project
@must_have_permission(ADMIN)
def project_generate_private_link_post(auth, node, **kwargs):
""" creata a new private link object and add it to the node and its selected children"""
node_ids = request.json.get('node_ids', [])
name = request.json.get('name', '')
anonymous = request.json.get('anonymous', False)
if node._id not in node_ids:
node_ids.insert(0, node._id)
nodes = [Node.load(node_id) for node_id in node_ids]
try:
new_link = new_private_link(
name=name, user=auth.user, nodes=nodes, anonymous=anonymous
)
except ValidationValueError as e:
raise HTTPError(
http.BAD_REQUEST,
data=dict(message_long=e.message)
)
return new_link
@must_be_valid_project
@must_have_permission(ADMIN)
def project_private_link_edit(auth, **kwargs):
name = request.json.get('value', '')
try:
validate_title(name)
except ValidationValueError as e:
message = 'Invalid link name.' if e.message == 'Invalid title.' else e.message
raise HTTPError(
http.BAD_REQUEST,
data=dict(message_long=message)
)
private_link_id = request.json.get('pk', '')
private_link = PrivateLink.load(private_link_id)
if private_link:
new_name = strip_html(name)
private_link.name = new_name
private_link.save()
return new_name
else:
raise HTTPError(
http.BAD_REQUEST,
data=dict(message_long='View-only link not found.')
)
def _serialize_node_search(node):
"""Serialize a node for use in pointer search.
:param Node node: Node to serialize
:return: Dictionary of node data
"""
data = {
'id': node._id,
'title': node.title,
'etal': len(node.visible_contributors) > 1,
'isRegistration': node.is_registration
}
if node.is_registration:
data['title'] += ' (registration)'
data['dateRegistered'] = node.registered_date.isoformat()
else:
data['dateCreated'] = node.date_created.isoformat()
data['dateModified'] = node.date_modified.isoformat()
first_author = node.visible_contributors[0]
data['firstAuthor'] = first_author.family_name or first_author.given_name or first_author.full_name
return data
@must_be_logged_in
def search_node(auth, **kwargs):
"""
"""
# Get arguments
node = Node.load(request.json.get('nodeId'))
include_public = request.json.get('includePublic')
size = float(request.json.get('size', '5').strip())
page = request.json.get('page', 0)
query = request.json.get('query', '').strip()
start = (page * size)
if not query:
return {'nodes': []}
# Build ODM query
title_query = Q('title', 'icontains', query)
not_deleted_query = Q('is_deleted', 'eq', False)
visibility_query = Q('contributors', 'eq', auth.user)
no_folders_query = Q('is_collection', 'eq', False)
if include_public:
visibility_query = visibility_query | Q('is_public', 'eq', True)
odm_query = title_query & not_deleted_query & visibility_query & no_folders_query
# Exclude current node from query if provided
if node:
nin = [node._id] + node.node_ids
odm_query = (
odm_query &
Q('_id', 'nin', nin)
)
nodes = Node.find(odm_query)
count = nodes.count()
pages = math.ceil(count / size)
validate_page_num(page, pages)
return {
'nodes': [
_serialize_node_search(each)
for each in islice(nodes, start, start + size)
if each.contributors
],
'total': count,
'pages': pages,
'page': page
}
def _add_pointers(node, pointers, auth):
"""
:param Node node: Node to which pointers will be added
:param list pointers: Nodes to add as pointers
"""
added = False
for pointer in pointers:
node.add_pointer(pointer, auth, save=False)
added = True
if added:
node.save()
@collect_auth
def move_pointers(auth):
"""Move pointer from one node to another node.
"""
from_node_id = request.json.get('fromNodeId')
to_node_id = request.json.get('toNodeId')
pointers_to_move = request.json.get('pointerIds')
if from_node_id is None or to_node_id is None or pointers_to_move is None:
raise HTTPError(http.BAD_REQUEST)
from_node = Node.load(from_node_id)
to_node = Node.load(to_node_id)
if to_node is None or from_node is None:
raise HTTPError(http.BAD_REQUEST)
for pointer_to_move in pointers_to_move:
pointer_id = from_node.pointing_at(pointer_to_move)
pointer_node = Node.load(pointer_to_move)
pointer = Pointer.load(pointer_id)
if pointer is None:
raise HTTPError(http.BAD_REQUEST)
try:
from_node.rm_pointer(pointer, auth=auth)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
from_node.save()
try:
_add_pointers(to_node, [pointer_node], auth)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
return {}, 200, None
@collect_auth
def add_pointer(auth):
"""Add a single pointer to a node using only JSON parameters
"""
to_node_id = request.json.get('toNodeID')
pointer_to_move = request.json.get('pointerID')
if not (to_node_id and pointer_to_move):
raise HTTPError(http.BAD_REQUEST)
pointer = Node.load(pointer_to_move)
to_node = Node.load(to_node_id)
try:
_add_pointers(to_node, [pointer], auth)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
@must_have_permission(WRITE)
@must_not_be_registration
def add_pointers(auth, node, **kwargs):
"""Add pointers to a node.
"""
node_ids = request.json.get('nodeIds')
if not node_ids:
raise HTTPError(http.BAD_REQUEST)
nodes = [
Node.load(node_id)
for node_id in node_ids
]
try:
_add_pointers(node, nodes, auth)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
return {}
@must_have_permission(WRITE)
@must_not_be_registration
def remove_pointer(auth, node, **kwargs):
"""Remove a pointer from a node, raising a 400 if the pointer is not
in `node.nodes`.
"""
    # TODO: since this is a DELETE request, we shouldn't use the request body;
    # put the pointer id in the URL instead
pointer_id = request.json.get('pointerId')
if pointer_id is None:
raise HTTPError(http.BAD_REQUEST)
pointer = Pointer.load(pointer_id)
if pointer is None:
raise HTTPError(http.BAD_REQUEST)
try:
node.rm_pointer(pointer, auth=auth)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
node.save()
@must_be_valid_project # injects project
@must_have_permission(WRITE)
@must_not_be_registration
def remove_pointer_from_folder(auth, node, pointer_id, **kwargs):
"""Remove a pointer from a node, raising a 400 if the pointer is not
in `node.nodes`.
"""
if pointer_id is None:
raise HTTPError(http.BAD_REQUEST)
pointer_id = node.pointing_at(pointer_id)
pointer = Pointer.load(pointer_id)
if pointer is None:
raise HTTPError(http.BAD_REQUEST)
try:
node.rm_pointer(pointer, auth=auth)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
node.save()
@must_have_permission(WRITE)
@must_not_be_registration
def fork_pointer(auth, node, **kwargs):
"""Fork a pointer. Raises BAD_REQUEST if pointer not provided, not found,
or not present in `nodes`.
"""
pointer_id = request.json.get('pointerId')
pointer = Pointer.load(pointer_id)
if pointer is None:
# TODO: Change this to 404?
raise HTTPError(http.BAD_REQUEST)
try:
node.fork_pointer(pointer, auth=auth, save=True)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
def abbrev_authors(node):
lead_author = node.visible_contributors[0]
ret = lead_author.family_name or lead_author.given_name or lead_author.fullname
if len(node.visible_contributor_ids) > 1:
ret += ' et al.'
return ret
def serialize_pointer(pointer, auth):
node = get_pointer_parent(pointer)
if node.can_view(auth):
return {
'id': node._id,
'url': node.url,
'title': node.title,
'authorShort': abbrev_authors(node),
}
return {
'url': None,
'title': 'Private Component',
'authorShort': 'Private Author(s)',
}
@must_be_contributor_or_public
def get_pointed(auth, node, **kwargs):
"""View that returns the pointers for a project."""
# exclude folders
return {'pointed': [
serialize_pointer(each, auth)
for each in node.pointed
if not get_pointer_parent(each).is_collection
]}
| apache-2.0 | 1,658,704,265,551,296,800 | 31.497063 | 130 | 0.605657 | false |
kogotko/carburetor | openstack_dashboard/dashboards/project/firewalls/views.py | 1 | 16454 | # Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tabs
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.firewalls \
import forms as fw_forms
from openstack_dashboard.dashboards.project.firewalls \
import tabs as fw_tabs
from openstack_dashboard.dashboards.project.firewalls \
import workflows as fw_workflows
AddRouterToFirewall = fw_forms.AddRouterToFirewall
InsertRuleToPolicy = fw_forms.InsertRuleToPolicy
RemoveRouterFromFirewall = fw_forms.RemoveRouterFromFirewall
RemoveRuleFromPolicy = fw_forms.RemoveRuleFromPolicy
UpdateFirewall = fw_forms.UpdateFirewall
UpdatePolicy = fw_forms.UpdatePolicy
UpdateRule = fw_forms.UpdateRule
FirewallDetailsTabs = fw_tabs.FirewallDetailsTabs
FirewallTabs = fw_tabs.FirewallTabs
PolicyDetailsTabs = fw_tabs.PolicyDetailsTabs
RuleDetailsTabs = fw_tabs.RuleDetailsTabs
AddFirewall = fw_workflows.AddFirewall
AddPolicy = fw_workflows.AddPolicy
AddRule = fw_workflows.AddRule
class IndexView(tabs.TabbedTableView):
tab_group_class = FirewallTabs
template_name = 'project/firewalls/details_tabs.html'
page_title = _("Firewalls")
class AddRuleView(workflows.WorkflowView):
workflow_class = AddRule
template_name = "project/firewalls/addrule.html"
page_title = _("Add New Rule")
class AddPolicyView(workflows.WorkflowView):
workflow_class = AddPolicy
template_name = "project/firewalls/addpolicy.html"
page_title = _("Add New Policy")
class AddFirewallView(workflows.WorkflowView):
workflow_class = AddFirewall
template_name = "project/firewalls/addfirewall.html"
page_title = _("Add New Firewall")
def get_workflow(self):
if api.neutron.is_extension_supported(self.request,
'fwaasrouterinsertion'):
AddFirewall.register(fw_workflows.SelectRoutersStep)
workflow = super(AddFirewallView, self).get_workflow()
return workflow
class RuleDetailsView(tabs.TabView):
tab_group_class = (RuleDetailsTabs)
template_name = 'horizon/common/_detail.html'
page_title = "{{ rule.name|default:rule.id }}"
failure_url = reverse_lazy('horizon:project:firewalls:index')
def get_context_data(self, **kwargs):
context = super(RuleDetailsView, self).get_context_data(**kwargs)
rule = self.get_data()
table = fw_tabs.RulesTable(self.request)
breadcrumb = [
(_("Rules"), reverse_lazy('horizon:project:firewalls:rules'))]
context["custom_breadcrumb"] = breadcrumb
context["rule"] = rule
context["url"] = self.failure_url
context["actions"] = table.render_row_actions(rule)
return context
@memoized.memoized_method
def get_data(self):
try:
rule_id = self.kwargs['rule_id']
rule = api.fwaas.rule_get(self.request, rule_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve rule details.'),
redirect=self.failure_url)
return rule
def get_tabs(self, request, *args, **kwargs):
rule = self.get_data()
return self.tab_group_class(request, rule=rule, **kwargs)
class PolicyDetailsView(tabs.TabView):
tab_group_class = (PolicyDetailsTabs)
template_name = 'horizon/common/_detail.html'
page_title = "{{ policy.name|default:policy.id }}"
failure_url = reverse_lazy('horizon:project:firewalls:index')
def get_context_data(self, **kwargs):
context = super(PolicyDetailsView, self).get_context_data(**kwargs)
policy = self.get_data()
table = fw_tabs.PoliciesTable(self.request)
breadcrumb = [
(_("Policies"),
reverse_lazy('horizon:project:firewalls:policies'))]
context["custom_breadcrumb"] = breadcrumb
context["policy"] = policy
context["url"] = self.failure_url
context["actions"] = table.render_row_actions(policy)
return context
@memoized.memoized_method
def get_data(self):
try:
policy_id = self.kwargs['policy_id']
policy = api.fwaas.policy_get(self.request, policy_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve policy details.'),
redirect=self.failure_url)
return policy
def get_tabs(self, request, *args, **kwargs):
policy = self.get_data()
return self.tab_group_class(request, policy=policy, **kwargs)
class FirewallDetailsView(tabs.TabView):
tab_group_class = (FirewallDetailsTabs)
template_name = 'horizon/common/_detail.html'
page_title = "{{ firewall.name|default:firewall.id }}"
failure_url = reverse_lazy('horizon:project:firewalls:index')
def get_context_data(self, **kwargs):
context = super(FirewallDetailsView, self).get_context_data(**kwargs)
firewall = self.get_data()
routers = self.get_routers_data(firewall)
table = fw_tabs.FirewallsTable(self.request)
context["firewall"] = firewall
context["routers"] = routers
context["url"] = self.failure_url
context["actions"] = table.render_row_actions(firewall)
return context
@memoized.memoized_method
def get_data(self):
try:
firewall_id = self.kwargs['firewall_id']
firewall = api.fwaas.firewall_get(self.request, firewall_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve firewall details.'),
redirect=self.failure_url)
return firewall
@memoized.memoized_method
def get_routers_data(self, firewall):
routers = []
try:
if api.neutron.is_extension_supported(self.request,
'fwaasrouterinsertion'):
tenant_id = self.request.user.tenant_id
tenant_routers = api.neutron.router_list(self.request,
tenant_id=tenant_id)
router_ids = firewall.get_dict()['router_ids']
routers = [r for r in tenant_routers
if r['id'] in router_ids]
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve list of routers.'), )
return routers
def get_tabs(self, request, *args, **kwargs):
firewall = self.get_data()
return self.tab_group_class(request, firewall=firewall, **kwargs)
class UpdateRuleView(forms.ModalFormView):
form_class = UpdateRule
form_id = "update_rule_form"
template_name = "project/firewalls/updaterule.html"
context_object_name = 'rule'
submit_label = _("Save Changes")
submit_url = "horizon:project:firewalls:updaterule"
success_url = reverse_lazy("horizon:project:firewalls:index")
page_title = _("Edit Rule {{ name }}")
def get_context_data(self, **kwargs):
context = super(UpdateRuleView, self).get_context_data(**kwargs)
context['rule_id'] = self.kwargs['rule_id']
args = (self.kwargs['rule_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
obj = self._get_object()
if obj:
context['name'] = obj.name_or_id
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
rule_id = self.kwargs['rule_id']
try:
rule = api.fwaas.rule_get(self.request, rule_id)
return rule
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve rule details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
rule = self._get_object()
initial = rule.get_dict()
protocol = initial['protocol']
initial['protocol'] = protocol.upper() if protocol else 'ANY'
initial['action'] = initial['action'].upper()
return initial
class UpdatePolicyView(forms.ModalFormView):
form_class = UpdatePolicy
form_id = "update_policy_form"
template_name = "project/firewalls/updatepolicy.html"
context_object_name = 'policy'
submit_label = _("Save Changes")
submit_url = "horizon:project:firewalls:updatepolicy"
success_url = reverse_lazy("horizon:project:firewalls:index")
page_title = _("Edit Policy {{ name }}")
def get_context_data(self, **kwargs):
context = super(UpdatePolicyView, self).get_context_data(**kwargs)
context["policy_id"] = self.kwargs['policy_id']
args = (self.kwargs['policy_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
obj = self._get_object()
if obj:
context['name'] = obj.name_or_id
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
policy_id = self.kwargs['policy_id']
try:
policy = api.fwaas.policy_get(self.request, policy_id)
return policy
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve policy details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
policy = self._get_object()
initial = policy.get_dict()
return initial
class UpdateFirewallView(forms.ModalFormView):
form_class = UpdateFirewall
form_id = "update_firewall_form"
template_name = "project/firewalls/updatefirewall.html"
context_object_name = 'firewall'
submit_label = _("Save Changes")
submit_url = "horizon:project:firewalls:updatefirewall"
success_url = reverse_lazy("horizon:project:firewalls:index")
page_title = _("Edit Firewall {{ name }}")
def get_context_data(self, **kwargs):
context = super(UpdateFirewallView, self).get_context_data(**kwargs)
context["firewall_id"] = self.kwargs['firewall_id']
args = (self.kwargs['firewall_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
obj = self._get_object()
if obj:
context['name'] = obj.name
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
firewall_id = self.kwargs['firewall_id']
try:
firewall = api.fwaas.firewall_get(self.request,
firewall_id)
return firewall
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve firewall details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
firewall = self._get_object()
initial = firewall.get_dict()
return initial
class InsertRuleToPolicyView(forms.ModalFormView):
form_class = InsertRuleToPolicy
form_id = "update_policy_form"
template_name = "project/firewalls/insert_rule_to_policy.html"
context_object_name = 'policy'
submit_url = "horizon:project:firewalls:insertrule"
submit_label = _("Save Changes")
success_url = reverse_lazy("horizon:project:firewalls:index")
page_title = _("Insert Rule to Policy")
def get_context_data(self, **kwargs):
context = super(InsertRuleToPolicyView,
self).get_context_data(**kwargs)
context["policy_id"] = self.kwargs['policy_id']
args = (self.kwargs['policy_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
obj = self._get_object()
if obj:
context['name'] = obj.name_or_id
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
policy_id = self.kwargs['policy_id']
try:
policy = api.fwaas.policy_get(self.request, policy_id)
return policy
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve policy details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
policy = self._get_object()
initial = policy.get_dict()
initial['policy_id'] = initial['id']
return initial
class RemoveRuleFromPolicyView(forms.ModalFormView):
form_class = RemoveRuleFromPolicy
form_id = "update_policy_form"
template_name = "project/firewalls/remove_rule_from_policy.html"
context_object_name = 'policy'
submit_label = _("Save Changes")
submit_url = "horizon:project:firewalls:removerule"
success_url = reverse_lazy("horizon:project:firewalls:index")
page_title = _("Remove Rule from Policy")
def get_context_data(self, **kwargs):
context = super(RemoveRuleFromPolicyView,
self).get_context_data(**kwargs)
context["policy_id"] = self.kwargs['policy_id']
args = (self.kwargs['policy_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
obj = self._get_object()
if obj:
context['name'] = obj.name_or_id
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
policy_id = self.kwargs['policy_id']
try:
policy = api.fwaas.policy_get(self.request, policy_id)
return policy
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve policy details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
policy = self._get_object()
initial = policy.get_dict()
initial['policy_id'] = initial['id']
return initial
class RouterCommonView(forms.ModalFormView):
form_id = "update_firewall_form"
context_object_name = 'firewall'
submit_label = _("Save Changes")
success_url = reverse_lazy("horizon:project:firewalls:index")
def get_context_data(self, **kwargs):
context = super(RouterCommonView,
self).get_context_data(**kwargs)
context["firewall_id"] = self.kwargs['firewall_id']
args = (self.kwargs['firewall_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
obj = self._get_object()
if obj:
context['name'] = obj.name_or_id
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
firewall_id = self.kwargs['firewall_id']
try:
firewall = api.fwaas.firewall_get(self.request, firewall_id)
return firewall
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve firewall details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
firewall = self._get_object()
initial = firewall.get_dict()
return initial
class AddRouterToFirewallView(RouterCommonView):
form_class = AddRouterToFirewall
template_name = "project/firewalls/add_router_to_firewall.html"
submit_url = "horizon:project:firewalls:addrouter"
page_title = _("Add Router to Firewall")
class RemoveRouterFromFirewallView(RouterCommonView):
form_class = RemoveRouterFromFirewall
template_name = "project/firewalls/remove_router_from_firewall.html"
submit_url = "horizon:project:firewalls:removerouter"
page_title = _("Remove Router from Firewall")
| apache-2.0 | -1,994,469,539,088,781,300 | 36.480638 | 78 | 0.633402 | false |
tensorflow/examples | tensorflow_examples/models/densenet/train.py | 1 | 6447 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Densenet Training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
import tensorflow as tf
from tensorflow_examples.models.densenet import densenet
from tensorflow_examples.models.densenet import utils
class Train(object):
"""Train class.
Args:
epochs: Number of epochs
enable_function: If True, wraps the train_step and test_step in tf.function
model: Densenet model.
"""
def __init__(self, epochs, enable_function, model):
self.epochs = epochs
self.enable_function = enable_function
self.autotune = tf.data.experimental.AUTOTUNE
self.loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True)
self.optimizer = tf.keras.optimizers.SGD(learning_rate=0.1,
momentum=0.9, nesterov=True)
self.train_loss_metric = tf.keras.metrics.Mean(name='train_loss')
self.train_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy(
name='train_accuracy')
self.test_loss_metric = tf.keras.metrics.Mean(name='test_loss')
self.test_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy(
name='test_accuracy')
self.model = model
def decay(self, epoch):
if epoch < 150:
return 0.1
if epoch >= 150 and epoch < 225:
return 0.01
if epoch >= 225:
return 0.001
def keras_fit(self, train_dataset, test_dataset):
self.model.compile(
optimizer=self.optimizer, loss=self.loss_object, metrics=['accuracy'])
history = self.model.fit(
train_dataset, epochs=self.epochs, validation_data=test_dataset,
verbose=2, callbacks=[tf.keras.callbacks.LearningRateScheduler(
self.decay)])
return (history.history['loss'][-1],
history.history['accuracy'][-1],
history.history['val_loss'][-1],
history.history['val_accuracy'][-1])
def train_step(self, image, label):
"""One train step.
Args:
image: Batch of images.
label: corresponding label for the batch of images.
"""
with tf.GradientTape() as tape:
predictions = self.model(image, training=True)
loss = self.loss_object(label, predictions)
loss += sum(self.model.losses)
gradients = tape.gradient(loss, self.model.trainable_variables)
self.optimizer.apply_gradients(
zip(gradients, self.model.trainable_variables))
self.train_loss_metric(loss)
self.train_acc_metric(label, predictions)
def test_step(self, image, label):
"""One test step.
Args:
image: Batch of images.
label: corresponding label for the batch of images.
"""
predictions = self.model(image, training=False)
loss = self.loss_object(label, predictions)
self.test_loss_metric(loss)
self.test_acc_metric(label, predictions)
def custom_loop(self, train_dataset, test_dataset):
"""Custom training and testing loop.
Args:
train_dataset: Training dataset
test_dataset: Testing dataset
Returns:
train_loss, train_accuracy, test_loss, test_accuracy
"""
if self.enable_function:
self.train_step = tf.function(self.train_step)
self.test_step = tf.function(self.test_step)
for epoch in range(self.epochs):
self.optimizer.learning_rate = self.decay(epoch)
for image, label in train_dataset:
self.train_step(image, label)
for test_image, test_label in test_dataset:
self.test_step(test_image, test_label)
template = ('Epoch: {}, Train Loss: {}, Train Accuracy: {}, '
'Test Loss: {}, Test Accuracy: {}')
print(
template.format(epoch, self.train_loss_metric.result(),
self.train_acc_metric.result(),
self.test_loss_metric.result(),
self.test_acc_metric.result()))
if epoch != self.epochs - 1:
self.train_loss_metric.reset_states()
self.train_acc_metric.reset_states()
self.test_loss_metric.reset_states()
self.test_acc_metric.reset_states()
return (self.train_loss_metric.result().numpy(),
self.train_acc_metric.result().numpy(),
self.test_loss_metric.result().numpy(),
self.test_acc_metric.result().numpy())
def run_main(argv):
"""Passes the flags to main.
Args:
argv: argv
"""
del argv
kwargs = utils.flags_dict()
main(**kwargs)
def main(epochs,
enable_function,
buffer_size,
batch_size,
mode,
growth_rate,
output_classes,
depth_of_model=None,
num_of_blocks=None,
num_layers_in_each_block=None,
data_format='channels_last',
bottleneck=True,
compression=0.5,
weight_decay=1e-4,
dropout_rate=0.,
pool_initial=False,
include_top=True,
train_mode='custom_loop',
data_dir=None):
model = densenet.DenseNet(mode, growth_rate, output_classes, depth_of_model,
num_of_blocks, num_layers_in_each_block,
data_format, bottleneck, compression, weight_decay,
dropout_rate, pool_initial, include_top)
train_obj = Train(epochs, enable_function, model)
train_dataset, test_dataset, _ = utils.create_dataset(
buffer_size, batch_size, data_format, data_dir)
print('Training...')
if train_mode == 'custom_loop':
return train_obj.custom_loop(train_dataset, test_dataset)
elif train_mode == 'keras_fit':
return train_obj.keras_fit(train_dataset, test_dataset)
if __name__ == '__main__':
utils.define_densenet_flags()
app.run(run_main)
| apache-2.0 | 9,165,127,955,172,058,000 | 31.725888 | 80 | 0.633473 | false |
taxpon/ndb_prop_gen | tests/test_arg.py | 1 | 1669 | import unittest
from ndb_prop_gen.arg import Arg
from ndb_prop_gen.arg import find_prop_type
sample_property = """\
@property
def sample_float(self):
return self._sample_float
"""
class TestArg(unittest.TestCase):
def setUp(self):
self.sample1 = Arg("sample_float", "Float", 0.0)
self.sample2 = Arg("sample_string", "string", "")
pass
def tearDown(self):
pass
def test_find_prop_type(self):
# non fuzzy
self.assertEqual(find_prop_type("float"), "ndb.Float")
# fuzzy
self.assertEqual(find_prop_type("int"), "ndb.Integer")
self.assertEqual(find_prop_type("Int"), "ndb.Integer")
self.assertEqual(find_prop_type("integer"), "ndb.Integer")
self.assertEqual(find_prop_type("integer"), "ndb.Integer")
# non ndb
self.assertEqual(find_prop_type("Original"), "Original")
def test_class_arg(self):
self.assertEqual(self.sample1.class_arg, "sample_float=0.0")
self.assertEqual(self.sample2.class_arg, "sample_string=\"\"")
def test_class_init(self):
self.assertEqual(self.sample1.class_init, "self._sample_float = sample_float")
self.assertEqual(self.sample2.class_init, "self._sample_string = sample_string")
def test_class_property(self):
self.assertEqual(self.sample1.class_property, sample_property)
def test_model_init(self):
self.assertEqual(self.sample1.model_init, "sample_float = ndb.FloatProperty(default=0.0)")
self.assertEqual(self.sample2.model_init, "sample_string = ndb.StringProperty(default=\"\")")
if __name__ == "__main__":
unittest.main()
| mit | -7,284,871,098,851,032,000 | 31.096154 | 101 | 0.6429 | false |
maas/maas | src/maasserver/clusterrpc/tests/test_utils.py | 1 | 10192 | # Copyright 2014-2016 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Tests for :py:mod:`maasserver.clusterrpc.utils`."""
import random
from unittest.mock import Mock, sentinel
from django.core.exceptions import NON_FIELD_ERRORS, ValidationError
from fixtures import FakeLogger
from testtools.matchers import Equals
from twisted.python.failure import Failure
from maasserver.clusterrpc import utils
from maasserver.clusterrpc.utils import call_racks_synchronously
from maasserver.node_action import RPC_EXCEPTIONS
from maasserver.testing.factory import factory
from maasserver.testing.testcase import MAASServerTestCase
from maasserver.utils import asynchronous
from maastesting.matchers import (
DocTestMatches,
MockCalledOnceWith,
MockNotCalled,
)
from provisioningserver.rpc.exceptions import NoConnectionsAvailable
class MockFailure(Failure):
"""Fake twisted Failure object.
Purposely doesn't call super().__init__().
"""
def __init__(self):
self.type = type(self)
self.frames = []
self.value = "Mock failure"
class TestCallClusters(MAASServerTestCase):
"""Tests for `utils.call_clusters`."""
def test_gets_clients(self):
rack = factory.make_RackController()
getClientFor = self.patch(utils, "getClientFor")
getClientFor.return_value = lambda: None
async_gather = self.patch(asynchronous, "gatherCallResults")
async_gather.return_value = []
# call_clusters returns with nothing because we patched out
# asynchronous.gather, but we're interested in the side-effect:
# getClientFor has been called for the accepted nodegroup.
self.assertItemsEqual([], utils.call_clusters(sentinel.command))
self.assertThat(getClientFor, MockCalledOnceWith(rack.system_id))
def test_with_successful_callbacks(self):
rack = factory.make_RackController()
getClientFor = self.patch(utils, "getClientFor")
getClientFor.return_value = lambda: None
partial = self.patch(utils, "partial")
partial.return_value = sentinel.partial
async_gather = self.patch(asynchronous, "gatherCallResults")
async_gather.return_value = (
result for result in [(sentinel.partial, sentinel.result)]
)
available_callback = Mock()
unavailable_callback = Mock()
success_callback = Mock()
failed_callback = Mock()
timeout_callback = Mock()
result = list(
utils.call_clusters(
sentinel.command,
available_callback=available_callback,
unavailable_callback=unavailable_callback,
success_callback=success_callback,
failed_callback=failed_callback,
timeout_callback=timeout_callback,
)
)
self.assertThat(result, Equals([sentinel.result]))
self.assertThat(available_callback, MockCalledOnceWith(rack))
self.assertThat(unavailable_callback, MockNotCalled())
self.assertThat(success_callback, MockCalledOnceWith(rack))
self.assertThat(failed_callback, MockNotCalled())
self.assertThat(timeout_callback, MockNotCalled())
def test_with_unavailable_callbacks(self):
logger = self.useFixture(FakeLogger("maasserver"))
rack = factory.make_RackController()
getClientFor = self.patch(utils, "getClientFor")
getClientFor.side_effect = NoConnectionsAvailable
partial = self.patch(utils, "partial")
partial.return_value = sentinel.partial
async_gather = self.patch(asynchronous, "gatherCallResults")
async_gather.return_value = iter([])
available_callback = Mock()
unavailable_callback = Mock()
success_callback = Mock()
failed_callback = Mock()
timeout_callback = Mock()
result = list(
utils.call_clusters(
sentinel.command,
available_callback=available_callback,
unavailable_callback=unavailable_callback,
success_callback=success_callback,
failed_callback=failed_callback,
timeout_callback=timeout_callback,
)
)
self.assertThat(result, Equals([]))
self.assertThat(available_callback, MockNotCalled())
self.assertThat(unavailable_callback, MockCalledOnceWith(rack))
self.assertThat(success_callback, MockNotCalled())
self.assertThat(failed_callback, MockNotCalled())
self.assertThat(timeout_callback, MockNotCalled())
self.assertThat(
logger.output, DocTestMatches("...Unable to get RPC connection...")
)
def test_with_failed_callbacks(self):
logger = self.useFixture(FakeLogger("maasserver"))
rack = factory.make_RackController()
getClientFor = self.patch(utils, "getClientFor")
getClientFor.return_value = lambda: None
partial = self.patch(utils, "partial")
partial.return_value = sentinel.partial
async_gather = self.patch(asynchronous, "gatherCallResults")
async_gather.return_value = (
result for result in [(sentinel.partial, MockFailure())]
)
available_callback = Mock()
unavailable_callback = Mock()
success_callback = Mock()
failed_callback = Mock()
timeout_callback = Mock()
result = list(
utils.call_clusters(
sentinel.command,
available_callback=available_callback,
unavailable_callback=unavailable_callback,
success_callback=success_callback,
failed_callback=failed_callback,
timeout_callback=timeout_callback,
)
)
self.assertThat(result, Equals([]))
self.assertThat(available_callback, MockCalledOnceWith(rack))
self.assertThat(unavailable_callback, MockNotCalled())
self.assertThat(success_callback, MockNotCalled())
self.assertThat(failed_callback, MockCalledOnceWith(rack))
self.assertThat(timeout_callback, MockNotCalled())
self.assertThat(
logger.output,
DocTestMatches(
"Exception during ... on rack controller...MockFailure: ..."
),
)
def test_with_timeout_callbacks(self):
logger = self.useFixture(FakeLogger("maasserver"))
rack = factory.make_RackController()
getClientFor = self.patch(utils, "getClientFor")
getClientFor.return_value = lambda: None
partial = self.patch(utils, "partial")
partial.return_value = sentinel.partial
async_gather = self.patch(asynchronous, "gatherCallResults")
async_gather.return_value = (result for result in [])
available_callback = Mock()
unavailable_callback = Mock()
success_callback = Mock()
failed_callback = Mock()
timeout_callback = Mock()
result = list(
utils.call_clusters(
sentinel.command,
available_callback=available_callback,
unavailable_callback=unavailable_callback,
success_callback=success_callback,
failed_callback=failed_callback,
timeout_callback=timeout_callback,
)
)
self.assertThat(result, Equals([]))
self.assertThat(available_callback, MockCalledOnceWith(rack))
self.assertThat(unavailable_callback, MockNotCalled())
self.assertThat(success_callback, MockNotCalled())
self.assertThat(failed_callback, MockNotCalled())
self.assertThat(timeout_callback, MockCalledOnceWith(rack))
self.assertThat(
logger.output, DocTestMatches("...RPC connection timed out...")
)
class TestCallRacksSynchronously(MAASServerTestCase):
"""Tests for `utils.call_rakcks_synchronously`."""
def test_gets_clients(self):
rack = factory.make_RackController()
getClientFor = self.patch(utils, "getClientFor")
getClientFor.return_value = lambda: None
async_gather = self.patch(asynchronous, "gatherCallResults")
async_gather.return_value = []
# call_clusters returns with nothing because we patched out
# asynchronous.gather, but we're interested in the side-effect:
# getClientFor has been called for the accepted nodegroup.
self.assertItemsEqual(
[], call_racks_synchronously(sentinel.command).results
)
self.assertThat(getClientFor, MockCalledOnceWith(rack.system_id))
class TestGetErrorMessageForException(MAASServerTestCase):
def test_returns_message_if_exception_has_one(self):
error_message = factory.make_name("exception")
self.assertEqual(
error_message,
utils.get_error_message_for_exception(Exception(error_message)),
)
def test_returns_message_if_exception_has_none(self):
exception_class = random.choice(RPC_EXCEPTIONS)
error_message = (
"Unexpected exception: %s. See "
"/var/log/maas/regiond.log "
"on the region server for more information."
% exception_class.__name__
)
self.assertEqual(
error_message,
utils.get_error_message_for_exception(exception_class()),
)
def test_returns_cluster_name_in_no_connections_error_message(self):
rack = factory.make_RackController()
exception = NoConnectionsAvailable(
"Unable to connect!", uuid=rack.system_id
)
self.assertEqual(
"Unable to connect to rack controller '%s' (%s); no connections "
"available." % (rack.hostname, rack.system_id),
utils.get_error_message_for_exception(exception),
)
def test_ValidationError(self):
exception = ValidationError({NON_FIELD_ERRORS: "Some error"})
self.assertEqual(
utils.get_error_message_for_exception(exception), "Some error"
)
| agpl-3.0 | -8,039,558,217,288,535,000 | 39.444444 | 79 | 0.647174 | false |
stadelmanma/OpenPNM | OpenPNM/Network/__Delaunay__.py | 1 | 16258 | # -*- coding: utf-8 -*-
"""
===============================================================================
Delaunay: Generate random networks based on Delaunay Tessellations
===============================================================================
"""
import sys
import scipy as sp
import numpy as np
import OpenPNM.Utilities.vertexops as vo
import scipy.sparse as sprs
import scipy.spatial as sptl
import scipy.ndimage as spim
from scipy.spatial import Voronoi
from OpenPNM.Network import GenericNetwork
from OpenPNM.Base import logging
logger = logging.getLogger(__name__)
class Delaunay(GenericNetwork):
r"""
This class contains the methods for creating a *Delaunay* network topology
based connecting pores with a Delaunay tessellation.
Parameters
----------
name : string
A unique name for the network
domain_size : list of floats, [Lx,Ly,Lz]
Bounding cube for internal pore positions
num_pores : int
Number of pores to place randomly within domain
prob : 3D float array
Values should be between 0 and 1 as determines probability of point
with relative domain coordinates being kept. Array does not have to
be same size as domain because positions are re-scaled
base_points : [Np,3] float array
coordinates to use instead of random generation
Examples
--------
>>> import OpenPNM
>>> pn = OpenPNM.Network.Delaunay(num_pores=100,
... domain_size=[0.0001, 0.0001, 0.0001])
>>> pn.num_pores()
100
"""
def __init__(self, num_pores=None, domain_size=None, prob=None,
base_points=None, **kwargs):
"""
Create Delauny network object
"""
super().__init__(**kwargs)
self.generate(num_pores, domain_size, prob, base_points)
def generate(self, num_pores, domain_size, prob, base_points):
r"""
Method to trigger the generation of the network
"""
logger.info('Start of network topology generation')
self._generate_setup(num_pores, domain_size, base_points)
if base_points is not None:
try:
dim = sp.shape(base_points)[1]
if dim != 3:
raise Exception('base points must be 3D')
except:
raise Exception('base points must be 3D')
self['pore.coords'] = base_points
else:
self._generate_pores(prob)
self._generate_throats()
logger.debug('Network generation complete')
def _generate_setup(self, num_pores, domain_size, base_points):
r"""
Perform applicable preliminary checks and calculations required for
generation
"""
logger.debug('generate_setup: Perform preliminary calculations and checks')
if domain_size is None:
raise Exception('domain_size must always be specified')
if num_pores is None and base_points is None:
raise Exception('num_pores or base_points must be specified')
elif num_pores is None and base_points is not None:
num_pores = len(base_points)
elif num_pores is not None and base_points is not None:
logger.warning('both num_pores and base_points arguments given' +
' num_pores over-written')
num_pores = len(base_points)
self._Lx = domain_size[0]
self._Ly = domain_size[1]
self._Lz = domain_size[2]
self._Np = num_pores
r"""
TODO: Fix this, btype should be received as an argument
"""
self._btype = [0, 0, 0]
def _generate_pores(self, prob=None):
r"""
Generate the pores with numbering scheme.
"""
logger.info('Place randomly located pores in the domain')
if prob is not None:
coords = []
i = 0
while i < self._Np:
coord = np.random.rand(3)
[indx, indy, indz] = np.floor(coord*np.shape(prob)).astype(int)
p = prob[indx][indy][indz]
if np.random.rand(1) <= p:
coords.append(coord)
i += 1
coords = np.asarray(coords)
else:
coords = np.random.random([self._Np, 3])
coords *= np.array([self._Lx, self._Ly, self._Lz])
self['pore.coords'] = coords
def _generate_throats(self):
r"""
Generate the throats connections
"""
logger.info('Define connections between pores')
pts = self['pore.coords']
Np = len(pts)
# Generate 6 dummy domains to pad onto each face of real domain This
# prevents surface pores from making long range connections to each other
x, y, z = self['pore.coords'].T
if x.max() > self._Lx:
Lx = x.max()*1.05
else:
Lx = self._Lx
if y.max() > self._Ly:
Ly = y.max()*1.05
else:
Ly = self._Ly
if z.max() > self._Lz:
Lz = z.max()*1.05
else:
Lz = self._Lz
# Reflect in X = Lx and 0
Pxp = pts.copy()
Pxp[:, 0] = 2*Lx-Pxp[:, 0]
Pxm = pts.copy()
Pxm[:, 0] = Pxm[:, 0]*(-1)
# Reflect in Y = Ly and 0
Pyp = pts.copy()
Pyp[:, 1] = 2*Ly-Pxp[:, 1]
Pym = pts.copy()
Pym[:, 1] = Pxm[:, 1]*(-1)
# Reflect in Z = Lz and 0
Pzp = pts.copy()
Pzp[:, 2] = 2*Lz-Pxp[:, 2]
Pzm = pts.copy()
Pzm[:, 2] = Pxm[:, 2]*(-1)
# Add dummy domains to real domain
# Order important for boundary logic
pts = np.vstack((pts, Pxp, Pxm, Pyp, Pym, Pzp, Pzm))
# Perform tessellation
logger.debug('Beginning tessellation')
Tri = sptl.Delaunay(pts)
logger.debug('Converting tessellation to adjacency matrix')
adjmat = sprs.lil_matrix((Np, Np), dtype=int)
for i in sp.arange(0, sp.shape(Tri.simplices)[0]):
# Keep only simplices that are fully in real domain
# this used to be vectorize, but it stopped working...change in scipy?
for j in Tri.simplices[i]:
if j < Np:
adjmat[j, Tri.simplices[i][Tri.simplices[i] < Np]] = 1
# Remove duplicate (lower triangle) and self connections (diagonal)
# and convert to coo
adjmat = sprs.triu(adjmat, k=1, format="coo")
logger.debug('Conversion to adjacency matrix complete')
self['throat.conns'] = sp.vstack((adjmat.row, adjmat.col)).T
self['pore.all'] = np.ones(len(self['pore.coords']), dtype=bool)
self['throat.all'] = np.ones(len(self['throat.conns']), dtype=bool)
# Do Voronoi diagram - creating voronoi polyhedra around each pore and save
# vertex information
self._vor = Voronoi(pts)
all_vert_index = sp.ndarray(Np, dtype=object)
for i, polygon in enumerate(self._vor.point_region[0:Np]):
if -1 not in self._vor.regions[polygon]:
all_vert_index[i] = \
dict(zip(self._vor.regions[polygon],
self._vor.vertices[self._vor.regions[polygon]]))
# Add throat vertices by looking up vor.ridge_dict
throat_verts = sp.ndarray(len(self['throat.conns']), dtype=object)
for i, (p1, p2) in enumerate(self['throat.conns']):
try:
throat_verts[i] = \
dict(zip(self._vor.ridge_dict[(p1, p2)],
self._vor.vertices[self._vor.ridge_dict[(p1, p2)]]))
except KeyError:
try:
throat_verts[i] = \
dict(zip(self._vor.ridge_dict[(p2, p1)],
self._vor.vertices[self._vor.ridge_dict[(p2, p1)]]))
except KeyError:
logger.error('Throat Pair Not Found in Voronoi Ridge Dictionary')
self['pore.vert_index'] = all_vert_index
self['throat.vert_index'] = throat_verts
logger.debug(sys._getframe().f_code.co_name + ': End of method')
def add_boundaries(self):
r"""
This method identifies pores in the original Voronoi object that straddle a
boundary imposed by the reflection. The pore inside the original set of pores
(with index 0 - Np) is identified and the coordinates are saved. The vertices
making up the boundary throat are retrieved from the ridge_dict values and
these are used to identify which boundary the throat sits at.
A new pore and new connection is created with coordinates lying on the
boundary plane.
N.B This method will only work properly if the original network remains
unaltered i.e. not trimmed or extended
This preserves the connection between pore index on the network object
and the Voronoi object
The point of using this method is so that the throat vertices created by
the Voronoi object are preserved
This method will create boundary pores at the centre of the voronoi faces
that align with the outer planes of the domain.
The original pores in the domain are labelled internal and the boundary pores
are labelled external
Examples
--------
>>> import OpenPNM
>>> pn = OpenPNM.Network.Delaunay(num_pores=100,
... domain_size=[0.0001,0.0001,0.0001])
>>> pn.add_boundaries()
>>> pn.num_pores('boundary') > 0
True
"""
bound_conns = []
bound_coords = []
bound_vert_index = []
throat_vert_index = []
# Find boundary extent
[x_min, x_max, y_min, y_max, z_min, z_max] = \
vo.vertex_dimension(self, self.pores(), parm='minmax')
min_point = np.around(np.array([x_min, y_min, z_min]), 10)
max_point = np.around(np.array([x_max, y_max, z_max]), 10)
Np = self.num_pores()
Nt = self.num_throats()
new_throat_count = 0
# ridge_dict contains a dictionary where the key is a set of 2 neighbouring
# pores and the value is the vertex indices that form the throat or ridge
# between them
for p, v in self._vor.ridge_dict.items():
# If the vertex with index -1 is contained in list then the ridge is
# unbounded - ignore these
if np.all(np.asarray(v) >= 0):
# Boundary throats will be those connecting one pore inside the
# original set and one out
if (p[0] in range(Np) and p[1] not in range(Np)) or \
(p[0] not in range(Np) and p[1] in range(Np)):
# The dictionary key is not in numerical order so find the pore
# index inside
if p[0] in range(Np):
my_pore = p[0]
else:
my_pore = p[1]
my_pore_coord = self["pore.coords"][my_pore]
new_pore_coord = my_pore_coord.copy()
# Rounding necessary here to identify the plane as Voronoi can
# have 1e-17 and smaller errors
throat_verts = np.around(self._vor.vertices[v], 10)
# Find which plane we are aligned with (if any) and align
# new_pore with throat plane
if len(np.unique(throat_verts[:, 0])) == 1:
new_pore_coord[0] = np.unique(throat_verts[:, 0])
elif len(np.unique(throat_verts[:, 1])) == 1:
new_pore_coord[1] = np.unique(throat_verts[:, 1])
elif len(np.unique(throat_verts[:, 2])) == 1:
new_pore_coord[2] = np.unique(throat_verts[:, 2])
else:
new_pore_coord = np.mean(throat_verts, axis=0)
pass
bound_coords.append(new_pore_coord)
bound_conns.append(np.array([my_pore, new_throat_count + Np]))
bound_vert_index.append(dict(zip(v, throat_verts)))
throat_vert_index.append(dict(zip(v, throat_verts)))
new_throat_count += 1
# Add new pores and connections
self.extend(pore_coords=bound_coords, throat_conns=bound_conns)
# Record new number of pores
Mp = self.num_pores()
Mt = self.num_throats()
new_pore_ids = np.arange(Np, Mp)
new_throat_ids = np.arange(Nt, Mt)
# Identify which boundary the pore sits on
front = self.pores()[self['pore.coords'][:, 0] == min_point[0]]
back = self.pores()[self['pore.coords'][:, 0] == max_point[0]]
left = self.pores()[self['pore.coords'][:, 1] == min_point[1]]
right = self.pores()[self['pore.coords'][:, 1] == max_point[1]]
bottom = self.pores()[self['pore.coords'][:, 2] == min_point[2]]
top = self.pores()[self['pore.coords'][:, 2] == max_point[2]]
if len(top) == 0:
top = self.pores()[self['pore.coords'][:, 2] ==
np.asarray(bound_coords)[:, 2].max()]
# Assign labels
self['pore.boundary'] = False
self['pore.boundary'][new_pore_ids] = True
self['throat.boundary'] = False
self['throat.boundary'][new_throat_ids] = True
self['pore.right_boundary'] = False
self['pore.left_boundary'] = False
self['pore.front_boundary'] = False
self['pore.back_boundary'] = False
self['pore.top_boundary'] = False
self['pore.bottom_boundary'] = False
self['pore.right_boundary'][right] = True
self['pore.left_boundary'][left] = True
self['pore.front_boundary'][front] = True
self['pore.back_boundary'][back] = True
self['pore.top_boundary'][top] = True
self['pore.bottom_boundary'][bottom] = True
# Save the throat verts
self["pore.vert_index"][new_pore_ids] = bound_vert_index
self["throat.vert_index"][new_throat_ids] = throat_vert_index
def domain_length(self, face_1, face_2):
r"""
Returns the distance between two faces
No coplanar checking this is done in vertex_dimension
"""
L = vo.vertex_dimension(self, face_1, face_2, parm='length')
return L
def domain_area(self, face):
r"""
Returns the area of a face
No coplanar checking this is done in vertex_dimension
"""
A = vo.vertex_dimension(self, face, parm='area')
return A
def _export_vor_fibres(self):
r"""
Run through the throat vertices, compute the convex hull order and save
the vertices and ordered faces in a pickle dictionary to be used in
blender
"""
import pickle as pickle
Indices = []
for t in self.throats():
indices = list(self["throat.vert_index"][t].keys())
verts = self._vor.vertices[indices]
# Need to order the indices in convex hull order
# Compute the standard deviation in all coordinates and eliminate
# the axis with the smallest to make 2d
stds = [np.std(verts[:, 0]), np.std(verts[:, 1]), np.std(verts[:, 2])]
if np.argmin(stds) == 0:
verts2d = np.vstack((verts[:, 1], verts[:, 2])).T
elif np.argmin(stds) == 1:
verts2d = np.vstack((verts[:, 0], verts[:, 2])).T
else:
verts2d = np.vstack((verts[:, 0], verts[:, 1])).T
# 2d convexhull returns vertices in hull order
hull2d = sptl.ConvexHull(verts2d, qhull_options='QJ Pp')
# Re-order the vertices and save as list (blender likes them as lists)
Indices.append(np.asarray(indices)[hull2d.vertices].tolist())
# Create dictionary to pickle
data = {}
data["Verts"] = self._vor.vertices
data["Indices"] = Indices
pickle.dump(data, open("fibres.p", "wb"))
| mit | 3,054,005,862,668,982,000 | 41.560209 | 85 | 0.548714 | false |
FabriceSalvaire/mupdf-v1.3 | bindings/example.py | 1 | 15058 | #! /usr/bin/env python
# -*- Python -*-
####################################################################################################
import argparse
import sys
import numpy as np
import mupdf as cmupdf
from MuPDF import *
from PyQt4 import QtCore, QtGui
####################################################################################################
def show_metadata(ctx, doc):
for key in (
'Title',
'Subject',
'Author',
'Creator',
'Producer',
'CreationDate',
'ModDate',
):
print cmupdf.get_meta_info(doc, key, 1024)
fz_buffer = cmupdf.pdf_metadata(doc)
print cmupdf.fz_buffer_data(fz_buffer)
cmupdf.fz_drop_buffer(ctx, fz_buffer)
####################################################################################################
def show_pdf(np_array):
application = QtGui.QApplication(sys.argv)
height, width = np_array.shape[:2]
image = QtGui.QImage(np_array.data, width, height, QtGui.QImage.Format_ARGB32)
label = QtGui.QLabel()
label.setPixmap(QtGui.QPixmap.fromImage(image))
area = QtGui.QScrollArea()
area.setWidget(label)
area.setWindowTitle(args.filename)
area.show()
application.exec_()
####################################################################################################
def get_font_name(font):
font_name = cmupdf.get_font_name(font)
i = font_name.find('+')
if i:
font_name = font_name[i+1:]
return font_name
####################################################################################################
def dump_bbox(obj):
return "[%g %g %g %g]" % (obj.bbox.x0, obj.bbox.y0,
obj.bbox.x1, obj.bbox.y1)
####################################################################################################
def dump_text_style(text_sheet):
style = text_sheet.style
while style:
font = style.font
message = "span.s%u{font-family:\"%s\";font-size:%gpt" % (style.id, get_font_name(font), style.size)
if cmupdf.font_is_italic(font):
message += ';font-style:italic'
if cmupdf.font_is_bold(font):
message += ';font-weight:bold;'
message += '}'
print message
style = style.next
####################################################################################################
def dump_text_page_xml(text_page):
print "<page>"
for block in TextBlockIterator(text_page):
print "<block bbox=\"" + dump_bbox(block) + "\">"
for line in TextLineIterator(block):
print " "*2 + "<line bbox=\"" + dump_bbox(line) + "\">"
for span in TextSpanIterator(line):
print " "*4 + "<span bbox=\"" + dump_bbox(span) + "\" \">"
for ch in TextCharIterator(span):
style = ch.style
font_name = get_font_name(style.font)
print " "*6 + "<char " + \
u" c=\"%s\" font=\"%s\" size=\"%g\"/>" % (unichr(ch.c), font_name, style.size)
print " "*4 + "</span>"
print " "*2 + "</line>"
print "</block>"
print "</page>"
####################################################################################################
def dump_text_page(text_page):
empty_block = False
for block in TextBlockIterator(text_page):
if not empty_block:
print '\n<Block>'
empty_block = True
for line in TextLineIterator(block):
line_text = u''
for span in TextSpanIterator(line):
span_text = u''
for ch in TextCharIterator(span):
span_text += unichr(ch.c)
span_text = span_text.rstrip()
if span_text:
line_text += '<Span>' + span_text + '</Span>'
else:
line_text += '<Empty Span>'
if line_text:
print line_text
empty_block = False
####################################################################################################
class GrowingTextBrowser(QtGui.QTextBrowser):
_id = 0
##############################################
def __init__(self, *args, **kwargs):
GrowingTextBrowser._id += 1
self._id = GrowingTextBrowser._id
super(GrowingTextBrowser, self).__init__(*args, **kwargs)
size_policy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
size_policy.setHeightForWidth(True)
self.setSizePolicy(size_policy)
self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
##############################################
def setPlainText(self, text):
super(GrowingTextBrowser, self).setPlainText(text)
self._text = text
##############################################
def print_document_size(self, document=None):
if document is None:
document = self.document()
document_size = document.size()
print "Document width", document_size.width(), 'height', document_size.height()
##############################################
def sizePolicy(self):
size_policy = super(GrowingTextBrowser, self).sizePolicy()
print 'GrowingTextBrowser.sizePolicy', self._id, \
size_policy.horizontalPolicy(), size_policy.verticalPolicy()
return size_policy
##############################################
def sizeHint(self):
size = super(GrowingTextBrowser, self).sizeHint()
print 'GrowingTextBrowser.sizeHint', self._id, size.width(), size.height()
return QtCore.QSize(0, 0)
##############################################
def minimumSizeHint(self):
size = super(GrowingTextBrowser, self).minimumSizeHint()
print 'GrowingTextBrowser.minimumSizeHint', self._id, size.width(), size.height()
return QtCore.QSize(0, 0)
##############################################
def heightForWidth(self, width):
print 'GrowingTextBrowser.heightForWidth', self._id, width
document = QtGui.QTextDocument(self._text)
document.setPageSize(QtCore.QSizeF(width, -1))
height = document.documentLayout().documentSize().toSize().height()
self.print_document_size(document)
return height + self.font().pointSize()
##############################################
def resizeEvent(self, event):
print 'GrowingTextBrowser.resizeEvent', self._id, \
'old', event.oldSize().width(), event.oldSize().height(), \
'new', event.size().width(), event.size().height()
self.print_document_size()
return super(GrowingTextBrowser, self).resizeEvent(event)
####################################################################################################
def append_block(parent, vertical_layout, source_text):
text_browser = GrowingTextBrowser(parent)
text_browser.setPlainText(source_text)
# vertical_layout.addWidget(text_browser)
horizontal_layout = QtGui.QHBoxLayout()
horizontal_layout.addWidget(text_browser, 0, QtCore.Qt.AlignTop)
vertical_layout.addLayout(horizontal_layout)
def show_text_page(text_page):
application = QtGui.QApplication(sys.argv)
main_window = QtGui.QMainWindow()
main_window.resize(1000, 800)
main_window.setWindowTitle(args.filename)
scroll_area = QtGui.QScrollArea(main_window)
# scroll_area.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
scroll_area.setWidgetResizable(True)
main_window.setCentralWidget(scroll_area)
container_widget = QtGui.QWidget()
vertical_layout = QtGui.QVBoxLayout(container_widget) # Set container_widget layout
scroll_area.setWidget(container_widget)
for block in TextBlockIterator(text_page):
block_text = u''
for line in TextLineIterator(block):
line_text = u''
for span in TextSpanIterator(line):
span_text = u''
for ch in TextCharIterator(span):
span_text += unichr(ch.c)
span_text = span_text.rstrip()
if span_text: # Append span to line
line_text += span_text
else: # Empty span then append a block
if block_text:
append_block(container_widget, vertical_layout, block_text)
block_text = u''
line_text = u''
# Append line to block
if block_text:
block_text += ' '
block_text += line_text
if block_text:
append_block(container_widget, vertical_layout, block_text)
spacer_item = QtGui.QSpacerItem(0, 0, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
vertical_layout.addItem(spacer_item)
print 'Show'
#main_window.show()
main_window.showMaximized()
application.exec_()
####################################################################################################
argument_parser = argparse.ArgumentParser(description='Example.')
argument_parser.add_argument('filename', metavar='FILENAME',
help='PDF file')
argument_parser.add_argument('--page', dest='page_number',
type=int,
default=1,
help='Page number')
argument_parser.add_argument('--zoom', dest='zoom',
type=int,
default=100,
help='Zoom factor in %%')
argument_parser.add_argument('--rotation', dest='rotation',
type=int,
default=0,
help='Rotation')
args = argument_parser.parse_args()
####################################################################################################
# Create a context to hold the exception stack and various caches.
ctx = cmupdf.fz_new_context(None, None, cmupdf.FZ_STORE_UNLIMITED)
####################################################################################################
# Open the PDF, XPS or CBZ document.
doc = cmupdf.fz_open_document(ctx, args.filename)
show_metadata(ctx, doc)
####################################################################################################
# Retrieve the number of pages (not used in this example).
page_count = cmupdf.fz_count_pages(doc)
# Load the page we want. Page numbering starts from zero.
page = cmupdf.fz_load_page(doc, args.page_number -1)
####################################################################################################
# Calculate a transform to use when rendering. This transform contains the scale and
# rotation. Convert zoom percentage to a scaling factor. Without scaling the resolution is 72 dpi.
transform = cmupdf.fz_matrix_s()
cmupdf.fz_rotate(transform, args.rotation)
cmupdf.fz_pre_scale(transform, args.zoom / 100.0, args.zoom / 100.0)
# Take the page bounds and transform them by the same matrix that we will use to render the page.
bounds = cmupdf.fz_rect_s()
cmupdf.fz_bound_page(doc, page, bounds)
cmupdf.fz_transform_rect(bounds, transform)
####################################################################################################
# A page consists of a series of objects (text, line art, images, gradients). These objects are
# passed to a device when the interpreter runs the page. There are several devices, used for
# different purposes:
#
# draw device -- renders objects to a target pixmap.
#
# text device -- extracts the text in reading order with styling
# information. This text can be used to provide text search.
#
# list device -- records the graphic objects in a list that can
# be played back through another device. This is useful if you
# need to run the same page through multiple devices, without
# the overhead of parsing the page each time.
####################################################################################################
# Create a blank pixmap to hold the result of rendering. The pixmap bounds used here are the same as
# the transformed page bounds, so it will contain the entire page. The page coordinate space has the
# origin at the top left corner and the x axis extends to the right and the y axis extends down.
bbox = cmupdf.fz_irect_s()
cmupdf.fz_round_rect(bbox, bounds)
width, height = bbox.x1 - bbox.x0, bbox.y1 - bbox.y0
np_array = np.zeros((height, width, 4), dtype=np.uint8)
# pixmap = cmupdf.fz_new_pixmap_with_bbox(ctx, cmupdf.get_fz_device_rgb(), bbox)
pixmap = cmupdf.fz_new_pixmap_with_bbox_and_data(ctx, cmupdf.fz_device_rgb(ctx), bbox,
cmupdf.numpy_to_pixmap(np_array))
cmupdf.fz_clear_pixmap_with_value(ctx, pixmap, 0xff)
# Create a draw device with the pixmap as its target.
# Run the page with the transform.
device = cmupdf.fz_new_draw_device(ctx, pixmap)
cmupdf.fz_set_aa_level(ctx, 8)
cmupdf.fz_run_page(doc, page, device, transform, None)
cmupdf.fz_free_device(device)
if True:
show_pdf(np_array)
if False:
# Save the pixmap to a file.
cmupdf.fz_write_png(ctx, pixmap, "out.png", 0)
####################################################################################################
text_sheet = cmupdf.fz_new_text_sheet(ctx)
text_page = cmupdf.fz_new_text_page(ctx)
device = cmupdf.fz_new_text_device(ctx, text_sheet, text_page)
cmupdf.fz_run_page(doc, page, device, transform, None)
cmupdf.fz_free_device(device)
if False:
# Dump text style and page.
dump_text_style(text_sheet)
dump_text_page_xml(text_page)
if True:
dump_text_page(text_page)
show_text_page(text_page)
if False:
file_handler = cmupdf.fz_fopen("out.css", "w+")
output_file = cmupdf.fz_new_output_with_file(ctx, file_handler)
cmupdf.fz_print_text_sheet(ctx, output_file, text_sheet)
cmupdf.fz_close_output(output_file)
cmupdf.fz_fclose(file_handler)
output_file = cmupdf.fz_fopen("out.txt", "w+")
output_file = cmupdf.fz_new_output_with_file(ctx, file_handler)
# cmupdf.fz_print_text_page(ctx, output_file, text_page)
# cmupdf.fz_print_text_page_html(ctx, output_file, text_page)
cmupdf.fz_print_text_page_xml(ctx, output_file, text_page)
cmupdf.fz_close_output(output_file)
cmupdf.fz_fclose(file_handler)
####################################################################################################
# Clean up.
cmupdf.fz_free_text_sheet(ctx, text_sheet)
cmupdf.fz_free_text_page(ctx, text_page)
cmupdf.fz_drop_pixmap(ctx, pixmap)
cmupdf.fz_free_page(doc, page)
cmupdf.fz_close_document(doc)
cmupdf.fz_free_context(ctx)
####################################################################################################
#
# End
#
####################################################################################################
| agpl-3.0 | 8,527,913,039,060,901,000 | 34.682464 | 108 | 0.521052 | false |
twitter/pants | src/python/pants/backend/jvm/ivy_utils.py | 1 | 51293 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import os
import pkgutil
import threading
import xml.etree.ElementTree as ET
from abc import abstractmethod
from builtins import object, open, str
from collections import defaultdict, namedtuple
from functools import total_ordering
import six
from future.utils import PY3
from twitter.common.collections import OrderedSet
from pants.backend.jvm.subsystems.jar_dependency_management import (JarDependencyManagement,
PinnedJarArtifactSet)
from pants.backend.jvm.targets.exportable_jvm_library import ExportableJvmLibrary
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.base.generator import Generator, TemplateData
from pants.base.revision import Revision
from pants.build_graph.target import Target
from pants.ivy.bootstrapper import Bootstrapper
from pants.java.jar.exclude import Exclude
from pants.java.jar.jar_dependency import JarDependency
from pants.java.jar.jar_dependency_utils import M2Coordinate, ResolvedJar
from pants.java.util import execute_runner
from pants.util.collections_abc_backport import OrderedDict
from pants.util.dirutil import safe_concurrent_creation, safe_mkdir, safe_open
from pants.util.fileutil import atomic_copy, safe_hardlink_or_copy
class IvyResolutionStep(object):
"""Ivy specific class for describing steps of performing resolution."""
# NB(nh): This class is the base class for the ivy resolve and fetch steps.
# It also specifies the abstract methods that define the components of resolution steps.
def __init__(self, confs, hash_name, pinned_artifacts, soft_excludes, ivy_resolution_cache_dir,
ivy_repository_cache_dir, ivy_workdir):
"""
:param confs: A tuple of string ivy confs to resolve for.
:param hash_name: A unique string name for this resolve.
:param pinned_artifacts: A tuple of "artifact-alikes" to force the versions of.
:param soft_excludes: A flag marking whether to pass excludes to Ivy or to apply them after the
fact.
:param ivy_repository_cache_dir: The cache directory used by Ivy for repository cache data.
:param ivy_resolution_cache_dir: The cache directory used by Ivy for resolution cache data.
:param ivy_workdir: A task-specific workdir that all ivy outputs live in.
"""
self.confs = confs
self.hash_name = hash_name
self.pinned_artifacts = pinned_artifacts
self.soft_excludes = soft_excludes
self.ivy_repository_cache_dir = ivy_repository_cache_dir
self.ivy_resolution_cache_dir = ivy_resolution_cache_dir
self.ivy_workdir = ivy_workdir
self.workdir_reports_by_conf = {c: self.resolve_report_path(c) for c in confs}
@abstractmethod
def required_load_files_exist(self):
"""The files required to load a previous resolve exist."""
@abstractmethod
def required_exec_files_exist(self):
"""The files to do a resolve exist."""
@abstractmethod
def load(self, targets):
"""Loads the result of a resolve or fetch."""
@abstractmethod
def exec_and_load(self, executor, extra_args, targets, jvm_options, workunit_name,
workunit_factory):
"""Runs the resolve or fetch and loads the result, returning it."""
@property
def workdir(self):
return os.path.join(self.ivy_workdir, self.hash_name)
@property
def hardlink_classpath_filename(self):
return os.path.join(self.workdir, 'classpath')
@property
def ivy_cache_classpath_filename(self):
return '{}.raw'.format(self.hardlink_classpath_filename)
@property
def frozen_resolve_file(self):
return os.path.join(self.workdir, 'resolution.json')
@property
def hardlink_dir(self):
return os.path.join(self.ivy_workdir, 'jars')
@abstractmethod
def ivy_xml_path(self):
"""Ivy xml location."""
@abstractmethod
def resolve_report_path(self, conf):
"""Location of the resolve report in the workdir."""
def _construct_and_load_hardlink_map(self):
artifact_paths, hardlink_map = IvyUtils.construct_and_load_hardlink_map(
self.hardlink_dir,
self.ivy_repository_cache_dir,
self.ivy_cache_classpath_filename,
self.hardlink_classpath_filename)
return artifact_paths, hardlink_map
def _call_ivy(self, executor, extra_args, ivyxml, jvm_options, hash_name_for_report,
workunit_factory, workunit_name):
IvyUtils.do_resolve(executor,
extra_args,
ivyxml,
jvm_options,
self.workdir_reports_by_conf,
self.confs,
self.ivy_resolution_cache_dir,
self.ivy_cache_classpath_filename,
hash_name_for_report,
workunit_factory,
workunit_name)
class IvyFetchStep(IvyResolutionStep):
"""Resolves ivy artifacts using the coordinates from a previous resolve."""
def required_load_files_exist(self):
return (all(os.path.isfile(report) for report in self.workdir_reports_by_conf.values()) and
os.path.isfile(self.ivy_cache_classpath_filename) and
os.path.isfile(self.frozen_resolve_file))
def resolve_report_path(self, conf):
return os.path.join(self.workdir, 'fetch-report-{}.xml'.format(conf))
@property
def ivy_xml_path(self):
return os.path.join(self.workdir, 'fetch-ivy.xml')
def required_exec_files_exist(self):
return os.path.isfile(self.frozen_resolve_file)
def load(self, targets):
try:
frozen_resolutions = FrozenResolution.load_from_file(self.frozen_resolve_file,
targets)
except Exception as e:
logger.debug('Failed to load {}: {}'.format(self.frozen_resolve_file, e))
return NO_RESOLVE_RUN_RESULT
return self._load_from_fetch(frozen_resolutions)
def exec_and_load(self, executor, extra_args, targets, jvm_options, workunit_name,
workunit_factory):
try:
frozen_resolutions = FrozenResolution.load_from_file(self.frozen_resolve_file,
targets)
except Exception as e:
logger.debug('Failed to load {}: {}'.format(self.frozen_resolve_file, e))
return NO_RESOLVE_RUN_RESULT
self._do_fetch(executor, extra_args, frozen_resolutions, jvm_options,
workunit_name, workunit_factory)
result = self._load_from_fetch(frozen_resolutions)
if not result.all_linked_artifacts_exist():
raise IvyResolveMappingError(
'Some artifacts were not linked to {} for {}'.format(self.ivy_workdir,
result))
return result
def _load_from_fetch(self, frozen_resolutions):
artifact_paths, hardlink_map = self._construct_and_load_hardlink_map()
return IvyFetchResolveResult(artifact_paths,
hardlink_map,
self.hash_name,
self.workdir_reports_by_conf,
frozen_resolutions)
def _do_fetch(self, executor, extra_args, frozen_resolution, jvm_options, workunit_name,
workunit_factory):
# It's important for fetches to have a different ivy report from resolves as their
# contents differ.
hash_name_for_report = '{}-fetch'.format(self.hash_name)
ivyxml = self.ivy_xml_path
self._prepare_ivy_xml(frozen_resolution, ivyxml, hash_name_for_report)
self._call_ivy(executor, extra_args, ivyxml, jvm_options, hash_name_for_report,
workunit_factory, workunit_name)
def _prepare_ivy_xml(self, frozen_resolution, ivyxml, resolve_hash_name_for_report):
# NB(nh): Our ivy.xml ensures that we always get the default configuration, even if it's not
# part of the requested confs.
default_resolution = frozen_resolution.get('default')
if default_resolution is None:
raise IvyUtils.IvyError("Couldn't find the frozen resolution for the 'default' ivy conf.")
try:
jars = default_resolution.jar_dependencies
IvyUtils.generate_fetch_ivy(jars, ivyxml, self.confs, resolve_hash_name_for_report)
except Exception as e:
raise IvyUtils.IvyError('Failed to prepare ivy resolve: {}'.format(e))
class IvyResolveStep(IvyResolutionStep):
"""Resolves ivy artifacts and produces a cacheable file containing the resulting coordinates."""
def required_load_files_exist(self):
return (all(os.path.isfile(report) for report in self.workdir_reports_by_conf.values()) and
os.path.isfile(self.ivy_cache_classpath_filename))
def resolve_report_path(self, conf):
return os.path.join(self.workdir, 'resolve-report-{}.xml'.format(conf))
@property
def ivy_xml_path(self):
return os.path.join(self.workdir, 'resolve-ivy.xml')
def load(self, targets):
artifact_paths, hardlink_map = self._construct_and_load_hardlink_map()
return IvyResolveResult(artifact_paths,
hardlink_map,
self.hash_name,
self.workdir_reports_by_conf)
def exec_and_load(self, executor, extra_args, targets, jvm_options,
workunit_name, workunit_factory):
self._do_resolve(executor, extra_args, targets, jvm_options, workunit_name, workunit_factory)
result = self.load(targets)
if not result.all_linked_artifacts_exist():
raise IvyResolveMappingError(
'Some artifacts were not linked to {} for {}'.format(self.ivy_workdir,
result))
frozen_resolutions_by_conf = result.get_frozen_resolutions_by_conf(targets)
FrozenResolution.dump_to_file(self.frozen_resolve_file, frozen_resolutions_by_conf)
return result
def _do_resolve(self, executor, extra_args, targets, jvm_options, workunit_name, workunit_factory):
ivyxml = self.ivy_xml_path
hash_name = '{}-resolve'.format(self.hash_name)
self._prepare_ivy_xml(targets, ivyxml, hash_name)
self._call_ivy(executor, extra_args, ivyxml, jvm_options, hash_name,
workunit_factory, workunit_name)
def _prepare_ivy_xml(self, targets, ivyxml, hash_name):
# TODO(John Sirois): merge the code below into IvyUtils or up here; either way, better
# diagnostics can be had in `IvyUtils.generate_ivy` if this is done.
# See: https://github.com/pantsbuild/pants/issues/2239
jars, global_excludes = IvyUtils.calculate_classpath(targets)
# Don't pass global excludes to ivy when using soft excludes.
if self.soft_excludes:
global_excludes = []
IvyUtils.generate_ivy(targets, jars, global_excludes, ivyxml, self.confs,
hash_name, self.pinned_artifacts)
class FrozenResolution(object):
"""Contains the abstracted results of a resolve.
With this we can do a simple fetch.
"""
# TODO(nh): include full dependency graph in here.
# So that we can inject it into the build graph if we want to.
class MissingTarget(Exception):
"""Thrown when a loaded resolution has a target spec for a target that doesn't exist."""
def __init__(self):
self.target_to_resolved_coordinates = defaultdict(OrderedSet)
self.all_resolved_coordinates = OrderedSet()
self.coordinate_to_attributes = OrderedDict()
@property
def jar_dependencies(self):
return [
JarDependency(c.org, c.name, c.rev, classifier=c.classifier, ext=c.ext,
**self.coordinate_to_attributes.get(c, {}))
for c in self.all_resolved_coordinates]
def add_resolved_jars(self, target, resolved_jars):
coords = [j.coordinate for j in resolved_jars]
self.add_resolution_coords(target, coords)
# Assuming target is a jar library.
for j in target.jar_dependencies:
url = j.get_url(relative=True)
if url:
self.coordinate_to_attributes[j.coordinate] = {'url': url, 'base_path': j.base_path}
else:
self.coordinate_to_attributes[j.coordinate] = {}
def add_resolution_coords(self, target, coords):
for c in coords:
self.target_to_resolved_coordinates[target].add(c)
self.all_resolved_coordinates.add(c)
def target_spec_to_coordinate_strings(self):
return {t.address.spec: [str(c) for c in coordinates]
for t, coordinates in self.target_to_resolved_coordinates.items()}
def __repr__(self):
return 'FrozenResolution(\n target_to_resolved_coordinates\n {}\n all\n {}'.format(
'\n '.join(': '.join([t.address.spec,
'\n '.join(str(c) for c in cs)])
for t,cs in self.target_to_resolved_coordinates.items()),
'\n '.join(str(c) for c in self.coordinate_to_attributes.keys())
)
def __eq__(self, other):
return (type(self) == type(other) and
self.all_resolved_coordinates == other.all_resolved_coordinates and
self.target_to_resolved_coordinates == other.target_to_resolved_coordinates)
def __ne__(self, other):
return not self == other
@classmethod
def load_from_file(cls, filename, targets):
if not os.path.exists(filename):
return None
with open(filename, 'r') as f:
# Using OrderedDict here to maintain insertion order of dict entries.
from_file = json.load(f, object_pairs_hook=OrderedDict)
result = {}
target_lookup = {t.address.spec: t for t in targets}
for conf, serialized_resolution in from_file.items():
resolution = FrozenResolution()
def m2_for(c):
return M2Coordinate.from_string(c)
for coord, attr_dict in serialized_resolution['coord_to_attrs'].items():
m2 = m2_for(coord)
resolution.coordinate_to_attributes[m2] = attr_dict
for spec, coord_strs in serialized_resolution['target_to_coords'].items():
t = target_lookup.get(spec, None)
if t is None:
raise cls.MissingTarget('Cannot find target for address {} in frozen resolution'
.format(spec))
resolution.add_resolution_coords(t, [m2_for(c) for c in coord_strs])
result[conf] = resolution
return result
@classmethod
def dump_to_file(cls, filename, resolutions_by_conf):
res = {}
for conf, resolution in resolutions_by_conf.items():
res[conf] = OrderedDict([
['target_to_coords',resolution.target_spec_to_coordinate_strings()],
['coord_to_attrs', OrderedDict([str(c), attrs]
for c, attrs in resolution.coordinate_to_attributes.items())]
])
with safe_concurrent_creation(filename) as tmp_filename:
mode = 'w' if PY3 else 'wb'
with open(tmp_filename, mode) as f:
json.dump(res, f)
class IvyResolveResult(object):
"""The result of an Ivy resolution.
The result data includes the list of resolved artifacts, the relationships between those artifacts
and the targets that requested them and the hash name of the resolve.
"""
def __init__(self, resolved_artifact_paths, hardlink_map, resolve_hash_name, reports_by_conf):
self._reports_by_conf = reports_by_conf
self.resolved_artifact_paths = resolved_artifact_paths
self.resolve_hash_name = resolve_hash_name
self._hardlink_map = hardlink_map
@property
def has_resolved_artifacts(self):
"""The requested targets have a resolution associated with them."""
return self.resolve_hash_name is not None
def all_linked_artifacts_exist(self):
"""All of the artifact paths for this resolve point to existing files."""
if not self.has_resolved_artifacts:
return False
for path in self.resolved_artifact_paths:
if not os.path.isfile(path):
return False
else:
return True
def report_for_conf(self, conf):
"""Returns the path to the ivy report for the provided conf.
Returns None if there is no path.
"""
return self._reports_by_conf.get(conf)
def get_frozen_resolutions_by_conf(self, targets):
frozen_resolutions_by_conf = OrderedDict()
for conf in self._reports_by_conf:
frozen_resolution = FrozenResolution()
for target, resolved_jars in self.resolved_jars_for_each_target(conf, targets):
frozen_resolution.add_resolved_jars(target, resolved_jars)
frozen_resolutions_by_conf[conf] = frozen_resolution
return frozen_resolutions_by_conf
def resolved_jars_for_each_target(self, conf, targets):
"""Yields the resolved jars for each passed JarLibrary.
If there is no report for the requested conf, yields nothing.
:param conf: The ivy conf to load jars for.
:param targets: The collection of JarLibrary targets to find resolved jars for.
:yield: target, resolved_jars
:raises IvyTaskMixin.UnresolvedJarError
"""
ivy_info = self._ivy_info_for(conf)
if not ivy_info:
return
jar_library_targets = [t for t in targets if isinstance(t, JarLibrary)]
ivy_jar_memo = {}
for target in jar_library_targets:
# Add the artifacts from each dependency module.
resolved_jars = self._resolved_jars_with_hardlinks(conf, ivy_info, ivy_jar_memo,
self._jar_dependencies_for_target(conf,
target),
target)
yield target, resolved_jars
def _jar_dependencies_for_target(self, conf, target):
return target.jar_dependencies
def _ivy_info_for(self, conf):
report_path = self._reports_by_conf.get(conf)
return IvyUtils.parse_xml_report(conf, report_path)
def _new_resolved_jar_with_hardlink_path(self, conf, target, resolved_jar_without_hardlink):
def candidate_cache_paths():
# There is a focus on being lazy here to avoid `os.path.realpath` when we can.
yield resolved_jar_without_hardlink.cache_path
yield os.path.realpath(resolved_jar_without_hardlink.cache_path)
for cache_path in candidate_cache_paths():
pants_path = self._hardlink_map.get(cache_path)
if pants_path:
break
else:
raise IvyResolveMappingError(
'Jar {resolved_jar} in {spec} not resolved to the ivy '
'hardlink map in conf {conf}.'
.format(spec=target.address.spec,
resolved_jar=resolved_jar_without_hardlink.cache_path,
conf=conf))
return ResolvedJar(coordinate=resolved_jar_without_hardlink.coordinate,
pants_path=pants_path,
cache_path=resolved_jar_without_hardlink.cache_path)
def _resolved_jars_with_hardlinks(self, conf, ivy_info, ivy_jar_memo, coordinates, target):
raw_resolved_jars = ivy_info.get_resolved_jars_for_coordinates(coordinates,
memo=ivy_jar_memo)
resolved_jars = [self._new_resolved_jar_with_hardlink_path(conf, target, raw_resolved_jar)
for raw_resolved_jar in raw_resolved_jars]
return resolved_jars
class IvyFetchResolveResult(IvyResolveResult):
"""A resolve result that uses the frozen resolution to look up dependencies."""
def __init__(self, resolved_artifact_paths, hardlink_map, resolve_hash_name, reports_by_conf,
frozen_resolutions):
super(IvyFetchResolveResult, self).__init__(resolved_artifact_paths, hardlink_map,
resolve_hash_name, reports_by_conf)
self._frozen_resolutions = frozen_resolutions
def _jar_dependencies_for_target(self, conf, target):
return self._frozen_resolutions[conf].target_to_resolved_coordinates.get(target, ())
NO_RESOLVE_RUN_RESULT = IvyResolveResult([], {}, None, {})
IvyModule = namedtuple('IvyModule', ['ref', 'artifact', 'callers'])
Dependency = namedtuple('DependencyAttributes',
['org', 'name', 'rev', 'mutable', 'force', 'transitive'])
Artifact = namedtuple('Artifact', ['name', 'type_', 'ext', 'url', 'classifier'])
logger = logging.getLogger(__name__)
class IvyResolveMappingError(Exception):
"""Raised when there is a failure mapping the ivy resolve results to pants objects."""
@total_ordering
class IvyModuleRef(object):
"""
:API: public
"""
# latest.integration is ivy magic meaning "just get the latest version"
_ANY_REV = 'latest.integration'
def __init__(self, org, name, rev, classifier=None, ext=None):
self.org = org
self.name = name
self.rev = rev
self.classifier = classifier
self.ext = ext or 'jar'
self._id = (self.org, self.name, self.rev, self.classifier, self.ext)
def __eq__(self, other):
return isinstance(other, IvyModuleRef) and self._id == other._id
# TODO(#6071): Return NotImplemented if other does not have attributes
def __lt__(self, other):
    # We can't just re-use __repr__ or __str__ because we want to order rev last
return ((self.org, self.name, self.classifier or '', self.ext, self.rev) <
(other.org, other.name, other.classifier or '', other.ext, other.rev))
def __hash__(self):
return hash(self._id)
def __str__(self):
return 'IvyModuleRef({})'.format(':'.join((x or '') for x in self._id))
def __repr__(self):
return ('IvyModuleRef(org={!r}, name={!r}, rev={!r}, classifier={!r}, ext={!r})'
.format(*self._id))
@property
def caller_key(self):
"""This returns an identifier for an IvyModuleRef that only retains the caller org and name.
Ivy represents dependees as `<caller/>`'s with just org and name and rev information.
This method returns a `<caller/>` representation of the current ref.
"""
return IvyModuleRef(name=self.name, org=self.org, rev=self._ANY_REV)
@property
def unversioned(self):
"""This returns an identifier for an IvyModuleRef without version information.
It's useful because ivy might return information about a different version of a dependency than
the one we request, and we want to ensure that all requesters of any version of that dependency
are able to learn about it.
"""
return IvyModuleRef(name=self.name, org=self.org, rev=self._ANY_REV, classifier=self.classifier,
ext=self.ext)
class IvyInfo(object):
"""
:API: public
"""
def __init__(self, conf):
self._conf = conf
self.modules_by_ref = {} # Map from ref to referenced module.
self.refs_by_unversioned_refs = {} # Map from unversioned ref to the resolved versioned ref
# Map from ref of caller to refs of modules required by that caller.
self._deps_by_caller = defaultdict(OrderedSet)
# Map from _unversioned_ ref to OrderedSet of IvyArtifact instances.
self._artifacts_by_ref = defaultdict(OrderedSet)
def add_module(self, module):
if not module.artifact:
# Module was evicted, so do not record information about it
return
ref_unversioned = module.ref.unversioned
if ref_unversioned in self.refs_by_unversioned_refs:
raise IvyResolveMappingError('Already defined module {}, as rev {}!'
.format(ref_unversioned, module.ref.rev))
if module.ref in self.modules_by_ref:
raise IvyResolveMappingError('Already defined module {}, would be overwritten!'
.format(module.ref))
self.refs_by_unversioned_refs[ref_unversioned] = module.ref
self.modules_by_ref[module.ref] = module
for caller in module.callers:
self._deps_by_caller[caller.caller_key].add(module.ref)
self._artifacts_by_ref[ref_unversioned].add(module.artifact)
def _do_traverse_dependency_graph(self, ref, collector, memo, visited):
memoized_value = memo.get(ref)
if memoized_value:
return memoized_value
if ref in visited:
# Ivy allows for circular dependencies
# If we're here, that means we're resolving something that
# transitively depends on itself
return set()
visited.add(ref)
acc = collector(ref)
# NB(zundel): ivy does not return deps in a consistent order for the same module for
# different resolves. Sort them to get consistency and prevent cache invalidation.
# See https://github.com/pantsbuild/pants/issues/2607
deps = sorted(self._deps_by_caller.get(ref.caller_key, ()))
for dep in deps:
acc.update(self._do_traverse_dependency_graph(dep, collector, memo, visited))
memo[ref] = acc
return acc
def traverse_dependency_graph(self, ref, collector, memo=None):
"""Traverses module graph, starting with ref, collecting values for each ref into the sets
created by the collector function.
:param ref an IvyModuleRef to start traversing the ivy dependency graph
:param collector a function that takes a ref and returns a new set of values to collect for
that ref, which will also be updated with all the dependencies accumulated values
:param memo is a dict of ref -> set that memoizes the results of each node in the graph.
If provided, allows for retaining cache across calls.
:returns the accumulated set for ref
"""
resolved_ref = self.refs_by_unversioned_refs.get(ref.unversioned)
if resolved_ref:
ref = resolved_ref
if memo is None:
memo = dict()
visited = set()
return self._do_traverse_dependency_graph(ref, collector, memo, visited)
def get_resolved_jars_for_coordinates(self, coordinates, memo=None):
"""Collects jars for the passed coordinates.
Because artifacts are only fetched for the "winning" version of a module, the artifacts
will not always represent the version originally declared by the library.
This method is transitive within the passed coordinates dependencies.
    :param collections.Iterable coordinates: Collection of coordinates to collect transitive
resolved jars for.
:param memo: See `traverse_dependency_graph`.
:returns: All the artifacts for all of the jars for the provided coordinates,
including transitive dependencies.
:rtype: list of :class:`pants.java.jar.ResolvedJar`
"""
def to_resolved_jar(jar_ref, jar_path):
return ResolvedJar(coordinate=M2Coordinate(org=jar_ref.org,
name=jar_ref.name,
rev=jar_ref.rev,
classifier=jar_ref.classifier,
ext=jar_ref.ext),
cache_path=jar_path)
resolved_jars = OrderedSet()
def create_collection(dep):
return OrderedSet([dep])
for jar in coordinates:
classifier = jar.classifier if self._conf == 'default' else self._conf
jar_module_ref = IvyModuleRef(jar.org, jar.name, jar.rev, classifier, jar.ext)
for module_ref in self.traverse_dependency_graph(jar_module_ref, create_collection, memo):
for artifact_path in self._artifacts_by_ref[module_ref.unversioned]:
resolved_jars.add(to_resolved_jar(module_ref, artifact_path))
return resolved_jars
def __repr__(self):
return 'IvyInfo(conf={}, refs={})'.format(self._conf, self.modules_by_ref.keys())
class IvyUtils(object):
"""Useful methods related to interaction with ivy.
:API: public
"""
# Protects ivy executions.
_ivy_lock = threading.RLock()
# Protect writes to the global map of jar path -> hardlinks to that jar.
_hardlink_map_lock = threading.Lock()
INTERNAL_ORG_NAME = 'internal'
class IvyError(Exception):
"""Indicates an error preparing an ivy operation."""
class IvyResolveReportError(IvyError):
"""Indicates that an ivy report cannot be found."""
class IvyResolveConflictingDepsError(IvyError):
"""Indicates two or more locally declared dependencies conflict."""
class BadRevisionError(IvyError):
"""Indicates an unparseable version number."""
@staticmethod
def _generate_exclude_template(exclude):
return TemplateData(org=exclude.org, name=exclude.name)
@staticmethod
def _generate_override_template(jar):
return TemplateData(org=jar.org, module=jar.name, version=jar.rev)
@staticmethod
def _load_classpath_from_cachepath(path):
if not os.path.exists(path):
return []
else:
with safe_open(path, 'r') as cp:
return [_f for _f in (path.strip() for path in cp.read().split(os.pathsep)) if _f]
@classmethod
def do_resolve(cls, executor, extra_args, ivyxml, jvm_options, workdir_report_paths_by_conf,
confs, ivy_resolution_cache_dir, ivy_cache_classpath_filename, resolve_hash_name,
workunit_factory, workunit_name):
"""Execute Ivy with the given ivy.xml and copies all relevant files into the workdir.
This method does an Ivy resolve, which may be either a Pants resolve or a Pants fetch depending
on whether there is an existing frozen resolution.
After it is run, the Ivy reports are copied into the workdir at the paths specified by
workdir_report_paths_by_conf along with a file containing a list of all the requested artifacts
and their transitive dependencies.
:param executor: A JVM executor to use to invoke ivy.
:param extra_args: Extra arguments to pass to ivy.
:param ivyxml: The input ivy.xml containing the dependencies to resolve.
:param jvm_options: A list of jvm option strings to use for the ivy invoke, or None.
:param workdir_report_paths_by_conf: A dict mapping confs to report paths in the workdir.
:param confs: The confs used in the resolve.
:param resolve_hash_name: The hash to use as the module name for finding the ivy report file.
:param workunit_factory: A workunit factory for the ivy invoke, or None.
:param workunit_name: A workunit name for the ivy invoke, or None.
"""
ivy = Bootstrapper.default_ivy(bootstrap_workunit_factory=workunit_factory)
with safe_concurrent_creation(ivy_cache_classpath_filename) as raw_target_classpath_file_tmp:
extra_args = extra_args or []
args = ['-cachepath', raw_target_classpath_file_tmp] + extra_args
with cls._ivy_lock:
cls._exec_ivy(ivy, confs, ivyxml, args,
jvm_options=jvm_options,
executor=executor,
workunit_name=workunit_name,
workunit_factory=workunit_factory)
if not os.path.exists(raw_target_classpath_file_tmp):
raise cls.IvyError('Ivy failed to create classpath file at {}'
.format(raw_target_classpath_file_tmp))
cls._copy_ivy_reports(workdir_report_paths_by_conf, confs, ivy_resolution_cache_dir, resolve_hash_name)
      logger.debug('Moved ivy classpath file to {dest}'
.format(dest=ivy_cache_classpath_filename))
@classmethod
def _copy_ivy_reports(cls, workdir_report_paths_by_conf, confs, ivy_resolution_cache_dir, resolve_hash_name):
for conf in confs:
ivy_cache_report_path = IvyUtils.xml_report_path(ivy_resolution_cache_dir, resolve_hash_name,
conf)
workdir_report_path = workdir_report_paths_by_conf[conf]
try:
atomic_copy(ivy_cache_report_path,
workdir_report_path)
except IOError as e:
raise cls.IvyError('Failed to copy report into workdir from {} to {}: {}'
.format(ivy_cache_report_path, workdir_report_path, e))
@classmethod
def _exec_ivy(cls, ivy, confs, ivyxml, args, jvm_options, executor,
workunit_name, workunit_factory):
ivy = ivy or Bootstrapper.default_ivy()
ivy_args = ['-ivy', ivyxml]
ivy_args.append('-confs')
ivy_args.extend(confs)
ivy_args.extend(args)
ivy_jvm_options = list(jvm_options)
# Disable cache in File.getCanonicalPath(), makes Ivy work with -symlink option properly on ng.
ivy_jvm_options.append('-Dsun.io.useCanonCaches=false')
runner = ivy.runner(jvm_options=ivy_jvm_options, args=ivy_args, executor=executor)
try:
with ivy.resolution_lock:
result = execute_runner(runner, workunit_factory=workunit_factory,
workunit_name=workunit_name)
if result != 0:
raise IvyUtils.IvyError('Ivy returned {result}. cmd={cmd}'.format(result=result,
cmd=runner.cmd))
except runner.executor.Error as e:
raise IvyUtils.IvyError(e)
@classmethod
def construct_and_load_hardlink_map(cls, hardlink_dir, ivy_repository_cache_dir,
ivy_cache_classpath_filename, hardlink_classpath_filename):
# Make our actual classpath be hardlinks, so that the paths are uniform across systems.
# Note that we must do this even if we read the raw_target_classpath_file from the artifact
# cache. If we cache the target_classpath_file we won't know how to create the hardlinks.
with IvyUtils._hardlink_map_lock:
# A common dir for hardlinks into the ivy2 cache. This ensures that paths to jars
# in artifact-cached analysis files are consistent across systems.
# Note that we have one global, well-known hardlink dir, again so that paths are
# consistent across builds.
hardlink_map = cls._hardlink_cachepath(ivy_repository_cache_dir,
ivy_cache_classpath_filename,
hardlink_dir,
hardlink_classpath_filename)
classpath = cls._load_classpath_from_cachepath(hardlink_classpath_filename)
return classpath, hardlink_map
@classmethod
def _hardlink_cachepath(cls, ivy_repository_cache_dir, inpath, hardlink_dir, outpath):
"""hardlinks all paths listed in inpath that are under ivy_repository_cache_dir into hardlink_dir.
If there is an existing hardlink for a file under inpath, it is used rather than creating
a new hardlink. Preserves all other paths. Writes the resulting paths to outpath.
Returns a map of path -> hardlink to that path.
"""
safe_mkdir(hardlink_dir)
    # The ivy_repository_cache_dir might itself be a symlink. In this case, ivy may return paths that
# reference the realpath of the .jar file after it is resolved in the cache dir. To handle
# this case, add both the hardlink'ed path and the realpath to the jar to the hardlink map.
real_ivy_cache_dir = os.path.realpath(ivy_repository_cache_dir)
hardlink_map = OrderedDict()
inpaths = cls._load_classpath_from_cachepath(inpath)
paths = OrderedSet([os.path.realpath(path) for path in inpaths])
for path in paths:
if path.startswith(real_ivy_cache_dir):
hardlink_map[path] = os.path.join(hardlink_dir, os.path.relpath(path, real_ivy_cache_dir))
else:
# This path is outside the cache. We won't hardlink it.
hardlink_map[path] = path
# Create hardlinks for paths in the ivy cache dir.
for path, hardlink in six.iteritems(hardlink_map):
if path == hardlink:
# Skip paths that aren't going to be hardlinked.
continue
safe_mkdir(os.path.dirname(hardlink))
safe_hardlink_or_copy(path, hardlink)
# (re)create the classpath with all of the paths
with safe_open(outpath, 'w') as outfile:
outfile.write(':'.join(OrderedSet(hardlink_map.values())))
return dict(hardlink_map)
@classmethod
def xml_report_path(cls, resolution_cache_dir, resolve_hash_name, conf):
"""The path to the xml report ivy creates after a retrieve.
:API: public
    :param string resolution_cache_dir: The path of the ivy resolution cache dir used for resolves.
:param string resolve_hash_name: Hash from the Cache key from the VersionedTargetSet used for
resolution.
:param string conf: The ivy conf name (e.g. "default").
:returns: The report path.
:rtype: string
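    Example (illustrative values only):
      xml_report_path('/ivy/cache', 'abc123', 'default')
      -> '/ivy/cache/internal-abc123-default.xml'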
"""
return os.path.join(resolution_cache_dir, '{}-{}-{}.xml'.format(IvyUtils.INTERNAL_ORG_NAME,
resolve_hash_name, conf))
@classmethod
def parse_xml_report(cls, conf, path):
"""Parse the ivy xml report corresponding to the name passed to ivy.
:API: public
:param string conf: the ivy conf name (e.g. "default")
:param string path: The path to the ivy report file.
:returns: The info in the xml report.
:rtype: :class:`IvyInfo`
:raises: :class:`IvyResolveMappingError` if no report exists.
"""
if not os.path.exists(path):
raise cls.IvyResolveReportError('Missing expected ivy output file {}'.format(path))
logger.debug("Parsing ivy report {}".format(path))
ret = IvyInfo(conf)
etree = ET.parse(path)
doc = etree.getroot()
for module in doc.findall('dependencies/module'):
org = module.get('organisation')
name = module.get('name')
for revision in module.findall('revision'):
rev = revision.get('name')
callers = []
for caller in revision.findall('caller'):
callers.append(IvyModuleRef(caller.get('organisation'),
caller.get('name'),
caller.get('callerrev')))
for artifact in revision.findall('artifacts/artifact'):
classifier = artifact.get('extra-classifier')
ext = artifact.get('ext')
ivy_module_ref = IvyModuleRef(org=org, name=name, rev=rev,
classifier=classifier, ext=ext)
artifact_cache_path = artifact.get('location')
ivy_module = IvyModule(ivy_module_ref, artifact_cache_path, tuple(callers))
ret.add_module(ivy_module)
return ret
@classmethod
def generate_ivy(cls, targets, jars, excludes, ivyxml, confs, resolve_hash_name=None,
pinned_artifacts=None, jar_dep_manager=None):
if not resolve_hash_name:
resolve_hash_name = Target.maybe_readable_identify(targets)
return cls._generate_resolve_ivy(jars, excludes, ivyxml, confs, resolve_hash_name, pinned_artifacts,
jar_dep_manager)
@classmethod
def _generate_resolve_ivy(cls, jars, excludes, ivyxml, confs, resolve_hash_name, pinned_artifacts=None,
jar_dep_manager=None):
org = IvyUtils.INTERNAL_ORG_NAME
name = resolve_hash_name
extra_configurations = [conf for conf in confs if conf and conf != 'default']
jars_by_key = OrderedDict()
for jar in jars:
      coord_jars = jars_by_key.setdefault((jar.org, jar.name), [])
      coord_jars.append(jar)
manager = jar_dep_manager or JarDependencyManagement.global_instance()
artifact_set = PinnedJarArtifactSet(pinned_artifacts) # Copy, because we're modifying it.
for jars in jars_by_key.values():
for i, dep in enumerate(jars):
direct_coord = M2Coordinate.create(dep)
managed_coord = artifact_set[direct_coord]
if direct_coord.rev != managed_coord.rev:
# It may be necessary to actually change the version number of the jar we want to resolve
# here, because overrides do not apply directly (they are exclusively transitive). This is
# actually a good thing, because it gives us more control over what happens.
coord = manager.resolve_version_conflict(managed_coord, direct_coord, force=dep.force)
jars[i] = dep.copy(rev=coord.rev)
elif dep.force:
# If this dependency is marked as 'force' and there is no version conflict, use the normal
# pants behavior for 'force'.
artifact_set.put(direct_coord)
dependencies = [cls._generate_jar_template(jars) for jars in jars_by_key.values()]
# As it turns out force is not transitive - it only works for dependencies pants knows about
# directly (declared in BUILD files - present in generated ivy.xml). The user-level ivy docs
# don't make this clear [1], but the source code docs do (see isForce docs) [2]. I was able to
# edit the generated ivy.xml and use the override feature [3] though and that does work
# transitively as you'd hope.
#
# [1] http://ant.apache.org/ivy/history/2.3.0/settings/conflict-managers.html
# [2] https://svn.apache.org/repos/asf/ant/ivy/core/branches/2.3.0/
# src/java/org/apache/ivy/core/module/descriptor/DependencyDescriptor.java
# [3] http://ant.apache.org/ivy/history/2.3.0/ivyfile/override.html
overrides = [cls._generate_override_template(_coord) for _coord in artifact_set]
excludes = [cls._generate_exclude_template(exclude) for exclude in excludes]
template_data = TemplateData(
org=org,
module=name,
extra_configurations=extra_configurations,
dependencies=dependencies,
excludes=excludes,
overrides=overrides)
template_relpath = os.path.join('templates', 'ivy_utils', 'ivy.xml.mustache')
cls._write_ivy_xml_file(ivyxml, template_data, template_relpath)
@classmethod
def generate_fetch_ivy(cls, jars, ivyxml, confs, resolve_hash_name):
"""Generates an ivy xml with all jars marked as intransitive using the all conflict manager."""
org = IvyUtils.INTERNAL_ORG_NAME
name = resolve_hash_name
extra_configurations = [conf for conf in confs if conf and conf != 'default']
# Use org name _and_ rev so that we can have dependencies with different versions. This will
    # allow for batched fetching if we want to do that.
jars_by_key = OrderedDict()
for jar in jars:
jars_by_key.setdefault((jar.org, jar.name, jar.rev), []).append(jar)
dependencies = [cls._generate_fetch_jar_template(_jars) for _jars in jars_by_key.values()]
template_data = TemplateData(org=org,
module=name,
extra_configurations=extra_configurations,
dependencies=dependencies)
template_relpath = os.path.join('templates', 'ivy_utils', 'ivy_fetch.xml.mustache')
cls._write_ivy_xml_file(ivyxml, template_data, template_relpath)
@classmethod
def _write_ivy_xml_file(cls, ivyxml, template_data, template_relpath):
template_text = pkgutil.get_data(__name__, template_relpath).decode('utf-8')
generator = Generator(template_text, lib=template_data)
with safe_open(ivyxml, 'w') as output:
generator.write(output)
@classmethod
def calculate_classpath(cls, targets):
"""Creates a consistent classpath and list of excludes for the passed targets.
It also modifies the JarDependency objects' excludes to contain all the jars excluded by
provides.
:param iterable targets: List of targets to collect JarDependencies and excludes from.
:returns: A pair of a list of JarDependencies, and a set of excludes to apply globally.
"""
jars = OrderedDict()
global_excludes = set()
provide_excludes = set()
targets_processed = set()
# Support the ivy force concept when we sanely can for internal dep conflicts.
# TODO(John Sirois): Consider supporting / implementing the configured ivy revision picking
# strategy generally.
def add_jar(jar):
# TODO(John Sirois): Maven allows for depending on an artifact at one rev and one of its
      # attachments (classified artifacts) at another. Ivy does not allow this; the dependency
      # can carry only 1 rev, and that rev hosts multiple artifacts. This conflict
# resolution happens at the classifier level, allowing skew in a
# multi-artifact/multi-classifier dependency. We only find out about the skew later in
# `_generate_jar_template` below which will blow up with a conflict. Move this logic closer
# together to get a more clear validate, then emit ivy.xml then resolve flow instead of the
# spread-out validations happening here.
# See: https://github.com/pantsbuild/pants/issues/2239
coordinate = (jar.org, jar.name, jar.classifier)
existing = jars.get(coordinate)
jars[coordinate] = jar if not existing else cls._resolve_conflict(existing=existing,
proposed=jar)
def collect_jars(target):
if isinstance(target, JarLibrary):
for jar in target.jar_dependencies:
add_jar(jar)
def collect_excludes(target):
target_excludes = target.payload.get_field_value('excludes')
if target_excludes:
global_excludes.update(target_excludes)
def collect_provide_excludes(target):
if not (isinstance(target, ExportableJvmLibrary) and target.provides):
return
logger.debug('Automatically excluding jar {}.{}, which is provided by {}'.format(
target.provides.org, target.provides.name, target))
provide_excludes.add(Exclude(org=target.provides.org, name=target.provides.name))
def collect_elements(target):
targets_processed.add(target)
collect_jars(target)
collect_excludes(target)
collect_provide_excludes(target)
for target in targets:
target.walk(collect_elements, predicate=lambda target: target not in targets_processed)
# If a source dep is exported (ie, has a provides clause), it should always override
# remote/binary versions of itself, ie "round trip" dependencies.
# TODO: Move back to applying provides excludes as target-level excludes when they are no
# longer global.
if provide_excludes:
additional_excludes = tuple(provide_excludes)
new_jars = OrderedDict()
for coordinate, jar in jars.items():
new_jars[coordinate] = jar.copy(excludes=jar.excludes + additional_excludes)
jars = new_jars
return list(jars.values()), global_excludes
@classmethod
def _resolve_conflict(cls, existing, proposed):
if existing.rev is None:
return proposed
if proposed.rev is None:
return existing
if proposed == existing:
if proposed.force:
return proposed
return existing
elif existing.force and proposed.force:
raise cls.IvyResolveConflictingDepsError('Cannot force {}#{};{} to both rev {} and {}'.format(
proposed.org, proposed.name, proposed.classifier or '', existing.rev, proposed.rev
))
elif existing.force:
logger.debug('Ignoring rev {} for {}#{};{} already forced to {}'.format(
proposed.rev, proposed.org, proposed.name, proposed.classifier or '', existing.rev
))
return existing
elif proposed.force:
logger.debug('Forcing {}#{};{} from {} to {}'.format(
proposed.org, proposed.name, proposed.classifier or '', existing.rev, proposed.rev
))
return proposed
else:
if Revision.lenient(proposed.rev) > Revision.lenient(existing.rev):
logger.debug('Upgrading {}#{};{} from rev {} to {}'.format(
proposed.org, proposed.name, proposed.classifier or '', existing.rev, proposed.rev,
))
return proposed
else:
return existing
@classmethod
def _generate_jar_template(cls, jars):
global_dep_attributes = set(Dependency(org=jar.org,
name=jar.name,
rev=jar.rev,
mutable=jar.mutable,
force=jar.force,
transitive=jar.transitive)
for jar in jars)
if len(global_dep_attributes) != 1:
# TODO: Need to provide information about where these came from - could be
# far-flung JarLibrary targets. The jars here were collected from targets via
# `calculate_classpath` above so executing this step there instead may make more
# sense.
conflicting_dependencies = sorted(str(g) for g in global_dep_attributes)
raise cls.IvyResolveConflictingDepsError('Found conflicting dependencies:\n\t{}'
.format('\n\t'.join(conflicting_dependencies)))
jar_attributes = global_dep_attributes.pop()
excludes = set()
for jar in jars:
excludes.update(jar.excludes)
any_have_url = False
artifacts = OrderedDict()
for jar in jars:
ext = jar.ext
url = jar.get_url()
if url:
any_have_url = True
classifier = jar.classifier
artifact = Artifact(name=jar.name,
type_=ext or 'jar',
ext=ext,
url=url,
classifier=classifier)
artifacts[(ext, url, classifier)] = artifact
template = TemplateData(
org=jar_attributes.org,
module=jar_attributes.name,
version=jar_attributes.rev,
mutable=jar_attributes.mutable,
force=jar_attributes.force,
transitive=jar_attributes.transitive,
artifacts=list(artifacts.values()),
any_have_url=any_have_url,
excludes=[cls._generate_exclude_template(exclude) for exclude in excludes])
return template
@classmethod
def _generate_fetch_jar_template(cls, jars):
global_dep_attributes = set(Dependency(org=jar.org,
name=jar.name,
rev=jar.rev,
transitive=False,
mutable=jar.mutable,
force=True)
for jar in jars)
if len(global_dep_attributes) != 1:
# If we batch fetches and assume conflict manager all, we could ignore these.
# Leaving this here for now.
conflicting_dependencies = sorted(str(g) for g in global_dep_attributes)
raise cls.IvyResolveConflictingDepsError('Found conflicting dependencies:\n\t{}'
.format('\n\t'.join(conflicting_dependencies)))
jar_attributes = global_dep_attributes.pop()
any_have_url = False
artifacts = OrderedDict()
for jar in jars:
ext = jar.ext
url = jar.get_url()
if url:
any_have_url = True
classifier = jar.classifier
artifact = Artifact(name=jar.name,
type_=ext or 'jar',
ext=ext,
url=url,
classifier=classifier)
artifacts[(ext, url, classifier)] = artifact
template = TemplateData(
org=jar_attributes.org,
module=jar_attributes.name,
version=jar_attributes.rev,
mutable=jar_attributes.mutable,
artifacts=list(artifacts.values()),
any_have_url=any_have_url,
excludes=[])
return template
| apache-2.0 | -425,431,684,784,504,900 | 40.837684 | 111 | 0.651765 | false |
patochectp/navitia | source/tyr/migrations/versions/3e56c7e0a4a4_create_billing_plan_table.py | 1 | 2152 | """Create billing_plan table
Revision ID: 3e56c7e0a4a4
Revises: 3aaddd5707bd
Create Date: 2015-11-05 13:30:32.460413
"""
revision = '3e56c7e0a4a4'
down_revision = '3aaddd5707bd'
from alembic import op, context
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.types import Enum
def upgrade():
op.create_table(
'billing_plan',
sa.Column('id', sa.Integer(), primary_key=True, nullable=False),
sa.Column('name', sa.Text(), nullable=False),
sa.Column('max_request_count', sa.Integer(), nullable=True),
sa.Column('max_object_count', sa.Integer(), nullable=True),
sa.Column('default', sa.Boolean(), nullable=False),
sa.Column('end_point_id', sa.Integer(), nullable=False, server_default='1'),
)
op.create_foreign_key("fk_billing_plan_end_point", "billing_plan", "end_point", ["end_point_id"], ["id"])
op.execute(
"INSERT INTO end_point (name, \"default\") SELECT 'sncf',false WHERE NOT EXISTS (SELECT id FROM end_point WHERE name = 'sncf');"
)
op.execute(
"INSERT INTO billing_plan (name, max_request_count, max_object_count, \"default\", end_point_id) VALUES ('nav_dev',3000,NULL,false,1),('nav_ent',NULL,NULL,false,1),('nav_ctp',NULL,NULL,true,1),('sncf_dev',3000,60000,true,(SELECT id FROM end_point WHERE name='sncf')),('sncf_ent',NULL,NULL,false,(SELECT id FROM end_point WHERE name='sncf'));"
)
op.add_column(u'user', sa.Column('billing_plan_id', sa.Integer(), nullable=True))
op.create_foreign_key("fk_user_billing_plan", "user", "billing_plan", ["billing_plan_id"], ["id"])
op.execute(
"UPDATE public.user SET billing_plan_id = (SELECT b.id FROM billing_plan b WHERE b.default AND end_point_id=1) WHERE end_point_id=1;"
)
op.execute(
"UPDATE public.user u SET billing_plan_id = (SELECT b.id FROM billing_plan b INNER JOIN end_point ep ON ep.id = b.end_point_id WHERE b.default AND ep.name='sncf') FROM end_point ep WHERE ep.id = u.end_point_id AND ep.name='sncf';"
)
def downgrade():
op.drop_column('user', 'billing_plan_id')
op.drop_table('billing_plan')
| agpl-3.0 | 2,663,967,288,322,674,700 | 40.384615 | 350 | 0.664963 | false |
vincegogh/ByteOS | util/syscall_gen.py | 1 | 1793 | import sys as _sys
if len(_sys.argv) != 2:
print("Incorrect number of arguments")
exit(-1)
elif ["c", "h", "asm"].index(_sys.argv[1]) == -1:
print("Invalid filetype %s" % _sys.argv[1])
exit(-1)
print("\t\033[32;1mGenerating\033[0m include/gen/syscall_gen.%s" % _sys.argv[1])
syscall_list = []
def syscall(name, args="void"):
syscall_list.append({ "name": name, "args": args })
syscall("write", "char c")
syscall("fork", "uint64_t flags, struct callee_regs *regs, virtaddr_t return_addr")
syscall("exit", "int code")
syscall("sched_yield")
decls = [
"#include \"mm_types.h\"",
"#include \"proc.h\"",
"",
]
defs = [
"#define ENOSYS 0xFFFFFFFFFFFFFFFFLL",
"#define NUM_SYSCALLS %d" % len(syscall_list)
]
table = [
"syscall_t syscall_table[NUM_SYSCALLS] = {"
]
asm_defs = [
"%define ENOSYS 0xFFFFFFFFFFFFFFFF",
"%%define NUM_SYSCALLS %d" % len(syscall_list)
]
for i in range(0, len(syscall_list)):
sys = syscall_list[i]
defs.append("#define SYSCALL_%s %d" % (sys["name"].upper(), i))
table.append("\t[SYSCALL_%s] = (syscall_t)syscall_%s," % (sys["name"].upper(), sys["name"]))
decls.append("int64_t syscall_%s(%s);" % (sys["name"], sys["args"]))
asm_defs.append("%%define SYSCALL_%s %d" % (sys["name"].upper(), i))
table.append("};")
defs = "\n".join(defs)
table = "\n".join(table)
decls = "\n".join(decls)
asm_defs = "\n".join(asm_defs)
h_out = """#pragma once
%s
%s
extern syscall_t syscall_table[NUM_SYSCALLS];
""" % (defs, decls)
c_out = """%s
""" % table
asm_out = """%s
""" % asm_defs
out_data = { "c": c_out, "h": h_out, "asm": asm_out }
prefix = "include/gen/syscall_gen"
path = prefix + "." + _sys.argv[1]
target_file = open(path, "w")
target_file.write(out_data[_sys.argv[1]])
target_file.close()
| mit | 3,375,265,367,951,926,000 | 22.906667 | 96 | 0.596765 | false |
Ezhil-Language-Foundation/open-tamil | tests/transliterate_tests.py | 1 | 9299 | # -*- coding: utf-8 -*-
# (C) 2013-2018,2020 Muthiah Annamalai
#
# This file is part of 'open-tamil' package tests
#
# setup the paths
import unittest
from opentamiltests import *
from tamil.utf8 import get_letters
from transliterate import azhagi, jaffna, combinational, UOM, ISO, itrans, algorithm
class ReverseTransliterationTests(unittest.TestCase):
def test_tamil2en_1(self):
tamil_str = u"வணக்கம்"
azhagi_table = azhagi.Transliteration.table
eng_str = algorithm.Tamil2English.transliterate(azhagi_table, tamil_str)
self.assertEqual(eng_str, u"vaNacKam")
tamil_str = u"அன்னம்"
azhagi_table = azhagi.Transliteration.table
eng_str = algorithm.Tamil2English.transliterate(azhagi_table, tamil_str)
self.assertEqual(eng_str, u"annHam")
tamil_str = u"இறையோன்"
exp_eng_str = "iRaiyOn"
eng_str = algorithm.Tamil2English.transliterate(azhagi_table, tamil_str)
self.assertEqual(eng_str, exp_eng_str)
class ISOTest(unittest.TestCase):
def test_tables(self):
self.assertEqual(len(ISO.ReverseTransliteration.table), len(ISO.Transliteration.table))
def test_ISO(self):
ISO_table = ISO.ReverseTransliteration.table
expected = 'cāmi. citamparaṉār nūṟ kaḷañciyam'
tamil_str = "சாமி. சிதம்பரனார் நூற் களஞ்சியம்"
eng_str = algorithm.Direct.transliterate(ISO_table, tamil_str)
self.assertEqual(expected, eng_str)
def test_issue_237(self):
ISO_table = ISO.ReverseTransliteration.table
expected = 'pāvēntam'
tamil_str = "பாவேந்தம்"
eng_str = algorithm.Direct.transliterate(ISO_table, tamil_str)
self.assertEqual(expected, eng_str)
def test_issue_239(self):
ISO_table = ISO.ReverseTransliteration.table
expected = 'tiyākarājaṉ'
tamil_str = "தியாகராஜன்"
eng_str = algorithm.Direct.transliterate(ISO_table, tamil_str)
self.assertEqual(expected, eng_str)
class GreedyTests(unittest.TestCase):
@unittest.skip("incorrect")
def test_ISO(self):
ISO_table = algorithm.reverse_transliteration_table(ISO.Transliteration.table)
expected = 'cāmi. citamparaṉār nūṟ kaḷañciyam'
tamil_str = "சாமி. சிதம்பரனார் நூற் களஞ்சியம்"
eng_words = []
for tamil_word in tamil_str.split(' '):
_, eng_str = algorithm.Greedy.transliterate(ISO_table, tamil_word, full_search=True)
print(eng_str.options)
if len(eng_str.options) < 1: continue
eng_str.options = list(eng_str.options)
eng_words.append(eng_str.options[0])
eng_fullstr = ' '.join(eng_words)
self.assertEqual(expected, eng_fullstr)
def test_UOM(self):
# University of Madras Lexicon style transliteration standard
tamil_word = u"வணக்கம்"
for eng_string in [u"vṇikkim"]:
top_match, greedy = algorithm.Greedy.transliterate(
UOM.Transliteration.table, eng_string
)
# import pprint
# pprint.pprint(greedy.options)
self.assertTrue(tamil_word in greedy.options)
def test_vanakkam(self):
tamil_word = u"வணக்கம்"
for eng_string in ["vaNakkam", "vanakkam"]:
top_match, greedy = algorithm.Greedy.transliterate(
jaffna.Transliteration.table, eng_string
)
self.assertTrue(tamil_word in greedy.options)
class Yazhpanam(unittest.TestCase):
def test_vandemataram(self):
tamil_words = u"வந்தே மாதரம்"
eng_string = u"vanthE mAtharam"
tamil_tx = algorithm.Iterative.transliterate(
jaffna.Transliteration.table, eng_string
)
if LINUX:
print(
"]" + tamil_tx + "[",
len(tamil_words),
len(tamil_tx),
type(tamil_tx),
type(tamil_words),
)
if LINUX:
print("]" + tamil_words + "[")
self.assertTrue(tamil_words == tamil_tx)
def test_combinational(self):
tamil_words = u"வந்தே மாதரம்"
eng_string = u"van-thee maatharam"
tamil_tx = algorithm.Iterative.transliterate(
combinational.Transliteration.table, eng_string
)
if LINUX:
print(
"]" + tamil_tx + "[",
len(tamil_words),
len(tamil_tx),
type(tamil_tx),
type(tamil_words),
)
if LINUX:
print("]" + tamil_words + "[", len(tamil_tx), len(tamil_words))
self.assertTrue(tamil_words.find(tamil_tx) >= 0)
def test_azhagi_spec(self):
# test for
tamil_tx = {}
correct_tx = {
u"ke": u"கெ",
u"khae": u"கே",
u"cai": u"கை",
u"koh": u"கொ",
u"kho": u"கோ",
}
for eng_string in [u"ke", u"khae", u"cai", u"koh", u"kho"]:
tamil_tx[eng_string] = algorithm.Iterative.transliterate(
azhagi.Transliteration.table, eng_string
)
if LINUX:
print(tamil_tx[eng_string], " => ", eng_string)
            self.assertEqual(tamil_tx[eng_string], correct_tx[eng_string])
def test_azhagi(self):
## challenge use a probabilistic model on Tamil language to score the next letter,
## instead of using the longest/earliest match
## http://www.mazhalaigal.com/tamil/learn/keys.php
codes = {
"neenga": u"நீங்க",
"andam": u"அண்டம்",
"nandri": u"நன்றி",
"katru": u"கற்று",
"viswam": u"விஸ்வம்",
"namaskaaram": u"நமஸ்காரம்",
"sreedhar": u"ஸ்ரீதர்",
"manju": u"மஞ்சு",
"gnaayam": u"ஞாயம்",
"poi": u"பொய்",
"kaai": u"காய்",
"aGnGnaanam": u"அஞ்ஞானம்",
"mei": u"மெய்",
"nanghu": u"நன்கு",
"palancaL": u"பலன்கள்",
"payanKaL": "பயன்கள்",
"avanThaan": u"அவன்தான்",
"leoni": u"லியோனி",
"paeTrik": u"பேட்ரிக்",
"peTroal": u"பெட்ரோல்",
"coapanHaegan": u"கோபன்ஹேகன்",
"bandham": u"பந்தம்",
"saantham": u"சாந்தம்",
"kaeLvi": u"கேள்வி",
"koavil": u"கோவில்",
"nhagar": u"நகர்",
"maanhagaram": u"மாநகரம்",
"senhnheer": u"செந்நீர்",
}
tamil_words = u""
for eng_string, tamil_words in codes.items():
tamil_tx = algorithm.Iterative.transliterate(
azhagi.Transliteration.table, eng_string
)
if LINUX:
print(
"]" + tamil_tx + "[",
len(tamil_words),
len(tamil_tx),
"]" + tamil_words + "[",
)
# self.assertTrue( tamil_words == tamil_tx ) #we are almost there but not yet
def test_devotional(self):
for k, v in {
u"thiruvaachakam": u"திருவாசகம்",
u"mANikka vAsagar": u"மாணிக்க வாசகர்",
}.items():
tamil_tx = algorithm.Iterative.transliterate(
azhagi.Transliteration.table, k
)
if tamil_tx != v:
raise Exception(
u"Transliteration changed\n Expected %s, but got %s for string input %\n"
% (v, tamil_tx, k)
)
return
class DubashTest(unittest.TestCase):
def test_multi_lang(self):
test_str = u"அம்மா ammA"
expected_str = u"அம்மா அம்மா"
tamil_tx = algorithm.BlindIterative.transliterate(
azhagi.Transliteration.table, test_str
)
self.assertEqual(tamil_tx, expected_str)
return
def test_multi_lang2(self):
test_str = u"அம்மா ammA"
expected_str = u"அம்மா அம்மா"
tamil_tx = algorithm.Iterative.transliterate(
azhagi.Transliteration.table, test_str
)
self.assertEqual(tamil_tx, expected_str)
return
class ITRANSTest(unittest.TestCase):
def test_vanakkam_itrans(self):
tamil_word = "வணக்கம்"
for eng_string in ["vaNakkam"]:
tamil_tx = algorithm.Iterative.transliterate(
itrans.Transliteration.table, eng_string
)
self.assertEqual(tamil_word, tamil_tx)
if __name__ == "__main__":
unittest.main()
| mit | 1,226,422,652,849,312,500 | 33.493927 | 96 | 0.540728 | false |
Lantero/vcenter-driver | setup.py | 1 | 1071 | from setuptools import setup, find_packages
setup(
version='4.2.0',
name='vcdriver',
description='A vcenter driver based on pyvmomi, fabric and pywinrm',
url='https://github.com/Lantero/vcdriver',
author='Carlos Ruiz Lantero',
author_email='[email protected]',
license='MIT',
install_requires=['colorama', 'Fabric3', 'pyvmomi', 'pywinrm', 'six'],
packages=find_packages(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development',
],
)
| mit | -2,964,818,212,189,995,000 | 35.931034 | 74 | 0.615313 | false |
edeno/Jadhav-2016-Data-Analysis | tests/spectral/test_transforms.py | 1 | 11984 | import numpy as np
from pytest import mark
from scipy.signal import correlate
from nitime.algorithms.spectral import dpss_windows as nitime_dpss_windows
from src.spectral.transforms import (Multitaper, _add_axes,
_auto_correlation, _fix_taper_sign,
_get_low_bias_tapers,
_get_taper_eigenvalues,
_multitaper_fft, _sliding_window,
dpss_windows)
def test__add_axes():
# Add dimension if no trials
n_time_samples, n_signals = (2, 3)
test_data = np.ones((n_time_samples, n_signals))
expected_shape = (n_time_samples, 1, n_signals)
assert np.allclose(_add_axes(test_data).shape, expected_shape)
# Add two dimensions if no trials and signals
test_data = np.ones((n_time_samples,))
expected_shape = (n_time_samples, 1, 1)
assert np.allclose(_add_axes(test_data).shape, expected_shape)
# if there is a trial dimension, do nothing
n_trials = 10
test_data = np.ones((n_time_samples, n_trials, n_signals))
expected_shape = (n_time_samples, n_trials, n_signals)
assert np.allclose(_add_axes(test_data).shape, expected_shape)
@mark.parametrize(
'test_array, window_size, step_size, axis, expected_array',
[(np.arange(1, 6), 3, 1, -1, np.array([[1, 2, 3],
[2, 3, 4],
[3, 4, 5]])),
(np.arange(1, 6), 3, 2, -1, np.array([[1, 2, 3],
[3, 4, 5]])),
(np.arange(0, 6).reshape((2, 3)), 2, 1, 0, np.array([[[0, 3],
[1, 4],
[2, 5]]]))
])
def test__sliding_window(
test_array, window_size, step_size, axis, expected_array):
assert np.allclose(
_sliding_window(
test_array, window_size=window_size, step_size=step_size,
axis=axis),
expected_array)
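# Note: the cases below are consistent with the common multitaper convention
# n_tapers = floor(2 * time_halfbandwidth_product - 1), e.g. floor(2 * 1.75 - 1) = 2.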
@mark.parametrize(
'time_halfbandwidth_product, expected_n_tapers',
[(3, 5), (1, 1), (1.75, 2)])
def test_n_tapers(time_halfbandwidth_product, expected_n_tapers):
n_time_samples, n_trials, n_signals = 100, 10, 2
time_series = np.zeros((n_time_samples, n_trials, n_signals))
m = Multitaper(
time_series=time_series,
time_halfbandwidth_product=time_halfbandwidth_product)
assert m.n_tapers == expected_n_tapers
@mark.parametrize(
'sampling_frequency, time_window_duration, expected_duration',
[(1000, None, 0.1), (2000, None, 0.05), (1000, 0.1, 0.1)])
def test_time_window_duration(sampling_frequency, time_window_duration,
expected_duration):
n_time_samples, n_trials, n_signals = 100, 10, 2
time_series = np.zeros((n_time_samples, n_trials, n_signals))
m = Multitaper(
time_series=time_series,
sampling_frequency=sampling_frequency,
time_window_duration=time_window_duration)
assert m.time_window_duration == expected_duration
@mark.parametrize(
'sampling_frequency, time_window_step, expected_step',
[(1000, None, 0.1), (2000, None, 0.05), (1000, 0.1, 0.1)])
def test_time_window_step(
sampling_frequency, time_window_step, expected_step):
n_time_samples, n_trials, n_signals = 100, 10, 2
time_series = np.zeros((n_time_samples, n_trials, n_signals))
m = Multitaper(
time_series=time_series,
sampling_frequency=sampling_frequency,
time_window_step=time_window_step)
assert m.time_window_step == expected_step
@mark.parametrize(
('sampling_frequency, time_window_duration,'
'expected_n_time_samples_per_window'),
[(1000, None, 100), (1000, 0.1, 100), (2000, 0.025, 50)])
def test_n_time_samples(
sampling_frequency, time_window_duration,
expected_n_time_samples_per_window):
n_time_samples, n_trials, n_signals = 100, 10, 2
time_series = np.zeros((n_time_samples, n_trials, n_signals))
m = Multitaper(
time_series=time_series,
sampling_frequency=sampling_frequency,
time_window_duration=time_window_duration)
assert (m.n_time_samples_per_window ==
expected_n_time_samples_per_window)
@mark.parametrize(
('sampling_frequency, time_window_duration, n_fft_samples,'
'expected_n_fft_samples'),
[(1000, None, 5, 5), (1000, 0.1, None, 100)])
def test_n_fft_samples(
sampling_frequency, time_window_duration, n_fft_samples,
expected_n_fft_samples):
n_time_samples, n_trials, n_signals = 100, 10, 2
time_series = np.zeros((n_time_samples, n_trials, n_signals))
m = Multitaper(
time_series=time_series,
sampling_frequency=sampling_frequency,
time_window_duration=time_window_duration,
n_fft_samples=n_fft_samples)
assert m.n_fft_samples == expected_n_fft_samples
def test_frequencies():
n_time_samples, n_trials, n_signals = 100, 10, 2
time_series = np.zeros((n_time_samples, n_trials, n_signals))
n_fft_samples = 4
sampling_frequency = 1000
m = Multitaper(
time_series=time_series,
sampling_frequency=sampling_frequency,
n_fft_samples=n_fft_samples)
expected_frequencies = np.array([0, 250, -500, -250])
assert np.allclose(m.frequencies, expected_frequencies)
def test_n_signals():
n_time_samples, n_trials, n_signals = 100, 10, 2
time_series = np.zeros((n_time_samples, n_trials, n_signals))
m = Multitaper(time_series=time_series)
assert m.n_signals == n_signals
def test_n_trials():
n_time_samples, n_trials, n_signals = 100, 10, 2
time_series = np.zeros((n_time_samples, n_trials, n_signals))
m = Multitaper(time_series=time_series)
assert m.n_trials == n_trials
time_series = np.zeros((n_time_samples, n_signals))
m = Multitaper(time_series=time_series)
assert m.n_trials == 1
@mark.parametrize(
('time_halfbandwidth_product, time_window_duration, '
'expected_frequency_resolution'),
[(3, .10, 30), (1, 0.02, 50), (5, 1, 5)])
def test_frequency_resolution(
time_halfbandwidth_product, time_window_duration,
expected_frequency_resolution):
n_time_samples, n_trials, n_signals = 100, 10, 2
time_series = np.zeros((n_time_samples, n_trials, n_signals))
m = Multitaper(
time_series=time_series,
time_halfbandwidth_product=time_halfbandwidth_product,
time_window_duration=time_window_duration)
assert m.frequency_resolution == expected_frequency_resolution
@mark.parametrize(
('time_window_step, n_time_samples_per_step, '
'expected_n_samples_per_time_step'),
[(None, None, 100), (0.001, None, 1), (0.002, None, 2),
(None, 10, 10)])
def test_n_samples_per_time_step(
time_window_step, n_time_samples_per_step,
expected_n_samples_per_time_step):
n_time_samples, n_trials, n_signals = 100, 10, 2
time_series = np.zeros((n_time_samples, n_trials, n_signals))
m = Multitaper(
time_window_duration=0.10,
n_time_samples_per_step=n_time_samples_per_step,
time_series=time_series,
time_window_step=time_window_step)
assert m.n_time_samples_per_step == expected_n_samples_per_time_step
@mark.parametrize('time_window_duration', [0.1, 0.2, 2.4, 0.16])
def test_time(time_window_duration):
sampling_frequency = 1500
start_time, end_time = -2.4, 2.4
n_trials, n_signals = 10, 2
n_time_samples = int(
(end_time - start_time) * sampling_frequency) + 1
time_series = np.zeros((n_time_samples, n_trials, n_signals))
expected_time = np.arange(start_time, end_time, time_window_duration)
if not np.allclose(expected_time[-1] + time_window_duration, end_time):
expected_time = expected_time[:-1]
m = Multitaper(
sampling_frequency=sampling_frequency,
time_series=time_series,
start_time=start_time,
time_window_duration=time_window_duration)
assert np.allclose(m.time, expected_time)
def test_tapers():
n_time_samples, n_trials, n_signals = 100, 10, 2
time_series = np.zeros((n_time_samples, n_trials, n_signals))
m = Multitaper(time_series, is_low_bias=False)
assert np.allclose(m.tapers.shape, (n_time_samples, m.n_tapers))
m = Multitaper(time_series, tapers=np.zeros((10, 3)))
assert np.allclose(m.tapers.shape, (10, 3))
@mark.parametrize(
'eigenvalues, expected_n_tapers',
[(np.array([0.95, 0.95, 0.95]), 3),
(np.array([0.95, 0.8, 0.95]), 2),
(np.array([0.8, 0.8, 0.8]), 1)])
def test__get_low_bias_tapers(eigenvalues, expected_n_tapers):
tapers = np.zeros((3, 100))
filtered_tapers, filtered_eigenvalues = _get_low_bias_tapers(
tapers, eigenvalues)
assert (filtered_tapers.shape[0] == filtered_eigenvalues.shape[0] ==
expected_n_tapers)
def test__fix_taper_sign():
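    # sign convention being tested: symmetric (even-indexed) tapers should
    # integrate to a positive value, while antisymmetric tapers should begin
    # with a positive lobe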
n_time_samples, n_tapers = 100, 4
tapers = -3 * np.ones((n_tapers, n_time_samples))
tapers[1, :3] = -1 * np.arange(0, 3) # Begin with negative lobe
tapers[2, :] = 2
tapers[3, :3] = np.arange(0, 3) # Begin with positive lobe
fixed_tapers = _fix_taper_sign(tapers, n_time_samples)
assert np.all(fixed_tapers[::2, :].sum(axis=1) >= 0)
assert np.all(fixed_tapers[2, :] == 2)
assert np.all(fixed_tapers[1, :].sum() >= 0)
assert ~np.all(fixed_tapers[3, :].sum() >= 0)
@mark.parametrize(
'n_time_samples, time_halfbandwidth_product, n_tapers',
[(1000, 3, 5), (31, 6, 4), (31, 7, 4)])
def test_dpss_windows(
n_time_samples, time_halfbandwidth_product, n_tapers):
tapers, eigenvalues = dpss_windows(
n_time_samples, time_halfbandwidth_product, n_tapers,
is_low_bias=False)
nitime_tapers, nitime_eigenvalues = nitime_dpss_windows(
n_time_samples, time_halfbandwidth_product, n_tapers)
assert np.allclose(np.sum(tapers ** 2, axis=1), 1.0)
assert np.allclose(tapers, nitime_tapers)
assert np.allclose(eigenvalues, nitime_eigenvalues)
@mark.parametrize(
'n_time_samples, time_halfbandwidth_product, n_tapers',
[(31, 6, 4), (31, 7, 4), (31, 8, 4), (31, 8, 4.2)])
def test__get_taper_eigenvalues(
n_time_samples, time_halfbandwidth_product, n_tapers):
time_index = np.arange(n_time_samples, dtype='d')
half_bandwidth = float(time_halfbandwidth_product) / n_time_samples
nitime_tapers, _ = nitime_dpss_windows(
n_time_samples, time_halfbandwidth_product, n_tapers)
eigenvalues = _get_taper_eigenvalues(
nitime_tapers, half_bandwidth, time_index)
assert np.allclose(eigenvalues, 1.0)
def test__auto_correlation():
n_time_samples, n_tapers = 100, 3
test_data = np.random.rand(n_tapers, n_time_samples)
rxx = _auto_correlation(test_data)[:, :n_time_samples]
for taper_ind in np.arange(n_tapers):
expected_correlation = correlate(
test_data[taper_ind, :], test_data[taper_ind, :])[
n_time_samples - 1:]
assert np.allclose(rxx[taper_ind], expected_correlation)
def test__multitaper_fft():
n_windows, n_trials, n_time_samples, n_tapers, n_fft_samples = (
2, 10, 100, 3, 100)
sampling_frequency = 1000
time_series = np.ones((n_windows, n_trials, n_time_samples))
tapers = np.ones((n_time_samples, n_tapers))
fourier_coefficients = _multitaper_fft(
tapers, time_series, n_fft_samples, sampling_frequency)
assert np.allclose(
fourier_coefficients.shape,
(n_windows, n_trials, n_fft_samples, n_tapers))
def test_fft():
n_time_samples, n_trials, n_signals, n_windows = 100, 10, 2, 1
time_series = np.zeros((n_time_samples, n_trials, n_signals))
m = Multitaper(time_series=time_series)
assert np.allclose(
m.fft().shape,
(n_windows, n_trials, m.tapers.shape[1], m.n_fft_samples,
n_signals))
| gpl-3.0 | -1,963,308,179,923,673,600 | 37.658065 | 75 | 0.623248 | false |
ARodri/Lars | lars/mapper/io/http.py | 1 | 2559 | import requests
import functools
import json
from lars.mapper import Mapper
from lars.util import PeriodicTask
class HTTPRequest(Mapper):
valid_actions = ['GET','POST']
def loadConfigJSON(self,config):
self.base_url = config['base_url']
self.action = config['action']
self.output_key = config['output_key']
self.response_code_key = config['response_code_key']
self.timeout = config.get("timout",30)
if self.action not in self.valid_actions:
raise AttributeError, 'Invalid action: %s' % self.action
self.session = requests.Session()
self.http_func = None
if self.action == 'GET':
self.http_func = self.session.get
if self.action == 'POST':
self.http_func = self.session.post
ping = config.get("ping",30)
if ping > 0:
t = PeriodicTask(ping, self.arod_request,jitter=config.get("jitter",.25))
t.run()
self.arod_request()
self.provides = [self.output_key, self.response_code_key]
def arod_request(self):
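        # lightweight OPTIONS request used as a keep-alive heartbeat so the
        # pooled connection to base_url stays warm between real requests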
self.session.options(self.base_url)
self.logger.debug("connection heartbeat for %s" % self.base_url)
def setContentToJSON(self):
self.session.headers.update({'Content-Type':'application/json'})
def getResponse(self, data=None, params=None):
try:
return self.http_func(self.base_url, data=data, params=params, timeout=self.timeout)
except requests.exceptions.Timeout, e:
self.logger.error("timeout exceeded on http request")
raise e
def makeRequestData(self,record):
        raise NotImplementedError, 'makeRequestData method on HTTPRequest has not been implemented'
def process(self, record):
raise NotImplementedError, 'process method on HTTPRequest object has not been implemented'
class JSONRequest(HTTPRequest):
def loadConfigJSON(self,config):
#super(JSONRequest, self).loadConfigJSON(config)
HTTPRequest.loadConfigJSON(self, config)
self.setContentToJSON()
def process(self,record):
j = self.makeRequestData(record)
resp = self.getResponse(data=json.dumps(j))
#resp_code = resp.code
resp_code = resp.status_code
if resp_code != 200:
self.logger.error("http_code: %s" % resp_code)
self.logger.error(resp.text)
#self.logger.error(resp.body)
if resp.status_code >= 400:
raise requests.exceptions.HTTPError("HTTP_CODE: %s; %s" % (resp_code, resp.text))
#raise requests.exceptions.HTTPError("HTTP_CODE: %s; %s" % (resp_code, resp.body))
record[self.response_code_key] = resp_code
record[self.output_key] = resp.json()
#record[self.output_key] = json.loads(resp.body)
return record
    def makeRequestData(self, record):
return record
| gpl-2.0 | 2,684,987,604,786,061,000 | 29.831325 | 92 | 0.720203 | false |
eseom/glide | glide/process.py | 1 | 5210 | import os
import multiprocessing
import asyncore
import datetime
class Status(object):
"""process status enum"""
REDY, RUNN, RSTT, STNG, KLNG, STPD, EXTD = \
'READY', 'RUNNING', 'RESTARTING', \
'STOPPING', 'KILLING', 'STOPPED', 'EXITED'
class Process(asyncore.file_dispatcher):
"""main process object"""
class Message(object):
"""container class of the emitted messages from the target process"""
def __init__(self, process, message):
self.process = process
self.message = message
def __str__(self):
return '%s: %s' % (self.process.name, self.message)
def __init__(self,
name,
path,
max_nl,
bm,
try_restart=-1,
kill_duration_time=20,
):
self.status = Status.REDY # initial status READY
self.name = name
self.path = path
self.max_nl = max_nl # max name length
self.bm = bm # blast module
self.try_restart = try_restart
self.kill_duration_time = kill_duration_time
self.bi = 0 # blast index
self.restarted = 0
self.rpi = 0
self.wpi = 0
self.start_time = None
def start(self):
if self.status not in (Status.REDY, Status.STPD, Status.EXTD):
return False, 'already operating'
self.rpi, self.wpi = os.pipe()
self.process = multiprocessing.Process(
target=self.__execute,
args=(self.path, self.rpi, self.wpi)
)
self.process.start()
self.pid = self.process.pid
# register the pipe's reader descriptor to asyncore
asyncore.file_dispatcher.__init__(self, self.rpi)
self.status = Status.RUNN
self.start_time = datetime.datetime.now()
self.elapsed_rule_time = None
return True, ''
def __execute(self, path, rpi, wpi):
pid = os.getpid()
# set the child process as a process group master itself
os.setpgid(pid, pid)
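        # route the child's stdout (fd 1) and stderr (fd 2) into the pipe's
        # writer so the parent can stream the process output line by line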
os.dup2(wpi, 1)
os.dup2(wpi, 2)
os.close(wpi)
os.close(rpi)
os.execv(path[0], path)
def handle_read(self):
data = []
try:
while True: # read data from the pipe's reader
d = self.recv(1)
if d == '\n':
break
data.append(d)
# blast to the registered blast module
self.bm(Process.Message(self, ''.join(data)), self.bi)
self.bi += 1
except OSError: # tried to read after the descriptor closed
pass
def writable(self):
"""trick: add timeout callback implementation"""
if self.elapsed_rule_time:
self.elapsed_time = datetime.datetime.now() - self.elapsed_rule_time
if self.elapsed_time > \
datetime.timedelta(seconds=self.kill_duration_time):
os.kill(self.pid, 9)
return False
def terminate(self):
try:
self.elapsed_rule_time = datetime.datetime.now()
self.process.terminate()
except OSError: # no such process id
pass
def stop(self):
if self.status != Status.RUNN:
return False, 'not running'
self.status = Status.STNG
self.terminate()
return True, ''
def restart(self):
if self.status != Status.RUNN:
return False, 'not running'
self.status = Status.RSTT
self.terminate()
return True, ''
def hangup(self):
if self.status != Status.RUNN:
return False, 'not running'
        os.kill(self.pid, 1)  # SIGHUP
return True, ''
def alarm(self):
if self.status != Status.RUNN:
return False, 'not running'
        os.kill(self.pid, 14)  # SIGALRM
return True, ''
def cleanup(self):
for descriptor in [self.rpi, self.wpi]:
try:
os.close(descriptor)
except:
pass
asyncore.file_dispatcher.close(self)
if ((self.try_restart == -1 or self.try_restart > self.restarted) and
self.status == Status.EXTD) or self.status == Status.RSTT:
self.restarted += 1
self.status = Status.REDY
self.start()
return self
else:
self.status = Status.STPD
return None
def handle_error(self):
nil, t, v, tbinfo = asyncore.compact_traceback()
print '---', nil, t, v, tbinfo
def __str__(self):
if self.status not in (Status.STPD, Status.REDY, Status.EXTD):
tmpl = '%-' + str(self.max_nl) + \
's %10s pid %5s, uptime %s sec'
return tmpl % (self.name,
self.status,
self.pid,
datetime.datetime.now() - self.start_time)
else:
tmpl = '%-' + str(self.max_nl) + 's %10s'
return tmpl % (self.name,
self.status,)
| mit | -5,170,071,940,740,597,000 | 29.828402 | 80 | 0.51881 | false |
rjschwei/azure-sdk-for-python | azure-mgmt-logic/azure/mgmt/logic/models/x12_validation_override.py | 1 | 3184 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class X12ValidationOverride(Model):
"""X12ValidationOverride.
:param message_id: The message id on which the validation settings has to
be applied.
:type message_id: str
:param validate_edi_types: The value indicating whether to validate EDI
types.
:type validate_edi_types: bool
:param validate_xsd_types: The value indicating whether to validate XSD
types.
:type validate_xsd_types: bool
:param allow_leading_and_trailing_spaces_and_zeroes: The value indicating
whether to allow leading and trailing spaces and zeroes.
:type allow_leading_and_trailing_spaces_and_zeroes: bool
:param validate_character_set: The value indicating whether to validate
character Set.
:type validate_character_set: bool
:param trim_leading_and_trailing_spaces_and_zeroes: The value indicating
whether to trim leading and trailing spaces and zeroes.
:type trim_leading_and_trailing_spaces_and_zeroes: bool
:param trailing_separator_policy: The trailing separator policy. Possible
values include: 'NotSpecified', 'NotAllowed', 'Optional', 'Mandatory'
:type trailing_separator_policy: str or :class:`TrailingSeparatorPolicy
<azure.mgmt.logic.models.TrailingSeparatorPolicy>`
"""
_attribute_map = {
'message_id': {'key': 'messageId', 'type': 'str'},
'validate_edi_types': {'key': 'validateEDITypes', 'type': 'bool'},
'validate_xsd_types': {'key': 'validateXSDTypes', 'type': 'bool'},
'allow_leading_and_trailing_spaces_and_zeroes': {'key': 'allowLeadingAndTrailingSpacesAndZeroes', 'type': 'bool'},
'validate_character_set': {'key': 'validateCharacterSet', 'type': 'bool'},
'trim_leading_and_trailing_spaces_and_zeroes': {'key': 'trimLeadingAndTrailingSpacesAndZeroes', 'type': 'bool'},
'trailing_separator_policy': {'key': 'trailingSeparatorPolicy', 'type': 'TrailingSeparatorPolicy'},
}
def __init__(self, message_id=None, validate_edi_types=None, validate_xsd_types=None, allow_leading_and_trailing_spaces_and_zeroes=None, validate_character_set=None, trim_leading_and_trailing_spaces_and_zeroes=None, trailing_separator_policy=None):
self.message_id = message_id
self.validate_edi_types = validate_edi_types
self.validate_xsd_types = validate_xsd_types
self.allow_leading_and_trailing_spaces_and_zeroes = allow_leading_and_trailing_spaces_and_zeroes
self.validate_character_set = validate_character_set
self.trim_leading_and_trailing_spaces_and_zeroes = trim_leading_and_trailing_spaces_and_zeroes
self.trailing_separator_policy = trailing_separator_policy
| mit | 9,220,224,516,508,743,000 | 52.966102 | 252 | 0.685616 | false |
frombeijingwithlove/dlcv_for_beginners | chap8/mxnet/train_lenet5.py | 1 | 1811 | import mxnet as mx
import logging
# data & preprocessing
data = mx.symbol.Variable('data')
# 1st conv
conv1 = mx.symbol.Convolution(data=data, kernel=(5, 5), num_filter=20)
pool1 = mx.symbol.Pooling(data=conv1, pool_type="max",
kernel=(2, 2), stride=(2, 2))
# 2nd conv
conv2 = mx.symbol.Convolution(data=pool1, kernel=(5, 5), num_filter=50)
pool2 = mx.symbol.Pooling(data=conv2, pool_type="max",
kernel=(2, 2), stride=(2, 2))
# 1st fc & relu
flatten = mx.symbol.Flatten(data=pool2)
fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=500)
relu1 = mx.symbol.Activation(data=fc1, act_type="relu")
# 2nd fc
fc2 = mx.symbol.FullyConnected(data=relu1, num_hidden=10)
# loss
lenet5 = mx.symbol.SoftmaxOutput(data=fc2, name='softmax')
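# training iterator: subtract the red-channel mean (128), scale pixels by
# 1/256, and augment with random 26-28 px crops and rotations up to 15 degrees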
train_dataiter = mx.io.ImageRecordIter(
path_imgrec="../data/train.rec",
data_shape=(1, 28, 28),
batch_size=50,
mean_r=128,
scale=0.00390625,
rand_crop=True,
min_crop_size=26,
max_crop_size=28,
max_rotate_angle=15,
fill_value=0
)
val_dataiter = mx.io.ImageRecordIter(
path_imgrec="../data/val.rec",
data_shape=(1, 28, 28),
batch_size=100,
mean_r=128,
scale=0.00390625,
)
logging.getLogger().setLevel(logging.DEBUG)
fh = logging.FileHandler('train_mnist_lenet.log')
logging.getLogger().addHandler(fh)
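# decay the learning rate by a factor of 0.95 every 1000 batch updates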
lr_scheduler = mx.lr_scheduler.FactorScheduler(1000, factor=0.95)
optimizer_params = {
'learning_rate': 0.01,
'momentum': 0.9,
'wd': 0.0005,
'lr_scheduler': lr_scheduler
}
checkpoint = mx.callback.do_checkpoint('mnist_lenet', period=5)
mod = mx.mod.Module(lenet5, context=mx.gpu(2))
mod.fit(train_dataiter,
eval_data=val_dataiter,
optimizer_params=optimizer_params,
num_epoch=36,
epoch_end_callback=checkpoint)
| bsd-3-clause | -5,082,072,010,840,330,000 | 27.296875 | 71 | 0.661513 | false |
admitad/admitad-python-api | admitad/tests/test_campaigns.py | 1 | 3571 | # coding: utf-8
from __future__ import unicode_literals
import unittest
import responses
from admitad.items import Campaigns, CampaignsForWebsite, \
CampaignsManage
from admitad.tests.base import BaseTestCase
class CampaignsTestCase(BaseTestCase):
def test_get_campaigns_request(self):
with responses.RequestsMock() as resp:
resp.add(
resp.GET,
self.prepare_url(Campaigns.URL, params={
'website': 10,
'has_tool': ['deeplink', 'retag'],
'limit': 10,
'offset': 0,
'language': 'en'
}),
match_querystring=True,
json={'status': 'ok'},
status=200
)
result = self.client.Campaigns.get(website=10, has_tool=['deeplink', 'retag'],
limit=10, offset=0, language='en')
self.assertIn('status', result)
def test_get_campaigns_request_with_id(self):
with responses.RequestsMock() as resp:
resp.add(
resp.GET,
self.prepare_url(Campaigns.SINGLE_URL, campaign_id=10),
match_querystring=True,
json={'status': 'ok'},
status=200
)
result = self.client.Campaigns.getOne(10)
self.assertIn('status', result)
class CampaignsForWebsiteTestCase(BaseTestCase):
def test_get_campaigns_for_websites_request(self):
with responses.RequestsMock() as resp:
resp.add(
resp.GET,
self.prepare_url(CampaignsForWebsite.URL, website_id=16, params={
'limit': 26,
'offset': 10
}),
match_querystring=True,
json={'status': 'ok'},
status=200
)
result = self.client.CampaignsForWebsite.get(16, limit=26, offset=10)
self.assertIn('status', result)
def test_get_campaigns_request_with_id(self):
with responses.RequestsMock() as resp:
resp.add(
resp.GET,
self.prepare_url(CampaignsForWebsite.SINGLE_URL, website_id=10, campaign_id=88),
match_querystring=True,
json={'status': 'ok'},
status=200
)
result = self.client.CampaignsForWebsite.getOne(10, 88)
self.assertIn('status', result)
class CampaignsConnectWebsiteTestCase(BaseTestCase):
def test_campaign_connect_websites_request(self):
with responses.RequestsMock() as resp:
resp.add(
resp.POST,
self.prepare_url(CampaignsManage.CONNECT_URL, campaign_id=10, website_id=22),
match_querystring=True,
json={'status': 'ok'},
status=200
)
result = self.client.CampaignsManage.connect(10, 22)
self.assertIn('status', result)
def test_campaign_disconnect_websites_request(self):
with responses.RequestsMock() as resp:
resp.add(
resp.POST,
self.prepare_url(CampaignsManage.DISCONNECT_URL, campaign_id=10, website_id=22),
match_querystring=True,
json={'status': 'ok'},
status=200
)
result = self.client.CampaignsManage.disconnect(10, 22)
self.assertIn('status', result)
if __name__ == '__main__':
unittest.main()
| mit | -4,248,813,920,931,956,000 | 31.463636 | 96 | 0.532344 | false |
iagcl/data_pipeline | data_pipeline/db/filedb.py | 1 | 3423 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
###############################################################################
# Module: filedb
# Purpose: Represents a file-based "database"
#
# Notes:
#
###############################################################################
import glob
import os
import data_pipeline.constants.const as const
import data_pipeline.utils.filesystem as fsutils
from .file_query_results import FileQueryResults
from .db import Db
from os.path import basename
from data_pipeline.stream.file_reader import FileReader
class FileDb(Db):
def __init__(self):
super(FileDb, self).__init__()
self._file_reader = None
self._closed = True # Maintains the "closed" state for posterity
self._data_dir = None
@property
def dbtype(self):
return const.FILE
def _connect(self, connection_details):
self._closed = False
self._data_dir = connection_details.data_dir
def execute_stored_proc(self, stored_proc):
pass
def execute_query(self, tablename, arraysize, values=(),
post_process_func=None):
"""Return wrapper around file handle that reads line by line
:param str tablename
Files containing this tablename as its basename
will be read line-by-line as the query result.
:param int arraysize Unused
:param tuple values Unused
:param tuple post_process_func
Function to execute on each record after retrieval
"""
if tablename is None:
return
filename = self.get_data_filename(tablename)
if filename is None:
return
self._logger.info("Querying file: {f}".format(f=filename))
return FileQueryResults(filename, post_process_func)
def get_data_filename(self, tablename):
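        # case-insensitive match: return the first file in the data directory
        # whose basename, with any extension stripped, equals the table name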
glob_pattern = "{}*".format(os.path.join(self._data_dir, tablename))
matching_data_files = fsutils.insensitive_glob(glob_pattern)
for f in matching_data_files:
filename_with_ext = basename(f)
dot_i = filename_with_ext.find(const.DOT, 1)
if dot_i > 0:
filename_without_ext = filename_with_ext[:dot_i]
else:
filename_without_ext = filename_with_ext
if filename_without_ext.lower() == tablename.lower():
return f
return None
def execute(self, sql, values=(), log_sql=True):
pass
def commit(self):
pass
def rollback(self):
pass
def closed(self):
return self._closed
def disconnect(self):
self._closed = True
| apache-2.0 | -8,469,073,670,559,711,000 | 31.292453 | 79 | 0.621677 | false |
stuliveshere/PySeis | docs/notebooks/toolbox/toolbox.py | 1 | 9786 | import numpy as np
import sys
import matplotlib.pyplot as pylab
from matplotlib.widgets import Slider
#==================================================
# decorators
#==================================================
def io(func):
'''
an io decorator that allows
input/output to be either a filename
(i.e. a string) or an array
'''
def wrapped(*args, **kwargs) :
if type(args[0]) == type(''):
workspace = read(args[0])
else:
workspace = args[0]
result = func(workspace, **kwargs)
if type(result) != type(None):
if type(args[1]) == type(''):
return write(result, args[1])
else:
return result
return wrapped
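# example usage (hypothetical filenames): functions decorated with @io accept
# either two filenames or two in-memory record arrays as their first two
# positional arguments, e.g. agc('raw_shots.su', 'gained.su', window=250);
# when the second argument is not a string the processed array is returned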
#==================================================
# display tools
#==================================================
class KeyHandler(object):
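    """Matplotlib key-press handler that pages through ensembles selected on
    the ``primary`` header key and rescales the greyscale clip on the fly."""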
def __init__(self, fig, ax, dataset, kwargs):
self.fig = fig
self.ax = ax
self.kwargs = kwargs
self.dataset = dataset
self.start = 0
if kwargs['primary'] == None:
self.slice = self.dataset
else:
keys = np.unique(dataset[kwargs['primary']])
self.keys = keys[::kwargs['step']]
self.nkeys = self.keys.size
self.ensemble()
if 'clip' in kwargs and kwargs['clip'] != 0:
self.clip = kwargs['clip']
else:
self.clip = np.mean(np.abs(self.dataset['trace']))
print 'PySeis Seismic Viewer'
print 'type "h" for help'
self.draw()
def __call__(self, e):
print e.xdata, e.ydata
if e.key == "right":
self.start += 1
self.ensemble()
elif e.key == "left":
self.start -= 1
self.ensemble()
elif e.key == "up":
self.clip /= 1.1
print self.clip
elif e.key == "down":
self.clip *= 1.1
print self.clip
elif e.key == "h":
print "right arrow: next gather"
print "left arrow: last gather"
print "up arrow: hotter"
print "down arrow: colder"
print "clip=", self.clip
else:
return
self.draw()
def draw(self):
self.ax.cla()
        self.im = self.ax.imshow(self.slice['trace'].T, aspect='auto', cmap='Greys', vmax=self.clip, vmin=-1*self.clip)
try:
self.ax.set_title('%s = %d' %(self.kwargs['primary'], self.keys[self.start]))
except AttributeError:
pass
self.fig.canvas.draw()
def ensemble(self):
try:
self.slice = self.dataset[self.dataset[self.kwargs['primary']] == self.keys[self.start]]
except IndexError:
self.start = 0
@io
def display(dataset, **kwargs):
'''
iterates through dataset using
left and right keys
parameters required:
primary key
        secondary key
step size
works well unless you want to load a big dataset...
'''
fig = pylab.figure()
ax = fig.add_subplot(111)
eventManager = KeyHandler(fig, ax, dataset, kwargs)
fig.canvas.mpl_connect('key_press_event',eventManager)
def scan(dataset):
print " %0-35s: %0-15s %s" %('key', 'min', 'max')
print "========================================="
for key in np.result_type(dataset).descr:
a = np.amin(dataset[key[0]])
b = np.amax(dataset[key[0]])
if (a != 0) and (b != 0):
print "%0-35s %0-15.3f %.3f" %(key, a, b)
print "========================================="
#~ def build_vels(times, velocities, ns=1000, dt=0.001):
#~ '''builds a full velocity trace from a list of vels and times'''
#~ tx = np.linspace(dt, dt*ns, ns)
#~ vels = np.interp(tx, times, velocities)
#~ vels = np.pad(vels, (100,100), 'reflect')
#~ vels = np.convolve(np.ones(100.0)/100.0, vels, mode='same')
#~ vels = vels[100:-100]
#~ return vels
@io
def cp(workspace, **params):
return workspace
@io
def agc(workspace, window=100, **params):
'''
    automatic gain control: divide each trace by a moving-average (boxcar)
    envelope of its absolute amplitude
    inputs:
        window: AGC window length in samples
'''
vec = np.ones(window, 'f')
func = np.apply_along_axis(lambda m: np.convolve(np.abs(m), vec, mode='same'), axis=-1, arr=workspace['trace'])
workspace['trace'] /= func
workspace['trace'][~np.isfinite(workspace['trace'])] = 0
workspace['trace'] /= np.amax(np.abs(workspace['trace']))
return workspace
def ricker(f, length=0.512, dt=0.001):
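    # Ricker ("Mexican hat") wavelet of peak frequency f:
    #   psi(t) = (1 - 2*pi^2*f^2*t^2) * exp(-pi^2*f^2*t^2)
    # leading/trailing zero samples are trimmed before returning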
t = np.linspace(-length/2, (length-dt)/2, length/dt)
y = (1.0 - 2.0*(np.pi**2)*(f**2)*(t**2)) * np.exp(-(np.pi**2)*(f**2)*(t**2))
y = np.around(y, 10)
inds = np.nonzero(y)[0]
return y[np.amin(inds):np.amax(inds)]
def conv(workspace, wavelet):
workspace['trace'] = np.apply_along_axis(lambda m: np.convolve(m, wavelet, mode='same'), axis=-1, arr=workspace['trace'])
return workspace
@io
def fx(workspace, **params):
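    # F-X analysis: per-trace amplitude spectra normalized by the mean
    # spectral amplitude, converted to dB and imaged against CDP and frequency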
f = np.abs(np.fft.rfft(workspace['trace'], axis=-1))
correction = np.mean(np.abs(f), axis=-1).reshape(-1,1)
f /= correction
f = 20.0*np.log10(f)[:,::-1]
freq = np.fft.rfftfreq(params['ns'], params['dt'])
print params['ns'], params['dt']
hmin = np.amin(workspace['cdp'])
hmax = np.amax(workspace['cdp'])
vmin = np.amin(freq)
vmax = np.amax(freq)
extent=[hmin,hmax,vmin,vmax]
pylab.imshow(f.T, aspect='auto', extent=extent)
def db(data):
return 20.0*np.log10(data)
import numpy as np
su_header_dtype = np.dtype([
('tracl', np.int32),
('tracr', np.int32),
('fldr', np.int32),
('tracf', np.int32),
('ep', np.int32),
('cdp', np.int32),
('cdpt', np.int32),
('trid', np.int16),
('nvs', np.int16),
('nhs', np.int16),
('duse', np.int16),
('offset', np.int32),
('gelev', np.int32),
('selev', np.int32),
('sdepth', np.int32),
('gdel', np.int32),
('sdel', np.int32),
('swdep', np.int32),
('gwdep', np.int32),
('scalel', np.int16),
('scalco', np.int16),
('sx', np.int32),
('sy', np.int32),
('gx', np.int32),
('gy', np.int32),
('counit', np.int16),
('wevel', np.int16),
('swevel', np.int16),
('sut', np.int16),
('gut', np.int16),
('sstat', np.int16),
('gstat', np.int16),
('tstat', np.int16),
('laga', np.int16),
('lagb', np.int16),
('delrt', np.int16),
('muts', np.int16),
('mute', np.int16),
('ns', np.uint16),
('dt', np.uint16),
('gain', np.int16),
('igc', np.int16),
('igi', np.int16),
('corr', np.int16),
('sfs', np.int16),
('sfe', np.int16),
('slen', np.int16),
('styp', np.int16),
('stas', np.int16),
('stae', np.int16),
('tatyp', np.int16),
('afilf', np.int16),
('afils', np.int16),
('nofilf', np.int16),
('nofils', np.int16),
('lcf', np.int16),
('hcf', np.int16),
('lcs', np.int16),
('hcs', np.int16),
('year', np.int16),
('day', np.int16),
('hour', np.int16),
('minute', np.int16),
('sec', np.int16),
('timebas', np.int16),
('trwf', np.int16),
('grnors', np.int16),
('grnofr', np.int16),
('grnlof', np.int16),
('gaps', np.int16),
('otrav', np.int16), #179,180
('d1', np.float32), #181,184
('f1', np.float32), #185,188
('d2', np.float32), #189,192
('f2', np.float32), #193, 196
('ShotPoint', np.int32), #197,200
('unscale', np.int16), #201, 204
('TraceValueMeasurementUnit', np.int16),
('TransductionConstantMantissa', np.int32),
('TransductionConstantPower', np.int16),
('TransductionUnit', np.int16),
('TraceIdentifier', np.int16),
('ScalarTraceHeader', np.int16),
('SourceType', np.int16),
('SourceEnergyDirectionMantissa', np.int32),
('SourceEnergyDirectionExponent', np.int16),
('SourceMeasurementMantissa', np.int32),
('SourceMeasurementExponent', np.int16),
('SourceMeasurementUnit', np.int16),
('UnassignedInt1', np.int32),
('ns1', np.int32),
])
def typeSU(ns):
return np.dtype(su_header_dtype.descr + [('trace', ('<f4',ns))])
def readSUheader(filename):
raw = open(filename, 'rb').read()
return np.fromstring(raw, dtype=su_header_dtype, count=1)
def read(filename=None):
if filename == None:
        raw = sys.stdin.read()
else:
raw = open(filename, 'rb').read()
return readData(raw)
def readData(raw):
su_header = np.fromstring(raw, dtype=su_header_dtype, count=1)
ns = su_header['ns'][0]
file_dtype = typeSU(ns)
data = np.fromstring(raw, dtype=file_dtype)
return data
def write(data, filename=None):
if filename == None:
data.tofile(sys.stdout)
else:
data.tofile(filename)
| mit | -8,316,606,742,452,979,000 | 29.391304 | 129 | 0.474249 | false |
amdor/skyscraper | tests/test_scraper_service.py | 1 | 1401 | import os
import unittest
from skyscraper.scraper_service import ScraperServiceFactory
from skyscraper.utils.constants import SPEEDOMETER_KEY, AGE_KEY, CAR_KEY, PRICE_KEY, POWER_KEY, CURRENCY_KEY
from common_test_utils import gather_extension_files, VALIDATION_DATA
class TestScraping(unittest.TestCase):
files_under_test = set()
@classmethod
def setUpClass(cls):
path = os.path.dirname(os.path.realpath(__file__))
cls.files_under_test = gather_extension_files(path)
def test_scraping(self):
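        # replay saved HTML fixtures through the scraper and compare each
        # extracted field against the expected values in VALIDATION_DATA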
for file_name in [*VALIDATION_DATA]:
abs_path = list(filter(lambda test_file: test_file.endswith(file_name), self.files_under_test))[0]
with open(abs_path, 'rb') as html_file:
file_content = html_file.read()
file_content = str(file_content, encoding='utf-8')
scraper = ScraperServiceFactory.get_for_dict({file_name: file_content})
car_data = scraper.get_car_data()
actual_value = car_data[0]
expected_value = VALIDATION_DATA[file_name]
print(actual_value[CAR_KEY] + ' assertions')
self.assertEqual(expected_value[SPEEDOMETER_KEY], actual_value[SPEEDOMETER_KEY])
self.assertEqual(expected_value[AGE_KEY], actual_value[AGE_KEY])
self.assertEqual(expected_value[PRICE_KEY], actual_value[PRICE_KEY])
self.assertEqual(expected_value[POWER_KEY], actual_value[POWER_KEY])
self.assertEqual(expected_value[CURRENCY_KEY], actual_value[CURRENCY_KEY])
| mit | 1,581,957,246,233,209,900 | 42.78125 | 108 | 0.741613 | false |
ibis-project/ibis | ibis/tests/sql/test_compiler.py | 1 | 70526 | import datetime
import unittest
import pytest
import ibis
import ibis.expr.api as api
import ibis.expr.operations as ops
from ibis.backends.base.sql.compiler import Compiler, QueryContext
from ibis.tests.expr.mocks import MockConnection
pytest.importorskip('sqlalchemy')
class TestASTBuilder(unittest.TestCase):
def setUp(self):
self.con = MockConnection()
def test_ast_with_projection_join_filter(self):
table = self.con.table('test1')
table2 = self.con.table('test2')
filter_pred = table['f'] > 0
table3 = table[filter_pred]
join_pred = table3['g'] == table2['key']
joined = table2.inner_join(table3, [join_pred])
result = joined[[table3, table2['value']]]
stmt = _get_query(result)
def foo():
table3 = table[filter_pred]
joined = table2.inner_join(table3, [join_pred])
result = joined[[table3, table2['value']]]
return result
assert len(stmt.select_set) == 2
# #790, make sure the filter stays put
assert len(stmt.where) == 0
# Check that the joined tables are not altered
tbl = stmt.table_set
tbl_node = tbl.op()
assert isinstance(tbl_node, ops.InnerJoin)
assert tbl_node.left is table2
assert tbl_node.right is table3
def test_ast_with_aggregation_join_filter(self):
table = self.con.table('test1')
table2 = self.con.table('test2')
filter_pred = table['f'] > 0
table3 = table[filter_pred]
join_pred = table3['g'] == table2['key']
joined = table2.inner_join(table3, [join_pred])
met1 = (table3['f'] - table2['value']).mean().name('foo')
result = joined.aggregate(
[met1, table3['f'].sum().name('bar')],
by=[table3['g'], table2['key']],
)
stmt = _get_query(result)
# #790, this behavior was different before
ex_pred = [table3['g'] == table2['key']]
expected_table_set = table2.inner_join(table3, ex_pred)
assert stmt.table_set.equals(expected_table_set)
# Check various exprs
ex_metrics = [
(table3['f'] - table2['value']).mean().name('foo'),
table3['f'].sum().name('bar'),
]
ex_by = [table3['g'], table2['key']]
for res, ex in zip(stmt.select_set, ex_by + ex_metrics):
assert res.equals(ex)
for res, ex in zip(stmt.group_by, ex_by):
assert stmt.select_set[res].equals(ex)
# The filter is in the joined subtable
assert len(stmt.where) == 0
class TestNonTabularResults(unittest.TestCase):
"""
"""
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('alltypes')
def test_simple_scalar_aggregates(self):
from pandas import DataFrame
# Things like table.column.{sum, mean, ...}()
table = self.con.table('alltypes')
expr = table[table.c > 0].f.sum()
query = _get_query(expr)
sql_query = query.compile()
expected = """SELECT sum(`f`) AS `sum`
FROM alltypes
WHERE `c` > 0"""
assert sql_query == expected
# Maybe the result handler should act on the cursor. Not sure.
handler = query.result_handler
output = DataFrame({'sum': [5]})
assert handler(output) == 5
def test_scalar_aggregates_multiple_tables(self):
# #740
table = ibis.table([('flag', 'string'), ('value', 'double')], 'tbl')
flagged = table[table.flag == '1']
unflagged = table[table.flag == '0']
expr = flagged.value.mean() / unflagged.value.mean() - 1
result = Compiler.to_sql(expr)
expected = """\
SELECT (t0.`mean` / t1.`mean`) - 1 AS `tmp`
FROM (
SELECT avg(`value`) AS `mean`
FROM tbl
WHERE `flag` = '1'
) t0
CROSS JOIN (
SELECT avg(`value`) AS `mean`
FROM tbl
WHERE `flag` = '0'
) t1"""
assert result == expected
fv = flagged.value
uv = unflagged.value
expr = (fv.mean() / fv.sum()) - (uv.mean() / uv.sum())
result = Compiler.to_sql(expr)
expected = """\
SELECT t0.`tmp` - t1.`tmp` AS `tmp`
FROM (
SELECT avg(`value`) / sum(`value`) AS `tmp`
FROM tbl
WHERE `flag` = '1'
) t0
CROSS JOIN (
SELECT avg(`value`) / sum(`value`) AS `tmp`
FROM tbl
WHERE `flag` = '0'
) t1"""
assert result == expected
def test_table_column_unbox(self):
from pandas import DataFrame
table = self.table
m = table.f.sum().name('total')
agged = table[table.c > 0].group_by('g').aggregate([m])
expr = agged.g
query = _get_query(expr)
sql_query = query.compile()
expected = """\
SELECT `g`
FROM (
SELECT `g`, sum(`f`) AS `total`
FROM alltypes
WHERE `c` > 0
GROUP BY 1
) t0"""
assert sql_query == expected
# Maybe the result handler should act on the cursor. Not sure.
handler = query.result_handler
output = DataFrame({'g': ['foo', 'bar', 'baz']})
assert (handler(output) == output['g']).all()
def test_complex_array_expr_projection(self):
# May require finding the base table and forming a projection.
expr = self.table.group_by('g').aggregate(
[self.table.count().name('count')]
)
expr2 = expr.g.cast('double')
query = Compiler.to_sql(expr2)
expected = """SELECT CAST(`g` AS double) AS `tmp`
FROM (
SELECT `g`, count(*) AS `count`
FROM alltypes
GROUP BY 1
) t0"""
assert query == expected
def test_scalar_exprs_no_table_refs(self):
expr1 = ibis.now()
expected1 = """\
SELECT now() AS `tmp`"""
expr2 = ibis.literal(1) + ibis.literal(2)
expected2 = """\
SELECT 1 + 2 AS `tmp`"""
cases = [(expr1, expected1), (expr2, expected2)]
for expr, expected in cases:
result = Compiler.to_sql(expr)
assert result == expected
def test_expr_list_no_table_refs(self):
exlist = ibis.api.expr_list(
[
ibis.literal(1).name('a'),
ibis.now().name('b'),
ibis.literal(2).log().name('c'),
]
)
result = Compiler.to_sql(exlist)
expected = """\
SELECT 1 AS `a`, now() AS `b`, ln(2) AS `c`"""
assert result == expected
def test_isnull_case_expr_rewrite_failure(self):
# #172, case expression that was not being properly converted into an
# aggregation
reduction = self.table.g.isnull().ifelse(1, 0).sum()
result = Compiler.to_sql(reduction)
expected = """\
SELECT sum(CASE WHEN `g` IS NULL THEN 1 ELSE 0 END) AS `sum`
FROM alltypes"""
assert result == expected
def _get_query(expr):
ast = Compiler.to_ast(expr, QueryContext(compiler=Compiler))
return ast.queries[0]
nation = api.table(
[('n_regionkey', 'int32'), ('n_nationkey', 'int32'), ('n_name', 'string')],
'nation',
)
region = api.table([('r_regionkey', 'int32'), ('r_name', 'string')], 'region')
customer = api.table(
[('c_nationkey', 'int32'), ('c_name', 'string'), ('c_acctbal', 'double')],
'customer',
)
def _table_wrapper(name, tname=None):
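    # build a property that lazily materializes the named schema from
    # ExprTestCases._schemas as a mock table (optionally renamed to tname)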
@property
def f(self):
return self._table_from_schema(name, tname)
return f
class ExprTestCases:
_schemas = {
'foo': [
('job', 'string'),
('dept_id', 'string'),
('year', 'int32'),
('y', 'double'),
],
'bar': [('x', 'double'), ('job', 'string')],
't1': [('key1', 'string'), ('key2', 'string'), ('value1', 'double')],
't2': [('key1', 'string'), ('key2', 'string')],
}
def _table_from_schema(self, name, tname=None):
tname = tname or name
return api.table(self._schemas[name], tname)
def _case_multiple_joins(self):
t1 = self.con.table('star1')
t2 = self.con.table('star2')
t3 = self.con.table('star3')
predA = t1['foo_id'] == t2['foo_id']
predB = t1['bar_id'] == t3['bar_id']
what = (
t1.left_join(t2, [predA])
.inner_join(t3, [predB])
.projection([t1, t2['value1'], t3['value2']])
)
return what
def _case_join_between_joins(self):
t1 = api.table(
[('key1', 'string'), ('key2', 'string'), ('value1', 'double')],
'first',
)
t2 = api.table([('key1', 'string'), ('value2', 'double')], 'second')
t3 = api.table(
[('key2', 'string'), ('key3', 'string'), ('value3', 'double')],
'third',
)
t4 = api.table([('key3', 'string'), ('value4', 'double')], 'fourth')
left = t1.inner_join(t2, [('key1', 'key1')])[t1, t2.value2]
right = t3.inner_join(t4, [('key3', 'key3')])[t3, t4.value4]
joined = left.inner_join(right, [('key2', 'key2')])
# At one point, the expression simplification was resulting in bad refs
# here (right.value3 referencing the table inside the right join)
exprs = [left, right.value3, right.value4]
projected = joined.projection(exprs)
return projected
def _case_join_just_materialized(self):
t1 = self.con.table('tpch_nation')
t2 = self.con.table('tpch_region')
t3 = self.con.table('tpch_customer')
# GH #491
return t1.inner_join(t2, t1.n_regionkey == t2.r_regionkey).inner_join(
t3, t1.n_nationkey == t3.c_nationkey
)
def _case_semi_anti_joins(self):
t1 = self.con.table('star1')
t2 = self.con.table('star2')
sj = t1.semi_join(t2, [t1.foo_id == t2.foo_id])[[t1]]
aj = t1.anti_join(t2, [t1.foo_id == t2.foo_id])[[t1]]
return sj, aj
def _case_self_reference_simple(self):
t1 = self.con.table('star1')
return t1.view()
def _case_self_reference_join(self):
t1 = self.con.table('star1')
t2 = t1.view()
return t1.inner_join(t2, [t1.foo_id == t2.bar_id])[[t1]]
def _case_join_projection_subquery_bug(self):
# From an observed bug, derived from tpch tables
geo = nation.inner_join(region, [('n_regionkey', 'r_regionkey')])[
nation.n_nationkey,
nation.n_name.name('nation'),
region.r_name.name('region'),
]
expr = geo.inner_join(customer, [('n_nationkey', 'c_nationkey')])[
customer, geo
]
return expr
def _case_where_simple_comparisons(self):
t1 = self.con.table('star1')
what = t1.filter([t1.f > 0, t1.c < t1.f * 2])
return what
def _case_where_with_join(self):
t1 = self.con.table('star1')
t2 = self.con.table('star2')
# This also tests some cases of predicate pushdown
e1 = (
t1.inner_join(t2, [t1.foo_id == t2.foo_id])
.projection([t1, t2.value1, t2.value3])
.filter([t1.f > 0, t2.value3 < 1000])
)
# e2 = (t1.inner_join(t2, [t1.foo_id == t2.foo_id])
# .filter([t1.f > 0, t2.value3 < 1000])
# .projection([t1, t2.value1, t2.value3]))
# return e1, e2
return e1
def _case_subquery_used_for_self_join(self):
# There could be cases that should look in SQL like
# WITH t0 as (some subquery)
# select ...
# from t0 t1
# join t0 t2
# on t1.kind = t2.subkind
# ...
# However, the Ibis code will simply have an expression (projection or
# aggregation, say) built on top of the subquery expression, so we need
# to extract the subquery unit (we see that it appears multiple times
# in the tree).
t = self.con.table('alltypes')
agged = t.aggregate([t.f.sum().name('total')], by=['g', 'a', 'b'])
view = agged.view()
metrics = [(agged.total - view.total).max().name('metric')]
expr = agged.inner_join(view, [agged.a == view.b]).aggregate(
metrics, by=[agged.g]
)
return expr
def _case_subquery_factor_correlated_subquery(self):
region = self.con.table('tpch_region')
nation = self.con.table('tpch_nation')
customer = self.con.table('tpch_customer')
orders = self.con.table('tpch_orders')
fields_of_interest = [
customer,
region.r_name.name('region'),
orders.o_totalprice.name('amount'),
orders.o_orderdate.cast('timestamp').name('odate'),
]
tpch = (
region.join(nation, region.r_regionkey == nation.n_regionkey)
.join(customer, customer.c_nationkey == nation.n_nationkey)
.join(orders, orders.o_custkey == customer.c_custkey)[
fields_of_interest
]
)
# Self-reference + correlated subquery complicates things
t2 = tpch.view()
conditional_avg = t2[t2.region == tpch.region].amount.mean()
amount_filter = tpch.amount > conditional_avg
return tpch[amount_filter].limit(10)
def _case_self_join_subquery_distinct_equal(self):
region = self.con.table('tpch_region')
nation = self.con.table('tpch_nation')
j1 = region.join(nation, region.r_regionkey == nation.n_regionkey)[
region, nation
]
j2 = region.join(nation, region.r_regionkey == nation.n_regionkey)[
region, nation
].view()
expr = j1.join(j2, j1.r_regionkey == j2.r_regionkey)[
j1.r_name, j2.n_name
]
return expr
def _case_cte_factor_distinct_but_equal(self):
t = self.con.table('alltypes')
tt = self.con.table('alltypes')
expr1 = t.group_by('g').aggregate(t.f.sum().name('metric'))
expr2 = tt.group_by('g').aggregate(tt.f.sum().name('metric')).view()
expr = expr1.join(expr2, expr1.g == expr2.g)[[expr1]]
return expr
def _case_tpch_self_join_failure(self):
# duplicating the integration test here
region = self.con.table('tpch_region')
nation = self.con.table('tpch_nation')
customer = self.con.table('tpch_customer')
orders = self.con.table('tpch_orders')
fields_of_interest = [
region.r_name.name('region'),
nation.n_name.name('nation'),
orders.o_totalprice.name('amount'),
orders.o_orderdate.cast('timestamp').name('odate'),
]
joined_all = (
region.join(nation, region.r_regionkey == nation.n_regionkey)
.join(customer, customer.c_nationkey == nation.n_nationkey)
.join(orders, orders.o_custkey == customer.c_custkey)[
fields_of_interest
]
)
year = joined_all.odate.year().name('year')
total = joined_all.amount.sum().cast('double').name('total')
annual_amounts = joined_all.group_by(['region', year]).aggregate(total)
current = annual_amounts
prior = annual_amounts.view()
yoy_change = (current.total - prior.total).name('yoy_change')
yoy = current.join(prior, current.year == (prior.year - 1))[
current.region, current.year, yoy_change
]
return yoy
def _case_subquery_in_filter_predicate(self):
# E.g. comparing against some scalar aggregate value. See Ibis #43
t1 = self.con.table('star1')
pred = t1.f > t1.f.mean()
expr = t1[pred]
# This brought out another expression rewriting bug, since the filtered
# table isn't found elsewhere in the expression.
pred2 = t1.f > t1[t1.foo_id == 'foo'].f.mean()
expr2 = t1[pred2]
return expr, expr2
def _case_filter_subquery_derived_reduction(self):
t1 = self.con.table('star1')
# Reduction can be nested inside some scalar expression
pred3 = t1.f > t1[t1.foo_id == 'foo'].f.mean().log()
pred4 = t1.f > (t1[t1.foo_id == 'foo'].f.mean().log() + 1)
expr3 = t1[pred3]
expr4 = t1[pred4]
return expr3, expr4
def _case_topk_operation(self):
# TODO: top K with filter in place
table = api.table(
[
('foo', 'string'),
('bar', 'string'),
('city', 'string'),
('v1', 'double'),
('v2', 'double'),
],
'tbl',
)
what = table.city.topk(10, by=table.v2.mean())
e1 = table[what]
# Test the default metric (count)
what = table.city.topk(10)
e2 = table[what]
return e1, e2
def _case_simple_aggregate_query(self):
t1 = self.con.table('star1')
cases = [
t1.aggregate([t1['f'].sum().name('total')], [t1['foo_id']]),
t1.aggregate([t1['f'].sum().name('total')], ['foo_id', 'bar_id']),
]
return cases
def _case_aggregate_having(self):
# Filtering post-aggregation predicate
t1 = self.con.table('star1')
total = t1.f.sum().name('total')
metrics = [total]
e1 = t1.aggregate(metrics, by=['foo_id'], having=[total > 10])
e2 = t1.aggregate(metrics, by=['foo_id'], having=[t1.count() > 100])
return e1, e2
def _case_aggregate_count_joined(self):
# count on more complicated table
region = self.con.table('tpch_region')
nation = self.con.table('tpch_nation')
join_expr = region.r_regionkey == nation.n_regionkey
joined = region.inner_join(nation, join_expr)
table_ref = joined[nation, region.r_name.name('region')]
return table_ref.count()
def _case_sort_by(self):
table = self.con.table('star1')
return [
table.sort_by('f'),
table.sort_by(('f', 0)),
table.sort_by(['c', ('f', 0)]),
]
def _case_limit(self):
star1 = self.con.table('star1')
cases = [
star1.limit(10),
star1.limit(10, offset=5),
star1[star1.f > 0].limit(10),
# Semantically, this should produce a subquery
star1.limit(10)[lambda x: x.f > 0],
]
return cases
foo = _table_wrapper('foo')
bar = _table_wrapper('bar')
t1 = _table_wrapper('t1', 'foo')
t2 = _table_wrapper('t2', 'bar')
def _case_where_uncorrelated_subquery(self):
return self.foo[self.foo.job.isin(self.bar.job)]
def _case_where_correlated_subquery(self):
t1 = self.foo
t2 = t1.view()
stat = t2[t1.dept_id == t2.dept_id].y.mean()
return t1[t1.y > stat]
def _case_exists(self):
t1, t2 = self.t1, self.t2
cond = (t1.key1 == t2.key1).any()
expr = t1[cond]
cond2 = ((t1.key1 == t2.key1) & (t2.key2 == 'foo')).any()
expr2 = t1[cond2]
return expr, expr2
def _case_not_exists(self):
t1, t2 = self.t1, self.t2
cond = (t1.key1 == t2.key1).any()
return t1[-cond]
def _case_join_with_limited_table(self):
t1 = self.con.table('star1')
t2 = self.con.table('star2')
limited = t1.limit(100)
joined = limited.inner_join(t2, [limited.foo_id == t2.foo_id])[
[limited]
]
return joined
def _case_union(self, distinct=False):
table = self.con.table('functional_alltypes')
t1 = table[table.int_col > 0][
table.string_col.name('key'),
table.float_col.cast('double').name('value'),
]
t2 = table[table.int_col <= 0][
table.string_col.name('key'), table.double_col.name('value')
]
expr = t1.union(t2, distinct=distinct)
return expr
def _case_intersect(self):
table = self.con.table('functional_alltypes')
t1 = table[table.int_col > 0][
table.string_col.name('key'),
table.float_col.cast('double').name('value'),
]
t2 = table[table.int_col <= 0][
table.string_col.name('key'), table.double_col.name('value')
]
expr = t1.intersect(t2)
return expr
def _case_difference(self):
table = self.con.table('functional_alltypes')
t1 = table[table.int_col > 0][
table.string_col.name('key'),
table.float_col.cast('double').name('value'),
]
t2 = table[table.int_col <= 0][
table.string_col.name('key'), table.double_col.name('value')
]
expr = t1.difference(t2)
return expr
def _case_simple_case(self):
t = self.con.table('alltypes')
return (
t.g.case()
.when('foo', 'bar')
.when('baz', 'qux')
.else_('default')
.end()
)
def _case_search_case(self):
t = self.con.table('alltypes')
return ibis.case().when(t.f > 0, t.d * 2).when(t.c < 0, t.a * 2).end()
def _case_self_reference_in_exists(self):
t = self.con.table('functional_alltypes')
t2 = t.view()
cond = (t.string_col == t2.string_col).any()
semi = t[cond]
anti = t[-cond]
return semi, anti
def _case_self_reference_limit_exists(self):
alltypes = self.con.table('functional_alltypes')
t = alltypes.limit(100)
t2 = t.view()
return t[-((t.string_col == t2.string_col).any())]
def _case_limit_cte_extract(self):
alltypes = self.con.table('functional_alltypes')
t = alltypes.limit(100)
t2 = t.view()
return t.join(t2).projection(t)
def _case_subquery_aliased(self):
t1 = self.con.table('star1')
t2 = self.con.table('star2')
agged = t1.aggregate([t1.f.sum().name('total')], by=['foo_id'])
what = agged.inner_join(t2, [agged.foo_id == t2.foo_id])[
agged, t2.value1
]
return what
def _case_filter_self_join_analysis_bug(self):
purchases = ibis.table(
[
('region', 'string'),
('kind', 'string'),
('user', 'int64'),
('amount', 'double'),
],
'purchases',
)
metric = purchases.amount.sum().name('total')
agged = purchases.group_by(['region', 'kind']).aggregate(metric)
left = agged[agged.kind == 'foo']
right = agged[agged.kind == 'bar']
joined = left.join(right, left.region == right.region)
result = joined[left.region, (left.total - right.total).name('diff')]
return result, purchases
def _case_projection_fuse_filter(self):
# Probably test this during the evaluation phase. In SQL, "fusable"
# table operations will be combined together into a single select
# statement
#
# see ibis #71 for more on this
t = ibis.table(
[
('a', 'int8'),
('b', 'int16'),
('c', 'int32'),
('d', 'int64'),
('e', 'float'),
('f', 'double'),
('g', 'string'),
('h', 'boolean'),
],
'foo',
)
proj = t['a', 'b', 'c']
# Rewrite a little more aggressively here
expr1 = proj[t.a > 0]
# at one point these yielded different results
filtered = t[t.a > 0]
expr2 = filtered[t.a, t.b, t.c]
expr3 = filtered.projection(['a', 'b', 'c'])
return expr1, expr2, expr3
def _case_startswith(self):
t1 = self.con.table('star1')
return t1.foo_id.startswith('foo')
def _case_endswith(self):
t1 = self.con.table('star1')
return t1.foo_id.endswith('foo')
class TestSelectSQL(unittest.TestCase, ExprTestCases):
@classmethod
def setUpClass(cls):
cls.con = MockConnection()
def _compare_sql(self, expr, expected):
result = Compiler.to_sql(expr)
assert result == expected
def test_nameless_table(self):
# Generate a unique table name when we haven't passed on
nameless = api.table([('key', 'string')])
assert Compiler.to_sql(nameless) == 'SELECT *\nFROM {}'.format(
nameless.op().name
)
with_name = api.table([('key', 'string')], name='baz')
result = Compiler.to_sql(with_name)
assert result == 'SELECT *\nFROM baz'
def test_physical_table_reference_translate(self):
# If an expression's table leaves all reference database tables, verify
# we translate correctly
table = self.con.table('alltypes')
query = _get_query(table)
sql_string = query.compile()
expected = "SELECT *\nFROM alltypes"
assert sql_string == expected
def test_simple_joins(self):
t1 = self.con.table('star1')
t2 = self.con.table('star2')
pred = t1['foo_id'] == t2['foo_id']
pred2 = t1['bar_id'] == t2['foo_id']
cases = [
(
t1.inner_join(t2, [pred])[[t1]],
"""SELECT t0.*
FROM star1 t0
INNER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`""",
),
(
t1.left_join(t2, [pred])[[t1]],
"""SELECT t0.*
FROM star1 t0
LEFT OUTER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`""",
),
(
t1.outer_join(t2, [pred])[[t1]],
"""SELECT t0.*
FROM star1 t0
FULL OUTER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`""",
),
# multiple predicates
(
t1.inner_join(t2, [pred, pred2])[[t1]],
"""SELECT t0.*
FROM star1 t0
INNER JOIN star2 t1
ON (t0.`foo_id` = t1.`foo_id`) AND
(t0.`bar_id` = t1.`foo_id`)""",
),
]
for expr, expected_sql in cases:
result_sql = Compiler.to_sql(expr)
assert result_sql == expected_sql
def test_multiple_joins(self):
what = self._case_multiple_joins()
result_sql = Compiler.to_sql(what)
expected_sql = """SELECT t0.*, t1.`value1`, t2.`value2`
FROM star1 t0
LEFT OUTER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`
INNER JOIN star3 t2
ON t0.`bar_id` = t2.`bar_id`"""
assert result_sql == expected_sql
def test_join_between_joins(self):
projected = self._case_join_between_joins()
result = Compiler.to_sql(projected)
expected = """SELECT t0.*, t1.`value3`, t1.`value4`
FROM (
SELECT t2.*, t3.`value2`
FROM `first` t2
INNER JOIN second t3
ON t2.`key1` = t3.`key1`
) t0
INNER JOIN (
SELECT t2.*, t3.`value4`
FROM third t2
INNER JOIN fourth t3
ON t2.`key3` = t3.`key3`
) t1
ON t0.`key2` = t1.`key2`"""
assert result == expected
def test_join_just_materialized(self):
joined = self._case_join_just_materialized()
result = Compiler.to_sql(joined)
expected = """SELECT *
FROM tpch_nation t0
INNER JOIN tpch_region t1
ON t0.`n_regionkey` = t1.`r_regionkey`
INNER JOIN tpch_customer t2
ON t0.`n_nationkey` = t2.`c_nationkey`"""
assert result == expected
result = Compiler.to_sql(joined.materialize())
assert result == expected
def test_semi_anti_joins(self):
sj, aj = self._case_semi_anti_joins()
result = Compiler.to_sql(sj)
expected = """SELECT t0.*
FROM star1 t0
LEFT SEMI JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`"""
assert result == expected
result = Compiler.to_sql(aj)
expected = """SELECT t0.*
FROM star1 t0
LEFT ANTI JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`"""
assert result == expected
def test_self_reference_simple(self):
expr = self._case_self_reference_simple()
result_sql = Compiler.to_sql(expr)
expected_sql = "SELECT *\nFROM star1"
assert result_sql == expected_sql
def test_join_self_reference(self):
result = self._case_self_reference_join()
result_sql = Compiler.to_sql(result)
expected_sql = """SELECT t0.*
FROM star1 t0
INNER JOIN star1 t1
ON t0.`foo_id` = t1.`bar_id`"""
assert result_sql == expected_sql
def test_join_projection_subquery_broken_alias(self):
expr = self._case_join_projection_subquery_bug()
result = Compiler.to_sql(expr)
expected = """SELECT t1.*, t0.*
FROM (
SELECT t2.`n_nationkey`, t2.`n_name` AS `nation`, t3.`r_name` AS `region`
FROM nation t2
INNER JOIN region t3
ON t2.`n_regionkey` = t3.`r_regionkey`
) t0
INNER JOIN customer t1
ON t0.`n_nationkey` = t1.`c_nationkey`"""
assert result == expected
def test_where_simple_comparisons(self):
what = self._case_where_simple_comparisons()
result = Compiler.to_sql(what)
expected = """SELECT *
FROM star1
WHERE (`f` > 0) AND
(`c` < (`f` * 2))"""
assert result == expected
def test_where_in_array_literal(self):
# e.g.
# where string_col in (v1, v2, v3)
raise unittest.SkipTest
def test_where_with_join(self):
e1 = self._case_where_with_join()
expected_sql = """SELECT t0.*, t1.`value1`, t1.`value3`
FROM star1 t0
INNER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`
WHERE (t0.`f` > 0) AND
(t1.`value3` < 1000)"""
result_sql = Compiler.to_sql(e1)
assert result_sql == expected_sql
# result2_sql = to_sql(e2)
# assert result2_sql == expected_sql
def test_where_no_pushdown_possible(self):
t1 = self.con.table('star1')
t2 = self.con.table('star2')
joined = t1.inner_join(t2, [t1.foo_id == t2.foo_id])[
t1, (t1.f - t2.value1).name('diff')
]
filtered = joined[joined.diff > 1]
# TODO: I'm not sure if this is exactly what we want
expected_sql = """SELECT *
FROM (
SELECT t0.*, t0.`f` - t1.`value1` AS `diff`
FROM star1 t0
INNER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`
WHERE t0.`f` > 0 AND
t1.`value3` < 1000
)
WHERE `diff` > 1"""
raise unittest.SkipTest
result_sql = Compiler.to_sql(filtered)
assert result_sql == expected_sql
def test_where_with_between(self):
t = self.con.table('alltypes')
what = t.filter([t.a > 0, t.f.between(0, 1)])
result = Compiler.to_sql(what)
expected = """SELECT *
FROM alltypes
WHERE (`a` > 0) AND
(`f` BETWEEN 0 AND 1)"""
assert result == expected
def test_where_analyze_scalar_op(self):
# root cause of #310
table = self.con.table('functional_alltypes')
expr = table.filter(
[
table.timestamp_col
< (ibis.timestamp('2010-01-01') + ibis.interval(months=3)),
table.timestamp_col < (ibis.now() + ibis.interval(days=10)),
]
).count()
result = Compiler.to_sql(expr)
expected = """\
SELECT count(*) AS `count`
FROM functional_alltypes
WHERE (`timestamp_col` < date_add(cast({} as timestamp), INTERVAL 3 MONTH)) AND
(`timestamp_col` < date_add(cast(now() as timestamp), INTERVAL 10 DAY))""" # noqa: E501
assert result == expected.format("'2010-01-01 00:00:00'")
def test_bug_duplicated_where(self):
# GH #539
table = self.con.table('airlines')
t = table['arrdelay', 'dest']
expr = t.group_by('dest').mutate(
dest_avg=t.arrdelay.mean(), dev=t.arrdelay - t.arrdelay.mean()
)
tmp1 = expr[expr.dev.notnull()]
tmp2 = tmp1.sort_by(ibis.desc('dev'))
worst = tmp2.limit(10)
result = Compiler.to_sql(worst)
# TODO(cpcloud): We should be able to flatten the second subquery into
# the first
expected = """\
SELECT t0.*
FROM (
SELECT *, avg(`arrdelay`) OVER (PARTITION BY `dest`) AS `dest_avg`,
`arrdelay` - avg(`arrdelay`) OVER (PARTITION BY `dest`) AS `dev`
FROM (
SELECT `arrdelay`, `dest`
FROM airlines
) t2
) t0
WHERE t0.`dev` IS NOT NULL
ORDER BY t0.`dev` DESC
LIMIT 10"""
assert result == expected
def test_simple_aggregate_query(self):
expected = [
"""SELECT `foo_id`, sum(`f`) AS `total`
FROM star1
GROUP BY 1""",
"""SELECT `foo_id`, `bar_id`, sum(`f`) AS `total`
FROM star1
GROUP BY 1, 2""",
]
cases = self._case_simple_aggregate_query()
for expr, expected_sql in zip(cases, expected):
result_sql = Compiler.to_sql(expr)
assert result_sql == expected_sql
def test_aggregate_having(self):
e1, e2 = self._case_aggregate_having()
result = Compiler.to_sql(e1)
expected = """SELECT `foo_id`, sum(`f`) AS `total`
FROM star1
GROUP BY 1
HAVING sum(`f`) > 10"""
assert result == expected
result = Compiler.to_sql(e2)
expected = """SELECT `foo_id`, sum(`f`) AS `total`
FROM star1
GROUP BY 1
HAVING count(*) > 100"""
assert result == expected
def test_aggregate_table_count_metric(self):
expr = self.con.table('star1').count()
result = Compiler.to_sql(expr)
expected = """SELECT count(*) AS `count`
FROM star1"""
assert result == expected
def test_aggregate_count_joined(self):
expr = self._case_aggregate_count_joined()
result = Compiler.to_sql(expr)
expected = """SELECT count(*) AS `count`
FROM (
SELECT t2.*, t1.`r_name` AS `region`
FROM tpch_region t1
INNER JOIN tpch_nation t2
ON t1.`r_regionkey` = t2.`n_regionkey`
) t0"""
assert result == expected
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_expr_template_field_name_binding(self):
# Given an expression with no concrete links to actual database tables,
# indicate a mapping between the distinct unbound table leaves of the
# expression and some database tables with compatible schemas but
# potentially different column names
assert False
def test_no_aliases_needed(self):
table = api.table(
[('key1', 'string'), ('key2', 'string'), ('value', 'double')]
)
expr = table.aggregate(
[table['value'].sum().name('total')], by=['key1', 'key2']
)
query = _get_query(expr)
context = query.context
assert not context.need_aliases()
def test_table_names_overlap_default_aliases(self):
# see discussion in #104; this actually is not needed for query
# correctness, and only makes the generated SQL nicer
raise unittest.SkipTest
t0 = api.table([('key', 'string'), ('v1', 'double')], 't1')
t1 = api.table([('key', 'string'), ('v2', 'double')], 't0')
expr = t0.join(t1, t0.key == t1.key)[t0.key, t0.v1, t1.v2]
result = Compiler.to_sql(expr)
expected = """\
SELECT t2.`key`, t2.`v1`, t3.`v2`
FROM t0 t2
INNER JOIN t1 t3
ON t2.`key` = t3.`key`"""
assert result == expected
def test_context_aliases_multiple_join(self):
t1 = self.con.table('star1')
t2 = self.con.table('star2')
t3 = self.con.table('star3')
expr = t1.left_join(t2, [t1['foo_id'] == t2['foo_id']]).inner_join(
t3, [t1['bar_id'] == t3['bar_id']]
)[[t1, t2['value1'], t3['value2']]]
query = _get_query(expr)
context = query.context
assert context.get_ref(t1) == 't0'
assert context.get_ref(t2) == 't1'
assert context.get_ref(t3) == 't2'
def test_fuse_projections(self):
table = api.table(
[('foo', 'int32'), ('bar', 'int64'), ('value', 'double')],
name='tbl',
)
# Cases where we project in both cases using the base table reference
f1 = (table['foo'] + table['bar']).name('baz')
pred = table['value'] > 0
table2 = table[table, f1]
table2_filtered = table2[pred]
f2 = (table2['foo'] * 2).name('qux')
f3 = (table['foo'] * 2).name('qux')
table3 = table2.projection([table2, f2])
# fusion works even if there's a filter
table3_filtered = table2_filtered.projection([table2, f2])
expected = table[table, f1, f3]
expected2 = table[pred][table, f1, f3]
assert table3.equals(expected)
assert table3_filtered.equals(expected2)
ex_sql = """SELECT *, `foo` + `bar` AS `baz`, `foo` * 2 AS `qux`
FROM tbl"""
ex_sql2 = """SELECT *, `foo` + `bar` AS `baz`, `foo` * 2 AS `qux`
FROM tbl
WHERE `value` > 0"""
table3_sql = Compiler.to_sql(table3)
table3_filt_sql = Compiler.to_sql(table3_filtered)
assert table3_sql == ex_sql
assert table3_filt_sql == ex_sql2
# Use the intermediate table refs
table3 = table2.projection([table2, f2])
# fusion works even if there's a filter
table3_filtered = table2_filtered.projection([table2, f2])
expected = table[table, f1, f3]
expected2 = table[pred][table, f1, f3]
assert table3.equals(expected)
assert table3_filtered.equals(expected2)
def test_projection_filter_fuse(self):
expr1, expr2, expr3 = self._case_projection_fuse_filter()
sql1 = Compiler.to_sql(expr1)
sql2 = Compiler.to_sql(expr2)
sql3 = Compiler.to_sql(expr3)
assert sql1 == sql2
assert sql1 == sql3
def test_bug_project_multiple_times(self):
        # GH #108
con = self.con
customer = con.table('tpch_customer')
nation = con.table('tpch_nation')
region = con.table('tpch_region')
joined = customer.inner_join(
nation, [customer.c_nationkey == nation.n_nationkey]
).inner_join(region, [nation.n_regionkey == region.r_regionkey])
proj1 = [customer, nation.n_name, region.r_name]
step1 = joined[proj1]
topk_by = step1.c_acctbal.cast('double').sum()
pred = step1.n_name.topk(10, by=topk_by)
proj_exprs = [step1.c_name, step1.r_name, step1.n_name]
step2 = step1[pred]
expr = step2.projection(proj_exprs)
# it works!
result = Compiler.to_sql(expr)
expected = """\
WITH t0 AS (
SELECT t2.*, t3.`n_name`, t4.`r_name`
FROM tpch_customer t2
INNER JOIN tpch_nation t3
ON t2.`c_nationkey` = t3.`n_nationkey`
INNER JOIN tpch_region t4
ON t3.`n_regionkey` = t4.`r_regionkey`
)
SELECT `c_name`, `r_name`, `n_name`
FROM t0
LEFT SEMI JOIN (
SELECT *
FROM (
SELECT `n_name`, sum(CAST(`c_acctbal` AS double)) AS `sum`
FROM t0
GROUP BY 1
) t2
ORDER BY `sum` DESC
LIMIT 10
) t1
ON t0.`n_name` = t1.`n_name`"""
assert result == expected
def test_aggregate_projection_subquery(self):
t = self.con.table('alltypes')
proj = t[t.f > 0][t, (t.a + t.b).name('foo')]
result = Compiler.to_sql(proj)
expected = """SELECT *, `a` + `b` AS `foo`
FROM alltypes
WHERE `f` > 0"""
assert result == expected
def agg(x):
return x.aggregate([x.foo.sum().name('foo total')], by=['g'])
# predicate gets pushed down
filtered = proj[proj.g == 'bar']
result = Compiler.to_sql(filtered)
expected = """SELECT *, `a` + `b` AS `foo`
FROM alltypes
WHERE (`f` > 0) AND
(`g` = 'bar')"""
assert result == expected
agged = agg(filtered)
result = Compiler.to_sql(agged)
expected = """SELECT `g`, sum(`foo`) AS `foo total`
FROM (
SELECT *, `a` + `b` AS `foo`
FROM alltypes
WHERE (`f` > 0) AND
(`g` = 'bar')
) t0
GROUP BY 1"""
assert result == expected
# Pushdown is not possible (in Impala, Postgres, others)
agged2 = agg(proj[proj.foo < 10])
result = Compiler.to_sql(agged2)
expected = """SELECT `g`, sum(`foo`) AS `foo total`
FROM (
SELECT *, `a` + `b` AS `foo`
FROM alltypes
WHERE `f` > 0
) t0
WHERE `foo` < 10
GROUP BY 1"""
assert result == expected
def test_subquery_aliased(self):
case = self._case_subquery_aliased()
expected = """SELECT t0.*, t1.`value1`
FROM (
SELECT `foo_id`, sum(`f`) AS `total`
FROM star1
GROUP BY 1
) t0
INNER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`"""
self._compare_sql(case, expected)
def test_double_nested_subquery_no_aliases(self):
# We don't require any table aliasing anywhere
t = api.table(
[
('key1', 'string'),
('key2', 'string'),
('key3', 'string'),
('value', 'double'),
],
'foo_table',
)
agg1 = t.aggregate(
[t.value.sum().name('total')], by=['key1', 'key2', 'key3']
)
agg2 = agg1.aggregate(
[agg1.total.sum().name('total')], by=['key1', 'key2']
)
agg3 = agg2.aggregate([agg2.total.sum().name('total')], by=['key1'])
result = Compiler.to_sql(agg3)
expected = """SELECT `key1`, sum(`total`) AS `total`
FROM (
SELECT `key1`, `key2`, sum(`total`) AS `total`
FROM (
SELECT `key1`, `key2`, `key3`, sum(`value`) AS `total`
FROM foo_table
GROUP BY 1, 2, 3
) t1
GROUP BY 1, 2
) t0
GROUP BY 1"""
assert result == expected
def test_aggregate_projection_alias_bug(self):
# Observed in use
t1 = self.con.table('star1')
t2 = self.con.table('star2')
what = t1.inner_join(t2, [t1.foo_id == t2.foo_id])[[t1, t2.value1]]
what = what.aggregate(
[what.value1.sum().name('total')], by=[what.foo_id]
)
# TODO: Not fusing the aggregation with the projection yet
result = Compiler.to_sql(what)
expected = """SELECT `foo_id`, sum(`value1`) AS `total`
FROM (
SELECT t1.*, t2.`value1`
FROM star1 t1
INNER JOIN star2 t2
ON t1.`foo_id` = t2.`foo_id`
) t0
GROUP BY 1"""
assert result == expected
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_aggregate_fuse_with_projection(self):
# see above test case
assert False
def test_subquery_used_for_self_join(self):
expr = self._case_subquery_used_for_self_join()
result = Compiler.to_sql(expr)
expected = """WITH t0 AS (
SELECT `g`, `a`, `b`, sum(`f`) AS `total`
FROM alltypes
GROUP BY 1, 2, 3
)
SELECT t0.`g`, max(t0.`total` - t1.`total`) AS `metric`
FROM t0
INNER JOIN t0 t1
ON t0.`a` = t1.`b`
GROUP BY 1"""
assert result == expected
def test_subquery_in_union(self):
t = self.con.table('alltypes')
expr1 = t.group_by(['a', 'g']).aggregate(t.f.sum().name('metric'))
expr2 = expr1.view()
join1 = expr1.join(expr2, expr1.g == expr2.g)[[expr1]]
join2 = join1.view()
expr = join1.union(join2)
result = Compiler.to_sql(expr)
expected = """\
WITH t0 AS (
SELECT `a`, `g`, sum(`f`) AS `metric`
FROM alltypes
GROUP BY 1, 2
),
t1 AS (
SELECT t0.*
FROM t0
INNER JOIN t0 t3
ON t0.`g` = t3.`g`
)
SELECT *
FROM t1
UNION ALL
SELECT t0.*
FROM t0
INNER JOIN t0 t3
ON t0.`g` = t3.`g`"""
assert result == expected
def test_subquery_factor_correlated_subquery(self):
# #173, #183 and other issues
expr = self._case_subquery_factor_correlated_subquery()
result = Compiler.to_sql(expr)
expected = """\
WITH t0 AS (
SELECT t6.*, t1.`r_name` AS `region`, t3.`o_totalprice` AS `amount`,
CAST(t3.`o_orderdate` AS timestamp) AS `odate`
FROM tpch_region t1
INNER JOIN tpch_nation t2
ON t1.`r_regionkey` = t2.`n_regionkey`
INNER JOIN tpch_customer t6
ON t6.`c_nationkey` = t2.`n_nationkey`
INNER JOIN tpch_orders t3
ON t3.`o_custkey` = t6.`c_custkey`
)
SELECT t0.*
FROM t0
WHERE t0.`amount` > (
SELECT avg(t4.`amount`) AS `mean`
FROM t0 t4
WHERE t4.`region` = t0.`region`
)
LIMIT 10"""
assert result == expected
def test_self_join_subquery_distinct_equal(self):
expr = self._case_self_join_subquery_distinct_equal()
result = Compiler.to_sql(expr)
expected = """\
WITH t0 AS (
SELECT t2.*, t3.*
FROM tpch_region t2
INNER JOIN tpch_nation t3
ON t2.`r_regionkey` = t3.`n_regionkey`
)
SELECT t0.`r_name`, t1.`n_name`
FROM t0
INNER JOIN t0 t1
ON t0.`r_regionkey` = t1.`r_regionkey`"""
assert result == expected
def test_limit_with_self_join(self):
t = self.con.table('functional_alltypes')
t2 = t.view()
expr = t.join(t2, t.tinyint_col < t2.timestamp_col.minute()).count()
# it works
result = Compiler.to_sql(expr)
expected = """\
SELECT count(*) AS `count`
FROM functional_alltypes t0
INNER JOIN functional_alltypes t1
ON t0.`tinyint_col` < extract(t1.`timestamp_col`, 'minute')"""
assert result == expected
def test_cte_factor_distinct_but_equal(self):
expr = self._case_cte_factor_distinct_but_equal()
result = Compiler.to_sql(expr)
expected = """\
WITH t0 AS (
SELECT `g`, sum(`f`) AS `metric`
FROM alltypes
GROUP BY 1
)
SELECT t0.*
FROM t0
INNER JOIN t0 t1
ON t0.`g` = t1.`g`"""
assert result == expected
def test_tpch_self_join_failure(self):
yoy = self._case_tpch_self_join_failure()
Compiler.to_sql(yoy)
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_extract_subquery_nested_lower(self):
# We may have a join between two tables requiring subqueries, and
# buried inside these there may be a common subquery. Let's test that
# we find it and pull it out to the top level to avoid repeating
# ourselves.
assert False
def test_subquery_in_filter_predicate(self):
expr, expr2 = self._case_subquery_in_filter_predicate()
result = Compiler.to_sql(expr)
expected = """SELECT *
FROM star1
WHERE `f` > (
SELECT avg(`f`) AS `mean`
FROM star1
)"""
assert result == expected
result = Compiler.to_sql(expr2)
expected = """SELECT *
FROM star1
WHERE `f` > (
SELECT avg(`f`) AS `mean`
FROM star1
WHERE `foo_id` = 'foo'
)"""
assert result == expected
def test_filter_subquery_derived_reduction(self):
expr3, expr4 = self._case_filter_subquery_derived_reduction()
result = Compiler.to_sql(expr3)
expected = """SELECT *
FROM star1
WHERE `f` > (
SELECT ln(avg(`f`)) AS `tmp`
FROM star1
WHERE `foo_id` = 'foo'
)"""
assert result == expected
result = Compiler.to_sql(expr4)
expected = """SELECT *
FROM star1
WHERE `f` > (
SELECT ln(avg(`f`)) + 1 AS `tmp`
FROM star1
WHERE `foo_id` = 'foo'
)"""
assert result == expected
def test_topk_operation(self):
filtered, filtered2 = self._case_topk_operation()
query = Compiler.to_sql(filtered)
expected = """SELECT t0.*
FROM tbl t0
LEFT SEMI JOIN (
SELECT *
FROM (
SELECT `city`, avg(`v2`) AS `mean`
FROM tbl
GROUP BY 1
) t2
ORDER BY `mean` DESC
LIMIT 10
) t1
ON t0.`city` = t1.`city`"""
assert query == expected
query = Compiler.to_sql(filtered2)
expected = """SELECT t0.*
FROM tbl t0
LEFT SEMI JOIN (
SELECT *
FROM (
SELECT `city`, count(`city`) AS `count`
FROM tbl
GROUP BY 1
) t2
ORDER BY `count` DESC
LIMIT 10
) t1
ON t0.`city` = t1.`city`"""
assert query == expected
def test_topk_predicate_pushdown_bug(self):
# Observed on TPCH data
cplusgeo = customer.inner_join(
nation, [customer.c_nationkey == nation.n_nationkey]
).inner_join(region, [nation.n_regionkey == region.r_regionkey])[
customer, nation.n_name, region.r_name
]
pred = cplusgeo.n_name.topk(10, by=cplusgeo.c_acctbal.sum())
expr = cplusgeo.filter([pred])
result = Compiler.to_sql(expr)
expected = """\
WITH t0 AS (
SELECT t2.*, t3.`n_name`, t4.`r_name`
FROM customer t2
INNER JOIN nation t3
ON t2.`c_nationkey` = t3.`n_nationkey`
INNER JOIN region t4
ON t3.`n_regionkey` = t4.`r_regionkey`
)
SELECT t0.*
FROM t0
LEFT SEMI JOIN (
SELECT *
FROM (
SELECT `n_name`, sum(`c_acctbal`) AS `sum`
FROM t0
GROUP BY 1
) t2
ORDER BY `sum` DESC
LIMIT 10
) t1
ON t0.`n_name` = t1.`n_name`"""
assert result == expected
def test_topk_analysis_bug(self):
# GH #398
airlines = ibis.table(
[('dest', 'string'), ('origin', 'string'), ('arrdelay', 'int32')],
'airlines',
)
dests = ['ORD', 'JFK', 'SFO']
dests_formatted = repr(tuple(set(dests)))
delay_filter = airlines.dest.topk(10, by=airlines.arrdelay.mean())
t = airlines[airlines.dest.isin(dests)]
expr = t[delay_filter].group_by('origin').size()
result = Compiler.to_sql(expr)
expected = """\
SELECT t0.`origin`, count(*) AS `count`
FROM airlines t0
LEFT SEMI JOIN (
SELECT *
FROM (
SELECT `dest`, avg(`arrdelay`) AS `mean`
FROM airlines
GROUP BY 1
) t2
ORDER BY `mean` DESC
LIMIT 10
) t1
ON t0.`dest` = t1.`dest`
WHERE t0.`dest` IN {}
GROUP BY 1""".format(
dests_formatted
)
assert result == expected
def test_topk_to_aggregate(self):
t = ibis.table(
[('dest', 'string'), ('origin', 'string'), ('arrdelay', 'int32')],
'airlines',
)
top = t.dest.topk(10, by=t.arrdelay.mean())
result = Compiler.to_sql(top)
expected = Compiler.to_sql(top.to_aggregation())
assert result == expected
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_bottomk(self):
assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_topk_antijoin(self):
# Get the "other" category somehow
assert False
def test_case_in_projection(self):
t = self.con.table('alltypes')
expr = (
t.g.case()
.when('foo', 'bar')
.when('baz', 'qux')
.else_('default')
.end()
)
expr2 = (
api.case().when(t.g == 'foo', 'bar').when(t.g == 'baz', t.g).end()
)
proj = t[expr.name('col1'), expr2.name('col2'), t]
result = Compiler.to_sql(proj)
expected = """SELECT
CASE `g`
WHEN 'foo' THEN 'bar'
WHEN 'baz' THEN 'qux'
ELSE 'default'
END AS `col1`,
CASE
WHEN `g` = 'foo' THEN 'bar'
WHEN `g` = 'baz' THEN `g`
ELSE CAST(NULL AS string)
END AS `col2`, *
FROM alltypes"""
assert result == expected
def test_identifier_quoting(self):
data = api.table([('date', 'int32'), ('explain', 'string')], 'table')
expr = data[data.date.name('else'), data.explain.name('join')]
result = Compiler.to_sql(expr)
expected = """SELECT `date` AS `else`, `explain` AS `join`
FROM `table`"""
assert result == expected
def test_scalar_subquery_different_table(self):
t1, t2 = self.foo, self.bar
expr = t1[t1.y > t2.x.max()]
result = Compiler.to_sql(expr)
expected = """SELECT *
FROM foo
WHERE `y` > (
SELECT max(`x`) AS `max`
FROM bar
)"""
assert result == expected
def test_where_uncorrelated_subquery(self):
expr = self._case_where_uncorrelated_subquery()
result = Compiler.to_sql(expr)
expected = """SELECT *
FROM foo
WHERE `job` IN (
SELECT `job`
FROM bar
)"""
assert result == expected
def test_where_correlated_subquery(self):
expr = self._case_where_correlated_subquery()
result = Compiler.to_sql(expr)
expected = """SELECT t0.*
FROM foo t0
WHERE t0.`y` > (
SELECT avg(t1.`y`) AS `mean`
FROM foo t1
WHERE t0.`dept_id` = t1.`dept_id`
)"""
assert result == expected
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_where_array_correlated(self):
# Test membership in some record-dependent values, if this is supported
assert False
def test_exists(self):
e1, e2 = self._case_exists()
result = Compiler.to_sql(e1)
expected = """SELECT t0.*
FROM foo t0
WHERE EXISTS (
SELECT 1
FROM bar t1
WHERE t0.`key1` = t1.`key1`
)"""
assert result == expected
result = Compiler.to_sql(e2)
expected = """SELECT t0.*
FROM foo t0
WHERE EXISTS (
SELECT 1
FROM bar t1
WHERE (t0.`key1` = t1.`key1`) AND
(t1.`key2` = 'foo')
)"""
assert result == expected
def test_exists_subquery_repr(self):
# GH #660
t1, t2 = self.t1, self.t2
cond = t1.key1 == t2.key1
expr = t1[cond.any()]
stmt = _get_query(expr)
repr(stmt.where[0])
def test_not_exists(self):
expr = self._case_not_exists()
result = Compiler.to_sql(expr)
expected = """SELECT t0.*
FROM foo t0
WHERE NOT EXISTS (
SELECT 1
FROM bar t1
WHERE t0.`key1` = t1.`key1`
)"""
assert result == expected
def test_filter_inside_exists(self):
events = ibis.table(
[
('session_id', 'int64'),
('user_id', 'int64'),
('event_type', 'int32'),
('ts', 'timestamp'),
],
'events',
)
purchases = ibis.table(
[
('item_id', 'int64'),
('user_id', 'int64'),
('price', 'double'),
('ts', 'timestamp'),
],
'purchases',
)
filt = purchases.ts > '2015-08-15'
cond = (events.user_id == purchases[filt].user_id).any()
expr = events[cond]
result = Compiler.to_sql(expr)
expected = """\
SELECT t0.*
FROM events t0
WHERE EXISTS (
SELECT 1
FROM (
SELECT *
FROM purchases
WHERE `ts` > '2015-08-15'
) t1
WHERE t0.`user_id` = t1.`user_id`
)"""
assert result == expected
def test_self_reference_in_exists(self):
semi, anti = self._case_self_reference_in_exists()
result = Compiler.to_sql(semi)
expected = """\
SELECT t0.*
FROM functional_alltypes t0
WHERE EXISTS (
SELECT 1
FROM functional_alltypes t1
WHERE t0.`string_col` = t1.`string_col`
)"""
assert result == expected
result = Compiler.to_sql(anti)
expected = """\
SELECT t0.*
FROM functional_alltypes t0
WHERE NOT EXISTS (
SELECT 1
FROM functional_alltypes t1
WHERE t0.`string_col` = t1.`string_col`
)"""
assert result == expected
def test_self_reference_limit_exists(self):
case = self._case_self_reference_limit_exists()
expected = """\
WITH t0 AS (
SELECT *
FROM functional_alltypes
LIMIT 100
)
SELECT *
FROM t0
WHERE NOT EXISTS (
SELECT 1
FROM t0 t1
WHERE t0.`string_col` = t1.`string_col`
)"""
self._compare_sql(case, expected)
def test_limit_cte_extract(self):
case = self._case_limit_cte_extract()
result = Compiler.to_sql(case)
expected = """\
WITH t0 AS (
SELECT *
FROM functional_alltypes
LIMIT 100
)
SELECT t0.*
FROM t0
INNER JOIN t0 t1"""
assert result == expected
def test_sort_by(self):
cases = self._case_sort_by()
expected = [
"""SELECT *
FROM star1
ORDER BY `f`""",
"""SELECT *
FROM star1
ORDER BY `f` DESC""",
"""SELECT *
FROM star1
ORDER BY `c`, `f` DESC""",
]
for case, ex in zip(cases, expected):
result = Compiler.to_sql(case)
assert result == ex
def test_limit(self):
cases = self._case_limit()
expected = [
"""SELECT *
FROM star1
LIMIT 10""",
"""SELECT *
FROM star1
LIMIT 10 OFFSET 5""",
"""SELECT *
FROM star1
WHERE `f` > 0
LIMIT 10""",
"""SELECT *
FROM (
SELECT *
FROM star1
LIMIT 10
) t0
WHERE `f` > 0""",
]
for case, ex in zip(cases, expected):
result = Compiler.to_sql(case)
assert result == ex
def test_join_with_limited_table(self):
joined = self._case_join_with_limited_table()
result = Compiler.to_sql(joined)
expected = """SELECT t0.*
FROM (
SELECT *
FROM star1
LIMIT 100
) t0
INNER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`"""
assert result == expected
def test_sort_by_on_limit_yield_subquery(self):
# x.limit(...).sort_by(...)
# is semantically different from
# x.sort_by(...).limit(...)
# and will often yield different results
t = self.con.table('functional_alltypes')
expr = (
t.group_by('string_col')
.aggregate([t.count().name('nrows')])
.limit(5)
.sort_by('string_col')
)
result = Compiler.to_sql(expr)
expected = """SELECT *
FROM (
SELECT `string_col`, count(*) AS `nrows`
FROM functional_alltypes
GROUP BY 1
LIMIT 5
) t0
ORDER BY `string_col`"""
assert result == expected
def test_multiple_limits(self):
t = self.con.table('functional_alltypes')
expr = t.limit(20).limit(10)
stmt = _get_query(expr)
assert stmt.limit['n'] == 10
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_top_convenience(self):
# x.top(10, by=field)
# x.top(10, by=[field1, field2])
assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_self_aggregate_in_predicate(self):
# Per ibis #43
assert False
def test_self_join_filter_analysis_bug(self):
expr, _ = self._case_filter_self_join_analysis_bug()
expected = """\
SELECT t0.`region`, t0.`total` - t1.`total` AS `diff`
FROM (
SELECT `region`, `kind`, sum(`amount`) AS `total`
FROM purchases
WHERE `kind` = 'foo'
GROUP BY 1, 2
) t0
INNER JOIN (
SELECT `region`, `kind`, sum(`amount`) AS `total`
FROM purchases
WHERE `kind` = 'bar'
GROUP BY 1, 2
) t1
ON t0.`region` = t1.`region`"""
self._compare_sql(expr, expected)
def test_join_filtered_tables_no_pushdown(self):
# #790, #781
tbl_a = ibis.table(
[
('year', 'int32'),
('month', 'int32'),
('day', 'int32'),
('value_a', 'double'),
],
'a',
)
tbl_b = ibis.table(
[
('year', 'int32'),
('month', 'int32'),
('day', 'int32'),
('value_b', 'double'),
],
'b',
)
tbl_a_filter = tbl_a.filter(
[tbl_a.year == 2016, tbl_a.month == 2, tbl_a.day == 29]
)
tbl_b_filter = tbl_b.filter(
[tbl_b.year == 2016, tbl_b.month == 2, tbl_b.day == 29]
)
joined = tbl_a_filter.left_join(tbl_b_filter, ['year', 'month', 'day'])
result = joined[tbl_a_filter.value_a, tbl_b_filter.value_b]
join_op = result.op().table.op()
assert join_op.left.equals(tbl_a_filter)
assert join_op.right.equals(tbl_b_filter)
result_sql = Compiler.to_sql(result)
expected_sql = """\
SELECT t0.`value_a`, t1.`value_b`
FROM (
SELECT *
FROM a
WHERE (`year` = 2016) AND
(`month` = 2) AND
(`day` = 29)
) t0
LEFT OUTER JOIN (
SELECT *
FROM b
WHERE (`year` = 2016) AND
(`month` = 2) AND
(`day` = 29)
) t1
ON (t0.`year` = t1.`year`) AND
(t0.`month` = t1.`month`) AND
(t0.`day` = t1.`day`)"""
assert result_sql == expected_sql
def test_loj_subquery_filter_handling(self):
# #781
left = ibis.table([('id', 'int32'), ('desc', 'string')], 'foo')
right = ibis.table([('id', 'int32'), ('desc', 'string')], 'bar')
left = left[left.id < 2]
right = right[right.id < 3]
joined = left.left_join(right, ['id', 'desc'])
joined = joined[
[left[name].name('left_' + name) for name in left.columns]
+ [right[name].name('right_' + name) for name in right.columns]
]
result = Compiler.to_sql(joined)
expected = """\
SELECT t0.`id` AS `left_id`, t0.`desc` AS `left_desc`, t1.`id` AS `right_id`,
t1.`desc` AS `right_desc`
FROM (
SELECT *
FROM foo
WHERE `id` < 2
) t0
LEFT OUTER JOIN (
SELECT *
FROM bar
WHERE `id` < 3
) t1
ON (t0.`id` = t1.`id`) AND
(t0.`desc` = t1.`desc`)"""
assert result == expected
def test_startswith(self):
expr = self._case_startswith()
expected = """\
SELECT `foo_id` like concat('foo', '%') AS `tmp`
FROM star1"""
assert Compiler.to_sql(expr) == expected
def test_endswith(self):
expr = self._case_endswith()
expected = """\
SELECT `foo_id` like concat('%', 'foo') AS `tmp`
FROM star1"""
assert Compiler.to_sql(expr) == expected
class TestUnions(unittest.TestCase, ExprTestCases):
def setUp(self):
self.con = MockConnection()
def test_union(self):
union1 = self._case_union()
result = Compiler.to_sql(union1)
expected = """\
SELECT `string_col` AS `key`, CAST(`float_col` AS double) AS `value`
FROM functional_alltypes
WHERE `int_col` > 0
UNION ALL
SELECT `string_col` AS `key`, `double_col` AS `value`
FROM functional_alltypes
WHERE `int_col` <= 0"""
assert result == expected
def test_union_distinct(self):
union = self._case_union(distinct=True)
result = Compiler.to_sql(union)
expected = """\
SELECT `string_col` AS `key`, CAST(`float_col` AS double) AS `value`
FROM functional_alltypes
WHERE `int_col` > 0
UNION
SELECT `string_col` AS `key`, `double_col` AS `value`
FROM functional_alltypes
WHERE `int_col` <= 0"""
assert result == expected
def test_union_project_column(self):
# select a column, get a subquery
union1 = self._case_union()
expr = union1[[union1.key]]
result = Compiler.to_sql(expr)
expected = """SELECT `key`
FROM (
SELECT `string_col` AS `key`, CAST(`float_col` AS double) AS `value`
FROM functional_alltypes
WHERE `int_col` > 0
UNION ALL
SELECT `string_col` AS `key`, `double_col` AS `value`
FROM functional_alltypes
WHERE `int_col` <= 0
) t0"""
assert result == expected
class TestIntersect(unittest.TestCase, ExprTestCases):
def setUp(self):
self.con = MockConnection()
def test_table_intersect(self):
intersection = self._case_intersect()
result = Compiler.to_sql(intersection)
expected = """\
SELECT `string_col` AS `key`, CAST(`float_col` AS double) AS `value`
FROM functional_alltypes
WHERE `int_col` > 0
INTERSECT
SELECT `string_col` AS `key`, `double_col` AS `value`
FROM functional_alltypes
WHERE `int_col` <= 0"""
assert result == expected
def test_table_difference(self):
difference = self._case_difference()
result = Compiler.to_sql(difference)
expected = """\
SELECT `string_col` AS `key`, CAST(`float_col` AS double) AS `value`
FROM functional_alltypes
WHERE `int_col` > 0
EXCEPT
SELECT `string_col` AS `key`, `double_col` AS `value`
FROM functional_alltypes
WHERE `int_col` <= 0"""
assert result == expected
class TestDistinct(unittest.TestCase):
def setUp(self):
self.con = MockConnection()
def test_table_distinct(self):
t = self.con.table('functional_alltypes')
expr = t[t.string_col, t.int_col].distinct()
result = Compiler.to_sql(expr)
expected = """SELECT DISTINCT `string_col`, `int_col`
FROM functional_alltypes"""
assert result == expected
def test_array_distinct(self):
t = self.con.table('functional_alltypes')
expr = t.string_col.distinct()
result = Compiler.to_sql(expr)
expected = """SELECT DISTINCT `string_col`
FROM functional_alltypes"""
assert result == expected
def test_count_distinct(self):
t = self.con.table('functional_alltypes')
metric = t.int_col.nunique().name('nunique')
expr = t[t.bigint_col > 0].group_by('string_col').aggregate([metric])
result = Compiler.to_sql(expr)
expected = """\
SELECT `string_col`, count(DISTINCT `int_col`) AS `nunique`
FROM functional_alltypes
WHERE `bigint_col` > 0
GROUP BY 1"""
assert result == expected
def test_multiple_count_distinct(self):
# Impala and some other databases will not execute multiple
# count-distincts in a single aggregation query. This error reporting
# will be left to the database itself, for now.
t = self.con.table('functional_alltypes')
metrics = [
t.int_col.nunique().name('int_card'),
t.smallint_col.nunique().name('smallint_card'),
]
expr = t.group_by('string_col').aggregate(metrics)
result = Compiler.to_sql(expr)
expected = """\
SELECT `string_col`, count(DISTINCT `int_col`) AS `int_card`,
count(DISTINCT `smallint_col`) AS `smallint_card`
FROM functional_alltypes
GROUP BY 1"""
assert result == expected
def test_pushdown_with_or():
t = ibis.table(
[
('double_col', 'double'),
('string_col', 'string'),
('int_col', 'int32'),
('float_col', 'float'),
],
'functional_alltypes',
)
subset = t[(t.double_col > 3.14) & t.string_col.contains('foo')]
filt = subset[(subset.int_col - 1 == 0) | (subset.float_col <= 1.34)]
result = Compiler.to_sql(filt)
expected = """\
SELECT *
FROM functional_alltypes
WHERE (`double_col` > 3.14) AND
(locate('foo', `string_col`) - 1 >= 0) AND
(((`int_col` - 1) = 0) OR (`float_col` <= 1.34))"""
assert result == expected
def test_having_size():
t = ibis.table(
[
('double_col', 'double'),
('string_col', 'string'),
('int_col', 'int32'),
('float_col', 'float'),
],
'functional_alltypes',
)
expr = t.group_by(t.string_col).having(t.double_col.max() == 1).size()
result = Compiler.to_sql(expr)
assert (
result
== """\
SELECT `string_col`, count(*) AS `count`
FROM functional_alltypes
GROUP BY 1
HAVING max(`double_col`) = 1"""
)
def test_having_from_filter():
t = ibis.table([('a', 'int64'), ('b', 'string')], 't')
filt = t[t.b == 'm']
gb = filt.group_by(filt.b)
having = gb.having(filt.a.max() == 2)
agg = having.aggregate(filt.a.sum().name('sum'))
result = Compiler.to_sql(agg)
expected = """\
SELECT `b`, sum(`a`) AS `sum`
FROM t
WHERE `b` = 'm'
GROUP BY 1
HAVING max(`a`) = 2"""
assert result == expected
def test_simple_agg_filter():
t = ibis.table([('a', 'int64'), ('b', 'string')], name='my_table')
filt = t[t.a < 100]
expr = filt[filt.a == filt.a.max()]
result = Compiler.to_sql(expr)
expected = """\
SELECT *
FROM (
SELECT *
FROM my_table
WHERE `a` < 100
) t0
WHERE `a` = (
SELECT max(`a`) AS `max`
FROM my_table
WHERE `a` < 100
)"""
assert result == expected
def test_agg_and_non_agg_filter():
t = ibis.table([('a', 'int64'), ('b', 'string')], name='my_table')
filt = t[t.a < 100]
expr = filt[filt.a == filt.a.max()]
expr = expr[expr.b == 'a']
result = Compiler.to_sql(expr)
expected = """\
SELECT *
FROM (
SELECT *
FROM my_table
WHERE `a` < 100
) t0
WHERE (`a` = (
SELECT max(`a`) AS `max`
FROM my_table
WHERE `a` < 100
)) AND
(`b` = 'a')"""
assert result == expected
def test_agg_filter():
t = ibis.table([('a', 'int64'), ('b', 'int64')], name='my_table')
t = t.mutate(b2=t.b * 2)
t = t[['a', 'b2']]
filt = t[t.a < 100]
expr = filt[filt.a == filt.a.max().name('blah')]
result = Compiler.to_sql(expr)
expected = """\
WITH t0 AS (
SELECT *, `b` * 2 AS `b2`
FROM my_table
),
t1 AS (
SELECT t0.`a`, t0.`b2`
FROM t0
WHERE t0.`a` < 100
)
SELECT t1.*
FROM t1
WHERE t1.`a` = (
SELECT max(`a`) AS `blah`
FROM t1
)"""
assert result == expected
def test_agg_filter_with_alias():
t = ibis.table([('a', 'int64'), ('b', 'int64')], name='my_table')
t = t.mutate(b2=t.b * 2)
t = t[['a', 'b2']]
filt = t[t.a < 100]
expr = filt[filt.a.name('A') == filt.a.max().name('blah')]
result = Compiler.to_sql(expr)
expected = """\
WITH t0 AS (
SELECT *, `b` * 2 AS `b2`
FROM my_table
),
t1 AS (
SELECT t0.`a`, t0.`b2`
FROM t0
WHERE t0.`a` < 100
)
SELECT t1.*
FROM t1
WHERE t1.`a` = (
SELECT max(`a`) AS `blah`
FROM t1
)"""
assert result == expected
def test_table_drop_with_filter():
left = ibis.table(
[('a', 'int64'), ('b', 'string'), ('c', 'timestamp')], name='t'
).relabel({'c': 'C'})
left = left.filter(left.C == datetime.datetime(2018, 1, 1))
left = left.drop(['C'])
left = left.mutate(the_date=datetime.datetime(2018, 1, 1))
right = ibis.table([('b', 'string')], name='s')
joined = left.join(right, left.b == right.b)
joined = joined[left.a]
joined = joined.filter(joined.a < 1.0)
result = Compiler.to_sql(joined)
# previously this was generating incorrect aliases due to not binding the
# self to expressions when calling projection:
# SELECT t0.`a`
# FROM (
# SELECT t2.`a`, t2.`b`, '2018-01-01 00:00:00' AS `the_date`
# FROM (
# SELECT `a`, `b`, `c` AS `C`
# FROM t
# ) t3
# WHERE t3.`C` = '2018-01-01 00:00:00'
# ) t0
# INNER JOIN s t1
# ON t0.`b` = t1.`b`
# WHERE t0.`a` < 1.0
expected = """\
SELECT t0.`a`
FROM (
SELECT `a`, `b`, '2018-01-01 00:00:00' AS `the_date`
FROM (
SELECT *
FROM (
SELECT `a`, `b`, `c` AS `C`
FROM t
) t3
WHERE `C` = '2018-01-01 00:00:00'
) t2
) t0
INNER JOIN s t1
ON t0.`b` = t1.`b`
WHERE t0.`a` < 1.0"""
assert result == expected
| apache-2.0 | 6,561,207,592,762,721,000 | 26.559984 | 94 | 0.550832 | false |
FabriceSalvaire/grouped-purchase-order | GroupedPurchaseOrder/views/account.py | 1 | 13035 | ####################################################################################################
#
# GroupedPurchaseOrder - A Django Application.
# Copyright (C) 2014 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
####################################################################################################
####################################################################################################
# from django.forms.widgets import HiddenInput
from django.contrib import messages
from django.contrib.auth import forms
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.forms import ModelForm
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.utils import translation
from django.utils.translation import ugettext as _
####################################################################################################
from GroupedPurchaseOrder.models import Profile
from GroupedPurchaseOrder.utils import send_localized_mail
####################################################################################################
class AuthenticationForm(forms.AuthenticationForm):
"""Override the default AuthenticationForm in order to add HTML5 attributes. This is the only
change done and needed
"""
##############################################
def __init__(self, *args, **kwargs):
super(AuthenticationForm, self).__init__(*args, **kwargs)
# Add HTML5 attributes
self.fields['password'].widget.attrs['class'] = 'form-control'
self.fields['password'].widget.attrs['placeholder'] = _('Password')
self.fields['username'].widget.attrs['autofocus'] = 'autofocus'
self.fields['username'].widget.attrs['class'] = 'form-control'
self.fields['username'].widget.attrs['placeholder'] = _('Username')
####################################################################################################
class PasswordChangeForm(forms.PasswordChangeForm):
"""Override the default PasswordChangeForm in order to add HTML5 attributes. This is the only
change done and needed
"""
    ##############################################
    def __init__(self, *args, **kwargs):
super(PasswordChangeForm, self).__init__(*args, **kwargs)
# Add HTML5 attributes
self.fields['new_password1'].widget.attrs['class'] = 'form-control'
self.fields['new_password1'].widget.attrs['placeholder'] = _('New password')
self.fields['new_password2'].widget.attrs['class'] = 'form-control'
self.fields['new_password2'].widget.attrs['placeholder'] = _('New password')
self.fields['old_password'].widget.attrs['autofocus'] = 'autofocus'
self.fields['old_password'].widget.attrs['class'] = 'form-control'
self.fields['old_password'].widget.attrs['placeholder'] = _('Old password')
####################################################################################################
class PasswordResetForm(forms.PasswordResetForm):
"""Override the default PasswordResetForm in order to add HTML5 attributes. This is the only
change done and needed
"""
##############################################
def __init__(self, *args, **kwargs):
super(PasswordResetForm, self).__init__(*args, **kwargs)
# Add HTML5 attributes
self.fields['email'].widget.attrs['autofocus'] = 'autofocus'
self.fields['email'].widget.attrs['class'] = 'form-control'
self.fields['email'].widget.attrs['placeholder'] = _('email')
####################################################################################################
class SetPasswordForm(forms.SetPasswordForm):
"""Override the default SetPasswordForm in order to add HTML5 attributes. This is the only change
done and needed
"""
##############################################
def __init__(self, *args, **kwargs):
super(SetPasswordForm, self).__init__(*args, **kwargs)
# Add HTML5 attributes
self.fields['new_password1'].widget.attrs['autofocus'] = 'autofocus'
self.fields['new_password1'].widget.attrs['class'] = 'form-control'
self.fields['new_password1'].widget.attrs['placeholder'] = _('New password')
self.fields['new_password2'].widget.attrs['class'] = 'form-control'
self.fields['new_password2'].widget.attrs['placeholder'] = _('New password')
####################################################################################################
class UserCreationForm(forms.UserCreationForm):
"""Override the default UserCreationForm in order to add HTML5 attributes.
"""
##############################################
class Meta:
model = User
fields = ('username', 'email', 'password1', 'password2', 'first_name', 'last_name')
##############################################
def __init__(self, *args, **kwargs):
super(UserCreationForm, self).__init__(*args, **kwargs)
        # email and first_name are required
self.fields['email'].required = True
self.fields['first_name'].required = True
# Add HTML5 attributes
self.fields['email'].widget.attrs['class'] = 'form-control'
self.fields['first_name'].widget.attrs['class'] = 'form-control'
self.fields['last_name'].widget.attrs['class'] = 'form-control'
self.fields['password1'].widget.attrs['class'] = 'form-control'
self.fields['password1'].widget.attrs['placeholder'] = _('Password')
self.fields['password2'].widget.attrs['class'] = 'form-control'
self.fields['password2'].widget.attrs['placeholder'] = _('Password')
self.fields['username'].widget.attrs['autofocus'] = 'autofocus'
self.fields['username'].widget.attrs['class'] = 'form-control'
self.fields['username'].widget.attrs['placeholder'] = _('Username')
##############################################
def save(self, commit=True):
"""Create the new User and the associated Profile The User is not activated until the
register_confirm url has been visited
"""
if not commit:
raise NotImplementedError('Cannot create Profile and User without commit')
user = super(UserCreationForm, self).save(commit=False)
user.is_active = False
user.save()
profile = Profile(user=user)
profile.save()
return user
####################################################################################################
class UserUpdateForm(ModelForm):
class Meta:
model = User
fields = ('first_name', 'last_name')
##############################################
def __init__(self, *args, **kwargs):
super(UserUpdateForm, self).__init__(*args, **kwargs)
        # first_name is required
self.fields['first_name'].required = True
self.fields['first_name'].widget.attrs['autofocus'] = 'autofocus'
self.fields['first_name'].widget.attrs['class'] = 'form-control'
self.fields['last_name'].widget.attrs['class'] = 'form-control'
####################################################################################################
class ProfileUpdateForm(ModelForm):
class Meta:
model = Profile
fields = ('phone_number', 'language', 'timezone')
##############################################
def __init__(self, *args, **kwargs):
super(ProfileUpdateForm, self).__init__(*args, **kwargs)
self.fields['language'].required = True
self.fields['timezone'].required = True
self.fields['language'].widget.attrs['class'] = 'form-control'
self.fields['phone_number'].widget.attrs['class'] = 'form-control'
self.fields['timezone'].widget.attrs['class'] = 'form-control'
####################################################################################################
def register(request):
if request.method == 'POST':
user_form = UserCreationForm(request.POST)
if user_form.is_valid():
new_user = user_form.save()
send_localized_mail(new_user, _('Subscription to G.P.O.'),
'GroupedPurchaseOrder/account/register_email.html',
{'URL': request.build_absolute_uri(reverse('accounts.register.confirm',
args=[new_user.pk,
new_user.profile.hash_id])),
'fullname': new_user.get_full_name()})
return render(request, 'GroupedPurchaseOrder/account/register_end.html')
else:
messages.error(request, _("Some information are missing or mistyped"))
else:
user_form = UserCreationForm()
return render(request, 'GroupedPurchaseOrder/account/register.html', {'user_form': user_form})
####################################################################################################
def register_confirm(request, user_id, user_hash):
"""Check that the User and the Hash are correct before activating the User
"""
user = get_object_or_404(User, pk=user_id, profile__hash_id=user_hash)
user.is_active = True
user.save()
return render(request, 'GroupedPurchaseOrder/account/confirm.html', {'user': user})
####################################################################################################
@login_required
def profile(request):
# Force the user to provide language and timezone
if not request.user.profile.language or request.user.profile.timezone == 'UTC':
messages.error(request, _("You should update your timezone. Without it G.P.O. will not work as expected."))
return HttpResponseRedirect(reverse('accounts.profile.update'))
return render(request, 'GroupedPurchaseOrder/account/profile.html')
####################################################################################################
@login_required
def update(request):
profile = get_object_or_404(Profile, user__pk=request.user.pk)
if request.method == 'POST':
user_form = UserUpdateForm(request.POST, instance=request.user)
profile_form = ProfileUpdateForm(request.POST, instance=profile)
if user_form.is_valid() and profile_form.is_valid():
user_form.save()
profile = profile_form.save()
# Update the language code and activate it for the message
if profile.language:
request.session['django_language'] = profile.language
translation.activate(profile.language)
# Update the timezone if needed
if profile.timezone:
request.session['django_timezone'] = profile.timezone
# Print the message
messages.success(request, _("Personnal information updated"))
return HttpResponseRedirect(reverse('accounts.profile'))
else:
user_form = UserUpdateForm(instance=request.user)
profile_form = ProfileUpdateForm(instance=profile)
return render(request, 'GroupedPurchaseOrder/account/update.html',
{'user_form': user_form, 'profile_form': profile_form})
####################################################################################################
@login_required
def password_change_done(request):
messages.success(request, _('Password changed successfully'))
return HttpResponseRedirect(reverse('accounts.profile'))
####################################################################################################
def password_reset_done(request):
return render(request, 'GroupedPurchaseOrder/account/password_reset_done.html')
####################################################################################################
@login_required
def delete(request):
request.user.delete()
return HttpResponseRedirect(reverse('index'))
####################################################################################################
#
# End
#
####################################################################################################
| agpl-3.0 | -8,662,595,971,610,037,000 | 38.984663 | 115 | 0.531109 | false |
bros-bioinfo/bros-bioinfo.github.io | COURS/M1/SEMESTRE1/ALGO_PROG/ALGO/Eliot/Pile_File.py | 1 | 1911 | import random as rd
class Pile(list):
    def creer_pile(self):  # Return a new empty pile (stack).
        return self
    def empiler(self, e):  # Push element 'e' onto the pile.
        self.append(e)
    def depiler(self):  # Pop the top element off the pile and return it.
        return self.pop()
    def taille_pile(self):  # Return the number of elements in the pile.
        return len(self)
# class File(deque):  # (would need: from collections import deque)
#
#     def creer_file(self):  # Return a new empty queue.
#         return self
#
#     def enfiler(self, e):  # Enqueue element 'e' into the queue.
#         self.append(e)
#
#     def defiler(self):  # Dequeue an element from the queue and return it.
#         return self.popleft()
#
#     def taille_file(self):  # Return the number of elements in the queue.
#         return len(self)
#
#
# pile = Pile()
# pile.empiler(1)
# pile.empiler(2)
# pile.empiler(3)
# print('LENGTH = ', pile.taille_pile())
# print(pile.depiler())
# print(pile.depiler())
# print(pile.depiler())
#
# file = File()
# file.enfiler(1)
# file.enfiler(2)
# file.enfiler(3)
# print('LENGTH = ', file.taille_file())
# print(file.defiler())
# print(file.defiler())
# print(file.defiler())
p_entree = Pile(range(10))
rd.shuffle(p_entree)
# print(p_entree)
p_sortie = Pile([])
# print(p_sortie)
def trier(p_entree, p_sortie):
    # Selection sort with two piles: each pass pops every remaining element
    # off p_entree while tracking the minimum, parking the others on
    # p_sortie; the parked elements are then moved back and the minimum is
    # left on p_sortie for good, so p_sortie ends up sorted bottom to top.
    for _ in range(len(p_entree)):
        print(p_entree, p_sortie, "\n")  # trace both piles at each pass
        mini = p_entree.depiler()
        n = len(p_entree)
        for x in range(n):
            a = p_entree.depiler()
            if a < mini:
                # Keep the smaller value in `mini`, park the other one.
                mini, a = a, mini
            p_sortie.empiler(a)
        for x in range(n):
            # Move the parked (non-minimum) elements back onto p_entree.
            p_entree.empiler(p_sortie.depiler())
        p_sortie.empiler(mini)
    print(p_sortie)
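# Example run (left commented out so that running this file only performs the
# shuffle above). `trier` prints both piles at every pass, then the sorted
# pile; expected final output: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].
# trier(p_entree, p_sortie)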
| mit | -1,065,698,792,782,147,600 | 23.706667 | 105 | 0.571506 | false |
offby1/icalendar | src/doctest.py | 1 | 100415 | # Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters ([email protected]).
# Major enhancements and refactoring by:
# Jim Fulton
# Edward Loper
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
__docformat__ = 'reStructuredText en'
__all__ = [
# 0, Option Flags
'register_optionflag',
'DONT_ACCEPT_TRUE_FOR_1',
'DONT_ACCEPT_BLANKLINE',
'NORMALIZE_WHITESPACE',
'ELLIPSIS',
'IGNORE_EXCEPTION_DETAIL',
'COMPARISON_FLAGS',
'REPORT_UDIFF',
'REPORT_CDIFF',
'REPORT_NDIFF',
'REPORT_ONLY_FIRST_FAILURE',
'REPORTING_FLAGS',
# 1. Utility Functions
'is_private',
# 2. Example & DocTest
'Example',
'DocTest',
# 3. Doctest Parser
'DocTestParser',
# 4. Doctest Finder
'DocTestFinder',
# 5. Doctest Runner
'DocTestRunner',
'OutputChecker',
'DocTestFailure',
'UnexpectedException',
'DebugRunner',
# 6. Test Functions
'testmod',
'testfile',
'run_docstring_examples',
# 7. Tester
'Tester',
# 8. Unittest Support
'DocTestSuite',
'DocFileSuite',
'set_unittest_reportflags',
# 9. Debugging Support
'script_from_examples',
'testsource',
'debug_src',
'debug',
]
import __future__
import sys, traceback, inspect, linecache, os, re, types
import unittest, difflib, pdb, tempfile
import warnings
from StringIO import StringIO
# Don't whine about the deprecated is_private function in this
# module's tests.
warnings.filterwarnings("ignore", "is_private", DeprecationWarning,
__name__, 0)
real_pdb_set_trace = pdb.set_trace
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
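#
# A typical end-to-end use of these four classes looks roughly like this
# (illustrative sketch; the convenience functions defined below wrap it up):
#
#     finder = DocTestFinder()
#     runner = DocTestRunner(verbose=False)
#     for test in finder.find(module):
#         runner.run(test)
#     runner.summarize()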
# Option constants.
OPTIONFLAGS_BY_NAME = {}
def register_optionflag(name):
flag = 1 << len(OPTIONFLAGS_BY_NAME)
OPTIONFLAGS_BY_NAME[name] = flag
return flag
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
DONT_ACCEPT_BLANKLINE |
NORMALIZE_WHITESPACE |
ELLIPSIS |
IGNORE_EXCEPTION_DETAIL)
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
REPORTING_FLAGS = (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF |
REPORT_ONLY_FIRST_FAILURE)
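# Since each flag is a distinct bit, flags are combined with bitwise OR,
# e.g. testmod(optionflags=ELLIPSIS | NORMALIZE_WHITESPACE).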
# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
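# For example, a blank line inside an example's expected output is written
# with the blank-line marker (a truly blank line would end the example):
#     >>> print 'a\n\nb'
#     a
#     <BLANKLINE>
#     b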
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Tester Class -- for backwards compatibility
# 8. Unittest Support
# 9. Debugging Support
# 10. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def is_private(prefix, base):
"""prefix, base -> true iff name prefix + "." + base is "private".
Prefix may be an empty string, and base does not contain a period.
Prefix is ignored (although functions you write conforming to this
protocol may make use of it).
Return true iff base begins with an (at least one) underscore, but
does not both begin and end with (at least) two underscores.
>>> is_private("a.b", "my_func")
False
>>> is_private("____", "_my_func")
True
>>> is_private("someclass", "__init__")
False
>>> is_private("sometypo", "__init_")
True
>>> is_private("x.y.z", "_")
True
>>> is_private("_x.y.z", "__")
False
>>> is_private("", "") # senseless but consistent
False
"""
warnings.warn("is_private is deprecated; it wasn't useful; "
"examine DocTestFinder.find() lists instead",
DeprecationWarning, stacklevel=2)
return base[:1] == "_" and not base[:2] == "__" == base[-2:]
def _extract_future_flags(globs):
"""
Return the compiler-flags associated with the future features that
have been imported into the given namespace (globs).
"""
flags = 0
for fname in __future__.all_feature_names:
feature = globs.get(fname, None)
if feature is getattr(__future__, fname):
flags |= feature.compiler_flag
return flags
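# The result is meant for the `flags` argument of compile(), so that examples
# run under the same __future__ features as the module being tested, e.g.:
#     compile(source, filename, "single", flags, 1)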
def _normalize_module(module, depth=2):
"""
Return the module specified by `module`. In particular:
- If `module` is a module, then return module.
- If `module` is a string, then import and return the
module with that name.
- If `module` is None, then return the calling module.
The calling module is assumed to be the module of
the stack frame at the given depth in the call stack.
"""
if inspect.ismodule(module):
return module
elif isinstance(module, (str, unicode)):
return __import__(module, globals(), locals(), ["*"])
elif module is None:
return sys.modules[sys._getframe(depth).f_globals['__name__']]
else:
raise TypeError("Expected a module, string, or None")
def _indent(s, indent=4):
"""
    Add the given number of space characters to the beginning of every
    non-blank line in `s`, and return the result.
"""
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
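# For example, _indent("foo\nbar\n") returns "    foo\n    bar\n", while
# empty lines are left untouched.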
def _exception_traceback(exc_info):
"""
Return a string containing a traceback message for the given
exc_info tuple (as returned by sys.exc_info()).
"""
# Get a traceback message.
excout = StringIO()
exc_type, exc_val, exc_tb = exc_info
traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
return excout.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
def getvalue(self):
result = StringIO.getvalue(self)
# If anything at all was written, make sure there's a trailing
# newline. There's no way for the expected output to indicate
# that a trailing newline is missing.
if result and not result.endswith("\n"):
result += "\n"
# Prevent softspace from screwing up the next test case, in
# case they used print with a trailing comma in an example.
if hasattr(self, "softspace"):
del self.softspace
return result
def truncate(self, size=None):
StringIO.truncate(self, size)
if hasattr(self, "softspace"):
del self.softspace
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
"""
Essentially the only subtle case:
>>> _ellipsis_match('aa...aa', 'aaa')
False
"""
if ELLIPSIS_MARKER not in want:
return want == got
# Find "the real" strings.
ws = want.split(ELLIPSIS_MARKER)
assert len(ws) >= 2
# Deal with exact matches possibly needed at one or both ends.
startpos, endpos = 0, len(got)
w = ws[0]
if w: # starts with exact match
if got.startswith(w):
startpos = len(w)
del ws[0]
else:
return False
w = ws[-1]
if w: # ends with exact match
if got.endswith(w):
endpos -= len(w)
del ws[-1]
else:
return False
if startpos > endpos:
# Exact end matches required more characters than we have, as in
# _ellipsis_match('aa...aa', 'aaa')
return False
# For the rest, we only need to find the leftmost non-overlapping
# match for each piece. If there's no overall match that way alone,
# there's no overall match period.
for w in ws:
# w may be '' at times, if there are consecutive ellipses, or
# due to an ellipsis at the start or end of `want`. That's OK.
# Search for an empty string succeeds, and doesn't change startpos.
startpos = got.find(w, startpos, endpos)
if startpos < 0:
return False
startpos += len(w)
return True
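# Illustrative sketch (hypothetical helper, never called at import time):
# the two interesting behaviors of _ellipsis_match -- a middle "..."
# absorbing arbitrary text, and the overlap case from the docstring where
# the exact end pieces need more characters than `got` provides.
def _demo_ellipsis_match():
    assert _ellipsis_match('a...z', 'abcdefz')
    assert not _ellipsis_match('aa...aa', 'aaa')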
def _comment_line(line):
"Return a commented form of the given line"
line = line.rstrip()
if line:
return '# '+line
else:
return '#'
class _OutputRedirectingPdb(pdb.Pdb):
"""
A specialized version of the python debugger that redirects stdout
to a given stream when interacting with the user. Stdout is *not*
redirected when traced code is executed.
"""
def __init__(self, out):
self.__out = out
self.__debugger_used = False
pdb.Pdb.__init__(self)
def set_trace(self):
self.__debugger_used = True
pdb.Pdb.set_trace(self)
def set_continue(self):
# Calling set_continue unconditionally would break unit test coverage
# reporting, as Bdb.set_continue calls sys.settrace(None).
if self.__debugger_used:
pdb.Pdb.set_continue(self)
def trace_dispatch(self, *args):
# Redirect stdout to the given stream.
save_stdout = sys.stdout
sys.stdout = self.__out
# Call Pdb's trace dispatch method.
result = pdb.Pdb.trace_dispatch(self, *args)
# Restore stdout.
sys.stdout = save_stdout
return result
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, path):
if not inspect.ismodule(module):
raise TypeError, 'Expected a module: %r' % module
if path.startswith('/'):
raise ValueError, 'Module-relative files may not have absolute paths'
# Find the base directory for the path.
if hasattr(module, '__file__'):
# A normal module/package
basedir = os.path.split(module.__file__)[0]
elif module.__name__ == '__main__':
# An interactive session.
if len(sys.argv)>0 and sys.argv[0] != '':
basedir = os.path.split(sys.argv[0])[0]
else:
basedir = os.curdir
else:
# A module w/o __file__ (this includes builtins)
raise ValueError("Can't resolve paths relative to the module " +
module + " (it has no __file__)")
# Combine the base directory and the path.
return os.path.join(basedir, *(path.split('/')))
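# Illustrative sketch (hypothetical helper, never called at import time):
# resolve a module-relative path the way testfile() does. The
# 'data/sample.txt' name is an assumed example, not a real file; only the
# path arithmetic is demonstrated.
def _demo_module_relative_path():
    path = _module_relative_path(os, 'data/sample.txt')
    assert path.endswith(os.path.join('data', 'sample.txt'))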
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
"""
A single doctest example, consisting of source code and expected
output. `Example` defines the following attributes:
- source: A single Python statement, always ending with a newline.
The constructor adds a newline if needed.
- want: The expected output from running the source code (either
from stdout, or a traceback in case of exception). `want` ends
with a newline unless it's empty, in which case it's an empty
string. The constructor adds a newline if needed.
- exc_msg: The exception message generated by the example, if
the example is expected to generate an exception; or `None` if
it is not expected to generate an exception. This exception
message is compared against the return value of
`traceback.format_exception_only()`. `exc_msg` ends with a
newline unless it's `None`. The constructor adds a newline
if needed.
- lineno: The line number within the DocTest string containing
this Example where the Example begins. This line number is
zero-based, with respect to the beginning of the DocTest.
- indent: The example's indentation in the DocTest string.
        I.e., the number of space characters that precede the
example's first prompt.
- options: A dictionary mapping from option flags to True or
False, which is used to override default options for this
example. Any option flags not contained in this dictionary
are left at their default value (as specified by the
DocTestRunner's optionflags). By default, no options are set.
"""
def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
options=None):
# Normalize inputs.
if not source.endswith('\n'):
source += '\n'
if want and not want.endswith('\n'):
want += '\n'
if exc_msg is not None and not exc_msg.endswith('\n'):
exc_msg += '\n'
# Store properties.
self.source = source
self.want = want
self.lineno = lineno
self.indent = indent
if options is None: options = {}
self.options = options
self.exc_msg = exc_msg
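# Illustrative sketch (hypothetical helper, never called at import time):
# the Example constructor normalizes its inputs, so `source` and `want`
# always end with a newline no matter how they were passed in.
def _demo_example_normalization():
    ex = Example('print 6*7', '42')
    assert ex.source == 'print 6*7\n'
    assert ex.want == '42\n'
    assert ex.exc_msg is None and ex.options == {}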
class DocTest:
"""
A collection of doctest examples that should be run in a single
namespace. Each `DocTest` defines the following attributes:
- examples: the list of examples.
- globs: The namespace (aka globals) that the examples should
be run in.
- name: A name identifying the DocTest (typically, the name of
the object whose docstring this DocTest was extracted from).
- filename: The name of the file that this DocTest was extracted
from, or `None` if the filename is unknown.
- lineno: The line number within filename where this DocTest
begins, or `None` if the line number is unavailable. This
line number is zero-based, with respect to the beginning of
the file.
- docstring: The string that the examples were extracted from,
or `None` if the string is unavailable.
"""
def __init__(self, examples, globs, name, filename, lineno, docstring):
"""
Create a new DocTest containing the given examples. The
DocTest's globals are initialized with a copy of `globs`.
"""
assert not isinstance(examples, basestring), \
"DocTest no longer accepts str; use DocTestParser instead"
self.examples = examples
self.docstring = docstring
self.globs = globs.copy()
self.name = name
self.filename = filename
self.lineno = lineno
def __repr__(self):
if len(self.examples) == 0:
examples = 'no examples'
elif len(self.examples) == 1:
examples = '1 example'
else:
examples = '%d examples' % len(self.examples)
return ('<DocTest %s from %s:%s (%s)>' %
(self.name, self.filename, self.lineno, examples))
# This lets us sort tests by name:
def __cmp__(self, other):
if not isinstance(other, DocTest):
return -1
return cmp((self.name, self.filename, self.lineno, id(self)),
(other.name, other.filename, other.lineno, id(other)))
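# Illustrative sketch (hypothetical helper, never called at import time):
# a DocTest copies the globals dict it is given, so mutating test.globs
# never touches the caller's namespace.
def _demo_doctest_globs_copy():
    globs = {'x': 1}
    test = DocTest([], globs, 'demo', None, 0, None)
    test.globs['x'] = 2
    assert globs['x'] == 1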
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
"""
A class used to parse strings containing doctest examples.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
.*$\n? # But any other line
)*)
''', re.MULTILINE | re.VERBOSE)
# A regular expression for handling `want` strings that contain
# expected exceptions. It divides `want` into three pieces:
# - the traceback header line (`hdr`)
# - the traceback stack (`stack`)
# - the exception message (`msg`), as generated by
# traceback.format_exception_only()
# `msg` may have multiple lines. We assume/require that the
# exception message is the first non-indented line starting with a word
# character following the traceback header line.
_EXCEPTION_RE = re.compile(r"""
# Grab the traceback header. Different versions of Python have
# said different things on the first traceback line.
^(?P<hdr> Traceback\ \(
(?: most\ recent\ call\ last
| innermost\ last
) \) :
)
\s* $ # toss trailing whitespace on the header.
(?P<stack> .*?) # don't blink: absorb stuff until...
^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
""", re.VERBOSE | re.MULTILINE | re.DOTALL)
# A callable returning a true value iff its argument is a blank line
# or contains a single comment.
_IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
def parse(self, string, name='<string>'):
"""
Divide the given string into examples and intervening text,
and return them as a list of alternating Examples and strings.
Line numbers for the Examples are 0-based. The optional
argument `name` is a name identifying this string, and is only
used for error messages.
"""
string = string.expandtabs()
# If all lines begin with the same indentation, then strip it.
min_indent = self._min_indent(string)
if min_indent > 0:
string = '\n'.join([l[min_indent:] for l in string.split('\n')])
output = []
charno, lineno = 0, 0
# Find all doctest examples in the string:
for m in self._EXAMPLE_RE.finditer(string):
# Add the pre-example text to `output`.
output.append(string[charno:m.start()])
# Update lineno (lines before this example)
lineno += string.count('\n', charno, m.start())
# Extract info from the regexp match.
(source, options, want, exc_msg) = \
self._parse_example(m, name, lineno)
# Create an Example, and add it to the list.
if not self._IS_BLANK_OR_COMMENT(source):
output.append( Example(source, want, exc_msg,
lineno=lineno,
indent=min_indent+len(m.group('indent')),
options=options) )
# Update lineno (lines inside this example)
lineno += string.count('\n', m.start(), m.end())
# Update charno.
charno = m.end()
# Add any remaining post-example text to `output`.
output.append(string[charno:])
return output
def get_doctest(self, string, globs, name, filename, lineno):
"""
Extract all doctest examples from the given string, and
collect them into a `DocTest` object.
`globs`, `name`, `filename`, and `lineno` are attributes for
the new `DocTest` object. See the documentation for `DocTest`
for more information.
"""
return DocTest(self.get_examples(string, name), globs,
name, filename, lineno, string)
def get_examples(self, string, name='<string>'):
"""
Extract all doctest examples from the given string, and return
them as a list of `Example` objects. Line numbers are
0-based, because it's most common in doctests that nothing
        interesting appears on the same line as the opening triple-quote,
and so the first interesting line is called \"line 1\" then.
The optional argument `name` is a name identifying this
string, and is only used for error messages.
"""
return [x for x in self.parse(string, name)
if isinstance(x, Example)]
def _parse_example(self, m, name, lineno):
"""
Given a regular expression match from `_EXAMPLE_RE` (`m`),
return a pair `(source, want)`, where `source` is the matched
example's source code (with prompts and indentation stripped);
and `want` is the example's expected output (with indentation
stripped).
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
# Get the example's indentation level.
indent = len(m.group('indent'))
# Divide source into lines; check that they're properly
# indented; and then strip their indentation & prompts.
source_lines = m.group('source').split('\n')
self._check_prompt_blank(source_lines, indent, name, lineno)
self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
source = '\n'.join([sl[indent+4:] for sl in source_lines])
# Divide want into lines; check that it's properly indented; and
# then strip the indentation. Spaces before the last newline should
# be preserved, so plain rstrip() isn't good enough.
want = m.group('want')
want_lines = want.split('\n')
if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
del want_lines[-1] # forget final newline & spaces after it
self._check_prefix(want_lines, ' '*indent, name,
lineno + len(source_lines))
want = '\n'.join([wl[indent:] for wl in want_lines])
# If `want` contains a traceback message, then extract it.
m = self._EXCEPTION_RE.match(want)
if m:
exc_msg = m.group('msg')
else:
exc_msg = None
# Extract options from the source.
options = self._find_options(source, name, lineno)
return source, options, want, exc_msg
# This regular expression looks for option directives in the
# source code of an example. Option directives are comments
# starting with "doctest:". Warning: this may give false
# positives for string-literals that contain the string
# "#doctest:". Eliminating these false positives would require
# actually parsing the string; but we limit them by ignoring any
# line containing "#doctest:" that is *followed* by a quote mark.
_OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
re.MULTILINE)
def _find_options(self, source, name, lineno):
"""
Return a dictionary containing option overrides extracted from
option directives in the given source string.
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
options = {}
# (note: with the current regexp, this will match at most once:)
for m in self._OPTION_DIRECTIVE_RE.finditer(source):
option_strings = m.group(1).replace(',', ' ').split()
for option in option_strings:
if (option[0] not in '+-' or
option[1:] not in OPTIONFLAGS_BY_NAME):
raise ValueError('line %r of the doctest for %s '
'has an invalid option: %r' %
(lineno+1, name, option))
flag = OPTIONFLAGS_BY_NAME[option[1:]]
options[flag] = (option[0] == '+')
if options and self._IS_BLANK_OR_COMMENT(source):
raise ValueError('line %r of the doctest for %s has an option '
'directive on a line with no example: %r' %
(lineno, name, source))
return options
# This regular expression finds the indentation of every non-blank
# line in a string.
_INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
def _min_indent(self, s):
"Return the minimum indentation of any non-blank line in `s`"
indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
if len(indents) > 0:
return min(indents)
else:
return 0
def _check_prompt_blank(self, lines, indent, name, lineno):
"""
Given the lines of a source string (including prompts and
leading indentation), check to make sure that every prompt is
followed by a space character. If any line is not followed by
a space character, then raise ValueError.
"""
for i, line in enumerate(lines):
if len(line) >= indent+4 and line[indent+3] != ' ':
raise ValueError('line %r of the docstring for %s '
'lacks blank after %s: %r' %
(lineno+i+1, name,
line[indent:indent+3], line))
def _check_prefix(self, lines, prefix, name, lineno):
"""
Check that every line in the given list starts with the given
prefix; if any line does not, then raise a ValueError.
"""
for i, line in enumerate(lines):
if line and not line.startswith(prefix):
raise ValueError('line %r of the docstring for %s has '
'inconsistent leading whitespace: %r' %
(lineno+i+1, name, line))
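# Illustrative sketch (hypothetical helper, never called at import time):
# run DocTestParser over a small docstring-like string. Indentation and
# prompts are stripped from the extracted Example, and the expected
# output keeps its trailing newline.
def _demo_parser():
    text = 'Some prose.\n    >>> print 2 + 2\n    4\n'
    examples = DocTestParser().get_examples(text, '<demo>')
    assert len(examples) == 1
    assert examples[0].source == 'print 2 + 2\n'
    assert examples[0].want == '4\n'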
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
"""
def __init__(self, verbose=False, parser=DocTestParser(),
recurse=True, _namefilter=None, exclude_empty=True):
"""
Create a new doctest finder.
The optional argument `parser` specifies a class or
function that should be used to create new DocTest objects (or
objects that implement the same interface as DocTest). The
signature for this factory function should match the signature
of the DocTest constructor.
If the optional argument `recurse` is false, then `find` will
only examine the given object, and not any contained objects.
If the optional argument `exclude_empty` is false, then `find`
will include tests for objects with empty docstrings.
"""
self._parser = parser
self._verbose = verbose
self._recurse = recurse
self._exclude_empty = exclude_empty
# _namefilter is undocumented, and exists only for temporary backward-
# compatibility support of testmod's deprecated isprivate mess.
self._namefilter = _namefilter
def find(self, obj, name=None, module=None, globs=None,
extraglobs=None):
"""
Return a list of the DocTests that are defined by the given
object's docstring, or by any of its contained objects'
docstrings.
The optional parameter `module` is the module that contains
the given object. If the module is not specified or is None, then
the test finder will attempt to automatically determine the
correct module. The object's module is used:
- As a default namespace, if `globs` is not specified.
- To prevent the DocTestFinder from extracting DocTests
from objects that are imported from other modules.
- To find the name of the file containing the object.
- To help find the line number of the object within its
file.
Contained objects whose module does not match `module` are ignored.
If `module` is False, no attempt to find the module will be made.
This is obscure, of use mostly in tests: if `module` is False, or
is None but cannot be found automatically, then all objects are
considered to belong to the (non-existent) module, so all contained
objects will (recursively) be searched for doctests.
The globals for each DocTest is formed by combining `globs`
and `extraglobs` (bindings in `extraglobs` override bindings
in `globs`). A new copy of the globals dictionary is created
for each DocTest. If `globs` is not specified, then it
defaults to the module's `__dict__`, if specified, or {}
otherwise. If `extraglobs` is not specified, then it defaults
to {}.
"""
# If name was not specified, then extract it from the object.
if name is None:
name = getattr(obj, '__name__', None)
if name is None:
raise ValueError("DocTestFinder.find: name must be given "
"when obj.__name__ doesn't exist: %r" %
(type(obj),))
# Find the module that contains the given object (if obj is
# a module, then module=obj.). Note: this may fail, in which
# case module will be None.
if module is False:
module = None
elif module is None:
module = inspect.getmodule(obj)
# Read the module's source code. This is used by
# DocTestFinder._find_lineno to find the line number for a
# given object's docstring.
try:
file = inspect.getsourcefile(obj) or inspect.getfile(obj)
source_lines = linecache.getlines(file)
if not source_lines:
source_lines = None
except TypeError:
source_lines = None
# Initialize globals, and merge in extraglobs.
if globs is None:
if module is None:
globs = {}
else:
globs = module.__dict__.copy()
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
        # Recursively explore `obj`, extracting DocTests.
tests = []
self._find(tests, obj, name, module, source_lines, globs, {})
return tests
def _filter(self, obj, prefix, base):
"""
Return true if the given object should not be examined.
"""
return (self._namefilter is not None and
self._namefilter(prefix, base))
def _from_module(self, module, object):
"""
Return true if the given object is defined in the given
module.
"""
if module is None:
return True
elif inspect.isfunction(object):
return module.__dict__ is object.func_globals
elif inspect.isclass(object):
return module.__name__ == object.__module__
elif inspect.getmodule(object) is not None:
return module is inspect.getmodule(object)
elif hasattr(object, '__module__'):
return module.__name__ == object.__module__
elif isinstance(object, property):
            return True # [XX] no way to be sure.
else:
raise ValueError("object must be a class or function")
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to `tests`.
"""
if self._verbose:
print 'Finding tests in %s' % name
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Find a test for this object, and add it to the list of tests.
test = self._get_test(obj, name, module, globs, source_lines)
if test is not None:
tests.append(test)
# Look for tests in a module's contained objects.
if inspect.ismodule(obj) and self._recurse:
for valname, val in obj.__dict__.items():
# Check if this contained object should be ignored.
if self._filter(val, name, valname):
continue
valname = '%s.%s' % (name, valname)
# Recurse to functions & classes.
if ((inspect.isfunction(val) or inspect.isclass(val)) and
self._from_module(module, val)):
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a module's __test__ dictionary.
if inspect.ismodule(obj) and self._recurse:
for valname, val in getattr(obj, '__test__', {}).items():
if not isinstance(valname, basestring):
raise ValueError("DocTestFinder.find: __test__ keys "
"must be strings: %r" %
(type(valname),))
if not (inspect.isfunction(val) or inspect.isclass(val) or
inspect.ismethod(val) or inspect.ismodule(val) or
isinstance(val, basestring)):
raise ValueError("DocTestFinder.find: __test__ values "
"must be strings, functions, methods, "
"classes, or modules: %r" %
(type(val),))
valname = '%s.__test__.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if inspect.isclass(obj) and self._recurse:
for valname, val in obj.__dict__.items():
# Check if this contained object should be ignored.
if self._filter(val, name, valname):
continue
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).im_func
# Recurse to methods, properties, and nested classes.
if ((inspect.isfunction(val) or inspect.isclass(val) or
isinstance(val, property)) and
self._from_module(module, val)):
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
"""
Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.
"""
# Extract the object's docstring. If it doesn't have one,
# then return None (no test for this object).
if isinstance(obj, basestring):
docstring = obj
else:
try:
if obj.__doc__ is None:
docstring = ''
else:
docstring = obj.__doc__
if not isinstance(docstring, basestring):
docstring = str(docstring)
except (TypeError, AttributeError):
docstring = ''
# Find the docstring's location in the file.
lineno = self._find_lineno(obj, source_lines)
# Don't bother if the docstring is empty.
if self._exclude_empty and not docstring:
return None
# Return a DocTest for this object.
if module is None:
filename = None
else:
filename = getattr(module, '__file__', module.__name__)
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
return self._parser.get_doctest(docstring, globs, name,
filename, lineno)
def _find_lineno(self, obj, source_lines):
"""
Return a line number of the given object's docstring. Note:
this method assumes that the object has a docstring.
"""
lineno = None
# Find the line number for modules.
if inspect.ismodule(obj):
lineno = 0
# Find the line number for classes.
# Note: this could be fooled if a class is defined multiple
# times in a single file.
if inspect.isclass(obj):
if source_lines is None:
return None
pat = re.compile(r'^\s*class\s*%s\b' %
getattr(obj, '__name__', '-'))
for i, line in enumerate(source_lines):
if pat.match(line):
lineno = i
break
# Find the line number for functions & methods.
if inspect.ismethod(obj): obj = obj.im_func
if inspect.isfunction(obj): obj = obj.func_code
if inspect.istraceback(obj): obj = obj.tb_frame
if inspect.isframe(obj): obj = obj.f_code
if inspect.iscode(obj):
lineno = getattr(obj, 'co_firstlineno', None)-1
# Find the line number where the docstring starts. Assume
# that it's the first line that begins with a quote mark.
# Note: this could be fooled by a multiline function
# signature, where a continuation line begins with a quote
# mark.
if lineno is not None:
if source_lines is None:
return lineno+1
pat = re.compile('(^|.*:)\s*\w*("|\')')
for lineno in range(lineno, len(source_lines)):
if pat.match(source_lines[lineno]):
return lineno
# We couldn't find the line number.
return None
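# Illustrative sketch (hypothetical helper, never called at import time):
# extract the doctest from a throwaway local function. Passing
# module=False skips module resolution, the obscure use described in
# find()'s docstring.
def _demo_finder():
    def sample():
        """
        >>> 1 + 1
        2
        """
    tests = DocTestFinder().find(sample, 'sample', module=False)
    assert len(tests) == 1
    assert tests[0].examples[0].want == '2\n'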
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
"""
A class used to run DocTest test cases, and accumulate statistics.
The `run` method is used to process a single DocTest case. It
returns a tuple `(f, t)`, where `t` is the number of test cases
tried, and `f` is the number of test cases that failed.
>>> tests = DocTestFinder().find(_TestClass)
>>> runner = DocTestRunner(verbose=False)
>>> for test in tests:
... print runner.run(test)
(0, 2)
(0, 1)
(0, 2)
(0, 2)
The `summarize` method prints a summary of all the test cases that
have been run by the runner, and returns an aggregated `(f, t)`
tuple:
>>> runner.summarize(verbose=1)
4 items passed all tests:
2 tests in _TestClass
2 tests in _TestClass.__init__
2 tests in _TestClass.get
1 tests in _TestClass.square
7 tests in 4 items.
7 passed and 0 failed.
Test passed.
(0, 7)
The aggregated number of tried examples and failed examples is
also available via the `tries` and `failures` attributes:
>>> runner.tries
7
>>> runner.failures
0
The comparison between expected outputs and actual outputs is done
by an `OutputChecker`. This comparison may be customized with a
number of option flags; see the documentation for `testmod` for
more information. If the option flags are insufficient, then the
comparison may also be customized by passing a subclass of
`OutputChecker` to the constructor.
The test runner's display output can be controlled in two ways.
    First, an output function (`out`) can be passed to
    `DocTestRunner.run`; this function will be called with strings that
    should be displayed. It defaults to `sys.stdout.write`. If
    capturing the output is not sufficient, then the display output
    can also be customized by subclassing DocTestRunner, and
overriding the methods `report_start`, `report_success`,
`report_unexpected_exception`, and `report_failure`.
"""
# This divider string is used to separate failure messages, and to
# separate sections of the summary.
DIVIDER = "*" * 70
def __init__(self, checker=None, verbose=None, optionflags=0):
"""
Create a new test runner.
Optional keyword arg `checker` is the `OutputChecker` that
should be used to compare the expected outputs and actual
outputs of doctest examples.
Optional keyword arg 'verbose' prints lots of stuff if true,
only failures if false; by default, it's true iff '-v' is in
sys.argv.
Optional argument `optionflags` can be used to control how the
test runner compares expected output to actual output, and how
it displays failures. See the documentation for `testmod` for
more information.
"""
self._checker = checker or OutputChecker()
if verbose is None:
verbose = '-v' in sys.argv
self._verbose = verbose
self.optionflags = optionflags
self.original_optionflags = optionflags
# Keep track of the examples we've run.
self.tries = 0
self.failures = 0
self._name2ft = {}
# Create a fake output target for capturing doctest output.
self._fakeout = _SpoofOut()
#/////////////////////////////////////////////////////////////////
# Reporting methods
#/////////////////////////////////////////////////////////////////
def report_start(self, out, test, example):
"""
Report that the test runner is about to process the given
example. (Only displays a message if verbose=True)
"""
if self._verbose:
if example.want:
out('Trying:\n' + _indent(example.source) +
'Expecting:\n' + _indent(example.want))
else:
out('Trying:\n' + _indent(example.source) +
'Expecting nothing\n')
def report_success(self, out, test, example, got):
"""
Report that the given example ran successfully. (Only
displays a message if verbose=True)
"""
if self._verbose:
out("ok\n")
def report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
out(self._failure_header(test, example) +
self._checker.output_difference(example, got, self.optionflags))
def report_unexpected_exception(self, out, test, example, exc_info):
"""
Report that the given example raised an unexpected exception.
"""
out(self._failure_header(test, example) +
'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
def _failure_header(self, test, example):
out = [self.DIVIDER]
if test.filename:
if test.lineno is not None and example.lineno is not None:
lineno = test.lineno + example.lineno + 1
else:
lineno = '?'
out.append('File "%s", line %s, in %s' %
(test.filename, lineno, test.name))
else:
out.append('Line %s, in %s' % (example.lineno+1, test.name))
out.append('Failed example:')
source = example.source
out.append(_indent(source))
return '\n'.join(out)
#/////////////////////////////////////////////////////////////////
# DocTest Running
#/////////////////////////////////////////////////////////////////
def __run(self, test, compileflags, out):
"""
Run the examples in `test`. Write the outcome of each example
with one of the `DocTestRunner.report_*` methods, using the
writer function `out`. `compileflags` is the set of compiler
flags that should be used to execute examples. Return a tuple
`(f, t)`, where `t` is the number of examples tried, and `f`
is the number of examples that failed. The examples are run
in the namespace `test.globs`.
"""
# Keep track of the number of failures and tries.
failures = tries = 0
# Save the option flags (since option directives can be used
# to modify them).
original_optionflags = self.optionflags
SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
check = self._checker.check_output
# Process each example.
for examplenum, example in enumerate(test.examples):
            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
# reporting after the first failure.
quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
failures > 0)
# Merge in the example's options.
self.optionflags = original_optionflags
if example.options:
for (optionflag, val) in example.options.items():
if val:
self.optionflags |= optionflag
else:
self.optionflags &= ~optionflag
# Record that we started this example.
tries += 1
if not quiet:
self.report_start(out, test, example)
# Use a special filename for compile(), so we can retrieve
# the source code during interactive debugging (see
# __patched_linecache_getlines).
filename = '<doctest %s[%d]>' % (test.name, examplenum)
# Run the example in the given context (globs), and record
# any exception that gets raised. (But don't intercept
# keyboard interrupts.)
try:
# Don't blink! This is where the user's code gets run.
exec compile(example.source, filename, "single",
compileflags, 1) in test.globs
self.debugger.set_continue() # ==== Example Finished ====
exception = None
except KeyboardInterrupt:
raise
except:
exception = sys.exc_info()
self.debugger.set_continue() # ==== Example Finished ====
got = self._fakeout.getvalue() # the actual output
self._fakeout.truncate(0)
outcome = FAILURE # guilty until proved innocent or insane
# If the example executed without raising any exceptions,
# verify its output.
if exception is None:
if check(example.want, got, self.optionflags):
outcome = SUCCESS
# The example raised an exception: check if it was expected.
else:
exc_info = sys.exc_info()
exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
if not quiet:
got += _exception_traceback(exc_info)
# If `example.exc_msg` is None, then we weren't expecting
# an exception.
if example.exc_msg is None:
outcome = BOOM
# We expected an exception: see whether it matches.
elif check(example.exc_msg, exc_msg, self.optionflags):
outcome = SUCCESS
# Another chance if they didn't care about the detail.
elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
m1 = re.match(r'[^:]*:', example.exc_msg)
m2 = re.match(r'[^:]*:', exc_msg)
if m1 and m2 and check(m1.group(0), m2.group(0),
self.optionflags):
outcome = SUCCESS
# Report the outcome.
if outcome is SUCCESS:
if not quiet:
self.report_success(out, test, example, got)
elif outcome is FAILURE:
if not quiet:
self.report_failure(out, test, example, got)
failures += 1
elif outcome is BOOM:
if not quiet:
self.report_unexpected_exception(out, test, example,
exc_info)
failures += 1
else:
assert False, ("unknown outcome", outcome)
# Restore the option flags (in case they were modified)
self.optionflags = original_optionflags
# Record and return the number of failures and tries.
self.__record_outcome(test, failures, tries)
return failures, tries
def __record_outcome(self, test, f, t):
"""
Record the fact that the given DocTest (`test`) generated `f`
failures out of `t` tried examples.
"""
f2, t2 = self._name2ft.get(test.name, (0,0))
self._name2ft[test.name] = (f+f2, t+t2)
self.failures += f
self.tries += t
__LINECACHE_FILENAME_RE = re.compile(r'<doctest '
r'(?P<name>[\w\.]+)'
r'\[(?P<examplenum>\d+)\]>$')
def __patched_linecache_getlines(self, filename, module_globals=None):
m = self.__LINECACHE_FILENAME_RE.match(filename)
if m and m.group('name') == self.test.name:
example = self.test.examples[int(m.group('examplenum'))]
return example.source.splitlines(True)
else:
return self.save_linecache_getlines(filename)
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
Run the examples in `test`, and display the results using the
writer function `out`.
The examples are run in the namespace `test.globs`. If
`clear_globs` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use `clear_globs=False`.
`compileflags` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to `globs`.
        The output of each example is checked by the runner's
        `OutputChecker` (via its `check_output` method), and the
        results are formatted by the `DocTestRunner.report_*` methods.
"""
self.test = test
if compileflags is None:
compileflags = _extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = _OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
try:
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
if clear_globs:
test.globs.clear()
#/////////////////////////////////////////////////////////////////
# Summarization
#/////////////////////////////////////////////////////////////////
def summarize(self, verbose=None):
"""
Print a summary of all the test cases that have been run by
this DocTestRunner, and return a tuple `(f, t)`, where `f` is
the total number of failed examples, and `t` is the total
number of tried examples.
The optional `verbose` argument controls how detailed the
summary is. If the verbosity is not specified, then the
DocTestRunner's verbosity is used.
"""
if verbose is None:
verbose = self._verbose
notests = []
passed = []
failed = []
totalt = totalf = 0
for x in self._name2ft.items():
name, (f, t) = x
assert f <= t
totalt += t
totalf += f
if t == 0:
notests.append(name)
elif f == 0:
passed.append( (name, t) )
else:
failed.append(x)
if verbose:
if notests:
print len(notests), "items had no tests:"
notests.sort()
for thing in notests:
print " ", thing
if passed:
print len(passed), "items passed all tests:"
passed.sort()
for thing, count in passed:
print " %3d tests in %s" % (count, thing)
if failed:
print self.DIVIDER
print len(failed), "items had failures:"
failed.sort()
for thing, (f, t) in failed:
print " %3d of %3d in %s" % (f, t, thing)
if verbose:
print totalt, "tests in", len(self._name2ft), "items."
print totalt - totalf, "passed and", totalf, "failed."
if totalf:
print "***Test Failed***", totalf, "failures."
elif verbose:
print "Test passed."
return totalf, totalt
#/////////////////////////////////////////////////////////////////
# Backward compatibility cruft to maintain doctest.master.
#/////////////////////////////////////////////////////////////////
def merge(self, other):
d = self._name2ft
for name, (f, t) in other._name2ft.items():
if name in d:
print "*** DocTestRunner.merge: '" + name + "' in both" \
" testers; summing outcomes."
f2, t2 = d[name]
f = f + f2
t = t + t2
d[name] = f, t
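# Illustrative sketch (hypothetical helper, never called at import time):
# run a parsed test with a custom `out` writer so the failure report is
# captured in a buffer instead of being written to stdout.
def _demo_runner_capture():
    test = DocTestParser().get_doctest('>>> print "no"\nyes\n',
                                       {}, 'demo', '<demo>', 0)
    buf = StringIO()
    failures, tries = DocTestRunner(verbose=False).run(test, out=buf.write)
    assert (failures, tries) == (1, 1)
    assert 'Failed example' in buf.getvalue()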
class OutputChecker:
"""
    A class used to check whether the actual output from a doctest
example matches the expected output. `OutputChecker` defines two
methods: `check_output`, which compares a given pair of outputs,
and returns true if they match; and `output_difference`, which
returns a string describing the differences between two outputs.
"""
def check_output(self, want, got, optionflags):
"""
Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
        documentation for `DocTestRunner` for more information about
option flags.
"""
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# The values True and False replaced 1 and 0 as the return
# value for boolean comparisons in Python 2.3.
if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
if (got,want) == ("True\n", "1\n"):
return True
if (got,want) == ("False\n", "0\n"):
return True
# <BLANKLINE> can be used as a special sequence to signify a
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
# Replace <BLANKLINE> in want with a blank line.
want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
'', want)
# If a line in got contains only spaces, then remove the
# spaces.
got = re.sub('(?m)^\s*?$', '', got)
if got == want:
return True
# This flag causes doctest to ignore any differences in the
# contents of whitespace strings. Note that this can be used
# in conjunction with the ELLIPSIS flag.
if optionflags & NORMALIZE_WHITESPACE:
got = ' '.join(got.split())
want = ' '.join(want.split())
if got == want:
return True
# The ELLIPSIS flag says to let the sequence "..." in `want`
# match any substring in `got`.
if optionflags & ELLIPSIS:
if _ellipsis_match(want, got):
return True
# We didn't find any match; return false.
return False
# Should we do a fancy diff?
def _do_a_fancy_diff(self, want, got, optionflags):
# Not unless they asked for a fancy diff.
if not optionflags & (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF):
return False
# If expected output uses ellipsis, a meaningful fancy diff is
# too hard ... or maybe not. In two real-life failures Tim saw,
# a diff was a major help anyway, so this is commented out.
# [todo] _ellipsis_match() knows which pieces do and don't match,
# and could be the basis for a kick-ass diff in this case.
##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
## return False
# ndiff does intraline difference marking, so can be useful even
# for 1-line differences.
if optionflags & REPORT_NDIFF:
return True
# The other diff types need at least a few lines to be helpful.
return want.count('\n') > 2 and got.count('\n') > 2
def output_difference(self, example, got, optionflags):
"""
Return a string describing the differences between the
expected output for a given example (`example`) and the actual
output (`got`). `optionflags` is the set of option flags used
to compare `want` and `got`.
"""
want = example.want
# If <BLANKLINE>s are being used, then replace blank lines
# with <BLANKLINE> in the actual output string.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
# Check if we should use diff.
if self._do_a_fancy_diff(want, got, optionflags):
# Split want & got into lines.
want_lines = want.splitlines(True) # True == keep line ends
got_lines = got.splitlines(True)
# Use difflib to find their differences.
if optionflags & REPORT_UDIFF:
diff = difflib.unified_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'unified diff with -expected +actual'
elif optionflags & REPORT_CDIFF:
diff = difflib.context_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'context diff with expected followed by actual'
elif optionflags & REPORT_NDIFF:
engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
diff = list(engine.compare(want_lines, got_lines))
kind = 'ndiff with -expected +actual'
else:
assert 0, 'Bad diff option'
# Remove trailing whitespace on diff output.
diff = [line.rstrip() + '\n' for line in diff]
return 'Differences (%s):\n' % kind + _indent(''.join(diff))
# If we're not using diff, then simply list the expected
# output followed by the actual output.
if want and got:
return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
elif want:
return 'Expected:\n%sGot nothing\n' % _indent(want)
elif got:
return 'Expected nothing\nGot:\n%s' % _indent(got)
else:
return 'Expected nothing\nGot nothing\n'
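# Illustrative sketch (hypothetical helper, never called at import time):
# the same want/got pair can fail a strict comparison yet pass once
# NORMALIZE_WHITESPACE or ELLIPSIS is enabled via optionflags.
def _demo_checker():
    checker = OutputChecker()
    assert not checker.check_output('1  2\n', '1 2\n', 0)
    assert checker.check_output('1  2\n', '1 2\n', NORMALIZE_WHITESPACE)
    assert checker.check_output('<Foo at ...>\n',
                                '<Foo at 0xdeadbeef>\n', ELLIPSIS)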
class DocTestFailure(Exception):
"""A DocTest example has failed in debugging mode.
The exception instance has variables:
- test: the DocTest object being run
    - example: the Example object that failed
- got: the actual output
"""
def __init__(self, test, example, got):
self.test = test
self.example = example
self.got = got
def __str__(self):
return str(self.test)
class UnexpectedException(Exception):
"""A DocTest example has encountered an unexpected exception
The exception instance has variables:
- test: the DocTest object being run
    - example: the Example object that failed
- exc_info: the exception info
"""
def __init__(self, test, example, exc_info):
self.test = test
self.example = example
self.exc_info = exc_info
def __str__(self):
return str(self.test)
class DebugRunner(DocTestRunner):
r"""Run doc tests but raise an exception as soon as there is a failure.
If an unexpected exception occurs, an UnexpectedException is raised.
It contains the test, the example, and the original exception:
>>> runner = DebugRunner(verbose=False)
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> try:
... runner.run(test)
... except UnexpectedException, failure:
... pass
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[0], exc_info[1], exc_info[2]
Traceback (most recent call last):
...
KeyError
We wrap the original exception to give the calling application
access to the test and example information.
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> try:
... runner.run(test)
... except DocTestFailure, failure:
... pass
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
If a failure or error occurs, the globals are left intact:
>>> del test.globs['__builtins__']
>>> test.globs
{'x': 1}
>>> test = DocTestParser().get_doctest('''
... >>> x = 2
... >>> raise KeyError
... ''', {}, 'foo', 'foo.py', 0)
>>> runner.run(test)
Traceback (most recent call last):
...
UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
>>> del test.globs['__builtins__']
>>> test.globs
{'x': 2}
But the globals are cleared if there is no error:
>>> test = DocTestParser().get_doctest('''
... >>> x = 2
... ''', {}, 'foo', 'foo.py', 0)
>>> runner.run(test)
(0, 1)
>>> test.globs
{}
"""
def run(self, test, compileflags=None, out=None, clear_globs=True):
r = DocTestRunner.run(self, test, compileflags, out, False)
if clear_globs:
test.globs.clear()
return r
def report_unexpected_exception(self, out, test, example, exc_info):
raise UnexpectedException(test, example, exc_info)
def report_failure(self, out, test, example, got):
raise DocTestFailure(test, example, got)
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.
# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.
master = None
def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None,
report=True, optionflags=0, extraglobs=None,
raise_on_error=False, exclude_empty=False):
"""m=None, name=None, globs=None, verbose=None, isprivate=None,
report=True, optionflags=0, extraglobs=None, raise_on_error=False,
exclude_empty=False
Test examples in docstrings in functions and classes reachable
from module m (or the current module if m is not supplied), starting
with m.__doc__. Unless isprivate is specified, private names
are not skipped.
Also test examples reachable from dict m.__test__ if it exists and is
not None. m.__test__ maps names to functions, classes and strings;
function and class docstrings are tested even if the name is private;
strings are tested directly, as if they were docstrings.
Return (#failures, #tests).
See doctest.__doc__ for an overview.
Optional keyword arg "name" gives the name of the module; by default
use m.__name__.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use m.__dict__. A copy of this
dict is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used. This is new in 2.4.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. This is new in 2.3. Possible values (see the
docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Deprecated in Python 2.4:
Optional keyword arg "isprivate" specifies a function used to
determine whether a name is private. The default function is
    to treat all functions as public. Optionally, "isprivate" can be
set to doctest.is_private to skip over functions marked as private
using the underscore naming convention; see its docs for details.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
if isprivate is not None:
warnings.warn("the isprivate argument is deprecated; "
"examine DocTestFinder.find() lists instead",
DeprecationWarning)
# If no module was given, then use __main__.
if m is None:
# DWA - m will still be None if this wasn't invoked from the command
# line, in which case the following TypeError is about as good an error
# as we should expect
m = sys.modules.get('__main__')
# Check that we were actually given a module.
if not inspect.ismodule(m):
raise TypeError("testmod: module required; %r" % (m,))
# If no name was given, then use the module's name.
if name is None:
name = m.__name__
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(_namefilter=isprivate, exclude_empty=exclude_empty)
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return runner.failures, runner.tries
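# Illustrative sketch (hypothetical helper, never called at import time):
# drive testmod() with a synthetic module built via the `new` module (the
# same trick Tester.rundict uses below), so the demo does not depend on
# any real importable module.
def _demo_testmod():
    import new
    m = new.module('demo_mod')
    m.__test__ = {'addition': '>>> 1 + 1\n2\n'}
    failures, tries = testmod(m, verbose=False, report=False)
    assert (failures, tries) == (0, 1)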
def testfile(filename, module_relative=True, name=None, package=None,
globs=None, verbose=None, report=True, optionflags=0,
extraglobs=None, raise_on_error=False, parser=DocTestParser()):
"""
Test examples in the given file. Return (#failures, #tests).
Optional keyword arg "module_relative" specifies how filenames
should be interpreted:
- If "module_relative" is True (the default), then "filename"
specifies a module-relative path. By default, this path is
relative to the calling module's directory; but if the
"package" argument is specified, then it is relative to that
package. To ensure os-independence, "filename" should use
"/" characters to separate path segments, and should not
be an absolute path (i.e., it may not begin with "/").
- If "module_relative" is False, then "filename" specifies an
os-specific path. The path may be absolute or relative (to
the current working directory).
Optional keyword arg "name" gives the name of the test; by default
use the file's basename.
Optional keyword argument "package" is a Python package or the
name of a Python package whose directory should be used as the
base directory for a module relative filename. If no package is
specified, then the calling module's directory is used as the base
directory for module relative filenames. It is an error to
specify "package" if "module_relative" is False.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use {}. A copy of this dict
is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. Possible values (see the docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Optional keyword arg "parser" specifies a DocTestParser (or
subclass) that should be used to extract tests from the files.
    Advanced tomfoolery: testfile runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path
if module_relative:
package = _normalize_module(package)
filename = _module_relative_path(package, filename)
# If no name was given, then use the file's name.
if name is None:
name = os.path.basename(filename)
# Assemble the globals.
if globs is None:
globs = {}
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
# Read the file, convert it to a test, and run it.
s = open(filename).read()
test = parser.get_doctest(s, globs, name, filename, 0)
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return runner.failures, runner.tries
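# Illustrative sketch (hypothetical helper, never called at import time):
# point testfile() at a temporary text file. The tempfile dance exists
# only to keep the demo self-contained; real callers name a file shipped
# with their package.
def _demo_testfile():
    import tempfile
    fd, path = tempfile.mkstemp(suffix='.txt')
    try:
        os.write(fd, 'A text-file test.\n\n    >>> 2 * 3\n    6\n')
        os.close(fd)
        failures, tries = testfile(path, module_relative=False,
                                   report=False)
        assert (failures, tries) == (0, 1)
    finally:
        os.remove(path)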
def run_docstring_examples(f, globs, verbose=False, name="NoName",
compileflags=None, optionflags=0):
"""
Test examples in the given object's docstring (`f`), using `globs`
as globals. Optional argument `name` is used in failure messages.
If the optional argument `verbose` is true, then generate output
even if there are no failures.
`compileflags` gives the set of flags that should be used by the
Python compiler when running the examples. If not specified, then
it will default to the set of future-import flags that apply to
`globs`.
Optional keyword arg `optionflags` specifies options for the
testing and output. See the documentation for `testmod` for more
information.
"""
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(verbose=verbose, recurse=False)
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(f, name, globs=globs):
runner.run(test, compileflags=compileflags)
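# Illustrative sketch (hypothetical helper, never called at import time):
# the quickest way to check a single object's docstring without scanning
# the rest of its module. The `square` function is a made-up example.
def _demo_run_docstring_examples():
    def square(x):
        """
        >>> square(3)
        9
        """
        return x * x
    # Failures, if any, are reported on stdout; nothing is returned.
    run_docstring_examples(square, {'square': square}, name='square')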
######################################################################
## 7. Tester
######################################################################
# This is provided only for backwards compatibility. It's not
# actually used in any way.
class Tester:
def __init__(self, mod=None, globs=None, verbose=None,
isprivate=None, optionflags=0):
warnings.warn("class Tester is deprecated; "
"use class doctest.DocTestRunner instead",
DeprecationWarning, stacklevel=2)
if mod is None and globs is None:
raise TypeError("Tester.__init__: must specify mod or globs")
if mod is not None and not inspect.ismodule(mod):
raise TypeError("Tester.__init__: mod must be a module; %r" %
(mod,))
if globs is None:
globs = mod.__dict__
self.globs = globs
self.verbose = verbose
self.isprivate = isprivate
self.optionflags = optionflags
self.testfinder = DocTestFinder(_namefilter=isprivate)
self.testrunner = DocTestRunner(verbose=verbose,
optionflags=optionflags)
def runstring(self, s, name):
test = DocTestParser().get_doctest(s, self.globs, name, None, None)
if self.verbose:
print "Running string", name
(f,t) = self.testrunner.run(test)
if self.verbose:
print f, "of", t, "examples failed in string", name
return (f,t)
def rundoc(self, object, name=None, module=None):
f = t = 0
tests = self.testfinder.find(object, name, module=module,
globs=self.globs)
for test in tests:
(f2, t2) = self.testrunner.run(test)
(f,t) = (f+f2, t+t2)
return (f,t)
def rundict(self, d, name, module=None):
import new
m = new.module(name)
m.__dict__.update(d)
if module is None:
module = False
return self.rundoc(m, name, module)
def run__test__(self, d, name):
import new
m = new.module(name)
m.__test__ = d
return self.rundoc(m, name)
def summarize(self, verbose=None):
return self.testrunner.summarize(verbose)
def merge(self, other):
self.testrunner.merge(other.testrunner)
######################################################################
## 8. Unittest Support
######################################################################
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
"""Sets the unittest option flags.
The old flag is returned so that a runner could restore the old
value if it wished to:
>>> old = _unittest_reportflags
>>> set_unittest_reportflags(REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE) == old
True
>>> import doctest
>>> doctest._unittest_reportflags == (REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE)
True
Only reporting flags can be set:
>>> set_unittest_reportflags(ELLIPSIS)
Traceback (most recent call last):
...
ValueError: ('Only reporting flags allowed', 8)
>>> set_unittest_reportflags(old) == (REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE)
True
"""
global _unittest_reportflags
if (flags & REPORTING_FLAGS) != flags:
raise ValueError("Only reporting flags allowed", flags)
old = _unittest_reportflags
_unittest_reportflags = flags
return old
_para_re = re.compile('\s*\n\s*\n\s*')
def _unittest_count(docstring):
words = 0
count = 0
for p in _para_re.split(docstring):
p = p.strip()
if not p:
continue
if p.startswith('>>> '):
if words:
count += 1
words = 0
else:
words = 1
return count or 1
class DocTestCase(unittest.TestCase):
def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None):
unittest.TestCase.__init__(self)
self._dt_optionflags = optionflags
self._dt_checker = checker
self._dt_test = test
self._dt_setUp = setUp
self._dt_tearDown = tearDown
self._dt_count = _unittest_count(test.docstring)
def countTestCases(self):
return self._dt_count
def setUp(self):
test = self._dt_test
if self._dt_setUp is not None:
self._dt_setUp(test)
def tearDown(self):
test = self._dt_test
if self._dt_tearDown is not None:
self._dt_tearDown(test)
test.globs.clear()
def runTest(self):
test = self._dt_test
old = sys.stdout
new = StringIO()
optionflags = self._dt_optionflags
if not (optionflags & REPORTING_FLAGS):
# The option flags don't include any reporting flags,
# so add the default reporting flags
optionflags |= _unittest_reportflags
runner = DocTestRunner(optionflags=optionflags,
checker=self._dt_checker, verbose=False)
try:
runner.DIVIDER = "-"*70
failures, tries = runner.run(
test, out=new.write, clear_globs=False)
finally:
sys.stdout = old
if failures:
raise self.failureException(self.format_failure(new.getvalue()))
def format_failure(self, err):
test = self._dt_test
if test.lineno is None:
lineno = 'unknown line number'
else:
lineno = '%s' % test.lineno
lname = '.'.join(test.name.split('.')[-1:])
return ('Failed doctest test for %s\n'
' File "%s", line %s, in %s\n\n%s'
% (test.name, test.filename, lineno, lname, err)
)
def debug(self):
r"""Run the test case without results and without catching exceptions
The unit test framework includes a debug method on test cases
and test suites to support post-mortem debugging. The test code
is run in such a way that errors are not caught. This way a
caller can catch the errors and initiate post-mortem debugging.
The DocTestCase provides a debug method that raises
        UnexpectedException errors if there is an unexpected
exception:
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except UnexpectedException, failure:
... pass
The UnexpectedException contains the test, the example, and
the original exception:
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[0], exc_info[1], exc_info[2]
Traceback (most recent call last):
...
KeyError
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except DocTestFailure, failure:
... pass
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
"""
self.setUp()
runner = DebugRunner(optionflags=self._dt_optionflags,
checker=self._dt_checker, verbose=False)
runner.run(self._dt_test)
self.tearDown()
def id(self):
return self._dt_test.name
def __repr__(self):
name = self._dt_test.name.split('.')
return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
__str__ = __repr__
def shortDescription(self):
return "Doctest: " + self._dt_test.name
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
**options):
"""
Convert doctest tests for a module to a unittest test suite.
This converts each documentation string in a module that
contains doctest tests to a unittest test case. If any of the
tests in a doc string fail, then the test case fails. An exception
is raised showing the name of the file containing the test and a
(sometimes approximate) line number.
The `module` argument provides the module to be tested. The argument
can be either a module or a module name.
If no argument is given, the calling module is used.
A number of options may be provided as keyword arguments:
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
"""
if test_finder is None:
test_finder = DocTestFinder()
module = _normalize_module(module)
tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
if globs is None:
globs = module.__dict__
if not tests:
# Why do we want to do this? Because it reveals a bug that might
# otherwise be hidden.
raise ValueError(module, "has no tests")
tests.sort()
suite = unittest.TestSuite()
for test in tests:
if len(test.examples) == 0:
continue
if not test.filename:
filename = module.__file__
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
test.filename = filename
suite.addTest(DocTestCase(test, **options))
return suite
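# A minimal usage sketch (hypothetical module name "mymodule" assumed):
#
#   import doctest, unittest
#   suite = doctest.DocTestSuite(mymodule, optionflags=doctest.ELLIPSIS)
#   unittest.TextTestRunner(verbosity=2).run(suite)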
class DocFileCase(DocTestCase):
def id(self):
return '_'.join(self._dt_test.name.split('.'))
def __repr__(self):
return self._dt_test.filename
__str__ = __repr__
def format_failure(self, err):
return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
% (self._dt_test.name, self._dt_test.filename, err)
)
def DocFileTest(path, module_relative=True, package=None,
globs=None, parser=DocTestParser(), **options):
if globs is None:
globs = {}
else:
globs = globs.copy()
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path.
if module_relative:
package = _normalize_module(package)
path = _module_relative_path(package, path)
if "__file__" not in globs:
globs["__file__"] = path
# Find the file and read it.
name = os.path.basename(path)
doc = open(path).read()
# Convert it to a test, and wrap it in a DocFileCase.
test = parser.get_doctest(doc, globs, name, path, 0)
return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
"""A unittest suite for one or more doctest files.
The path to each doctest file is given as a string; the
interpretation of that string depends on the keyword argument
"module_relative".
A number of options may be provided as keyword arguments:
module_relative
If "module_relative" is True, then the given file paths are
interpreted as os-independent module-relative paths. By
default, these paths are relative to the calling module's
directory; but if the "package" argument is specified, then
they are relative to that package. To ensure os-independence,
"filename" should use "/" characters to separate path
segments, and may not be an absolute path (i.e., it may not
begin with "/").
If "module_relative" is False, then the given file paths are
interpreted as os-specific paths. These paths may be absolute
or relative (to the current working directory).
package
A Python package or the name of a Python package whose directory
should be used as the base directory for module relative paths.
If "package" is not specified, then the calling module's
directory is used as the base directory for module relative
filenames. It is an error to specify "package" if
"module_relative" is False.
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
parser
A DocTestParser (or subclass) that should be used to extract
tests from the files.
"""
suite = unittest.TestSuite()
# We do this here so that _normalize_module is called at the right
# level. If it were called in DocFileTest, then this function
# would be the caller and we might guess the package incorrectly.
if kw.get('module_relative', True):
kw['package'] = _normalize_module(kw.get('package'))
for path in paths:
suite.addTest(DocFileTest(path, **kw))
return suite
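# A minimal usage sketch (hypothetical text files assumed, stored next to
# the calling module):
#
#   import doctest, unittest
#   suite = doctest.DocFileSuite('setup.txt', 'api.txt',
#                                optionflags=doctest.NORMALIZE_WHITESPACE)
#   unittest.TextTestRunner().run(suite)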
######################################################################
## 9. Debugging Support
######################################################################
def script_from_examples(s):
r"""Extract script from text with examples.
Converts text with examples to a Python script. Example input is
converted to regular code. Example output and all other words
are converted to comments:
>>> text = '''
... Here are examples of simple math.
...
... Python has super accurate integer addition
...
... >>> 2 + 2
... 5
...
... And very friendly error messages:
...
... >>> 1/0
... To Infinity
... And
... Beyond
...
... You can use logic if you want:
...
... >>> if 0:
... ... blah
... ... blah
... ...
...
... Ho hum
... '''
>>> print script_from_examples(text)
# Here are examples of simple math.
#
# Python has super accurate integer addition
#
2 + 2
# Expected:
## 5
#
# And very friendly error messages:
#
1/0
# Expected:
## To Infinity
## And
## Beyond
#
# You can use logic if you want:
#
if 0:
blah
blah
#
# Ho hum
"""
output = []
for piece in DocTestParser().parse(s):
if isinstance(piece, Example):
# Add the example's source code (strip trailing NL)
output.append(piece.source[:-1])
# Add the expected output:
want = piece.want
if want:
output.append('# Expected:')
output += ['## '+l for l in want.split('\n')[:-1]]
else:
# Add non-example text.
output += [_comment_line(l)
for l in piece.split('\n')[:-1]]
# Trim junk on both ends.
while output and output[-1] == '#':
output.pop()
while output and output[0] == '#':
output.pop(0)
# Combine the output, and return it.
return '\n'.join(output)
def testsource(module, name):
"""Extract the test sources from a doctest docstring as a script.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the doc string with tests to be debugged.
"""
module = _normalize_module(module)
tests = DocTestFinder().find(module)
test = [t for t in tests if t.name == name]
if not test:
raise ValueError(name, "not found in tests")
test = test[0]
testsrc = script_from_examples(test.docstring)
return testsrc
def debug_src(src, pm=False, globs=None):
"""Debug a single doctest docstring, in argument `src`'"""
testsrc = script_from_examples(src)
debug_script(testsrc, pm, globs)
def debug_script(src, pm=False, globs=None):
"Debug a test script. `src` is the script, as a string."
import pdb
    # Note that tempfile.NamedTemporaryFile() cannot be used.  As the
# docs say, a file so created cannot be opened by name a second time
# on modern Windows boxes, and execfile() needs to open it.
srcfilename = tempfile.mktemp(".py", "doctestdebug")
f = open(srcfilename, 'w')
f.write(src)
f.close()
try:
if globs:
globs = globs.copy()
else:
globs = {}
if pm:
try:
execfile(srcfilename, globs, globs)
except:
print sys.exc_info()[1]
pdb.post_mortem(sys.exc_info()[2])
else:
# Note that %r is vital here. '%s' instead can, e.g., cause
# backslashes to get treated as metacharacters on Windows.
pdb.run("execfile(%r)" % srcfilename, globs, globs)
finally:
os.remove(srcfilename)
def debug(module, name, pm=False):
"""Debug a single doctest docstring.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the docstring with tests to be debugged.
"""
module = _normalize_module(module)
testsrc = testsource(module, name)
debug_script(testsrc, pm, module.__dict__)
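# A minimal usage sketch (hypothetical names assumed): drop into pdb
# post-mortem at the first failing example of mymodule.f.
#
#   import doctest
#   doctest.debug('mymodule', 'mymodule.f', pm=True)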
######################################################################
## 10. Example Usage
######################################################################
class _TestClass:
"""
A pointless class, for sanity-checking of docstring testing.
Methods:
square()
get()
>>> _TestClass(13).get() + _TestClass(-12).get()
1
>>> hex(_TestClass(13).square().get())
'0xa9'
"""
def __init__(self, val):
"""val -> _TestClass object with associated value val.
>>> t = _TestClass(123)
>>> print t.get()
123
"""
self.val = val
def square(self):
"""square() -> square TestClass's associated value
>>> _TestClass(13).square().get()
169
"""
self.val = self.val ** 2
return self
def get(self):
"""get() -> return TestClass's associated value.
>>> x = _TestClass(-42)
>>> print x.get()
-42
"""
return self.val
__test__ = {"_TestClass": _TestClass,
"string": r"""
Example of a string object, searched as-is.
>>> x = 1; y = 2
>>> x + y, x * y
(3, 2)
""",
"bool-int equivalence": r"""
In 2.2, boolean expressions displayed
0 or 1. By default, we still accept
them. This can be disabled by passing
DONT_ACCEPT_TRUE_FOR_1 to the new
optionflags argument.
>>> 4 == 4
1
>>> 4 == 4
True
>>> 4 > 4
0
>>> 4 > 4
False
""",
"blank lines": r"""
Blank lines can be marked with <BLANKLINE>:
>>> print 'foo\n\nbar\n'
foo
<BLANKLINE>
bar
<BLANKLINE>
""",
"ellipsis": r"""
If the ellipsis flag is used, then '...' can be used to
elide substrings in the desired output:
>>> print range(1000) #doctest: +ELLIPSIS
[0, 1, 2, ..., 999]
""",
"whitespace normalization": r"""
If the whitespace normalization flag is used, then
differences in whitespace are ignored.
>>> print range(30) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29]
""",
}
def _test():
r = unittest.TextTestRunner()
r.run(DocTestSuite())
if __name__ == "__main__":
_test()
| lgpl-2.1 | -6,574,874,216,653,466,000 | 36.135725 | 79 | 0.569975 | false |
Adamssss/projectEuler | Problem 001-150 Python/pb102.py | 1 | 1225 | import math
import time
t1 = time.time()
# read the trianlges into a list
f = open('pb102_triangles.txt','r')
tris= f.read().split('\n')
f.close()
def totri(tl):
triangle = []
temp = tl.split(',')
for i in range(0,3):
triangle.append([tonumber(temp[2*i]),tonumber(temp[2*i+1])])
return triangle
def tonumber(ts):
neg = False
result = 0
temp = ts[:]
if temp[0] == '-':
neg = True
temp = temp[1:]
for i in temp:
result = result*10 + ord(i)-48
if neg:
result *= -1
return result
def contain(tri):
    # the origin is inside the triangle iff, for every vertex, the line
    # through that vertex and the origin separates the other two vertices
    if not onthesameside(tri[0],tri[1],tri[2]):
        return False
    if not onthesameside(tri[1],tri[2],tri[0]):
        return False
    if not onthesameside(tri[2],tri[0],tri[1]):
        return False
    return True
def onthesameside(A,B,C):
    # returns False when A and C lie strictly on the same side of the line
    # through B and the origin (slope k as seen from B); assumes B[0] != 0
    BA = [A[0]-B[0],A[1]-B[1]]
    BO = [-B[0],-B[1]]
    BC = [C[0]-B[0],C[1]-B[1]]
    k = BO[1]/BO[0]
    if BA[0]*k > BA[1] and BC[0]*k > BC[1]:
        return False
    if BA[0]*k < BA[1] and BC[0]*k < BC[1]:
        return False
    return True
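# A hedged alternative sketch (not used above): the same origin-in-triangle
# test via cross-product signs, which avoids the slope division and so also
# copes with a vertex at x == 0.
#
#   def cross(o, a, b):
#       return (a[0]-o[0])*(b[1]-o[1]) - (a[1]-o[1])*(b[0]-o[0])
#
#   def contains_origin(tri):
#       a, b, c = tri
#       s1 = cross(a, b, (0, 0))
#       s2 = cross(b, c, (0, 0))
#       s3 = cross(c, a, (0, 0))
#       return (s1 >= 0 and s2 >= 0 and s3 >= 0) or \
#              (s1 <= 0 and s2 <= 0 and s3 <= 0)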
count = 0
for i in range(0,1000):
if contain(totri(tris[i])):
count += 1
print(count)
print("time:",time.time()-t1)
| mit | -3,754,334,336,298,502,700 | 19.081967 | 68 | 0.533061 | false |
quimaguirre/diana | diana/toolbox/parse_clinical_trials.py | 1 | 12367 | ##############################################################################
# Clinical trials parser
#
# eg 2013-2016
##############################################################################
import cPickle, os, re
def main():
#base_dir = "../data/ct/"
base_dir = "/home/eguney/data/ct/"
file_name = base_dir + "ct.csv"
output_data(base_dir, file_name)
return
def output_data(base_dir, file_name):
drug_to_ctids = get_interventions(base_dir, include_other_names=True) #False)
print len(drug_to_ctids), drug_to_ctids.items()[:5]
ctid_to_conditions = get_ctid_to_conditions(base_dir)
print len(ctid_to_conditions), ctid_to_conditions.items()[:5]
ctid_to_values = get_ctid_to_details(base_dir)
print len(ctid_to_values), ctid_to_values.items()[:5]
f = open(file_name, 'w')
f.write("Drug\tClinical trial Id\tPhase\tStatus\tFDA regulated\tWhy stopped\tResults date\tConditions\n")
for drug, ctids in drug_to_ctids.iteritems():
for ctid in ctids:
values = [ drug, ctid ]
if ctid in ctid_to_values:
#phase, status, fda_regulated, why_stopped, results_date = ctid_to_values[ctid]
values.extend(ctid_to_values[ctid])
if ctid in ctid_to_conditions:
conditions = ctid_to_conditions[ctid]
values.append(" | ".join(conditions))
f.write("%s\n" % "\t".join(values))
f.close()
return
def get_disease_specific_drugs(drug_to_diseases, phenotype_to_mesh_id):
disease_to_drugs = {}
mesh_id_to_phenotype = {}
for phenotype, mesh_id in phenotype_to_mesh_id.items():
mesh_id_to_phenotype[mesh_id] = phenotype
for drugbank_id, diseases in drug_to_diseases.iteritems():
for phenotype, dui, val in diseases:
if val > 0:
if dui in mesh_id_to_phenotype: # In the disease data set
disease = mesh_id_to_phenotype[dui].lower()
disease_to_drugs.setdefault(disease, set()).add(drugbank_id)
return disease_to_drugs
def get_drug_disease_mapping(base_dir, selected_drugs, name_to_drug, synonym_to_drug, mesh_id_to_name, mesh_id_to_name_with_synonyms, dump_file):
if os.path.exists(dump_file):
drug_to_diseases = cPickle.load(open(dump_file))
return drug_to_diseases
# Get mesh name to mesh id mapping
mesh_name_to_id = {}
for mesh_id, names in mesh_id_to_name_with_synonyms.iteritems():
for name in names:
for name_mod in [ name, name.replace(",", ""), name.replace("-", " "), name.replace(",", "").replace("-", " ") ]:
mesh_name_to_id[name_mod] = mesh_id
# Get CT info
drug_to_ctids, ctid_to_conditions, ctid_to_values = get_ct_data(base_dir, include_other_names=True)
# Get CT - MeSH disease mapping
intervention_to_mesh_name = {}
interventions = reduce(lambda x,y: x|y, ctid_to_conditions.values())
for intervention in interventions:
if intervention.endswith('s'):
intervention = intervention[:-1]
idx = intervention.find("(")
if idx != -1:
intervention = intervention[:idx].rstrip()
try:
exp = re.compile(r"\b%ss{,1}\b" % re.escape(intervention))
except:
print "Problem with regular expression:", intervention
for mesh_name, dui in mesh_name_to_id.iteritems():
m = exp.search(mesh_name)
if m is None:
continue
elif len(mesh_name.split()) != len(intervention.split()): # no partial overlap
continue
phenotype = mesh_id_to_name[dui]
intervention_to_mesh_name[intervention] = phenotype
break
#print len(intervention_to_mesh_name), intervention_to_mesh_name.items()[:5]
# Get interventions
phase_to_value = { "Phase 0": 0.5, "Phase 1": 0.6, "Phase 1/Phase 2": 0.65, "Phase 2": 0.7, "Phase 2/Phase 3": 0.75, "Phase 3": 0.8, "Phase 3/Phase 4":0.85, "Phase 4": 0.9, "N/A": 0.5 }
status_to_value = { "Terminated": -0.5, "Withdrawn": -1} #,"Completed", "Recruiting", "Not yet recruiting"
drug_to_diseases = {}
drug_to_diseases_n_study = {}
non_matching_drugs = set()
for drug, ctids in drug_to_ctids.iteritems():
drugbank_id = None
if name_to_drug is None:
drugbank_id = drug
else:
if drug in name_to_drug:
drugbank_id = name_to_drug[drug]
elif drug in synonym_to_drug:
drugbank_id = synonym_to_drug[drug]
else:
non_matching_drugs.add(drug)
continue
if selected_drugs is not None and drugbank_id not in selected_drugs:
continue
phenotype_to_count = {}
for ctid in ctids:
phase, status, fda_regulated, why_stopped, results_date = ctid_to_values[ctid]
val = 0.5
if phase not in phase_to_value:
print "Unknown phase:", phase
if status in status_to_value and phase in phase_to_value:
val = phase_to_value[phase] - 0.1
for intervention in ctid_to_conditions[ctid]:
if intervention not in intervention_to_mesh_name:
continue
phenotype = intervention_to_mesh_name[intervention]
i = phenotype_to_count.setdefault(phenotype, 0)
phenotype_to_count[phenotype] = i + 1
dui = mesh_name_to_id[phenotype]
# Phase based value assignment
drug_to_diseases.setdefault(drugbank_id, set()).add((phenotype, dui, val))
# Number of study based value assignment
for phenotype, val in phenotype_to_count.iteritems():
dui = mesh_name_to_id[phenotype]
drug_to_diseases_n_study.setdefault(drugbank_id, set()).add((phenotype, dui, val))
#drug_to_diseases = drug_to_diseases_n_study
#print "Non matching drugs:", len(non_matching_drugs)
#print len(drug_to_diseases), drug_to_diseases.items()[:5]
cPickle.dump(drug_to_diseases, open(dump_file, 'w'))
return drug_to_diseases
def get_ct_data(base_dir, include_other_names=True, dump_file=None):
if dump_file is not None and os.path.exists(dump_file):
values = cPickle.load(open(dump_file))
#drug_to_ctids, ctid_to_conditions, ctid_to_values = values
return values
drug_to_ctids = get_interventions(base_dir, include_other_names)
ctid_to_conditions = get_ctid_to_conditions(base_dir)
ctid_to_values = get_ctid_to_details(base_dir)
values = drug_to_ctids, ctid_to_conditions, ctid_to_values
if dump_file is not None:
cPickle.dump(values, open(dump_file, 'w'))
return values
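# A minimal calling sketch (hypothetical base_dir assumed; the pipe-delimited
# clinicaltrials.gov dump files -- interventions.txt, conditions.txt,
# clinical_study.txt -- must live under it):
#
#   drug_to_ctids, ctid_to_conditions, ctid_to_values = \
#       get_ct_data("/data/ct/", include_other_names=True)
#   for ctid in drug_to_ctids.get("Aspirin", []):
#       print ctid, ctid_to_values.get(ctid)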
def get_ctid_to_conditions(base_dir):
condition_file = base_dir + "conditions.txt"
condition_file2 = base_dir + "condition_browse.txt"
# Get conditions
ctid_to_conditions = {}
f = open(condition_file)
f.readline()
for line in f:
words = line.strip().split("|")
ctid = words[1]
condition = words[2] #.lower()
ctid_to_conditions.setdefault(ctid, set()).add(condition)
    f.close()
    f = open(condition_file2)
f.readline()
for line in f:
words = line.strip().split("|")
ctid = words[1]
condition = words[2] #.lower()
ctid_to_conditions.setdefault(ctid, set()).add(condition)
f.close()
return ctid_to_conditions
def get_ctid_to_details(base_dir):
study_file = base_dir + "clinical_study.txt" # _noclob
# Get phase etc information
f = open(study_file)
line = f.readline()
words = line.strip().split("|")
header_to_idx = dict((word.lower(), i) for i, word in enumerate(words))
text = None
ctid_to_values = {}
while line:
line = f.readline()
if line.startswith("NCT"):
if text is not None:
words = text.strip().split("|")
ctid = words[0]
try:
phase = words[header_to_idx["phase"]]
status = words[header_to_idx["overall_status"]]
fda_regulated = words[header_to_idx["is_fda_regulated"]]
why_stopped = words[header_to_idx["why_stopped"]]
results_date = words[header_to_idx["firstreceived_results_date"]]
except:
print words
return
if phase.strip() != "":
ctid_to_values[ctid] = [phase, status, fda_regulated, why_stopped, results_date]
text = line
else:
text += line
f.close()
    # handle the final record, which the loop above never flushes
    words = text.strip().split("|")
    ctid = words[0]
    phase = words[header_to_idx["phase"]]
    status = words[header_to_idx["overall_status"]]
    fda_regulated = words[header_to_idx["is_fda_regulated"]]
    why_stopped = words[header_to_idx["why_stopped"]]
    results_date = words[header_to_idx["firstreceived_results_date"]]
    if phase.strip() != "":
        ctid_to_values[ctid] = [phase, status, fda_regulated, why_stopped, results_date]
return ctid_to_values
def get_interventions(base_dir, include_other_names=True):
#ctid_to_drugs = {}
drug_to_ctids = {}
intervention_file = base_dir + "interventions.txt"
f = open(intervention_file)
f.readline()
#prev_row = 0
ignored_intervention_types = set()
for line in f:
words = line.strip().split("|")
try:
row = int(words[0])
#if row != prev_row + 1:
# continue
except:
continue
#prev_row += 1
if len(words) < 5:
#print words
continue
ctid = words[1]
intervention = words[2]
drug = words[3]
drug = drug.decode("ascii", errors="ignore").encode("ascii")
drug = drug.strip("\"'")
if intervention != "Drug" and intervention != "Biological" :
ignored_intervention_types.add(intervention)
continue
drug_to_ctids.setdefault(drug, set()).add(ctid)
#ctid_to_drugs.setdefault(ctid, set()).add(drug)
#conditions = drug_to_interventions.setdefault(drug, set())
#conditions |= ctid_to_conditions[ctid]
f.close()
print "Ignored intervention types:", ignored_intervention_types
if include_other_names:
intervention_file = base_dir + "intervention_browse.txt"
f = open(intervention_file)
f.readline()
for line in f:
words = line.strip().split("|")
row = int(words[0])
ctid = words[1]
drug = words[2] #.lower()
drug = drug.decode("ascii", errors="ignore").encode("ascii")
drug = drug.strip("\"'")
drug_to_ctids.setdefault(drug, set()).add(ctid)
#ctid_to_drugs.setdefault(ctid, set()).add(drug)
f.close()
intervention_file = base_dir + "intervention_other_names.txt"
f = open(intervention_file)
f.readline()
for line in f:
words = line.strip().split("|")
row = int(words[0])
ctid = words[1]
drug = words[3] #.lower()
drug = drug.decode("ascii", errors="ignore").encode("ascii")
drug = drug.strip("\"'")
drug_to_ctids.setdefault(drug, set()).add(ctid)
#ctid_to_drugs.setdefault(ctid, set()).add(drug)
f.close()
return drug_to_ctids #ctid_to_drugs
def get_drug_to_interventions(drug_to_ctids, ctid_to_conditions, name_to_drug=None, synonym_to_drug=None):
drug_to_interventions = {}
non_matching_drugs = set()
for drug, ctids in drug_to_ctids.iteritems():
drugbank_id = None
if name_to_drug is None:
drugbank_id = drug
else:
if drug in name_to_drug:
drugbank_id = name_to_drug[drug]
elif drug in synonym_to_drug:
drugbank_id = synonym_to_drug[drug]
else:
non_matching_drugs.add(drug)
continue
values = set()
for ctid in ctids:
#if ctid_to_values[ctid][0] != "Phase 3":
# continue
values |= ctid_to_conditions[ctid]
if len(values) == 0:
continue
drug_to_interventions.setdefault(drugbank_id, values)
#print "Non matching drugs:", len(non_matching_drugs)
#phenotypes = disease_to_drugs.keys()
#disease_to_interventions = {}
#for drug, interventions in drug_to_interventions.iteritems():
# for intervention in interventions:
# intervention = intervention.lower()
# for disease in phenotypes:
# values = text_utilities.tokenize_disease_name(disease)
# if all([ intervention.find(word.strip()) != -1 for word in values ]): # disease.split(",") ]):
# disease_to_drugs_ct.setdefault(disease, set()).add(drug)
# disease_to_interventions.setdefault(disease, set()).add(intervention)
#for disease, interventions in disease_to_interventions.iteritems():
# print disease, interventions
#print len(drug_to_interventions), drug_to_interventions.items()[:5]
#print drug_to_ctids["voriconazole"], print ctid_to_conditions["NCT00005912"], print ctid_to_values["NCT00005912"]
#print drug_to_interventions["DB00582"]
return drug_to_interventions
def get_frequent_interventions(drug_to_interventions):
condition_to_count = {}
for drug, interventions in drug_to_interventions.iteritems():
for condition in interventions:
if condition in condition_to_count:
condition_to_count[condition] += 1
else:
condition_to_count[condition] = 1
values = []
for condition, count in condition_to_count.iteritems():
values.append((count, condition))
values.sort()
values.reverse()
#print values[:50]
return values
if __name__ == "__main__":
main()
| mit | 6,743,207,231,642,477,000 | 35.266862 | 189 | 0.646802 | false |
openstack/vitrage | vitrage/tests/unit/evaluator/template_validation/content/v1/test_parameters_validator.py | 1 | 1656 | # Copyright 2019 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from vitrage.evaluator.template_validation.content.v1.get_param_validator \
import GetParamValidator
from vitrage.tests.unit.evaluator.template_validation.content.base import \
ValidatorTest
class ParametersValidatorTest(ValidatorTest):
"""Tests for the parameters validator of version 1
All tests should succeed, as long as there is no get_param reference in
the template itself
"""
def test_validate_no_parameters(self):
result = GetParamValidator.validate(
template={'alarm_name': "Don't add a comment"}, actual_params=None)
self._assert_correct_result(result)
def test_validate_empty_parameters(self):
result = GetParamValidator.validate(
template={'alarm_name': '+2 for everybody'}, actual_params={})
self._assert_correct_result(result)
def test_validate_with_parameter(self):
template = {'alarm_name': 'get_param(param1)'}
result = \
GetParamValidator.validate(template=template, actual_params={})
self._assert_fault_result(result, 160)
| apache-2.0 | 2,761,177,479,609,274,000 | 38.428571 | 79 | 0.71256 | false |
skitoo/kivy-particle | kivyparticle/engine.py | 1 | 18189 | # -*- coding: utf-8 -*-
from kivy.uix.widget import Widget
from kivy.clock import Clock
from kivy.graphics import Color, Callback, Rotate, PushMatrix, PopMatrix, Translate, Quad
from kivy.graphics.opengl import glBlendFunc, GL_SRC_ALPHA, GL_ONE, GL_ZERO, GL_SRC_COLOR, GL_ONE_MINUS_SRC_COLOR, GL_ONE_MINUS_SRC_ALPHA, GL_DST_ALPHA, GL_ONE_MINUS_DST_ALPHA, GL_DST_COLOR, GL_ONE_MINUS_DST_COLOR
from kivy.core.image import Image
from kivy.logger import Logger
from xml.dom.minidom import parse as parse_xml
from .utils import random_variance, random_color_variance
from kivy.properties import NumericProperty, BooleanProperty, ListProperty, StringProperty, ObjectProperty
import sys
import os
import math
__all__ = ['EMITTER_TYPE_GRAVITY', 'EMITTER_TYPE_RADIAL', 'Particle', 'ParticleSystem']
EMITTER_TYPE_GRAVITY = 0
EMITTER_TYPE_RADIAL = 1
BLEND_FUNC = {0: GL_ZERO,
1: GL_ONE,
0x300: GL_SRC_COLOR,
0x301: GL_ONE_MINUS_SRC_COLOR,
0x302: GL_SRC_ALPHA,
0x303: GL_ONE_MINUS_SRC_ALPHA,
0x304: GL_DST_ALPHA,
0x305: GL_ONE_MINUS_DST_ALPHA,
0x306: GL_DST_COLOR,
0x307: GL_ONE_MINUS_DST_COLOR
}
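# The integer keys above are the raw OpenGL blend-factor codes stored in
# .pex particle configs (e.g. 1 == GL_ONE, 0x302 == 770 == GL_SRC_ALPHA);
# the dict translates them to the kivy.graphics.opengl constants used when
# drawing.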
class Particle(object):
x, y, rotation, current_time = -256, -256, 0, 0
scale, total_time = 1.0, 0.
color = [1.0, 1.0, 1.0, 1.0]
color_delta = [0.0, 0.0, 0.0, 0.0]
start_x, start_y, velocity_x, velocity_y = 0, 0, 0, 0
radial_acceleration, tangent_acceleration = 0, 0
emit_radius, emit_radius_delta = 0, 0
emit_rotation, emit_rotation_delta = 0, 0
rotation_delta, scale_delta = 0, 0
class ParticleSystem(Widget):
max_num_particles = NumericProperty(200)
life_span = NumericProperty(2)
texture = ObjectProperty(None)
texture_path = StringProperty(None)
life_span_variance = NumericProperty(0)
start_size = NumericProperty(16)
start_size_variance = NumericProperty(0)
end_size = NumericProperty(16)
end_size_variance = NumericProperty(0)
emit_angle = NumericProperty(0)
emit_angle_variance = NumericProperty(0)
start_rotation = NumericProperty(0)
start_rotation_variance = NumericProperty(0)
end_rotation = NumericProperty(0)
end_rotation_variance = NumericProperty(0)
emitter_x_variance = NumericProperty(100)
emitter_y_variance = NumericProperty(100)
gravity_x = NumericProperty(0)
gravity_y = NumericProperty(0)
speed = NumericProperty(0)
speed_variance = NumericProperty(0)
radial_acceleration = NumericProperty(100)
radial_acceleration_variance = NumericProperty(0)
tangential_acceleration = NumericProperty(0)
tangential_acceleration_variance = NumericProperty(0)
max_radius = NumericProperty(100)
max_radius_variance = NumericProperty(0)
min_radius = NumericProperty(50)
rotate_per_second = NumericProperty(0)
rotate_per_second_variance = NumericProperty(0)
start_color = ListProperty([1., 1., 1., 1.])
start_color_variance = ListProperty([1., 1., 1., 1.])
end_color = ListProperty([1., 1., 1., 1.])
end_color_variance = ListProperty([1., 1., 1., 1.])
blend_factor_source = NumericProperty(770)
blend_factor_dest = NumericProperty(1)
emitter_type = NumericProperty(0)
update_interval = NumericProperty(1. / 30.)
_is_paused = BooleanProperty(False)
def __init__(self, config, **kwargs):
super(ParticleSystem, self).__init__(**kwargs)
self.capacity = 0
self.particles = list()
self.particles_dict = dict()
self.emission_time = 0.0
self.frame_time = 0.0
self.num_particles = 0
if config is not None:
self._parse_config(config)
self.emission_rate = self.max_num_particles / self.life_span
self.initial_capacity = self.max_num_particles
self.max_capacity = self.max_num_particles
self._raise_capacity(self.initial_capacity)
with self.canvas.before:
Callback(self._set_blend_func)
with self.canvas.after:
Callback(self._reset_blend_func)
Clock.schedule_once(self._update, self.update_interval)
def start(self, duration=sys.maxint):
if self.emission_rate != 0:
self.emission_time = duration
def stop(self, clear=False):
self.emission_time = 0.0
if clear:
self.num_particles = 0
self.particles_dict = dict()
self.canvas.clear()
def on_max_num_particles(self, instance, value):
self.max_capacity = value
if self.capacity < value:
self._raise_capacity(self.max_capacity - self.capacity)
elif self.capacity > value:
self._lower_capacity(self.capacity - self.max_capacity)
self.emission_rate = self.max_num_particles / self.life_span
def on_texture(self, instance, value):
for p in self.particles:
try:
self.particles_dict[p]['rect'].texture = self.texture
except KeyError:
# if particle isn't initialized yet, you can't change its texture.
pass
def on_life_span(self, instance, value):
self.emission_rate = self.max_num_particles / value
def _set_blend_func(self, instruction):
#glBlendFunc(self.blend_factor_source, self.blend_factor_dest)
#glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glBlendFunc(GL_SRC_ALPHA, GL_ONE)
def _reset_blend_func(self, instruction):
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
def _parse_config(self, config):
self._config = parse_xml(config)
texture_path = self._parse_data('texture', 'name')
config_dir_path = os.path.dirname(os.path.abspath(config))
path = os.path.join(config_dir_path, texture_path)
if os.path.exists(path):
self.texture_path = path
else:
self.texture_path = texture_path
self.texture = Image(self.texture_path).texture
self.emitter_x = float(self._parse_data('sourcePosition', 'x'))
self.emitter_y = float(self._parse_data('sourcePosition', 'y'))
self.emitter_x_variance = float(self._parse_data('sourcePositionVariance', 'x'))
self.emitter_y_variance = float(self._parse_data('sourcePositionVariance', 'y'))
self.gravity_x = float(self._parse_data('gravity', 'x'))
self.gravity_y = float(self._parse_data('gravity', 'y'))
self.emitter_type = int(self._parse_data('emitterType'))
self.max_num_particles = int(self._parse_data('maxParticles'))
self.life_span = max(0.01, float(self._parse_data('particleLifeSpan')))
self.life_span_variance = float(self._parse_data('particleLifespanVariance'))
self.start_size = float(self._parse_data('startParticleSize'))
self.start_size_variance = float(self._parse_data('startParticleSizeVariance'))
self.end_size = float(self._parse_data('finishParticleSize'))
self.end_size_variance = float(self._parse_data('FinishParticleSizeVariance'))
self.emit_angle = math.radians(float(self._parse_data('angle')))
self.emit_angle_variance = math.radians(float(self._parse_data('angleVariance')))
self.start_rotation = math.radians(float(self._parse_data('rotationStart')))
self.start_rotation_variance = math.radians(float(self._parse_data('rotationStartVariance')))
self.end_rotation = math.radians(float(self._parse_data('rotationEnd')))
self.end_rotation_variance = math.radians(float(self._parse_data('rotationEndVariance')))
self.speed = float(self._parse_data('speed'))
self.speed_variance = float(self._parse_data('speedVariance'))
self.radial_acceleration = float(self._parse_data('radialAcceleration'))
self.radial_acceleration_variance = float(self._parse_data('radialAccelVariance'))
self.tangential_acceleration = float(self._parse_data('tangentialAcceleration'))
self.tangential_acceleration_variance = float(self._parse_data('tangentialAccelVariance'))
self.max_radius = float(self._parse_data('maxRadius'))
self.max_radius_variance = float(self._parse_data('maxRadiusVariance'))
self.min_radius = float(self._parse_data('minRadius'))
self.rotate_per_second = math.radians(float(self._parse_data('rotatePerSecond')))
self.rotate_per_second_variance = math.radians(float(self._parse_data('rotatePerSecondVariance')))
self.start_color = self._parse_color('startColor')
self.start_color_variance = self._parse_color('startColorVariance')
self.end_color = self._parse_color('finishColor')
self.end_color_variance = self._parse_color('finishColorVariance')
self.blend_factor_source = self._parse_blend('blendFuncSource')
self.blend_factor_dest = self._parse_blend('blendFuncDestination')
def _parse_data(self, name, attribute='value'):
return self._config.getElementsByTagName(name)[0].getAttribute(attribute)
def _parse_color(self, name):
return [float(self._parse_data(name, 'red')), float(self._parse_data(name, 'green')), float(self._parse_data(name, 'blue')), float(self._parse_data(name, 'alpha'))]
def _parse_blend(self, name):
value = int(self._parse_data(name))
return BLEND_FUNC[value]
def pause(self):
self._is_paused = True
def resume(self):
self._is_paused = False
Clock.schedule_once(self._update, self.update_interval)
def _update(self, dt):
self._advance_time(dt)
self._render()
if not self._is_paused:
Clock.schedule_once(self._update, self.update_interval)
def _create_particle(self):
return Particle()
def _init_particle(self, particle):
life_span = random_variance(self.life_span, self.life_span_variance)
if life_span <= 0.0:
return
particle.current_time = 0.0
particle.total_time = life_span
particle.x = random_variance(self.emitter_x, self.emitter_x_variance)
particle.y = random_variance(self.emitter_y, self.emitter_y_variance)
particle.start_x = self.emitter_x
particle.start_y = self.emitter_y
angle = random_variance(self.emit_angle, self.emit_angle_variance)
speed = random_variance(self.speed, self.speed_variance)
particle.velocity_x = speed * math.cos(angle)
particle.velocity_y = speed * math.sin(angle)
particle.emit_radius = random_variance(self.max_radius, self.max_radius_variance)
particle.emit_radius_delta = (self.max_radius - self.min_radius) / life_span
particle.emit_rotation = random_variance(self.emit_angle, self.emit_angle_variance)
particle.emit_rotation_delta = random_variance(self.rotate_per_second, self.rotate_per_second_variance)
particle.radial_acceleration = random_variance(self.radial_acceleration, self.radial_acceleration_variance)
particle.tangent_acceleration = random_variance(self.tangential_acceleration, self.tangential_acceleration_variance)
start_size = random_variance(self.start_size, self.start_size_variance)
end_size = random_variance(self.end_size, self.end_size_variance)
start_size = max(0.1, start_size)
end_size = max(0.1, end_size)
particle.scale = start_size / self.texture.width
particle.scale_delta = ((end_size - start_size) / life_span) / self.texture.width
# colors
start_color = random_color_variance(self.start_color, self.start_color_variance)
end_color = random_color_variance(self.end_color, self.end_color_variance)
particle.color_delta = [(end_color[i] - start_color[i]) / life_span for i in range(4)]
particle.color = start_color
# rotation
start_rotation = random_variance(self.start_rotation, self.start_rotation_variance)
end_rotation = random_variance(self.end_rotation, self.end_rotation_variance)
particle.rotation = start_rotation
particle.rotation_delta = (end_rotation - start_rotation) / life_span
def _advance_particle(self, particle, passed_time):
passed_time = min(passed_time, particle.total_time - particle.current_time)
particle.current_time += passed_time
if self.emitter_type == EMITTER_TYPE_RADIAL:
particle.emit_rotation += particle.emit_rotation_delta * passed_time
particle.emit_radius -= particle.emit_radius_delta * passed_time
particle.x = self.emitter_x - math.cos(particle.emit_rotation) * particle.emit_radius
particle.y = self.emitter_y - math.sin(particle.emit_rotation) * particle.emit_radius
if particle.emit_radius < self.min_radius:
particle.current_time = particle.total_time
else:
distance_x = particle.x - particle.start_x
distance_y = particle.y - particle.start_y
distance_scalar = math.sqrt(distance_x * distance_x + distance_y * distance_y)
if distance_scalar < 0.01:
distance_scalar = 0.01
radial_x = distance_x / distance_scalar
radial_y = distance_y / distance_scalar
tangential_x = radial_x
tangential_y = radial_y
radial_x *= particle.radial_acceleration
radial_y *= particle.radial_acceleration
new_y = tangential_x
tangential_x = -tangential_y * particle.tangent_acceleration
tangential_y = new_y * particle.tangent_acceleration
particle.velocity_x += passed_time * (self.gravity_x + radial_x + tangential_x)
particle.velocity_y += passed_time * (self.gravity_y + radial_y + tangential_y)
particle.x += particle.velocity_x * passed_time
particle.y += particle.velocity_y * passed_time
particle.scale += particle.scale_delta * passed_time
particle.rotation += particle.rotation_delta * passed_time
particle.color = [particle.color[i] + particle.color_delta[i] * passed_time for i in range(4)]
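    # A worked note on the update above: the motion step is plain explicit
    # Euler integration, i.e. per frame of length passed_time
    #
    #   v += (gravity + radial_accel + tangential_accel) * dt
    #   p += v * dt
    #
    # so a very large update_interval will visibly distort trajectories.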
def _raise_capacity(self, by_amount):
old_capacity = self.capacity
new_capacity = min(self.max_capacity, self.capacity + by_amount)
for i in range(int(new_capacity - old_capacity)):
self.particles.append(self._create_particle())
self.num_particles = int(new_capacity)
self.capacity = new_capacity
def _lower_capacity(self, by_amount):
old_capacity = self.capacity
new_capacity = max(0, self.capacity - by_amount)
for i in range(int(old_capacity - new_capacity)):
try:
self.canvas.remove(self.particles_dict[self.particles.pop()]['rect'])
except:
pass
self.num_particles = int(new_capacity)
self.capacity = new_capacity
def _advance_time(self, passed_time):
particle_index = 0
# advance existing particles
while particle_index < self.num_particles:
particle = self.particles[particle_index]
if particle.current_time < particle.total_time:
self._advance_particle(particle, passed_time)
particle_index += 1
else:
if particle_index != self.num_particles - 1:
next_particle = self.particles[self.num_particles - 1]
self.particles[self.num_particles - 1] = particle
self.particles[particle_index] = next_particle
self.num_particles -= 1
if self.num_particles == 0:
Logger.debug('Particle: COMPLETE')
# create and advance new particles
if self.emission_time > 0:
time_between_particles = 1.0 / self.emission_rate
self.frame_time += passed_time
while self.frame_time > 0:
if self.num_particles < self.max_capacity:
if self.num_particles == self.capacity:
self._raise_capacity(self.capacity)
particle = self.particles[self.num_particles]
self.num_particles += 1
self._init_particle(particle)
self._advance_particle(particle, self.frame_time)
self.frame_time -= time_between_particles
if self.emission_time != sys.maxint:
self.emission_time = max(0.0, self.emission_time - passed_time)
def _render(self):
if self.num_particles == 0:
return
for i in range(self.num_particles):
particle = self.particles[i]
size = (self.texture.size[0] * particle.scale, self.texture.size[1] * particle.scale)
if particle not in self.particles_dict:
self.particles_dict[particle] = dict()
color = particle.color[:]
with self.canvas:
self.particles_dict[particle]['color'] = Color(color[0], color[1], color[2], color[3])
PushMatrix()
self.particles_dict[particle]['translate'] = Translate()
self.particles_dict[particle]['rotate'] = Rotate()
self.particles_dict[particle]['rotate'].set(particle.rotation, 0, 0, 1)
self.particles_dict[particle]['rect'] = Quad(texture=self.texture, points=(-size[0] * 0.5, -size[1] * 0.5, size[0] * 0.5, -size[1] * 0.5, size[0] * 0.5, size[1] * 0.5, -size[0] * 0.5, size[1] * 0.5))
self.particles_dict[particle]['translate'].xy = (particle.x, particle.y)
PopMatrix()
else:
self.particles_dict[particle]['rotate'].angle = particle.rotation
self.particles_dict[particle]['translate'].xy = (particle.x, particle.y)
self.particles_dict[particle]['color'].rgba = particle.color
self.particles_dict[particle]['rect'].points = (-size[0] * 0.5, -size[1] * 0.5, size[0] * 0.5, -size[1] * 0.5, size[0] * 0.5, size[1] * 0.5, -size[0] * 0.5, size[1] * 0.5)
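# A minimal usage sketch (hypothetical "particle.pex" config assumed; the
# widget needs a running Kivy app for its Clock callbacks to fire):
#
#   from kivy.app import App
#
#   class Demo(App):
#       def build(self):
#           ps = ParticleSystem('particle.pex')
#           ps.emitter_x, ps.emitter_y = 200, 200
#           ps.start()
#           return ps
#
#   Demo().run()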
| mit | -5,275,556,113,248,403,000 | 44.246269 | 221 | 0.633185 | false |
benpicco/mate-panflute | src/panflute/tests/runner.py | 1 | 13587 | #! /usr/bin/env python
# Panflute
# Copyright (C) 2010 Paul Kuliniewicz <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301, USA.
"""
Base classes for running tests against a player configuration.
"""
from __future__ import absolute_import, print_function
import panflute.defs
import panflute.util
import dbus
import glib
import os
import os.path
import shutil
import subprocess
import sys
import threading
import time
import traceback
class Launcher (object):
"""
Launch a subprocess for running a series of tests against the same
player configuration.
"""
def __init__ (self, daemon_prefix, prefix, user, password, test_names, owner, data, player_name):
self.__daemon_prefix = daemon_prefix
self.__prefix = prefix
self.__user = user
self.__password = password
self.__test_names = test_names
self.__owner = owner
self.__data = data
self.__player_name = player_name
self.__child = None
self.__env = os.environ.copy ()
def start (self):
"""
Start the subprocess and begin collecting results from it.
"""
with open ("/dev/null", "r") as null:
child = subprocess.Popen ([sys.argv[0], "--subprocess", self.__player_name, self.__daemon_prefix,
self.__prefix, self.__user, self.__password] + self.__test_names,
shell = False, close_fds = True, preexec_fn = os.setsid,
stdin = null, stdout = subprocess.PIPE, env = self.__env)
glib.io_add_watch (child.stdout, glib.IO_IN | glib.IO_HUP, self.__child_io_cb)
def augment_env_path (self, name, value):
"""
Augment a PATH-style environment variable, creating it if it doesn't
already exist.
"""
if self.__env.has_key (name):
self.__env[name] = "{0}:{1}".format (value, self.__env[name])
else:
self.__env[name] = value
def set_env (self, name, value):
"""
Set an environment variable for the subprocess.
"""
self.__env[name] = value
def __child_io_cb (self, source, cond):
"""
Called when the subprocess produces more data, or closes the pipe.
"""
if cond == glib.IO_IN:
more = self.__process_message (source)
if not more:
self.__owner.start_next_launcher ()
return more
else:
# glib.IO_HUP indicates the subprocess crashed, probably leaving
# behind its own children. Abort testing since any results
# produced without cleaning up the mess will be unreliable.
self.__owner.abort_testing ()
return False
def __process_message (self, source):
"""
Process a message from the subprocess, returning True if more
messages are expected.
"""
# Read until reaching a blank line (record separator).
try:
line = source.readline ().rstrip ()
[result, test_name] = line.split (" ")
if result != "***":
detail = ""
line = source.readline ().rstrip ()
while line != "":
detail += line + "\n"
line = source.readline ().rstrip ()
if result == "START":
self.__owner.process_start (self.__data, test_name)
else:
self.__owner.process_result (self.__data, test_name, result, detail)
return True
else:
return False
except ValueError:
# The split() failed, so data was truncated, probably because the
# subprocess got killed.
return False
class Runner (threading.Thread):
"""
Runs a series of tests against a player configuration.
"""
TONE_PATHS = [os.path.join (panflute.defs.PKG_DATA_DIR, filename)
for filename in ["test220.ogg", "test440.ogg", "test660.ogg"]]
TONE_URIS = map (panflute.util.make_url, TONE_PATHS)
def __init__ (self, main_loop, daemon_prefix, prefix, user, password, tests):
threading.Thread.__init__ (self, name = "Test runner")
self.__main_loop = main_loop
self.__daemon_prefix = daemon_prefix
self.__prefix = prefix
self.__user = user
self.__password = password
self.__tests = tests
self.__panflute = None
self.__child = None
self.bus = dbus.SessionBus ()
proxy = self.bus.get_object ("org.freedesktop.DBus", "/org/freedesktop/DBus")
self.bus_obj = dbus.Interface (proxy, "org.freedesktop.DBus")
def run (self):
if len (self.__tests) == 0:
# For debugging, just start the player as it would be invoked for
# running tests, then quit.
try:
print ("DEBUG: prepare_persistent", file = sys.stderr)
self.prepare_persistent ()
time.sleep (3)
print ("DEBUG: prepare_single", file = sys.stderr)
self.prepare_single (self.__prefix, self.__user, self.__password)
time.sleep (3)
finally:
self.__main_loop.quit ()
else:
# The calls to sleep are to give various things a chance to settle
# down before continuing; having two instances of the same player
# running at the same time, for example, causes problems.
try:
print ("DEBUG: prepare_persistent", file = sys.stderr)
self.prepare_persistent ()
print ("DEBUG: start_daemon", file = sys.stderr)
self.start_daemon ()
print ("DEBUG: sleep", file = sys.stderr)
time.sleep (3)
for test in self.__tests:
try:
print ("START {0}".format (test.__class__.__name__))
print ()
sys.stdout.flush ()
print ("DEBUG: prepare_single", file = sys.stderr)
self.prepare_single (self.__prefix, self.__user, self.__password)
time.sleep (3)
print ("DEBUG: create_proxies", file = sys.stderr)
player, player_ex = self.create_proxies ()
print ("DEBUG: sleep", file = sys.stderr)
time.sleep (3)
print ("DEBUG: should_be_run", file = sys.stderr)
if test.should_be_run (player_ex):
print ("DEBUG: test", file = sys.stderr)
test.test (player, player_ex)
result = "PASS"
detail = ""
else:
result = "SKIP"
detail = ""
except Exception, e:
result = "FAIL"
detail = traceback.format_exc (e)
finally:
try:
print ("DEBUG: cleanup_single", file = sys.stderr)
self.cleanup_single ()
print ("DEBUG: wait_for", file = sys.stderr)
self.wait_for ("org.mpris.panflute", False)
print ("DEBUG: sleep", file = sys.stderr)
time.sleep (3)
except Exception, e:
if result != "FAIL":
result = "FAIL"
detail = traceback.format_exc (e)
finally:
if self.__child is not None:
print ("DEBUG: end_process", file = sys.stderr)
self.end_process (self.__child)
self.__child = None
print ("{0} {1}".format (result, test.__class__.__name__))
if detail != "":
print (detail.rstrip ())
print ()
sys.stdout.flush ()
print ("DEBUG: stop_daemon", file = sys.stderr)
self.stop_daemon ()
print ("DEBUG: cleanup_persistent", file = sys.stderr)
self.cleanup_persistent ()
print ("DEBUG: sleep", file = sys.stderr)
time.sleep (3)
finally:
print ("*** ***")
sys.stdout.flush ()
print ("DEBUG: quit", file = sys.stderr)
self.__main_loop.quit ()
def prepare_persistent (self):
"""
Perform any pre-test setup that doesn't have to be re-done every
time a test is started.
"""
pass
def cleanup_persistent (self):
"""
Perform any post-test cleanup after all the tests are done.
"""
pass
def prepare_single (self, prefix, user, password):
"""
Perform setup before each test.
"""
pass
def cleanup_single (self):
"""
Perform cleanup after each test.
"""
pass
def set_child (self, child):
"""
Set a child to be forcibly terminated at the end of the test.
"""
self.__child = child
def rmdirs (self, path):
"""
        Recursively remove a directory.
"""
try:
shutil.rmtree (os.path.expanduser (path))
except OSError:
# don't care if directory didn't exist
pass
def rmfile (self, path):
"""
Delete a file, ignoring errors.
"""
try:
os.unlink (os.path.expanduser (path))
except OSError:
# don't care if file didn't exist
pass
def mkdir (self, path):
"""
Create a directory, ignoring errors.
"""
try:
os.makedirs (os.path.expanduser (path))
except OSError:
# don't care if directory already exists
pass
def run_command (self, command):
"""
Run a shell command.
"""
with open ("/dev/null", "r+") as null:
return subprocess.Popen (command, shell = False, close_fds = True, preexec_fn = os.setsid,
stdin = null, stdout = null, stderr = null)
def end_process (self, child):
"""
Forcibly terminate a subprocess.
"""
try:
if child.poll () is None:
child.terminate ()
time.sleep (3)
if child.poll () is None:
child.kill ()
child.wait ()
except OSError, e:
# 3 == "no such process" == not a problem
if e.errno != 3:
raise e
def start_daemon (self):
"""
Start the Panflute daemon.
"""
daemon = os.path.join (self.__daemon_prefix, "bin/panflute-daemon")
#self.__panflute = self.run_command ([daemon, "-d"])
with open ("/dev/null", "r") as null:
with open ("/tmp/panflute-daemon.out", "a") as out:
with open ("/tmp/panflute-daemon.err", "a") as err:
self.__panflute = subprocess.Popen ([daemon, "-d"],
shell = False, close_fds = True, preexec_fn = os.setsid,
stdin = null, stdout = out, stderr = err)
self.wait_for ("org.kuliniewicz.Panflute", True)
def stop_daemon (self):
"""
Stop the Panflute daemon.
"""
try:
self.end_process (self.__panflute)
self.wait_for ("org.kuliniewicz.Panflute", False, 1)
finally:
self.__panflute = None
def create_proxies (self):
"""
Create the player proxy objects.
"""
self.wait_for ("org.mpris.panflute", True)
proxy = self.bus.get_object ("org.mpris.panflute", "/Player")
player = dbus.Interface (proxy, panflute.mpris.INTERFACE)
player_ex = dbus.Interface (proxy, "org.kuliniewicz.Panflute")
return player, player_ex
def wait_for (self, name, wanted, tries = 20):
"""
Wait for a D-Bus name to appear, giving up after too many tries.
"""
while tries > 0:
time.sleep (1)
if self.bus_obj.NameHasOwner (name) == wanted:
return
tries = tries - 1
raise TestError
class TestError (Exception):
pass
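# A minimal subclass sketch (hypothetical player command and paths assumed):
# concrete runners override the prepare/cleanup hooks to start and stop the
# player under test around each case.
#
#   class ExampleRunner(Runner):
#       def prepare_single(self, prefix, user, password):
#           self.set_child(self.run_command([prefix + "/bin/player"]))
#       def cleanup_single(self):
#           self.rmdirs("~/.config/player")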
| gpl-2.0 | -8,133,091,465,736,039,000 | 30.894366 | 112 | 0.502024 | false |
savionok/RemoteHID | RemoteHidPythonServer/udpEchoServer.py | 1 | 2375 | #############################################################################
# Copyright 2012 Virtosu Sava #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#############################################################################
import socket
from threading import Thread
from configs import SERVER
class UDPEchoServer(Thread):
    '''This UDP server simply echoes each message back to the client.
    The message with which the server responds can also be overridden.
'''
def __init__(self, port = SERVER.UDP_ECHO_PORT, response_msg = ''):
Thread.__init__(self)
self.running = True
self.UDPSocket = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
self.response_msg = response_msg
try:
self.UDPSocket.bind(("", SERVER.UDP_ECHO_PORT))
except:
            print 'Error 16: port is already in use; failed to start server.'
    def stop_server(self):
        self.running = False
        # send a dummy datagram so the blocking recvfrom() in run() wakes up
        # and the loop can notice self.running == False (a bare connect() on
        # a UDP socket transmits nothing)
        temp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        temp_socket.sendto('', (SERVER.UDP_HOST, SERVER.UDP_ECHO_PORT))
def run (self):
while self.running:
data, address = self.UDPSocket.recvfrom(SERVER.MAX_BUFFER_SIZE)
#empty string counts as False
if self.response_msg:
self.UDPSocket.sendto( self.response_msg , (address[0], address[1]) )
else:
self.UDPSocket.sendto( data , (address[0], address[1]) )
#TODO: manage debug info
print "( " ,address[0], " " , address[1] , " ) said : ", data | apache-2.0 | 1,390,608,810,056,242,400 | 45.54 | 77 | 0.546947 | false |
stamaimer/Hackthon | benchmark/stress.py | 1 | 11486 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import collections
import contextlib
import functools
import itertools
import json
import math
import os
import random
import signal
import time
import redis
import pymysql
try:
import httplib
except ImportError:
import http.client as httplib
try:
import urllib.parse as urllib
except ImportError:
import urllib
from multiprocessing.pool import Pool, ThreadPool
from multiprocessing import Process
KEY_PREFIX = "stress_test:make_order"
USER_KEY = "{}:user".format(KEY_PREFIX)
SUCCESS_KEY = "{}:success".format(KEY_PREFIX)
FAILURE_KEY = "{}:failure".format(KEY_PREFIX)
ORDER_RESP_TIME_KEY = "{}:order_resp_time".format(KEY_PREFIX)
REQ_RESP_TIME_KEY = "{}:req_resp_time".format(KEY_PREFIX)
REQUEST_SUCCESS_KEY = "{}:request_success".format(KEY_PREFIX)
REQUEST_FAILURE_KEY = "{}:request_failure".format(KEY_PREFIX)
REQ_FINISH_TIME_KEY = "{}:req_finish_time".format(KEY_PREFIX)
ORDER_FINISH_TIME_KEY = "{}:order_finish_time".format(KEY_PREFIX)
redis_store = redis.Redis()
users, foods = {}, []
@contextlib.contextmanager
def db_query():
db = pymysql.connect(host=os.getenv("DB_HOST", "localhost"),
port=int(os.getenv("DB_PORT", 3306)),
user=os.getenv("DB_USER", "root"),
passwd=os.getenv("DB_PASS", "toor"),
db=os.getenv("DB_NAME", "eleme"))
try:
yield db
finally:
db.close()
def load_users():
global users
with db_query() as db:
cur = db.cursor()
# load users
cur.execute("SELECT id, name, password FROM user")
for i, name, pw in cur.fetchall():
users[i] = {"username": name, "password": pw}
redis_store.sadd(USER_KEY, *users.keys())
return users
def load_foods():
global foods
with db_query() as db:
cur = db.cursor()
cur.execute("SELECT id, stock, price FROM food")
for i, stock, price in cur.fetchall():
foods.append({"id": i, "stock": stock})
return foods
def safe_loads(data):
try:
return json.loads(data)
except:
return data
class QueryException(Exception):
def __init__(self, code, message):
self.code = code
self.message = message
def __str__(self):
return "{} {}".format(self.code, self.message)
class Query(object):
__slots__ = ["access_token", "user_id", "cart_id", "client"]
def __init__(self, host, port):
self.client = httplib.HTTPConnection(host, port, timeout=3)
self.access_token = None
self.user_id = None
self.cart_id = None
def request(self, method, url, headers=None, data=None):
data = data or {}
headers = headers or {}
headers["Content-Type"] = "application/json"
start = time.time()
status = None
try:
self.client.request(method, url, body=json.dumps(data),
headers=headers)
response = self.client.getresponse()
status = response.status
data = response.read().decode("utf-8")
self.client.close()
return {"status": status, "data": safe_loads(data)}
finally:
now = time.time()
elapsed = now - start
with redis_store.pipeline() as p:
if status in (200, 204):
p.incr(REQUEST_SUCCESS_KEY)
p.lpush(REQ_FINISH_TIME_KEY, now)
else:
p.incr(REQUEST_FAILURE_KEY)
p.lpush(REQ_RESP_TIME_KEY, elapsed)
p.execute()
def url(self, path):
assert self.access_token
params = {"access_token": self.access_token}
qs = urllib.urlencode(params)
return "{}?{}".format(path, qs) if qs else path
def _do_login(self, username, password):
data = {
"username": username,
"password": password
}
response = self.request("POST", "/login", data=data)
if response["status"] == 200:
self.access_token = response["data"]["access_token"]
return True
return False
def login(self):
user_id = redis_store.spop(USER_KEY)
if not user_id:
return False
self.user_id = int(user_id)
user = users[self.user_id]
return self._do_login(user["username"], user["password"])
def get_foods(self):
res = self.request("GET", self.url("/foods"))
return res["status"] == 200
def get_orders(self):
res = self.request("GET", self.url("/orders"))
return res["status"] == 200
def create_cart(self):
response = self.request("POST", self.url("/carts"))
try:
self.cart_id = response["data"].get("cart_id")
except:
return False
return response["status"] == 200
def cart_add_food(self):
food = random.choice(foods)
data = {"food_id": food["id"], "count": 1}
path = "/carts/{}".format(self.cart_id)
res = self.request("PATCH", self.url(path), data=data)
return res["status"] == 204
def make_order(self):
chain = [self.login, self.get_foods, self.create_cart,
self.cart_add_food, self.cart_add_food]
for action in chain:
if not action():
return False
data = {"cart_id": self.cart_id}
res = self.request("POST", self.url("/orders"), data=data)
return res["status"] == 200
def job(host, port):
q = Query(host, port)
start = time.time()
try:
ok = q.make_order()
except:
ok = False
end = time.time()
elapsed = end - start
with redis_store.pipeline() as p:
if ok:
p.incr(SUCCESS_KEY)
p.lpush(ORDER_FINISH_TIME_KEY, end)
else:
p.incr(FAILURE_KEY)
p.lpush(ORDER_RESP_TIME_KEY, elapsed)
p.execute()
def progress():
try:
prev = 0
while True:
time.sleep(1)
cur = get_value(SUCCESS_KEY)
msg = "Orders Per Second: {:4d}/s".format(cur - prev)
print(msg, end='')
print('\r' * len(msg), end='')
prev = cur
except KeyboardInterrupt:
pass
finally:
print('\n')
def thread(host, port, threads, num):
pool = ThreadPool(threads)
for _ in range(num):
pool.apply_async(job, (host, port))
time.sleep(0.001)
pool.close()
pool.join()
def divide(n, m):
"""Divide integer n to m chunks
"""
avg = int(n / m)
remain = n - m * avg
data = list(itertools.repeat(avg, m))
for i in range(len(data)):
if not remain:
break
data[i] += 1
remain -= 1
return data
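# Quick illustration (not in the original file): the remainder is spread
# one unit at a time over the leading chunks, so for example
#   divide(10, 3) -> [4, 3, 3]
#   divide(9, 3)  -> [3, 3, 3]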
def work(host, port, processes, threads, times):
pool = Pool(processes,
lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))
p = Process(target=progress)
p.daemon = True
start = time.time()
try:
for chunk in divide(times, processes):
pool.apply_async(thread, (host, port, threads, chunk))
p.start()
pool.close()
pool.join()
p.terminate()
p.join()
except KeyboardInterrupt:
pool.terminate()
p.terminate()
p.join()
pool.join()
return time.time() - start
def get_value(key):
v = redis_store.get(key)
return 0 if v is None else int(v)
def get_range(key):
v = redis_store.lrange(key, 0, -1)
return [float(i) for i in v]
def safe_div(a, b):
return a / b if b else 0
def get_avg(l):
return safe_div(sum(l), float(len(l)))
def report(processes, threads, total_time, total_order):
success = get_value(SUCCESS_KEY)
failure = get_value(FAILURE_KEY)
req_success = get_value(REQUEST_SUCCESS_KEY)
req_failure = get_value(REQUEST_FAILURE_KEY)
req_resp_time = get_range(REQ_RESP_TIME_KEY)
order_resp_time = get_range(ORDER_RESP_TIME_KEY)
req_finish_time = get_range(REQ_FINISH_TIME_KEY)
order_finish_time = get_range(ORDER_FINISH_TIME_KEY)
assert len(order_resp_time) == success + failure
assert len(req_resp_time) == req_success + req_failure
req_avg = safe_div(sum(req_resp_time), float(req_success))
order_avg = safe_div(sum(order_resp_time), success)
req_sec = collections.Counter(int(t) for t in req_finish_time)
order_sec = collections.Counter(int(t) for t in order_finish_time)
# remove the highest and lowest score
stats_req_sec = sorted(req_sec.values())[1:-1]
max_req_sec = int(get_avg(stats_req_sec[-5:]))
min_req_sec = int(get_avg(stats_req_sec[:5]))
mean_req_sec = int(get_avg(stats_req_sec))
# remove the highest and lowest score
stats_order_sec = sorted(order_sec.values())[1:-1]
max_order_sec = int(get_avg(stats_order_sec[-5:]))
min_order_sec = int(get_avg(stats_order_sec[:5]))
mean_order_sec = int(get_avg(stats_order_sec))
p = functools.partial(print, sep='')
p("Score: ", max_order_sec)
p("Correct Rate: ", round(success / total_order * 100, 2), "%")
p("\nStats")
p("Concurrent Level: ", processes, " x ", threads)
p("Time taken for tests: ", round(total_time * 1000, 2), "ms")
p("Complete requests: ", req_success)
p("Failed requests: ", req_failure)
p("Complete orders: ", success)
p("Failed orders: ", failure)
p("Time per request: ", round(req_avg * 1000, 2), "ms", " (mean)")
p("Time per order: ", round(order_avg * 1000, 2), "ms", " (mean)")
p("Request per second: ", max_req_sec, " (max) ", min_req_sec, " (min) ", mean_req_sec, " (mean)") # noqa
p("Order per second: ", max_order_sec, " (max) ", min_order_sec, " (min) ", mean_order_sec, " (mean)") # noqa
p("\nPercentage of orders made within a certain time (ms)")
order_resp_time = sorted(set(order_resp_time)) if order_resp_time else [0]
l = len(order_resp_time)
for e in (0.5, 0.75, 0.8, 0.9, 0.95, 0.98, 1):
idx = int(l * e)
idx = 0 if idx == 0 else idx - 1
p(" {:>4.0%} ".format(e),
int(math.ceil(order_resp_time[idx] * 1000)))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-H", "--host", default="localhost",
help="server host name")
parser.add_argument("-p", "--port", default=8080, type=int,
help="server port")
parser.add_argument("-c", "--processes", default=2, type=int,
help="processes")
parser.add_argument("-t", "--threads", default=4, type=int,
help="threads")
parser.add_argument("-n", "--num", default=10000, type=int,
help="requests")
args = parser.parse_args()
redis_store.delete(
USER_KEY, SUCCESS_KEY, FAILURE_KEY,
ORDER_RESP_TIME_KEY, REQ_RESP_TIME_KEY,
REQUEST_SUCCESS_KEY, REQUEST_FAILURE_KEY,
REQ_FINISH_TIME_KEY, ORDER_FINISH_TIME_KEY)
load_users()
load_foods()
total_time = work(
args.host, args.port, args.processes, args.threads, args.num)
report(args.processes, args.threads, total_time, float(args.num))
if __name__ == "__main__":
main()
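# Example invocation (host/port are illustrative and match the argparse
# defaults above): 2 processes x 4 threads placing 10000 simulated orders:
#   python stress.py -H localhost -p 8080 -c 2 -t 4 -n 10000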
| gpl-2.0 | 2,749,461,268,001,201,000 | 27.014634 | 118 | 0.566255 | false |
taurenk/Crossfit-Project-API | app/models.py | 1 | 1269 | from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
athlete_teams = db.Table('athlete_teams',
db.Column('athlete_id', db.Integer, db.ForeignKey('athletes.id')),
db.Column('team_id', db.Integer, db.ForeignKey('teams.id'))
)
class Athlete(db.Model):
__tablename__ = 'athletes'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
age = db.Column(db.Integer)
height = db.Column(db.String(4))
weight = db.Column(db.Integer)
clean_and_jerk = db.Column(db.String(32))
snatch = db.Column(db.String(32))
deadlift = db.Column(db.String(32))
back_squat = db.Column(db.String(32))
max_pullups = db.Column(db.Integer)
run_5k = db.Column(db.String(32))
def __repr__(self):
return "{'name' : '%s'}" % self.name
class Team(db.Model):
__tablename__ = 'teams'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
captain = db.Column(db.String(64))
athletes = db.relationship('Athlete', secondary=athlete_teams,
backref=db.backref('teams', lazy='dynamic'))
def __repr__(self):
return "<%s, %s>" % (self.name, self.athletes) | mit | 4,826,109,988,930,293,000 | 27.863636 | 70 | 0.5855 | false |
bird-house/esgf-pyclient | pyesgf/search/context.py | 1 | 11482 | """
Module :mod:`pyesgf.search.context`
===================================
Defines the :class:`SearchContext` class which represents each ESGF search
query.
"""
import copy
from ..multidict import MultiDict
from .constraints import GeospatialConstraint
from .consts import (TYPE_DATASET, TYPE_FILE, TYPE_AGGREGATION,
QUERY_KEYWORD_TYPES, DEFAULT_BATCH_SIZE)
from .results import ResultSet
from .exceptions import EsgfSearchException
class SearchContext(object):
"""
Instances of this class represent the state of a current search.
It exposes what facets are available to select and the facet counts
if they are available.
Subclasses of this class can restrict the search options. For instance
FileSearchContext, DatasetSearchContext or CMIP5SearchContext
SearchContext instances are connected to SearchConnection instances. You
normally create SearchContext instances via one of:
1. Calling SearchConnection.new_context()
2. Calling SearchContext.constrain()
:ivar constraints: A dictionary of facet constraints currently in effect.
``constraint[facet_name] = [value, value, ...]``
:property facet_counts: A dictionary of available hits with each
facet value for the search as currently constrained.
This property returns a dictionary of dictionaries where
``facet_counts[facet][facet_value] == hit_count``
:property hit_count: The total number of hits available with current
constraints.
"""
DEFAULT_SEARCH_TYPE = NotImplemented
def __init__(self, connection, constraints, search_type=None,
latest=None, facets=None, fields=None,
from_timestamp=None, to_timestamp=None,
replica=None, shards=None):
"""
:param connection: The SearchConnection
:param constraints: A dictionary of initial constraints
:param search_type: One of TYPE_* constants defining the document
type to search for. Overrides SearchContext.DEFAULT_SEARCH_TYPE
:param facets: The list of facets for which counts will be retrieved
and constraints be validated against. Or None to represent all
facets.
:param fields: A list of field names to return in search responses
:param replica: A boolean defining whether to return master records
or replicas, or None to return both.
:param latest: A boolean defining whether to return only latest
versions or only non-latest versions, or None to return both.
:param shards: list of shards to restrict searches to. Should be from
the list self.connection.get_shard_list()
:param from_timestamp: Date-time string to specify start of search
range (e.g. "2000-01-01T00:00:00Z").
:param to_timestamp: Date-time string to specify end of search range
(e.g. "2100-12-31T23:59:59Z").
"""
self.connection = connection
self.__facet_counts = None
self.__hit_count = None
if search_type is None:
search_type = self.DEFAULT_SEARCH_TYPE
# Constraints
self.freetext_constraint = None
self.facet_constraints = MultiDict()
self.temporal_constraint = [from_timestamp, to_timestamp]
self.geospatial_constraint = None
self._update_constraints(constraints)
# Search configuration parameters
self.timestamp_range = (from_timestamp, to_timestamp)
search_types = [TYPE_DATASET, TYPE_FILE, TYPE_AGGREGATION]
if search_type not in search_types:
raise EsgfSearchException('search_type must be one of %s'
% ','.join(search_types))
self.search_type = search_type
self.latest = latest
self.facets = facets
self.fields = fields
self.replica = replica
self.shards = shards
# -------------------------------------------------------------------------
# Functional search interface
# These do not change the constraints on self.
def search(self, batch_size=DEFAULT_BATCH_SIZE, ignore_facet_check=False,
**constraints):
"""
Perform the search with current constraints returning a set of results.
:batch_size: The number of results to get per HTTP request.
:param constraints: Further constraints for this query. Equivalent
to calling self.constrain(**constraints).search()
:return: A ResultSet for this query
"""
if constraints:
sc = self.constrain(**constraints)
else:
sc = self
sc.__update_counts(ignore_facet_check=ignore_facet_check)
return ResultSet(sc, batch_size=batch_size)
def constrain(self, **constraints):
"""
Return a *new* instance with the additional constraints.
"""
new_sc = copy.deepcopy(self)
new_sc._update_constraints(constraints)
return new_sc
def get_download_script(self, **constraints):
"""
Download a script for downloading all files in the set of results.
:param constraints: Further constraints for this query. Equivalent
to calling self.constrain(**constraints).get_download_script()
:return: A string containing the script
"""
if constraints:
sc = self.constrain(**constraints)
else:
sc = self
sc.__update_counts()
query_dict = sc._build_query()
# !TODO: allow setting limit
script = sc.connection.send_wget(query_dict,
shards=self.shards)
return script
@property
def facet_counts(self):
self.__update_counts()
return self.__facet_counts
@property
def hit_count(self):
self.__update_counts()
return self.__hit_count
def get_facet_options(self):
"""
Return a dictionary of facet counts filtered to remove all
facets that are completely constrained. This method is
similar to the property ``facet_counts`` except facet values
which are not relevant for further constraining are removed.
"""
facet_options = {}
hits = self.hit_count
for facet, counts in self.facet_counts.items():
# filter out counts that match total hits
counts = dict(items for items in counts.items()
if items[1] < hits)
if len(counts) > 1:
facet_options[facet] = counts
return facet_options
def __update_counts(self, ignore_facet_check=False):
# If hit_count is set the counts are already retrieved
if self.__hit_count is not None:
return
self.__facet_counts = {}
self.__hit_count = None
query_dict = self._build_query()
if not ignore_facet_check:
query_dict['facets'] = '*'
if self.facets:
query_dict['facets'] = self.facets
response = self.connection.send_search(query_dict, limit=0)
for facet, counts in (response['facet_counts']['facet_fields']
.items()):
d = self.__facet_counts[facet] = {}
while counts:
d[counts.pop()] = counts.pop()
self.__hit_count = response['response']['numFound']
# -------------------------------------------------------------------------
# Constraint mutation interface
# These functions update the instance in-place.
# Use constrain() and search() to generate new contexts with tighter
# constraints.
def _update_constraints(self, constraints):
"""
Update the constraints in-place by calling _constrain_*() methods.
"""
constraints_split = self._split_constraints(constraints)
self._constrain_facets(constraints_split['facet'])
if 'query' in constraints_split['freetext']:
new_freetext = constraints_split['freetext']['query']
self._constrain_freetext(new_freetext)
# !TODO: implement temporal and geospatial constraints
if 'from_timestamp' in constraints_split['temporal']:
self.temporal_constraint[0] = (constraints_split['temporal']
['from_timestamp'])
if 'to_timestamp' in constraints_split['temporal']:
self.temporal_constraint[1] = (constraints_split['temporal']
['to_timestamp'])
# self._constrain_geospatial()
# reset cached values
self.__hit_count = None
self.__facet_counts = None
def _constrain_facets(self, facet_constraints):
for key, values in facet_constraints.mixed().items():
current_values = self.facet_constraints.getall(key)
if isinstance(values, list):
for value in values:
if value not in current_values:
self.facet_constraints.add(key, value)
else:
if values not in current_values:
self.facet_constraints.add(key, values)
def _constrain_freetext(self, query):
self.freetext_constraint = query
def _constrain_geospatial(self, lat=None, lon=None, bbox=None,
location=None, radius=None, polygon=None):
self.geospatial_constraint = GeospatialConstraint(
lat, lon, bbox, location,
radius, polygon)
raise NotImplementedError
# -------------------------------------------------------------------------
def _split_constraints(self, constraints):
"""
Divide a constraint dictionary into 4 types of constraints:
1. Freetext query
2. Facet constraints
3. Temporal constraints
4. Geospatial constraints
:return: A dictionary of the 4 types of constraint.
"""
# local import to prevent circular importing
from .connection import query_keyword_type
constraints_split = dict((kw, MultiDict()) for kw
in QUERY_KEYWORD_TYPES)
for kw, val in constraints.items():
constraint_type = query_keyword_type(kw)
constraints_split[constraint_type][kw] = val
return constraints_split
def _build_query(self):
"""
Build query string parameters as a dictionary.
"""
query_dict = MultiDict({"query": self.freetext_constraint,
"type": self.search_type,
"latest": self.latest,
"facets": self.facets,
"fields": self.fields,
"replica": self.replica})
query_dict.extend(self.facet_constraints)
# !TODO: encode datetime
start, end = self.temporal_constraint
query_dict.update(start=start, end=end)
return query_dict
class DatasetSearchContext(SearchContext):
DEFAULT_SEARCH_TYPE = TYPE_DATASET
class FileSearchContext(SearchContext):
DEFAULT_SEARCH_TYPE = TYPE_FILE
class AggregationSearchContext(SearchContext):
DEFAULT_SEARCH_TYPE = TYPE_AGGREGATION
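# Minimal usage sketch (not part of the original module); SearchConnection
# lives in the sibling connection module, and the index URL and the
# "project"/"experiment" facet names are merely illustrative:
#
# from pyesgf.search.connection import SearchConnection
# conn = SearchConnection('http://esgf-index1.ceda.ac.uk/esg-search')
# ctx = conn.new_context(project='CMIP5')
# print(ctx.hit_count, sorted(ctx.get_facet_options()))
# results = ctx.constrain(experiment='historical').search()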
| bsd-3-clause | 6,040,383,211,571,339,000 | 34.438272 | 79 | 0.592144 | false |
wutienyang/ES_pttmovie | auto/pttmovie/crawler.py | 1 | 8864 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import os
import re
import sys
import json
import requests
import argparse
import time
import codecs
from bs4 import BeautifulSoup
from six import u
__version__ = '1.0'
# if python 2, disable verify flag in requests.get()
VERIFY = True
if sys.version_info[0] < 3:
VERIFY = False
requests.packages.urllib3.disable_warnings()
class PttWebCrawler(object):
PTT_URL = 'https://www.ptt.cc'
"""docstring for PttWebCrawler"""
def __init__(self, cmdline=None, as_lib=False):
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description='''
A crawler for the web version of PTT, the largest online community in Taiwan.
Input: board name and page indices (or articla ID)
Output: BOARD_NAME-START_INDEX-END_INDEX.json (or BOARD_NAME-ID.json)
''')
parser.add_argument('-b', metavar='BOARD_NAME', help='Board name', required=True)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-i', metavar=('START_INDEX', 'END_INDEX'), type=int, nargs=2, help="Start and end index")
group.add_argument('-a', metavar='ARTICLE_ID', help="Article ID")
parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
if not as_lib:
if cmdline:
args = parser.parse_args(cmdline)
else:
args = parser.parse_args()
board = args.b
if args.i:
start = args.i[0]
if args.i[1] == -1:
end = self.getLastPage(board)
else:
end = args.i[1]
self.parse_articles(start, end, board)
else: # args.a
article_id = args.a
self.parse_article(article_id, board)
def parse_articles(self, start, end, board, path='.', timeout=3):
filename = board + '-' + str(start) + '-' + str(end) + '.json'
filename = os.path.join(path, filename)
self.store(filename, u'{"articles": [', 'w')
for i in range(end-start+1):
index = start + i
print('Processing index:', str(index))
resp = requests.get(
url = self.PTT_URL + '/bbs/' + board + '/index' + str(index) + '.html',
cookies={'over18': '1'}, verify=VERIFY, timeout=timeout
)
if resp.status_code != 200:
print('invalid url:', resp.url)
continue
soup = BeautifulSoup(resp.text, 'html.parser')
divs = soup.find_all("div", "r-ent")
for div in divs:
try:
# ex. link would be <a href="/bbs/PublicServan/M.1127742013.A.240.html">Re: [問題] 職等</a>
href = div.find('a')['href']
link = self.PTT_URL + href
article_id = re.sub('\.html', '', href.split('/')[-1])
if div == divs[-1] and i == end-start: # last div of last page
self.store(filename, self.parse(link, article_id, board), 'a')
else:
self.store(filename, self.parse(link, article_id, board) + ',\n', 'a')
except:
pass
time.sleep(0.1)
self.store(filename, u']}', 'a')
return filename
def parse_article(self, article_id, board, path='.'):
link = self.PTT_URL + '/bbs/' + board + '/' + article_id + '.html'
filename = board + '-' + article_id + '.json'
filename = os.path.join(path, filename)
self.store(filename, self.parse(link, article_id, board), 'w')
return filename
@staticmethod
def parse(link, article_id, board, timeout=3):
print('Processing article:', article_id)
resp = requests.get(url=link, cookies={'over18': '1'}, verify=VERIFY, timeout=timeout)
if resp.status_code != 200:
print('invalid url:', resp.url)
return json.dumps({"error": "invalid url"}, sort_keys=True, ensure_ascii=False)
soup = BeautifulSoup(resp.text, 'html.parser')
main_content = soup.find(id="main-content")
metas = main_content.select('div.article-metaline')
author = ''
title = ''
date = ''
if metas:
author = metas[0].select('span.article-meta-value')[0].string if metas[0].select('span.article-meta-value')[0] else author
title = metas[1].select('span.article-meta-value')[0].string if metas[1].select('span.article-meta-value')[0] else title
date = metas[2].select('span.article-meta-value')[0].string if metas[2].select('span.article-meta-value')[0] else date
# remove meta nodes
for meta in metas:
meta.extract()
for meta in main_content.select('div.article-metaline-right'):
meta.extract()
# remove and keep push nodes
pushes = main_content.find_all('div', class_='push')
for push in pushes:
push.extract()
try:
ip = main_content.find(text=re.compile(u'※ 發信站:'))
ip = re.search('[0-9]*\.[0-9]*\.[0-9]*\.[0-9]*', ip).group()
except:
ip = "None"
# remove '※ 發信站:' (starts with u'\u203b'), '◆ From:' (starts with u'\u25c6'), blank lines and extra whitespace
# keep alphanumerics, Chinese characters and punctuation, URLs and a few special symbols
filtered = [ v for v in main_content.stripped_strings if v[0] not in [u'※', u'◆'] and v[:2] not in [u'--'] ]
expr = re.compile(u(r'[^\u4e00-\u9fa5\u3002\uff1b\uff0c\uff1a\u201c\u201d\uff08\uff09\u3001\uff1f\u300a\u300b\s\w:/-_.?~%()]'))
for i in range(len(filtered)):
filtered[i] = re.sub(expr, '', filtered[i])
filtered = [_f for _f in filtered if _f] # remove empty strings
filtered = [x for x in filtered if article_id not in x] # remove last line containing the url of the article
content = ' '.join(filtered)
content = re.sub(r'(\s)+', ' ', content)
# print 'content', content
# push messages
p, b, n = 0, 0, 0
messages = []
for push in pushes:
if not push.find('span', 'push-tag'):
continue
push_tag = push.find('span', 'push-tag').string.strip(' \t\n\r')
push_userid = push.find('span', 'push-userid').string.strip(' \t\n\r')
# if find is None: find().strings -> list -> ' '.join; else the current way
push_content = push.find('span', 'push-content').strings
push_content = ' '.join(push_content)[1:].strip(' \t\n\r') # remove ':'
push_ipdatetime = push.find('span', 'push-ipdatetime').string.strip(' \t\n\r')
messages.append( {'push_tag': push_tag, 'push_userid': push_userid, 'push_content': push_content, 'push_ipdatetime': push_ipdatetime} )
if push_tag == u'推':
p += 1
elif push_tag == u'噓':
b += 1
else:
n += 1
# count: net score after pushes and boos cancel out; all: total number of comments
message_count = {'all': p+b+n, 'count': p-b, 'push': p, 'boo': b, "neutral": n}
# print 'msgs', messages
# print 'mscounts', message_count
# json data
data = {
'url': link,
'board': board,
'article_id': article_id,
'article_title': title,
'author': author,
'date': date,
'content': content,
'ip': ip,
'message_conut': message_count,
'messages': messages
}
# print 'original:', d
return json.dumps(data, sort_keys=True, ensure_ascii=False)
@staticmethod
def getLastPage(board, timeout=3):
content = requests.get(
url= 'https://www.ptt.cc/bbs/' + board + '/index.html',
cookies={'over18': '1'}, timeout=timeout
).content.decode('utf-8')
first_page = re.search(r'href="/bbs/' + board + '/index(\d+).html">‹', content)
if first_page is None:
return 1
return int(first_page.group(1)) + 1
@staticmethod
def store(filename, data, mode):
with codecs.open(filename, mode, encoding='utf-8') as f:
f.write(data)
@staticmethod
def get(filename, mode='r'):
with codecs.open(filename, mode, encoding='utf-8') as f:
return json.load(f)
if __name__ == '__main__':
c = PttWebCrawler()
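# Command-line examples (board name and index values are illustrative):
#   python crawler.py -b Gossiping -i 100 200 # crawl index pages 100..200
#   python crawler.py -b Gossiping -i 100 -1 # ...up to the last page
#   python crawler.py -b Gossiping -a M.1127742013.A.240 # one article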
| mit | -7,976,881,766,051,306,000 | 40.647619 | 147 | 0.529614 | false |
rjkeller/gentoo-installer | BuildKernel.py | 1 | 1521 | #!/usr/bin/env python
import os
import sys
# Provides operations to compile and upgrade the kernel on a Gentoo system.
#
# The selected kernel package and init settings are recorded in
# /etc/superGentoo/kernel so that a later "upgrade" run can reuse them.
class BuildKernel:
def compileNewKernel(self, kernelType, initSettings):
f = open('/etc/superGentoo/kernel', 'w')
f.write(kernelType + "," + initSettings)
f.close()
os.system("emerge " + kernelType)
os.system("mv /usr/src/.config /usr/src/linux/.config")
os.system("touch /usr/src/linux/.config")
os.system("cd /usr/src/linux")
os.system("make")
os.system("make modules_install")
os.system("cp arch/x86_64/boot/bzImage /boot/kernel-`find /usr/src -name linux-3* | awk -Flinux- '{print \$NF }'`")
def upgradeKernel(self):
kernelData = open('/etc/superGentoo/kernel').read(1000).split(",")
os.system("emerge --update ". kernelData[0])
os.system()
# --------------------------------------------------------------------------
# MAIN FUNCTION
# --------------------------------------------------------------------------
if __name__ == '__main__':
bk = BuildKernel()
if sys.argv[1] == "upgrade":
bk.upgradeKernel()
elif sys.argv[1] == "newKernel":
bk.compileNewKernel(sys.argv[2], sys.argv[3])
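# Example invocations (package atom and settings string are illustrative):
#   python BuildKernel.py newKernel sys-kernel/gentoo-sources defaultInit
#   python BuildKernel.py upgrade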
| apache-2.0 | 1,754,916,898,967,739,600 | 33.568182 | 123 | 0.560158 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/available_providers_list.py | 1 | 1183 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AvailableProvidersList(Model):
"""List of available countries with details.
All required parameters must be populated in order to send to Azure.
:param countries: Required. List of available countries.
:type countries:
list[~azure.mgmt.network.v2017_09_01.models.AvailableProvidersListCountry]
"""
_validation = {
'countries': {'required': True},
}
_attribute_map = {
'countries': {'key': 'countries', 'type': '[AvailableProvidersListCountry]'},
}
def __init__(self, **kwargs):
super(AvailableProvidersList, self).__init__(**kwargs)
self.countries = kwargs.get('countries', None)
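# Construction sketch (not part of the generated file); the list would hold
# AvailableProvidersListCountry instances from this models package:
#
# providers = AvailableProvidersList(countries=[country_a, country_b])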
| mit | 3,548,818,472,367,509,000 | 32.8 | 85 | 0.605241 | false |
hjoliver/cylc | cylc/flow/command_polling.py | 1 | 3469 | # THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Encapsulates polling activity for CLI commands."""
import sys
from time import sleep
class Poller:
"""Encapsulates polling activity for cylc commands. Derived classes
must override the check() method to test the polling condition."""
@classmethod
def add_to_cmd_options(cls, parser, d_interval=60, d_max_polls=10):
"""Add command line options for commands that can do polling"""
parser.add_option(
"--max-polls",
help="Maximum number of polls (default " + str(d_max_polls) + ").",
metavar="INT",
action="store",
dest="max_polls",
default=d_max_polls)
parser.add_option(
"--interval",
help=(
"Polling interval in seconds (default " + str(d_interval) +
")."
),
metavar="SECS",
action="store",
dest="interval",
default=d_interval)
def __init__(self, condition, interval, max_polls, args):
self.condition = condition # e.g. "workflow stopped"
# check max_polls is an int
try:
self.max_polls = int(max_polls)
except ValueError:
sys.exit("max_polls must be an int")
# check interval is an int
try:
self.interval = int(interval)
except ValueError:
sys.exit("interval must be an integer")
self.n_polls = 0
self.args = args # any extra parameters needed by check()
def check(self):
"""Abstract method. Test polling condition."""
raise NotImplementedError()
def poll(self):
"""Poll for the condition embodied by self.check().
Return True if condition met, or False if polling exhausted."""
if self.max_polls == 0:
# exit 1 as we can't know if the condition is satisfied
sys.exit("WARNING: nothing to do (--max-polls=0)")
elif self.max_polls == 1:
sys.stdout.write("checking for '%s'" % self.condition)
else:
sys.stdout.write("polling for '%s'" % self.condition)
while self.n_polls < self.max_polls:
self.n_polls += 1
if self.check():
sys.stdout.write(": satisfied\n")
return True
if self.max_polls > 1:
sys.stdout.write(".")
sleep(self.interval)
sys.stdout.write("\n")
if self.max_polls > 1:
sys.stderr.write(
"ERROR: condition not satisfied after %d polls\n" %
self.max_polls)
else:
sys.stderr.write("ERROR: condition not satisfied\n")
return False
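# Minimal subclass sketch (not from the original module) showing the
# check() override the class docstring requires; the poller name and the
# workflow_is_stopped() helper are hypothetical:
#
# class StopPoller(Poller):
#     def check(self):
#         return workflow_is_stopped(*self.args)
#
# poller = StopPoller("workflow stopped", interval=5, max_polls=12,
#                     args=("my_workflow",))
# if not poller.poll():
#     sys.exit(1)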
| gpl-3.0 | -7,614,361,080,997,373,000 | 34.762887 | 79 | 0.59066 | false |
brglng/zobject | .ycm_extra_conf.py | 1 | 6274 | # This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-W',
'-Wall',
'-Wextra',
'-Wno-unused-parameter',
'-Wno-multichar',
#'-fexceptions',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=gnu11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c',
'-isystem',
'../BoostParts',
'-isystem',
# This path will only work on OS X, but extra paths that don't exist are not
# harmful
'/System/Library/Frameworks/Python.framework/Headers',
'-isystem',
'../llvm/include',
'-isystem',
'../llvm/tools/clang/include',
'-isystem',
'/usr/include',
'-isystem',
'/usr/local/include',
'-I',
'./include',
'-I',
'./src',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
try:
final_flags.remove( '-stdlib=libc++' )
except ValueError:
pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
| lgpl-3.0 | 2,080,756,639,750,032,000 | 34.050279 | 82 | 0.687121 | false |
imanhodjaev/django-userapp | django_userapp/backends.py | 1 | 2120 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.conf import settings
from .request import login
import re
UserModel = get_user_model()
class UserappBackend(object):
def authenticate(self, username=None, password=None, request=None, **kwargs):
result = login(request, username=username, password=password)
try:
if result is None:
raise UserModel.DoesNotExist("Userapp account not found")
user = result[0]
default_email = getattr(settings, "USERAPP_DEFAULT_EMAIL", "[email protected]")
if self.passes_checks(user):
email = getattr(user, "email", default_email)
our_username = re.sub(r"[@\.\-]", "_", username)
our_user, created = UserModel.objects.get_or_create(email__exact=email)
if created: # If user is new user then set username
our_user.username = our_username[0:29]
our_user.email = email
our_user.save()
if not our_user.password: # Means that user was created by our backend
return our_user
return None
else:
return None
except UserModel.DoesNotExist:
return None
def get_user(self, user_id):
try:
return UserModel._default_manager.get(pk=user_id)
except UserModel.DoesNotExist:
return None
def passes_checks(self, user):
""" Basically checks features and if finds any match returns True """
user_features = {}
features = getattr(settings, "USERAPP_FEATURES", [])
use_features = getattr(settings, "USERAPP_USE_FEATURES", False)
if "features" in user:
user_features = user["features"]
if use_features is False:
return True
for feature in features:
if feature in user_features and user_features[feature]["value"]:
return True
return False
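# Hypothetical settings sketch showing the flags passes_checks() consults;
# the "premium" feature name is illustrative:
#
# USERAPP_USE_FEATURES = True
# USERAPP_FEATURES = ["premium"]
# # a userapp account then passes only if
# # user["features"]["premium"]["value"] is truthy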
| lgpl-3.0 | -3,423,429,394,243,385,000 | 30.641791 | 92 | 0.579245 | false |
jptomo/rpython-lang-scheme | rpython/rlib/parsing/parsing.py | 1 | 12986 | import py
from rpython.rlib.parsing.lexer import SourcePos
from rpython.rlib.parsing.tree import Node, Symbol, Nonterminal
class Rule(object):
def __init__(self, nonterminal, expansions):
self.nonterminal = nonterminal
self.expansions = expansions
def getkey(self):
return (self.nonterminal, tuple(self.expansions))
# def __hash__(self):
# return hash(self.getkey())
def __eq__(self, other):
return self.getkey() == other.getkey()
def __ne__(self, other):
return not self == other
def __str__(self):
return "%s: %s" % (
self.nonterminal, " | ".join([repr(e) for e in self.expansions]))
def __repr__(self):
return "Rule(%r, %r)" % (self.nonterminal, self.expansions)
class LazyInputStream(object):
def __init__(self, iterator):
self.iterator = iter(iterator)
self.data = []
def __getitem__(self, index):
assert index >= 0
while len(self.data) <= index:
try:
self.data.append(self.iterator.next())
except StopIteration:
raise IndexError("index out of range")
return self.data[index]
class ParseError(Exception):
def __init__(self, source_pos, errorinformation):
self.source_pos = source_pos
self.errorinformation = errorinformation
self.args = (source_pos, errorinformation)
def nice_error_message(self, filename="<unknown>", source=""):
# + 1 is because source_pos is 0-based and humans 1-based
result = [" File %s, line %s" % (filename, self.source_pos.lineno + 1)]
if source:
result.append(source.split("\n")[self.source_pos.lineno])
result.append(" " * self.source_pos.columnno + "^")
else:
result.append("<couldn't get source>")
if self.errorinformation:
failure_reasons = self.errorinformation.failure_reasons
if len(failure_reasons) > 1:
all_but_one = failure_reasons[:-1]
last = failure_reasons[-1]
expected = "%s or '%s'" % (
", ".join(["'%s'" % e for e in all_but_one]), last)
else:
expected = failure_reasons[0]
result.append("ParseError: expected %s" % (expected, ))
else:
result.append("ParseError")
return "\n".join(result)
class ErrorInformation(object):
def __init__(self, pos, failure_reasons=None):
if failure_reasons is None:
failure_reasons = []
self.failure_reasons = failure_reasons
self.pos = pos
def combine_errors(self, other):
if self is None:
return other
if (other is None or self.pos > other.pos or
len(other.failure_reasons) == 0):
return self
elif other.pos > self.pos or len(self.failure_reasons) == 0:
return other
failure_reasons = []
already_there = {}
for fr in [self.failure_reasons, other.failure_reasons]:
for reason in fr:
if reason not in already_there:
already_there[reason] = True
failure_reasons.append(reason)
return ErrorInformation(self.pos, failure_reasons)
class LazyParseTable(object):
def __init__(self, input, parser):
self.parser = parser
self.input = input
self.matched = {}
self.errorinformation = {}
def match_symbol(self, i, symbol):
#print i, symbol
#print self.matched.keys()
if (i, symbol) in self.matched:
return self.matched[i, symbol]
error = None # for the annotator
if self.parser.is_nonterminal(symbol):
rule = self.parser.get_rule(symbol)
subsymbol = None
error = None
for expansion in rule.expansions:
curr = i
children = []
for subsymbol in expansion:
node, next, error2 = self.match_symbol(curr, subsymbol)
if node is None:
error = combine_errors(error, error2)
break
children.append(node)
curr = next
else:
assert len(expansion) == len(children)
result = (Nonterminal(symbol, children), curr, error)
self.matched[i, symbol] = result
return result
self.matched[i, symbol] = None, 0, error
return None, 0, error
else:
try:
input = self.input[i]
if self.terminal_equality(symbol, input):
result = (Symbol(symbol, input.source, input), i + 1, error)
self.matched[i, symbol] = result
return result
else:
# XXX ugly hack: handles the sort of token names that
# ebnfparse produces
if (symbol.startswith("__") and
symbol.split("_")[2][0] in "0123456789"):
expected = symbol.split("_")[-1]
else:
expected = symbol
error = ErrorInformation(i, [expected])
except IndexError:
error = ErrorInformation(i)
return None, 0, error
def terminal_equality(self, symbol, input):
return symbol == input.name
class PackratParser(object):
def __init__(self, rules, startsymbol, parsetablefactory=LazyParseTable,
check_for_left_recursion=True):
self.rules = rules
self.nonterminal_to_rule = {}
for rule in rules:
self.nonterminal_to_rule[rule.nonterminal] = rule
self.startsymbol = startsymbol
if check_for_left_recursion:
assert not self.has_left_recursion()
self.parsetablefactory = parsetablefactory
def is_nonterminal(self, symbol):
return symbol in self.nonterminal_to_rule
def get_rule(self, symbol):
return self.nonterminal_to_rule[symbol]
def parse(self, tokeniterator, lazy=False):
if lazy:
input = LazyInputStream(tokeniterator)
else:
input = list(tokeniterator)
table = self.parsetablefactory(input, self)
result = table.match_symbol(0, self.startsymbol)
if result[0] is None:
error = result[2]
raise ParseError(input[error.pos].source_pos, error)
return result[0]
def has_left_recursion(self):
"""NOT_RPYTHON"""
follows = {}
for rule in self.rules:
follow = py.builtin.set()
follows[rule.nonterminal] = follow
for expansion in rule.expansions:
if expansion and self.is_nonterminal(expansion[0]):
follow.add(expansion[0])
changed = True
while changed:
changed = False
for nonterminal, follow in follows.iteritems():
for nt in follow:
subfollow = follows[nt]
update = subfollow - follow
if update:
changed = True
follow.update(update)
break
for nonterminal, follow in follows.iteritems():
if nonterminal in follow:
print "nonterminal %s is in its own follow %s" % (nonterminal, follow)
return True
return False
def __repr__(self):
from pprint import pformat
return "%s%s" % (self.__class__.__name__,
pformat((self.rules, self.startsymbol)), )
class ParserCompiler(object):
def __init__(self, parser):
self.parser = parser
self.allcode = []
self.symbol_to_number = {}
self.made = {}
def compile(self):
from rpython.tool.sourcetools import func_with_new_name
self.allcode.append("class CompileableParser(baseclass):")
self.make_matcher(self.parser.startsymbol)
self.make_fixed()
miniglobals = globals().copy()
miniglobals["baseclass"] = self.parser.__class__
#print "\n".join(self.allcode)
exec py.code.Source("\n".join(self.allcode)).compile() in miniglobals
kls = miniglobals["CompileableParser"]
# XXX
parsetable = self.parser.parsetablefactory([], self.parser)
kls.terminal_equality = func_with_new_name(
parsetable.terminal_equality.im_func,
"terminal_equality_compileable")
return kls
def get_number(self, symbol):
if symbol in self.symbol_to_number:
return self.symbol_to_number[symbol]
result = len(self.symbol_to_number)
self.symbol_to_number[symbol] = result
return result
def make_matcher(self, symbol):
if symbol not in self.made:
self.made[symbol] = True
if self.parser.is_nonterminal(symbol):
self.make_nonterminal_matcher(symbol)
else:
self.make_terminal_matcher(symbol)
def make_terminal_matcher(self, symbol):
number = self.get_number(symbol)
self.allcode.append("""
def match_terminal%(number)s(self, i):
# matcher for terminal %(number)s %(symbol)r
if i in self.matched_terminals%(number)s:
return self.matched_terminals%(number)s[i]
try:
input = self.input[i]
if self.terminal_equality(%(symbol)r, input):
symbol = Symbol(%(symbol)r, input.name, input)
result = (symbol, i + 1)
self.matched_terminals%(number)s[i] = result
return result
except IndexError:
pass
return None, i""" % vars())
def make_nonterminal_matcher(self, symbol):
number = self.get_number(symbol)
rule = self.parser.nonterminal_to_rule[symbol]
code = []
code.append("""
def match_nonterminal%(number)s(self, i):
# matcher for nonterminal %(number)s %(symbol)s
if i in self.matched_nonterminals%(number)s:
return self.matched_nonterminals%(number)s[i]
last_failed_position = 0
subsymbol = None
expansionindex = 0
while 1:""" % vars())
for expansionindex, expansion in enumerate(rule.expansions):
nextindex = expansionindex + 1
code.append("""\
if expansionindex == %s:""" % (expansionindex, ))
if not expansion:
code.append("""\
result = (Nonterminal(symbol, []), i)
self.matched_nonterminals%(number)s[i] = result
return result""" % vars())
continue
code.append("""\
curr = i
children = []""")
for subsymbol in expansion:
self.make_matcher(subsymbol)
if self.parser.is_nonterminal(subsymbol):
match = "match_nonterminal%s" % self.get_number(subsymbol)
else:
match = "match_terminal%s" % self.get_number(subsymbol)
code.append("""\
node, next = self.%(match)s(curr)
if node is None:
last_failed_position = next
expansionindex = %(nextindex)s
continue
curr = next""" % vars())
code.append("""\
result = (Nonterminal(%(symbol)r, children), curr)
self.matched_nonterminals%(number)s[i] = result
return result""" % vars())
code.append("""\
if expansionindex == %(nextindex)s:
result = None, last_failed_position
self.matched_nonterminals%(number)s[i] = result
return result""" % vars())
self.allcode.extend(code)
def make_fixed(self):
# __init__
code = ["""
def __init__(self):
self.rules = [] # dummy
self.nonterminal_to_rule = {} # dummy
self.startsymbol = "" # dummy
self.parsetablefactory = None # dummy"""]
for symbol, number in self.symbol_to_number.iteritems():
if self.parser.is_nonterminal(symbol):
name = "matched_nonterminals%s" % number
else:
name = "matched_terminals%s" % number
code.append("""\
self.%(name)s = {}""" % vars())
# parse
startsymbol = self.get_number(self.parser.startsymbol)
code.append("""
def parse(self, tokenlist, lazy=True):
self.input = tokenlist
result = self.match_nonterminal%(startsymbol)s(0)
if result[0] is None:
raise ParseError(None, self.input[result[1]])
return result[0]""" % (vars()))
self.allcode.extend(code)
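# Usage sketch (not part of the original module): a toy grammar fed to
# PackratParser. Token is a stand-in for the lexer tokens from
# rpython.rlib.parsing.lexer, which carry .name, .source and .source_pos:
#
# class Token(object):
#     def __init__(self, name, source):
#         self.name = name
#         self.source = source
#         self.source_pos = SourcePos(0, 0, 0)
#
# rules = [Rule("list", [["item", "list"], ["item"]])]
# parser = PackratParser(rules, "list")
# tree = parser.parse([Token("item", "a"), Token("item", "b")])
# # -> Nonterminal tree rooted at "list"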
| mit | -8,121,115,071,978,187,000 | 36.423631 | 86 | 0.54243 | false |
CosmicLaserShow/CosmicLaserShow | pysparc/pysparc/ftdi_chip.py | 1 | 7036 | """Access FTDI hardware.
Contents
--------
:class:`Error`
Base error class.
:class:`DeviceNotFoundError`
Raised when device is not connected.
:class:`DeviceError`
Raised for generic pylibftdi exceptions.
:class:`ReadError`
Raised on read errors.
:class:`WriteError`
Raised on write errors.
:class:`FtdiChip`
Access FTDI hardware.
"""
import logging
import time
import pylibftdi
logger = logging.getLogger(__name__)
# FTDI documentation: must be multiple of block size, which is 64 bytes
# with 2 bytes overhead. So, must be multiple of 62 bytes.
READ_SIZE = 62
# Default buffer size is 4K (64 * 64 bytes), but mind the overhead
BUFFER_SIZE = 64 * 62
# Sleep between read/write error retries in seconds
RW_ERROR_WAIT = .5
# parity for rs232 line settings in libftdi::ftdi_set_line_property
PARITY_NONE = 0
PARITY_ODD = 1
PARITY_EVEN = 2
PARITY_MARK = 3
PARITY_SPACE = 4
# bitsize for rs232 line settings in libftdi::ftdi_set_line_property
BITS_8 = 8
BITS_7 = 7
# stopbits for rs232 line settings in libftdi::ftdi_set_line_property
STOP_BIT_1 = 0
STOP_BIT_15 = 1
STOP_BIT_2 = 2
class Error(Exception):
"""Base error class."""
def __init__(self, msg):
self.ftdi_msg = msg
class DeviceNotFoundError(Error):
"""Raised when device is not connected."""
def __str__(self):
return "Device not found."
class DeviceError(Error):
"""Raised for generic pylibftdi exceptions."""
def __str__(self):
return "Device error: %s" % self.ftdi_msg
class ClosedDeviceError(Error):
"""Raised when trying a read/write operation if device is closed."""
def __str__(self):
return "Device is closed, %s" % self.ftdi_msg
class ReadError(Error):
"""Raised on read errors."""
def __str__(self):
return "Device read error: %s" % self.ftdi_msg
class WriteError(Error):
"""Raised on write errors."""
def __str__(self):
return "Device write error: %s" % self.ftdi_msg
class FtdiChip(object):
"""Access FTDI hardware.
Instantiate this class to get access to connected FTDI hardware.
The hardware device is opened during instantiation.
You can use the :meth:`find_all` static method to list all connected
devices before openening them::
>>> FtdiChip.find_all()
"""
_device = None
closed = True
def __init__(self, device_description=None, interface_select=0, device_index=0):
self._device_description = device_description
self._interface_select = interface_select
self._device_index = device_index
self.open()
def open(self):
"""Open device.
Raises :class:`DeviceNotFoundError` if the device cannot be found.
Raises :class:`DeviceError` if the device cannot be opened.
"""
if self._device is None:
try:
logger.info("Initialising Ftdi device {} {}".format(self._device_description, self._device_index))
self._device = pylibftdi.Device(self._device_description,
interface_select=self._interface_select, device_index=self._device_index)
except pylibftdi.FtdiError as exc:
if "(-3)" in str(exc):
raise DeviceNotFoundError(str(exc))
else:
raise DeviceError(str(exc))
else:
# force default latency timer of 16 ms
# on some systems, this reverts to 0 ms if not set explicitly
self._device.ftdi_fn.ftdi_set_latency_timer(16)
self.closed = False
self.flush()
else:
return
def __del__(self):
self.close()
def set_line_settings(self, bits, parity, stop_bit):
"""Set line settings (bits, parity, stop bit).
:param bits: one of BITS_8 or BITS_7
:param parity: one of PARITY_NONE, PARITY_ODD, PARITY_EVEN,
PARITY_MARK, PARITY_SPACE
:param stop_bit: one of STOP_BIT_1, STOP_BIT_15, STOP_BIT_2
"""
self._device.ftdi_fn.ftdi_set_line_property(bits, stop_bit, parity)
def close(self):
"""Close device."""
if not self.closed:
self._device.close()
self._device = None
self.closed = True
@staticmethod
def find_all():
"""Find all connected FTDI devices.
:returns: list of (manufacturer, description, serial#) tuples.
"""
return pylibftdi.Driver().list_devices()
def flush(self):
"""Flush device buffers.
To completely clear out outdated measurements, e.g. when changing
parameters, call this method. All data received after this method
is called is really newly measured.
"""
print("Starting device flush")
self._device.flush()
self.read(BUFFER_SIZE)
print("Device flush finished")
def read(self, read_size=None):
"""Read from device and retry if necessary.
A read is tried three times. When unsuccessful, raises
:class:`ReadError`. Raises :class:`ClosedDeviceError` when
attempting to read from a closed device.
:param read_size: number of bytes to read (defaults to READ_SIZE).
As per the FTDI documentation, this should be a multiple of 62
for best performance.
:returns: string containing the data.
"""
if self.closed:
logger.warning("Attempting to read from closed device.")
raise ClosedDeviceError("attempting to read.")
if not read_size:
read_size = READ_SIZE
last_exc = None
for i in range(3):
try:
data = self._device.read(read_size)
except pylibftdi.FtdiError as exc:
# keep a reference: the except target is unbound once the handler
# exits (Python 3), so str(exc) after the loop would raise
last_exc = exc
logger.warning("Read failed, retrying...")
time.sleep(RW_ERROR_WAIT)
continue
else:
return data
logger.error("Read failed.")
self.close()
raise ReadError(str(last_exc))
def write(self, data):
"""Write to device and retry if necessary.
A write is tried three times. When unsuccessful, raises
:class:`WriteError`. Raises :class:`ClosedDeviceError` when
attempting to write to a closed device.
:param data: string containing the data to write.
"""
if self.closed:
logger.warning("Attempting to read from closed device.")
raise ClosedDeviceError("attempting to write.")
last_exc = None
for i in range(3):
try:
self._device.write(data)
except pylibftdi.FtdiError as exc:
last_exc = exc  # see the note in read()
logger.warning("Write failed, retrying...")
time.sleep(RW_ERROR_WAIT)
continue
else:
return
logger.error("Write failed.")
self.close()
raise WriteError(str(last_exc))
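# Typical round-trip sketch (the device description string is illustrative
# and assumes hardware that answers the command written):
#
# device = FtdiChip("FT232R USB UART")
# device.write("some-command")
# answer = device.read(READ_SIZE)
# device.close()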
| mit | -7,149,492,045,259,250,000 | 25.651515 | 114 | 0.599915 | false |
lucidfrontier45/PyVB | pyvb/old_ver/vbgmm1d.py | 1 | 7659 | #!/usr/bin/python
import numpy as np
from numpy.random import randn,dirichlet
from scipy.linalg import det, inv
from scipy.cluster import vq
from scipy.special import psi,gammaln
from core import normalize
try:
from _vbgmm1d import _evaluateHiddenState_C, _lnPD_C
ext_imported = True
except:
ext_imported = False
print "warning, Cython extension module was not found"
print "computation can be slower"
def testData1(n=100):
X = np.r_[randn(n*2)]
return X
def testData2(n=100):
X = np.r_[randn(n*2) / 0.3 , randn(n) + 10.0]
return X
def GaussianPDF(x,mu,s):
return np.exp(-((x - mu)**2)*s*0.5)*np.sqrt(s/(2.0*np.pi))
def lnZ_Wishart(nu,V):
# log normalization constant of 1D Wishart
lnZ = 0.5 * nu * np.log(2.0*V) + gammaln(nu * 0.5)
return lnZ
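# For reference: this computes ln Z = (nu/2) * ln(2*V) + ln Gamma(nu/2),
# the log partition function of the 1D Wishart (a Gamma distribution),
# used below for the precision parameters.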
class VBGMM1D:
def __init__(self,nmix=10,m=0.0,beta=2,nu=1,s=0.1):
self._nstates = nmix
self._m0 = m
self._beta0 = beta
self._nu0 = nu
self._s0 = s
self.pi = np.ones(nmix) / float(nmix)
def _init_params(self,obs,use_emgmm=False):
self._set_posterior(obs,use_emgmm)
def _set_posterior(self,obs,use_emgmm=False):
nobs = len(obs)
nmix = self._nstates
# hidden states
self.z = dirichlet(np.tile(1.0/nmix,nmix),nobs)
# mixing coefficients
#self.u = np.tile(self._u0,nmix)
# posterior mean vector
self.m, temp = vq.kmeans2(obs,nmix)
self.beta = np.tile(self._beta0,nmix)
# posterior degree of freedom
self.nu = np.tile(float(nobs)/nmix,nmix)
# posterior precision
self.s = np.tile(self._s0,nmix)
def _VBE(self,obs,use_ext=True):
self._et = self.s * self.nu # <tau_k>
self._elnt = psi(self.nu*0.5) + np.log(2.0*self.s) # <ln(t_k)>
self.z = self._evaluateHiddenState(obs,use_ext)
def _evaluateHiddenState(self,obs,use_ext=True):
nobs = len(obs)
nmix = self._nstates
ln2pi = np.log(2.0 * np.pi)
z = np.tile(np.log(self.pi) + 0.5 * self._elnt - 0.5 * ln2pi ,(nobs,1))
if use_ext and ext_imported :
pass
else :
for k in xrange(nmix):
# very slow! need Fortran or C codes
dobs = obs - self.m[k]
z[:,k] -= 0.5 * (1.0/self.beta[k] + self.nu[k]*self.s[k]*(dobs**2))
z = z - z.max(1)[np.newaxis].T
z = np.exp(z)
z = normalize(z,1)
return z
def _VBM(self,obs):
self._calcSufficientStatistic(obs)
self._updatePosteriorParameters(obs)
def _calcSufficientStatistic(self,obs):
self.N = self.z.sum(0)
self.xbar = np.dot(obs,self.z) / self.N
self.C = np.diag(np.dot(((obs - self.xbar[np.newaxis].T)**2),self.z))
self.pi = self.N / self.N.sum()
def _updatePosteriorParameters(self,obs):
self.beta = self.N + self._beta0
self.m = (self._beta0 * self._m0 + self.N * self.xbar) / self.beta
self.nu = self._nu0 + self.N
self.s = 1.0 / (1.0/self._s0 + self.C + (self._beta0 *self.N / self.beta) \
* (self.xbar - self._m0)**2)
def _VBLowerBound(self,obs,use_ext=True):
# variational lower bound
nmix = self._nstates
self.N = self.z.sum(0) # need to be updated !!
# <lnp(X|Z,theta)>
        # very slow! need Fortran or C codes
lnpX = np.dot(self.N,(np.log(self.pi) + 0.5 * self._elnt))
for k in xrange(nmix):
dobs = obs - self.m[k]
lnpX -= self.N[k] * 1.0 / self.beta[k] + self.s[k] * self.nu[k] * \
(dobs * dobs).sum()
# H[q(z)] = -<lnq(z)>
Hz = 0.0
Hz = - np.nan_to_num(self.z * np.log(self.z)).sum()
#for k in xrange(nmix):
# Hz -= np.dot(self.z[:,k],np.log(self.z[:,k]))
# KL[q(pi)||p(pi)]
#KLpi = ( - gammaln(self.u) + self.N * psi(self.u)).sum()
KLpi = 0
# KL[q(mu,tau)||p(mu,tau)]
KLmt = 0
#KLmt = ((self.N * self._elnt + self.nu * (self.s / self._s0 - 1.0 - \
# np.log(2.0 * self.s)) + np.log(self.beta) + self._beta0 / self.beta + \
# self.nu * self.s * self._beta0 * (self.m - self._m0)**2) * 0.5 - \
# gammaln(self.nu * 0.5)).sum()
# Wishart part
KLmt = (self.N * self._elnt + self.nu * (self.s / self._s0 - 1.0)).sum() \
* 0.5 + nmix * lnZ_Wishart(self._nu0,self._s0)
for k in xrange(nmix):
KLmt -= lnZ_Wishart(self.nu[k],self.s[k])
# Conditional Gaussian part
KLmt += 0.5 * (np.log(self.beta/self._beta0) + self._beta0/self.beta - 1 \
+ self._beta0 * self.nu * self.s * (self.m-self._m0)**2).sum()
return lnpX + Hz - KLpi - KLmt
def _VBLowerBound2(self,obs,use_ext=True):
# variational lower bound
nobs = len(obs)
nmix = self._nstates
self.N = self.z.sum(0) # need to be updated !!
# KL[q(z)||p(z)]
KLz = 0.0
for k in xrange(nmix):
KLz -= np.dot(self.z[:,k],np.log(self.z[:,k]))
KLz += np.dot(self.N,np.log(self.pi))
# KL[q(mu,tau)||p(mu,tau)]
KLmt = (np.log(self.beta).sum() - nmix * self._beta0) * 0.5
KLmt += lnZ_Wishart(self._nu0,self._s0) * nmix
for k in xrange(nmix):
KLmt -= lnZ_Wishart(self.nu[k],self.s[k])
#print "%12.5e %12.5e %12.5e"%(Hz,-KLpi,-KLmt)
return KLz - KLmt
def _VBFreeEnergy(self,obs,use_ext=True):
return - self._VBLowerBound2(obs,use_ext)
def fit(self,obs,niter=200,eps=1e-4,ifreq=100,init=True,plot=False,use_ext=False):
if init : self._init_params(obs)
F_old = 1.0e50
for i in range(niter):
old_pi = np.copy(self.pi)
old_m = np.copy(self.m)
old_s = np.copy(self.s)
self._VBE(obs,use_ext)
self._VBM(obs)
F_new = self._VBFreeEnergy(obs,use_ext)
dF = F_new - F_old
if dF < 0.0:
print "%8dth iter, Free Energy = %10.4e, dF = %10.4e" %(i,F_new,dF)
else :
print "%8dth iter, Free Energy = %10.4e, dF = %10.4e warning" \
%(i,F_new,dF)
if abs(dF) < eps :
print dF, " < ", eps, "Converged"
break
#conv_u = np.allclose(self.pi,old_pi)
#conv_m = np.allclose(self.m,old_m)
#conv_s = np.allclose(self.s,old_s)
#if conv_u and conv_m and conv_s:
# break
F_old = F_new
if plot:
self.plotPDF(obs)
return self
def showModel(self,min_pi=0.01):
nmix = self._nstates
params = sorted(zip(self.pi,self.m,self._et),reverse=True)
        relevant_clusters = []
for k in xrange(nmix):
if params[k][0] < min_pi:
break
            relevant_clusters.append(params[k])
print "%dth component, pi = %8.3g, mu = %8.3g, tau = %8.3g" \
% (k+1,params[k][0],params[k][1],params[k][2])
        return relevant_clusters
def pdf(self,x,min_pi=0.01):
params = self.showModel(min_pi)
pi = -np.sort(-self.pi)[:len(params)]
pi = pi / pi.sum()
y = np.array([GaussianPDF(x,p[1],p[2]) * pi[k] \
for k,p in enumerate(params)])
return y
def plotPDF(self,obs,bins=100,min_pi=0.01):
try :
import matplotlib.pyplot as plt
except ImportError :
print "cannot import pyplot"
return
x = np.linspace(min(obs),max(obs),bins)
y = self.pdf(x,min_pi)
plt.hist(obs,bins,label="observed",normed=True)
plt.plot(x,y.sum(0),label="sum",linewidth=3)
for k,yy in enumerate(y) :
plt.plot(x,yy,label="%dth cluster"%(k+1),linewidth=3)
plt.legend(loc=0)
plt.show()
def decode(self,obs):
        z = self._evaluateHiddenState(obs)
        codes = z.argmax(1)
        clust = [[] for i in range(z.shape[1])]
        for (o,c) in zip(obs,codes):
            clust[c].append(o)
        clust = [np.array(cl) for cl in clust]
        return codes,clust
def test1(nmix,niter=10000):
Y = testData2(500)
model = VBGMM1D(nmix)
model.fit(Y,niter)
model.plotPDF(Y)
if __name__ == "__main__":
from sys import argv
nmix = int(argv[1])
test1(nmix)
| bsd-3-clause | 8,080,979,700,107,704,000 | 29.272727 | 84 | 0.577099 | false |
lmregus/Portfolio | python/design_patterns/env/lib/python3.7/site-packages/_pytest/main.py | 1 | 26632 | """ core implementation of testing process: init, session, runtest loop. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import fnmatch
import functools
import os
import pkgutil
import sys
import warnings
import attr
import py
import six
import _pytest._code
from _pytest import nodes
from _pytest.config import directory_arg
from _pytest.config import hookimpl
from _pytest.config import UsageError
from _pytest.deprecated import PYTEST_CONFIG_GLOBAL
from _pytest.outcomes import exit
from _pytest.runner import collect_one_node
# exitcodes for the command line
EXIT_OK = 0
EXIT_TESTSFAILED = 1
EXIT_INTERRUPTED = 2
EXIT_INTERNALERROR = 3
EXIT_USAGEERROR = 4
EXIT_NOTESTSCOLLECTED = 5
def pytest_addoption(parser):
parser.addini(
"norecursedirs",
"directory patterns to avoid for recursion",
type="args",
default=[".*", "build", "dist", "CVS", "_darcs", "{arch}", "*.egg", "venv"],
)
parser.addini(
"testpaths",
"directories to search for tests when no files or directories are given in the "
"command line.",
type="args",
default=[],
)
# parser.addini("dirpatterns",
# "patterns specifying possible locations of test files",
# type="linelist", default=["**/test_*.txt",
# "**/test_*.py", "**/*_test.py"]
# )
group = parser.getgroup("general", "running and selection options")
group._addoption(
"-x",
"--exitfirst",
action="store_const",
dest="maxfail",
const=1,
help="exit instantly on first error or failed test.",
),
group._addoption(
"--maxfail",
metavar="num",
action="store",
type=int,
dest="maxfail",
default=0,
help="exit after first num failures or errors.",
)
group._addoption(
"--strict",
action="store_true",
help="marks not registered in configuration file raise errors.",
)
group._addoption(
"-c",
metavar="file",
type=str,
dest="inifilename",
help="load configuration from `file` instead of trying to locate one of the implicit "
"configuration files.",
)
group._addoption(
"--continue-on-collection-errors",
action="store_true",
default=False,
dest="continue_on_collection_errors",
help="Force test execution even if collection errors occur.",
)
group._addoption(
"--rootdir",
action="store",
dest="rootdir",
help="Define root directory for tests. Can be relative path: 'root_dir', './root_dir', "
"'root_dir/another_dir/'; absolute path: '/home/user/root_dir'; path with variables: "
"'$HOME/root_dir'.",
)
group = parser.getgroup("collect", "collection")
group.addoption(
"--collectonly",
"--collect-only",
action="store_true",
help="only collect tests, don't execute them.",
),
group.addoption(
"--pyargs",
action="store_true",
help="try to interpret all arguments as python packages.",
)
group.addoption(
"--ignore",
action="append",
metavar="path",
help="ignore path during collection (multi-allowed).",
)
group.addoption(
"--ignore-glob",
action="append",
metavar="path",
help="ignore path pattern during collection (multi-allowed).",
)
group.addoption(
"--deselect",
action="append",
metavar="nodeid_prefix",
help="deselect item during collection (multi-allowed).",
)
# when changing this to --conf-cut-dir, config.py Conftest.setinitial
# needs upgrading as well
group.addoption(
"--confcutdir",
dest="confcutdir",
default=None,
metavar="dir",
type=functools.partial(directory_arg, optname="--confcutdir"),
help="only load conftest.py's relative to specified dir.",
)
group.addoption(
"--noconftest",
action="store_true",
dest="noconftest",
default=False,
help="Don't load any conftest.py files.",
)
group.addoption(
"--keepduplicates",
"--keep-duplicates",
action="store_true",
dest="keepduplicates",
default=False,
help="Keep duplicate tests.",
)
group.addoption(
"--collect-in-virtualenv",
action="store_true",
dest="collect_in_virtualenv",
default=False,
help="Don't ignore tests in a local virtualenv directory",
)
group = parser.getgroup("debugconfig", "test session debugging and configuration")
group.addoption(
"--basetemp",
dest="basetemp",
default=None,
metavar="dir",
help=(
"base temporary directory for this test run."
"(warning: this directory is removed if it exists)"
),
)
class _ConfigDeprecated(object):
def __init__(self, config):
self.__dict__["_config"] = config
def __getattr__(self, attr):
warnings.warn(PYTEST_CONFIG_GLOBAL, stacklevel=2)
return getattr(self._config, attr)
def __setattr__(self, attr, val):
warnings.warn(PYTEST_CONFIG_GLOBAL, stacklevel=2)
return setattr(self._config, attr, val)
def __repr__(self):
return "{}({!r})".format(type(self).__name__, self._config)
def pytest_configure(config):
__import__("pytest").config = _ConfigDeprecated(config) # compatibility
def wrap_session(config, doit):
"""Skeleton command line program"""
session = Session(config)
session.exitstatus = EXIT_OK
initstate = 0
try:
try:
config._do_configure()
initstate = 1
config.hook.pytest_sessionstart(session=session)
initstate = 2
session.exitstatus = doit(config, session) or 0
except UsageError:
raise
except Failed:
session.exitstatus = EXIT_TESTSFAILED
except (KeyboardInterrupt, exit.Exception):
excinfo = _pytest._code.ExceptionInfo.from_current()
exitstatus = EXIT_INTERRUPTED
if initstate <= 2 and isinstance(excinfo.value, exit.Exception):
sys.stderr.write("{}: {}\n".format(excinfo.typename, excinfo.value.msg))
if excinfo.value.returncode is not None:
exitstatus = excinfo.value.returncode
config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
session.exitstatus = exitstatus
except: # noqa
excinfo = _pytest._code.ExceptionInfo.from_current()
config.notify_exception(excinfo, config.option)
session.exitstatus = EXIT_INTERNALERROR
if excinfo.errisinstance(SystemExit):
sys.stderr.write("mainloop: caught unexpected SystemExit!\n")
finally:
excinfo = None # Explicitly break reference cycle.
session.startdir.chdir()
if initstate >= 2:
config.hook.pytest_sessionfinish(
session=session, exitstatus=session.exitstatus
)
config._ensure_unconfigure()
return session.exitstatus
def pytest_cmdline_main(config):
return wrap_session(config, _main)
def _main(config, session):
""" default command line protocol for initialization, session,
running tests and reporting. """
config.hook.pytest_collection(session=session)
config.hook.pytest_runtestloop(session=session)
if session.testsfailed:
return EXIT_TESTSFAILED
elif session.testscollected == 0:
return EXIT_NOTESTSCOLLECTED
def pytest_collection(session):
return session.perform_collect()
def pytest_runtestloop(session):
if session.testsfailed and not session.config.option.continue_on_collection_errors:
raise session.Interrupted("%d errors during collection" % session.testsfailed)
if session.config.option.collectonly:
return True
for i, item in enumerate(session.items):
nextitem = session.items[i + 1] if i + 1 < len(session.items) else None
item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
if session.shouldfail:
raise session.Failed(session.shouldfail)
if session.shouldstop:
raise session.Interrupted(session.shouldstop)
return True
def _in_venv(path):
"""Attempts to detect if ``path`` is the root of a Virtual Environment by
checking for the existence of the appropriate activate script"""
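    # e.g. (illustrative): /home/user/project/venv counts as a virtualenv root
    # when /home/user/project/venv/bin/activate (or a Windows Scripts/ variant)
    # exists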
bindir = path.join("Scripts" if sys.platform.startswith("win") else "bin")
if not bindir.isdir():
return False
activates = (
"activate",
"activate.csh",
"activate.fish",
"Activate",
"Activate.bat",
"Activate.ps1",
)
return any([fname.basename in activates for fname in bindir.listdir()])
def pytest_ignore_collect(path, config):
ignore_paths = config._getconftest_pathlist("collect_ignore", path=path.dirpath())
ignore_paths = ignore_paths or []
excludeopt = config.getoption("ignore")
if excludeopt:
ignore_paths.extend([py.path.local(x) for x in excludeopt])
if py.path.local(path) in ignore_paths:
return True
ignore_globs = config._getconftest_pathlist(
"collect_ignore_glob", path=path.dirpath()
)
ignore_globs = ignore_globs or []
excludeglobopt = config.getoption("ignore_glob")
if excludeglobopt:
ignore_globs.extend([py.path.local(x) for x in excludeglobopt])
if any(
fnmatch.fnmatch(six.text_type(path), six.text_type(glob))
for glob in ignore_globs
):
return True
allow_in_venv = config.getoption("collect_in_virtualenv")
if not allow_in_venv and _in_venv(path):
return True
return False
def pytest_collection_modifyitems(items, config):
deselect_prefixes = tuple(config.getoption("deselect") or [])
if not deselect_prefixes:
return
remaining = []
deselected = []
for colitem in items:
if colitem.nodeid.startswith(deselect_prefixes):
deselected.append(colitem)
else:
remaining.append(colitem)
if deselected:
config.hook.pytest_deselected(items=deselected)
items[:] = remaining
@contextlib.contextmanager
def _patched_find_module():
"""Patch bug in pkgutil.ImpImporter.find_module
When using pkgutil.find_loader on python<3.4 it removes symlinks
from the path due to a call to os.path.realpath. This is not consistent
with actually doing the import (in these versions, pkgutil and __import__
did not share the same underlying code). This can break conftest
discovery for pytest where symlinks are involved.
The only supported python<3.4 by pytest is python 2.7.
"""
if six.PY2: # python 3.4+ uses importlib instead
def find_module_patched(self, fullname, path=None):
# Note: we ignore 'path' argument since it is only used via meta_path
subname = fullname.split(".")[-1]
if subname != fullname and self.path is None:
return None
if self.path is None:
path = None
else:
# original: path = [os.path.realpath(self.path)]
path = [self.path]
try:
file, filename, etc = pkgutil.imp.find_module(subname, path)
except ImportError:
return None
return pkgutil.ImpLoader(fullname, file, filename, etc)
old_find_module = pkgutil.ImpImporter.find_module
pkgutil.ImpImporter.find_module = find_module_patched
try:
yield
finally:
pkgutil.ImpImporter.find_module = old_find_module
else:
yield
class FSHookProxy(object):
def __init__(self, fspath, pm, remove_mods):
self.fspath = fspath
self.pm = pm
self.remove_mods = remove_mods
def __getattr__(self, name):
x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods)
self.__dict__[name] = x
return x
class NoMatch(Exception):
""" raised if matching cannot locate a matching names. """
class Interrupted(KeyboardInterrupt):
""" signals an interrupted test run. """
__module__ = "builtins" # for py3
class Failed(Exception):
""" signals a stop as failed test run. """
@attr.s
class _bestrelpath_cache(dict):
path = attr.ib()
def __missing__(self, path):
r = self.path.bestrelpath(path)
self[path] = r
return r
class Session(nodes.FSCollector):
Interrupted = Interrupted
Failed = Failed
def __init__(self, config):
nodes.FSCollector.__init__(
self, config.rootdir, parent=None, config=config, session=self, nodeid=""
)
self.testsfailed = 0
self.testscollected = 0
self.shouldstop = False
self.shouldfail = False
self.trace = config.trace.root.get("collection")
self._norecursepatterns = config.getini("norecursedirs")
self.startdir = py.path.local()
self._initialpaths = frozenset()
# Keep track of any collected nodes in here, so we don't duplicate fixtures
self._node_cache = {}
self._bestrelpathcache = _bestrelpath_cache(config.rootdir)
# Dirnames of pkgs with dunder-init files.
self._pkg_roots = {}
self.config.pluginmanager.register(self, name="session")
def __repr__(self):
return "<%s %s exitstatus=%r testsfailed=%d testscollected=%d>" % (
self.__class__.__name__,
self.name,
getattr(self, "exitstatus", "<UNSET>"),
self.testsfailed,
self.testscollected,
)
def _node_location_to_relpath(self, node_path):
# bestrelpath is a quite slow function
return self._bestrelpathcache[node_path]
@hookimpl(tryfirst=True)
def pytest_collectstart(self):
if self.shouldfail:
raise self.Failed(self.shouldfail)
if self.shouldstop:
raise self.Interrupted(self.shouldstop)
@hookimpl(tryfirst=True)
def pytest_runtest_logreport(self, report):
if report.failed and not hasattr(report, "wasxfail"):
self.testsfailed += 1
maxfail = self.config.getvalue("maxfail")
if maxfail and self.testsfailed >= maxfail:
self.shouldfail = "stopping after %d failures" % (self.testsfailed)
pytest_collectreport = pytest_runtest_logreport
def isinitpath(self, path):
return path in self._initialpaths
def gethookproxy(self, fspath):
# check if we have the common case of running
# hooks with all conftest.py files
pm = self.config.pluginmanager
my_conftestmodules = pm._getconftestmodules(fspath)
remove_mods = pm._conftest_plugins.difference(my_conftestmodules)
if remove_mods:
# one or more conftests are not in use at this fspath
proxy = FSHookProxy(fspath, pm, remove_mods)
else:
            # all plugins are active for this fspath
proxy = self.config.hook
return proxy
def perform_collect(self, args=None, genitems=True):
hook = self.config.hook
try:
items = self._perform_collect(args, genitems)
self.config.pluginmanager.check_pending()
hook.pytest_collection_modifyitems(
session=self, config=self.config, items=items
)
finally:
hook.pytest_collection_finish(session=self)
self.testscollected = len(items)
return items
def _perform_collect(self, args, genitems):
if args is None:
args = self.config.args
self.trace("perform_collect", self, args)
self.trace.root.indent += 1
self._notfound = []
initialpaths = []
self._initialparts = []
self.items = items = []
for arg in args:
parts = self._parsearg(arg)
self._initialparts.append(parts)
initialpaths.append(parts[0])
self._initialpaths = frozenset(initialpaths)
rep = collect_one_node(self)
self.ihook.pytest_collectreport(report=rep)
self.trace.root.indent -= 1
if self._notfound:
errors = []
for arg, exc in self._notfound:
line = "(no name %r in any of %r)" % (arg, exc.args[0])
errors.append("not found: %s\n%s" % (arg, line))
# XXX: test this
raise UsageError(*errors)
if not genitems:
return rep.result
else:
if rep.passed:
for node in rep.result:
self.items.extend(self.genitems(node))
return items
def collect(self):
for initialpart in self._initialparts:
arg = "::".join(map(str, initialpart))
self.trace("processing argument", arg)
self.trace.root.indent += 1
try:
for x in self._collect(arg):
yield x
except NoMatch:
# we are inside a make_report hook so
# we cannot directly pass through the exception
self._notfound.append((arg, sys.exc_info()[1]))
self.trace.root.indent -= 1
def _collect(self, arg):
from _pytest.python import Package
names = self._parsearg(arg)
argpath = names.pop(0)
# Start with a Session root, and delve to argpath item (dir or file)
# and stack all Packages found on the way.
# No point in finding packages when collecting doctests
if not self.config.getoption("doctestmodules", False):
pm = self.config.pluginmanager
for parent in reversed(argpath.parts()):
if pm._confcutdir and pm._confcutdir.relto(parent):
break
if parent.isdir():
pkginit = parent.join("__init__.py")
if pkginit.isfile():
if pkginit not in self._node_cache:
col = self._collectfile(pkginit, handle_dupes=False)
if col:
if isinstance(col[0], Package):
self._pkg_roots[parent] = col[0]
# always store a list in the cache, matchnodes expects it
self._node_cache[col[0].fspath] = [col[0]]
# If it's a directory argument, recurse and look for any Subpackages.
# Let the Package collector deal with subnodes, don't collect here.
if argpath.check(dir=1):
assert not names, "invalid arg %r" % (arg,)
seen_dirs = set()
for path in argpath.visit(
fil=self._visit_filter, rec=self._recurse, bf=True, sort=True
):
dirpath = path.dirpath()
if dirpath not in seen_dirs:
# Collect packages first.
seen_dirs.add(dirpath)
pkginit = dirpath.join("__init__.py")
if pkginit.exists():
for x in self._collectfile(pkginit):
yield x
if isinstance(x, Package):
self._pkg_roots[dirpath] = x
if dirpath in self._pkg_roots:
# Do not collect packages here.
continue
for x in self._collectfile(path):
key = (type(x), x.fspath)
if key in self._node_cache:
yield self._node_cache[key]
else:
self._node_cache[key] = x
yield x
else:
assert argpath.check(file=1)
if argpath in self._node_cache:
col = self._node_cache[argpath]
else:
collect_root = self._pkg_roots.get(argpath.dirname, self)
col = collect_root._collectfile(argpath, handle_dupes=False)
if col:
self._node_cache[argpath] = col
m = self.matchnodes(col, names)
# If __init__.py was the only file requested, then the matched node will be
# the corresponding Package, and the first yielded item will be the __init__
# Module itself, so just use that. If this special case isn't taken, then all
# the files in the package will be yielded.
if argpath.basename == "__init__.py":
yield next(m[0].collect())
return
for y in m:
yield y
def _collectfile(self, path, handle_dupes=True):
assert path.isfile(), "%r is not a file (isdir=%r, exists=%r, islink=%r)" % (
path,
path.isdir(),
path.exists(),
path.islink(),
)
ihook = self.gethookproxy(path)
if not self.isinitpath(path):
if ihook.pytest_ignore_collect(path=path, config=self.config):
return ()
if handle_dupes:
keepduplicates = self.config.getoption("keepduplicates")
if not keepduplicates:
duplicate_paths = self.config.pluginmanager._duplicatepaths
if path in duplicate_paths:
return ()
else:
duplicate_paths.add(path)
return ihook.pytest_collect_file(path=path, parent=self)
def _recurse(self, dirpath):
if dirpath.basename == "__pycache__":
return False
ihook = self.gethookproxy(dirpath.dirpath())
if ihook.pytest_ignore_collect(path=dirpath, config=self.config):
return False
for pat in self._norecursepatterns:
if dirpath.check(fnmatch=pat):
return False
ihook = self.gethookproxy(dirpath)
ihook.pytest_collect_directory(path=dirpath, parent=self)
return True
if six.PY2:
@staticmethod
def _visit_filter(f):
return f.check(file=1) and not f.strpath.endswith("*.pyc")
else:
@staticmethod
def _visit_filter(f):
return f.check(file=1)
def _tryconvertpyarg(self, x):
"""Convert a dotted module name to path."""
try:
with _patched_find_module():
loader = pkgutil.find_loader(x)
except ImportError:
return x
if loader is None:
return x
# This method is sometimes invoked when AssertionRewritingHook, which
# does not define a get_filename method, is already in place:
try:
with _patched_find_module():
path = loader.get_filename(x)
except AttributeError:
# Retrieve path from AssertionRewritingHook:
path = loader.modules[x][0].co_filename
if loader.is_package(x):
path = os.path.dirname(path)
return path
def _parsearg(self, arg):
""" return (fspath, names) tuple after checking the file exists. """
parts = str(arg).split("::")
if self.config.option.pyargs:
parts[0] = self._tryconvertpyarg(parts[0])
relpath = parts[0].replace("/", os.sep)
path = self.config.invocation_dir.join(relpath, abs=True)
if not path.check():
if self.config.option.pyargs:
raise UsageError(
"file or package not found: " + arg + " (missing __init__.py?)"
)
raise UsageError("file not found: " + arg)
parts[0] = path.realpath()
return parts
def matchnodes(self, matching, names):
self.trace("matchnodes", matching, names)
self.trace.root.indent += 1
nodes = self._matchnodes(matching, names)
num = len(nodes)
self.trace("matchnodes finished -> ", num, "nodes")
self.trace.root.indent -= 1
if num == 0:
raise NoMatch(matching, names[:1])
return nodes
def _matchnodes(self, matching, names):
if not matching or not names:
return matching
name = names[0]
assert name
nextnames = names[1:]
resultnodes = []
for node in matching:
if isinstance(node, nodes.Item):
if not names:
resultnodes.append(node)
continue
assert isinstance(node, nodes.Collector)
key = (type(node), node.nodeid)
if key in self._node_cache:
rep = self._node_cache[key]
else:
rep = collect_one_node(node)
self._node_cache[key] = rep
if rep.passed:
has_matched = False
for x in rep.result:
# TODO: remove parametrized workaround once collection structure contains parametrization
if x.name == name or x.name.split("[")[0] == name:
resultnodes.extend(self.matchnodes([x], nextnames))
has_matched = True
# XXX accept IDs that don't have "()" for class instances
if not has_matched and len(rep.result) == 1 and x.name == "()":
nextnames.insert(0, name)
resultnodes.extend(self.matchnodes([x], nextnames))
else:
# report collection failures here to avoid failing to run some test
# specified in the command line because the module could not be
# imported (#134)
node.ihook.pytest_collectreport(report=rep)
return resultnodes
def genitems(self, node):
self.trace("genitems", node)
if isinstance(node, nodes.Item):
node.ihook.pytest_itemcollected(item=node)
yield node
else:
assert isinstance(node, nodes.Collector)
rep = collect_one_node(node)
if rep.passed:
for subnode in rep.result:
for x in self.genitems(subnode):
yield x
node.ihook.pytest_collectreport(report=rep)
| mit | -9,025,339,223,619,723,000 | 33.452781 | 109 | 0.576938 | false |
listyque/TACTIC-Handler | thlib/ui/items/ui_commit_item.py | 1 | 4255 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'items\ui_commit_item.ui'
#
# Created: Sat Oct 5 00:17:13 2019
# by: pyside-uic 0.2.15 running on PySide 1.2.4
#
# WARNING! All changes made in this file will be lost!
from thlib.side.Qt import QtWidgets as QtGui
from thlib.side.Qt import QtGui as Qt4Gui
from thlib.side.Qt import QtCore
class Ui_commitItem(object):
def setupUi(self, commitItem):
commitItem.setObjectName("commitItem")
commitItem.resize(84, 72)
self.gridLayout = QtGui.QGridLayout(commitItem)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName("gridLayout")
self.previewVerticalLayout = QtGui.QVBoxLayout()
self.previewVerticalLayout.setSpacing(0)
self.previewVerticalLayout.setContentsMargins(4, 4, 4, 4)
self.previewVerticalLayout.setObjectName("previewVerticalLayout")
self.previewLabel = QtGui.QLabel(commitItem)
self.previewLabel.setMinimumSize(QtCore.QSize(64, 64))
self.previewLabel.setMaximumSize(QtCore.QSize(64, 64))
self.previewLabel.setStyleSheet("QLabel {\n"
" background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 rgba(175, 175, 175, 16), stop: 1 rgba(0, 0, 0, 0));\n"
" border: 0px;\n"
" border-radius: 4px;\n"
" padding: 0px 0px;\n"
"}")
self.previewLabel.setTextFormat(QtCore.Qt.RichText)
self.previewLabel.setAlignment(QtCore.Qt.AlignCenter)
self.previewLabel.setObjectName("previewLabel")
self.previewVerticalLayout.addWidget(self.previewLabel)
spacerItem = QtGui.QSpacerItem(0, 0, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Ignored)
self.previewVerticalLayout.addItem(spacerItem)
self.previewVerticalLayout.setStretch(1, 1)
self.gridLayout.addLayout(self.previewVerticalLayout, 0, 0, 3, 1)
self.nameVerticalLayout = QtGui.QVBoxLayout()
self.nameVerticalLayout.setSpacing(0)
self.nameVerticalLayout.setContentsMargins(-1, -1, -1, 3)
self.nameVerticalLayout.setObjectName("nameVerticalLayout")
self.fileNameLabel = QtGui.QLabel(commitItem)
self.fileNameLabel.setMinimumSize(QtCore.QSize(0, 20))
self.fileNameLabel.setMaximumSize(QtCore.QSize(16777215, 24))
font = Qt4Gui.QFont()
font.setWeight(75)
font.setBold(True)
self.fileNameLabel.setFont(font)
self.fileNameLabel.setStyleSheet("QLabel {\n"
" background-color: transparent;\n"
" border-bottom: 2px solid qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0 rgba(128, 128, 128, 64), stop:1 rgba(128, 128,128, 0));\n"
"}")
self.fileNameLabel.setTextFormat(QtCore.Qt.PlainText)
self.fileNameLabel.setObjectName("fileNameLabel")
self.nameVerticalLayout.addWidget(self.fileNameLabel)
self.gridLayout.addLayout(self.nameVerticalLayout, 0, 1, 1, 2)
self.descriptionLerticalLayout = QtGui.QVBoxLayout()
self.descriptionLerticalLayout.setSpacing(0)
self.descriptionLerticalLayout.setObjectName("descriptionLerticalLayout")
self.commentLabel = QtGui.QLabel(commitItem)
self.commentLabel.setMinimumSize(QtCore.QSize(0, 25))
self.commentLabel.setTextFormat(QtCore.Qt.PlainText)
self.commentLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.commentLabel.setWordWrap(True)
self.commentLabel.setMargin(2)
self.commentLabel.setObjectName("commentLabel")
self.descriptionLerticalLayout.addWidget(self.commentLabel)
self.gridLayout.addLayout(self.descriptionLerticalLayout, 2, 1, 1, 2)
self.infoHorizontalLayout = QtGui.QHBoxLayout()
self.infoHorizontalLayout.setSpacing(0)
self.infoHorizontalLayout.setObjectName("infoHorizontalLayout")
self.gridLayout.addLayout(self.infoHorizontalLayout, 1, 1, 1, 2)
self.gridLayout.setColumnStretch(1, 1)
self.gridLayout.setColumnStretch(2, 1)
self.retranslateUi(commitItem)
QtCore.QMetaObject.connectSlotsByName(commitItem)
def retranslateUi(self, commitItem):
commitItem.setWindowTitle(u"Form")
| epl-1.0 | 1,223,322,050,095,421,700 | 48.476744 | 147 | 0.709988 | false |
sapcc/monasca-agent | tests_to_fix/test_iis.py | 1 | 1049 | import unittest
import logging
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
from tests.common import get_check
logging.basicConfig()
CONFIG = """
init_config:
instances:
- host: .
dimensions:
dim1: value1
dim2: value2
"""
class IISTestCase(unittest.TestCase):
@attr('windows')
def testIIS(self):
raise SkipTest('Requires IIS and wmi')
check, instances = get_check('iis', CONFIG)
check.check(instances[0])
metrics = check.get_metrics()
base_metrics = [m[0] for m in check.METRICS]
ret_metrics = [m[0] for m in metrics]
ret_dimensions = [m[3]['dimensions'] for m in metrics]
# Make sure each metric was captured
for metric in base_metrics:
assert metric in ret_metrics
# Make sure everything is tagged correctly
for dimensions in ret_dimensions:
assert dimensions == {'dim1': 'value1', 'dim2': 'value2'}
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | -8,427,825,189,620,956,000 | 23.395349 | 69 | 0.616778 | false |
quaddra/engage-utils | engage_utils/resource_utils.py | 1 | 19792 | """
Utilities for parsing/pretty-printing resource definitions and instances.
Objects that can be converted to/from JSON have the following methods:
* from_json(parsed_data) [static method] - given json data that has been parsed
into python via json.load() or json.loads(), construct an instance of the
associated object.
* to_json() - method to convert an object instance to python structures suitable
to be converted to json via json.dump() (dict, list, string, etc.).
* pretty_print(indent_level=0) - method to convert an object instance to serialzed
json while preserving human-readability. The indent level is used to nest
objects.
"""
import os
import json
import sys
import functools
from optparse import OptionParser
import logging
logger = logging.getLogger(__name__)
import pkgmgr
class ResourceParseError(Exception):
pass
def _check_type(v, exp_type, what):
if not isinstance(v, exp_type):
raise ResourceParseError("Expecting a %s for %s, install got a %s" %
(exp_type, what, type(v)))
def _check_prop(d, prop, what):
if prop not in d:
raise ResourceParseError("%s missing required property %s" %
(what, prop))
@functools.total_ordering
class Key(object):
def __init__(self, name, version):
self.name = name
self.version = version
def __eq__(self, other):
return self.name==other.name and self.version==other.version
def __lt__(self, other):
return (self.name, self.version) < (other.name, other.version)
def __str__(self):
return "%s %s" % (self.name, self.version)
def to_json(self):
return {u'name':self.name, u'version':self.version}
def pretty_print(self, indent_level=0):
return u'{"name": %s, "version": %s}' % (json.dumps(self.name),
json.dumps(self.version))
def __repr__(self):
return self.pretty_print()
@staticmethod
def from_json(parsed_data):
_check_type(parsed_data, dict, 'Key')
for p in [u'name', u'version']:
if p not in parsed_data:
raise ResourceParseError("Key missing property '%s'" % p)
return Key(parsed_data[u'name'], parsed_data[u'version'])
def _pretty_print_dict(dct, indent_level,
value_fn=lambda v, indent: json.dumps(v)):
indent = u" " * indent_level if indent_level>0 else u""
s = u"{\n"
field_lines = []
props = sorted(dct.keys())
for p in props:
field_lines.append(indent + u" " + json.dumps(p) + u": " +
value_fn(dct[p], indent_level+2))
s += u',\n'.join(field_lines)
s += u'\n' + indent + u'}'
return s
def _pretty_print_list(lst, indent_level,
value_fn=lambda v, indent: json.dumps(v)):
indent = u" " * indent_level if indent_level>0 else u""
s = u"[\n"
field_lines = []
for p in lst:
field_lines.append(indent + u" " +
value_fn(p, indent_level+2))
s += u',\n'.join(field_lines)
s += u'\n' + indent + u']'
return s
def pp_value_fn(v, indent_level):
"""Value function for pretty printing when dict/list
elements have a pretty_print method.
"""
return v.pretty_print(indent_level)
class PortDef(object):
"""A port is just a mapping from property names to definitions.
For now, we don't validate the definitions, just take the json directly.
"""
def __init__(self, prop_defs):
self.prop_defs = prop_defs
def to_json(self):
return self.prop_defs
def pretty_print(self, indent_level=0):
return _pretty_print_dict(self.prop_defs, indent_level)
@staticmethod
def from_json(parsed_data):
_check_type(parsed_data, dict, "Port")
return PortDef(parsed_data)
def get_constraint_type(parsed_data):
_check_type(parsed_data, dict, "Constraint")
if u"all-of" in parsed_data:
assert (u'one-of' not in parsed_data) and (u'key' not in parsed_data)
return AllOfConstraint
elif u"one-of" in parsed_data:
assert u'key' not in parsed_data
return OneOfConstraint
elif u"key" in parsed_data:
return BaseConstraint
else:
raise ResourceParseError("Unparsable constraint: %s" % json.dumps(parsed_data))
class Constraint(object):
"""Abstract base class of all constraints.
"""
@staticmethod
def from_json(parsed_data):
_check_type(parsed_data, dict, "Constraint")
Ct = get_constraint_type(parsed_data)
return Ct.from_json(parsed_data)
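# JSON shapes dispatched above (illustrative):
#   {"key": {"name": ..., "version": ...}}    -> BaseConstraint
#   {"one-of": [<base>, <base>, ...]}         -> OneOfConstraint
#   {"all-of": [<base or one-of>, ...]}       -> AllOfConstraint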
class BaseConstraint(Constraint):
"""A constraint that references just a single resource name and
a version range.
"""
def __init__(self, key_constraint, port_mapping=None):
self.key_constraint = key_constraint
self.port_mapping = port_mapping
def to_json(self):
d = {u"key":self.key_constraint}
if self.port_mapping:
d[u'port_mapping'] = self.port_mapping
return d
def pretty_print(self, indent_level=0):
def pp_key_constraint():
return u'{"name":%s, "version":%s}' % \
(json.dumps(self.key_constraint[u'name']),
json.dumps(self.key_constraint[u'version']))
if self.port_mapping:
return u'{"key": %s,\n' % pp_key_constraint() + \
(u" " * indent_level) + u' "port_mapping": ' + \
json.dumps(self.port_mapping) + u"}"
else:
return u'{"key": %s}' % pp_key_constraint()
@staticmethod
def from_json(parsed_data):
_check_type(parsed_data, dict, "BaseConstraint")
_check_prop(parsed_data, u'key', "BaseConstraint")
key_constraint = parsed_data[u'key']
if u"port_mapping" in parsed_data:
port_mapping = parsed_data[u'port_mapping']
_check_type(port_mapping, dict, "BaseConstraint.port_mapping")
else:
port_mapping = None
return BaseConstraint(key_constraint, port_mapping)
class OneOfConstraint(Constraint):
def __init__(self, subconstraints):
for s in subconstraints:
_check_type(s, Constraint, "OneOfConstraint.subconstraints[i]")
self.subconstraints = subconstraints
def to_json(self):
return {u'one-of':[s.to_json() for s in self.subconstraints]}
def pretty_print(self, indent_level=0):
indent = u" " * indent_level if indent_level>0 else u""
return u'{ "one-of": ' + \
_pretty_print_list(self.subconstraints, indent_level+2,
value_fn=pp_value_fn) + u'}'
@staticmethod
def from_json(parsed_data):
_check_type(parsed_data, dict, "OneOfConstraint")
if u"one-of" not in parsed_data:
ResourceParseError("Expecting an one-of constraint, but missing one-of property")
raw_subconstraints = parsed_data[u'one-of']
_check_type(raw_subconstraints, list, "OneOfConstraint.subconstraints")
subconstraints = []
for rs in raw_subconstraints:
s = Constraint.from_json(rs)
if not isinstance(s, BaseConstraint):
raise ResourceParseError("one-of constraint not allowed to contain constraints of type %s" %
s.__class__.__name__)
subconstraints.append(s)
return OneOfConstraint(subconstraints)
class AllOfConstraint(Constraint):
def __init__(self, subconstraints):
for s in subconstraints:
_check_type(s, Constraint, "AllOfConstraint.subconstraints[i]")
self.subconstraints = subconstraints
def to_json(self):
return {u'all-of':[s.to_json() for s in self.subconstraints]}
def pretty_print(self, indent_level=0):
indent = u" " * indent_level if indent_level>0 else u""
return u'{ "all-of": ' + \
_pretty_print_list(self.subconstraints, indent_level+2,
value_fn=pp_value_fn) + u'}'
@staticmethod
def from_json(parsed_data):
_check_type(parsed_data, dict, "AllOfConstraint")
if u"all-of" not in parsed_data:
ResourceParseError("Expecting an all-of constraint, but missing all-of property")
raw_subconstraints = parsed_data[u'all-of']
_check_type(raw_subconstraints, list, "AllOfConstraint.subconstraints")
subconstraints = []
for rs in raw_subconstraints:
s = Constraint.from_json(rs)
if isinstance(s, AllOfConstraint):
raise ResourceParseError("all-of constraint not allowed to contain all-of constraints")
subconstraints.append(s)
return AllOfConstraint(subconstraints)
RESOURCE_PROPERTIES = [
'key', 'display_name', 'comment',
'config_port', 'input_ports', 'output_ports', 'inside',
'environment', 'peers', 'driver_module_name', 'package'
]
RESOURCE_PROPERTIES_SET = frozenset(RESOURCE_PROPERTIES)
@functools.total_ordering
class ResourceDef(object):
def __init__(self, key, display_name=None, comment=None,
config_port=None, input_ports=None, output_ports=None,
inside=None, environment=None, peers=None,
driver_module_name=None, package=None, extra_props=None):
self.key = key
self.display_name = display_name
self.comment = comment
self.config_port = config_port
self.input_ports = input_ports if input_ports!=None else {}
self.output_ports = output_ports if output_ports!=None else {}
self.inside = inside
self.environment = environment
self.peers = peers
self.driver_module_name = driver_module_name
self.package = package
self.extra_props = extra_props if extra_props!=None else {}
def __lt__(self, other):
return self.key.__lt__(other.key)
def _attributes_equal(self, other):
for attr in ['key', 'display_name', 'comment', 'config_port', 'input_ports',
'output_ports', 'inside', 'environment', 'peers',
'driver_module_name', 'package', 'extra_props']:
if getattr(self, attr)!=getattr(other, attr):
return False
return True
def __eq__(self, other):
if self.key.__eq__(other.key):
if self._attributes_equal(other):
return True
else:
# If two resources have the same key but different values, we
# don't permit a comparison
return NotImplemented
else:
return False
def to_json(self):
d = {}
for p in RESOURCE_PROPERTIES:
if hasattr(self, p):
v = getattr(self, p)
if v != None:
if isinstance(v, list):
d[p] = [e.to_json() for e in v]
                    elif isinstance(v, dict):
if len(v)>0:
dd ={}
for (pp, vv) in v.items():
dd[pp] = vv.to_json()
d[p] = dd
elif isinstance(v, unicode) or isinstance(v, str):
d[p] = v
else:
d[p] = v.to_json()
# add extra props
        for (p,v) in self.extra_props.items():
d[p] = v
return d
def pretty_print(self, indent_level=0):
indent = u" " * indent_level if indent_level>0 else u""
s = u"{\n"
field_lines = []
def add_prop(name, value):
field_lines.append(indent + u" " + json.dumps(name) + u": " + value)
add_prop("key", self.key.pretty_print())
if self.display_name:
add_prop("display_name", json.dumps(self.display_name))
if self.comment:
add_prop("comment", json.dumps(self.comment))
if self.config_port:
add_prop("config_port",
self.config_port.pretty_print(indent_level=indent_level+2))
if self.input_ports and len(self.input_ports)>0:
add_prop("input_ports",
_pretty_print_dict(self.input_ports, indent_level + 2,
value_fn=pp_value_fn))
if self.output_ports and len(self.output_ports)>0:
add_prop("output_ports",
_pretty_print_dict(self.output_ports, indent_level + 2,
value_fn=pp_value_fn))
if self.inside:
add_prop("inside", self.inside.pretty_print(indent_level+2))
if self.environment:
add_prop("environment", self.environment.pretty_print(indent_level+2))
if self.peers:
add_prop("peers", self.inside.pretty_print(indent_level+2))
if self.driver_module_name:
add_prop("driver_module_name", json.dumps(self.driver_module_name))
if self.package:
add_prop("package", self.package.pretty_print(indent_level=indent_level+2))
if self.extra_props:
epkeys = sorted(self.extra_props.keys())
for k in epkeys:
add_prop(k, json.dumps(self.extra_props[k]))
s += u',\n'.join(field_lines)
s += u'\n' + indent + u'}'
return s
@staticmethod
def from_json(parsed_data):
_check_type(parsed_data, dict, "Resource Definition")
if u"key" not in parsed_data:
raise ResourceParseError("Resource definition missing key property")
key = Key.from_json(parsed_data[u'key'])
logger.debug("ResourceDef.from_json(%s)" % key)
display_name = parsed_data.get(u'display_name', None)
comment = parsed_data.get(u'comment', None)
if u'config_port' in parsed_data:
config_port = PortDef.from_json(parsed_data[u'config_port'])
else:
config_port = None
if u'input_ports' in parsed_data:
input_ports = {}
for (p, v) in parsed_data[u'input_ports'].items():
input_ports[p] = PortDef.from_json(v)
else:
input_ports = None
if u'output_ports' in parsed_data:
output_ports = {}
for (p, v) in parsed_data[u'output_ports'].items():
output_ports[p] = PortDef.from_json(v)
else:
output_ports = None
if u'inside' in parsed_data:
inside = Constraint.from_json(parsed_data[u'inside'])
if inside==AllOfConstraint:
raise ResourceParseError("%s: all-of constraint not allowed for resource constraints" %
key)
else:
inside = None
if u"environment" in parsed_data:
environment = Constraint.from_json(parsed_data[u'environment'])
else:
environment = None
if u"peers" in parsed_data:
peers = Constraint.from_json(parsed_data[u'peers'])
else:
peers = None
driver_module_name = parsed_data.get(u'driver_module_name', None)
if u'package' in parsed_data:
raw_package = parsed_data[u'package']
if u'name' not in raw_package or u'version' not in raw_package:
raw_package[u'name'] = key.name
raw_package[u'version'] = key.version
package = pkgmgr.Package.from_json(raw_package)
else:
package = None
extra_prop_names = set(parsed_data.keys()).difference(RESOURCE_PROPERTIES_SET)
if len(extra_prop_names)>0:
extra_props = {}
for p in extra_prop_names:
extra_props[p] = parsed_data[p]
else:
extra_props = None
return ResourceDef(key, display_name=display_name, comment=comment,
config_port=config_port, input_ports=input_ports,
output_ports=output_ports,
inside=inside, environment=environment, peers=peers,
driver_module_name=driver_module_name,
package=package, extra_props=extra_props)
def pp_resource_defs(resource_list, version):
"""Pretty-print a ResourceDef list in the format used by resource_definitions.json -
a dict with a version property and a definitions list property.
"""
return u"{\n" + \
u' "resource_def_version": %s,\n' % json.dumps(version) + \
u' "resource_definitions": ' + \
_pretty_print_list(resource_list, 2,
value_fn=pp_value_fn) + "\n}"
def main(argv=sys.argv[1:]):
usage = "%prog [options] source dest"
parser = OptionParser(usage=usage)
parser.add_option('--sort', dest='sort', default=False,
action='store_true',
help="If specified sort the resource definitions by key")
parser.add_option('-d', '--debug', dest='debug', default=False,
action='store_true',
help="If specified, print debug logging to stderr")
(options, args) = parser.parse_args(argv)
if len(args)!=2:
parser.error("Must specify source and destination resource files")
source = args[0]
if source!='-' and (not os.path.exists(source)):
parser.error("Could not open source resource file %s" % source)
dest = args[1]
logging.basicConfig(level=logging.DEBUG if options.debug else logging.INFO,
stream=sys.stderr)
if source=='-':
try:
parsed_data = json.load(sys.stdin)
except Exception, e:
print "Got a parse error when parsing json from stdin: %s" % e
raise
else:
with open(source, 'rb') as sf:
try:
parsed_data = json.load(sf)
except Exception, e:
print "Got a parse error when parsing json from %s: %s" % (source, e)
raise
if isinstance(parsed_data, dict):
if u"resource_definitions" in parsed_data:
raw_list = parsed_data[u'resource_definitions']
dict_format = True
if u'resource_def_version' in parsed_data:
resource_def_version = parsed_data[u'resource_def_version']
else:
resource_def_version = "1.0"
else:
print "Did not find resource_definitions property in input json"
return 1
elif isinstance(parsed_data, list):
raw_list = parsed_data
dict_format = False
else:
print "Expecting a list or dict for resource defintions"
return 1
parsed_list = []
for (i, d) in enumerate(raw_list):
try:
parsed_list.append(ResourceDef.from_json(d))
except Exception:
print "Got a parse error in resource definition #%d: %s" % \
(i, json.dumps(d))
raise
if options.sort:
parsed_list.sort()
if dict_format:
pp_output = pp_resource_defs(parsed_list, resource_def_version)
else:
pp_output = _pretty_print_list(parsed_list, 0,
value_fn=pp_value_fn)
if source==dest and source!='-':
print "renaming original %s to %s" % (source, source + '.orig')
os.rename(source, source + '.orig')
if dest=='-':
print pp_output
else:
with open(dest, 'wb') as df:
df.write(pp_output)
df.write("\n")
return 0
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 | -2,461,870,873,917,238,300 | 37.505837 | 108 | 0.567451 | false |
jschaf/windows-config | pycmd-init.py | 1 | 9710 | # -*- coding: utf-8 -*-
# Example init.py for documentation purposes
#
# Use this file as a template/example for creating an
# initialization/configuration script for PyCmd. Scripts are loaded and applied
# based on the following rules:
#
# * If present, an init.py script in PyCmd's installation directory is
# automatically executed and defines "global" (system-wide) settings
#
# * If present, an init.py script in %APPDATA%\PyCmd is automatically executed
# and defines "user" settings, possibly overriding the "global" ones
#
# * An additional .py script can be specified using the '-i' switch on the
# command line to define settings custom to a PyCmd session (possibly
# overriding the "global" and "user" ones)
#
# This file lists all the configuration options supported by PyCmd, together
# with default values, explanations and various advice.
#
# An important thing to have in mind: this is a regular Python script that gets
# executed in PyCmd's Python context; therefore, you can do virtually anything
# you want here, like play a song, format your hard-disk or show some custom
# greeting:
# pycmd_public is a collection of public functions, constants and objects that
# PyCmd "exports" for use within init.py files; you can safely rely on these
# being maintained throughout following versions. The documentation for this module
# can be found in pycmd_public.html.
#
# Note that importing symbols from pycmd_public is optional, as PyCmd automatically
# makes them available within the init.py files; still, having them explicitly
# imported might help you get coding assistance from your Python environment
from pycmd_public import appearance, behavior, abbrev_path # Redundant
# Color configuration is performed by including color specification sequences
# (defined by pycmd_public.color) in your strings, similarly to the ANSI escape
# sequences.
#
# "absolute" color specifications will result in the same color being used no
# matter the current color; some examples are color.Fore.YELLOW, color.Fore.SET_RED,
# color.Back.CLEAR_BRIGHT.
#
# "relative" color options define the color to use in terms of the current color;
# examples: color.Fore.TOGGLE_RED, color.Fore.TOGGLE_BRIGHT.
#
# You will notice that relative specifications are preferred in the default
# settings -- this is in order to make PyCmd work reasonably on any console color
# scheme. The absolute specs are clearer and easier to use, though, you can go
# probably go with them for your customizations.
#
# The console's default color attributes are available as color.Fore.DEFAULT and
# color.Back.DEFAULT.
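#
# A quick illustrative example: because TOGGLE specs flip a color bit relative
# to the current state, applying the same spec twice restores the original
# color, e.g.
#   color.Fore.TOGGLE_RED + 'alert' + color.Fore.TOGGLE_RED
# renders only 'alert' with the RED bit flipped.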
import re
import os
from pycmd_public import color # Redundant
# The color of the regular user text (relative to the console's default)
#
# Note that this defines only the attributes of the user-typed text, *not* the
# default attributes of the console (i.e. the console's background or the output
# of the executed commands); use the console configuration dialog to change those.
#
# The default value is the console's default:
# appearance.colors.text = ''
appearance.colors.text = ''
# The color of the prompt (relative to the console's default); note that you can
# always override this if you define a custom prompt function -- see below
#
# The default value inverts the brightness (to make it stand out it from regular
# console text):
# appearance.colors.prompt = color.Fore.TOGGLE_BRIGHT
appearance.colors.prompt = color.Fore.TOGGLE_BRIGHT
# The color of text selected for copy/cut operations (relative to the regular
# user text as configured above)
#
# The default value inverts the background and the foreground
# appearance.colors.selection = (color.Fore.TOGGLE_RED +
# color.Fore.TOGGLE_GREEN +
# color.Fore.TOGGLE_BLUE +
# color.Back.TOGGLE_RED +
# color.Back.TOGGLE_GREEN +
# color.Back.TOGGLE_BLUE)
appearance.colors.selection = (color.Fore.TOGGLE_RED +
color.Fore.TOGGLE_GREEN +
color.Fore.TOGGLE_BLUE +
color.Back.TOGGLE_RED +
color.Back.TOGGLE_GREEN +
color.Back.TOGGLE_BLUE)
# The color of the current search filter during a history search (relative to the
# regular user text as configured above)
#
# The default is to highlight the filter by altering both background and the
# foreground:
# appearance.colors.search_filter = (color.Back.TOGGLE_RED +
# color.Back.TOGGLE_BLUE +
# color.Fore.TOGGLE_BRIGHT)
appearance.colors.search_filter = (color.Back.TOGGLE_RED +
color.Back.TOGGLE_BLUE +
color.Fore.TOGGLE_BRIGHT)
# The color of the matched substring(s) when displaying completion alternatives
# (relative to the console's default color)
#
# The default value highlights the matched substrings by toggling their RED bit:
# appearance.colors.completion_match = color.Fore.TOGGLE_RED
appearance.colors.completion_match = color.Fore.TOGGLE_RED
# The color of the current directory in the directory history listing (relative to
# the console's default color)
#
# The default is to obtain an "inverted" effect by toggling the brightness of the
# foreground and background:
# appearance.colors.dir_history_selection = (color.Fore.TOGGLE_BRIGHT +
# color.Back.TOGGLE_BRIGHT)
appearance.colors.dir_history_selection = (color.Fore.TOGGLE_BRIGHT +
color.Back.TOGGLE_BRIGHT)
def has_admin():
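    """Return a (username, is_admin) tuple for the current user."""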
import os
if os.name == 'nt':
try:
            # only Windows users with admin privileges can list C:\windows\temp
temp = os.listdir(os.sep.join([os.environ.get('SystemRoot',
'C:\windows'),
'temp']))
        except OSError:
return (os.environ['USERNAME'],False)
else:
return (os.environ['USERNAME'],True)
else:
if 'SUDO_USER' in os.environ and os.geteuid() == 0:
return (os.environ['SUDO_USER'],True)
        else:
            # USERNAME is Windows-specific; fall back to USER on POSIX shells
            return (os.environ.get('USERNAME', os.environ.get('USER', '')),False)
class promptify(object):
def __init__(self, addon_prompt):
self._addon_prompt = addon_prompt
def __call__(self, prompt):
def wrapped_prompt(*args):
return self._addon_prompt() + prompt(*args)
return wrapped_prompt
# Custom prompt function, see below for comments on appearance.prompt
def git_prompt_addon():
"""
Custom prompt that displays the name of the current git branch in addition
to the typical "abbreviated current path" PyCmd prompt.
Requires git & grep to be present in the PATH.
"""
# Many common modules (sys, os, subprocess, time, re, ...) are readily
# shipped with PyCmd, you can directly import them for use in your
# configuration script. If you need extra modules that are not bundled,
# manipulate the sys.path so that they can be found (just make sure that the
# version is compatible with the one used to build PyCmd -- check
# README.txt)
import subprocess
stdout = subprocess.Popen(
'git branch | grep "^*"',
shell=True,
stdout=subprocess.PIPE,
stderr=-1).communicate()[0]
branch_name = stdout.strip(' \n\r*')
# The current color setting is defined by appearance.colors.prompt
prompt = ''
if branch_name != '':
blue = color.Fore.TOGGLE_BLUE
prompt += '{}[{}]{} '.format(blue, branch_name, blue)
return prompt
def conda_prompt_addon():
env = os.environ.get('CONDA_DEFAULT_ENV')
prompt = ""
if env:
prompt += color.Fore.TOGGLE_GREEN
prompt += "[{env}] ".format(env=env)
prompt += color.Fore.TOGGLE_GREEN
return prompt
@promptify(conda_prompt_addon)
@promptify(git_prompt_addon)
def prompt():
# Use a tilde to represent our home dir
path = re.sub(r"C:\\U\\j(oe)?", "~", abbrev_path())
username, is_admin = has_admin()
path += '\n'
if is_admin:
path += color.Fore.TOGGLE_RED
path += "# "
path += color.Fore.TOGGLE_RED
else:
path += "$ "
return path
# Define a custom prompt function.
#
# This is called by PyCmd whenever a prompt is to be displayed. It should return
# a string to be shown as a prompt.
#
# Before the returned string is printed, the text color is set to
# appearance.colors.prompt; but you can always alter it or add more complex
# coloring by embedding color specifications in the returned string (like we do
# in our example git_prompt).
#
# The default is the typical "abbreviated path" prompt:
# appearance.prompt = pycmd_public.abbrev_path_prompt
appearance.prompt = prompt
# Make PyCmd be "quiet", i.e. skip its welcome and goodbye messages
#
# Note that even if this is set to True, you can still override it using the
# '-q' (quiet) flag on the command line.
#
# The default is False, i.e. the splash messages are shown:
# behavior.quiet_mode = False
behavior.quiet_mode = True
# Change the way PyCmd handles Tab-completion
#
# Currently, the only accepted (and, of course, default) value is 'bash', giving
# the typical bash-like completion.
#
behavior.completion_mode = 'bash'
# Remember, you can do whatever you want in this Python script!
#
# Also note that you can directly output colored text via the color
# specifications.
| bsd-2-clause | -5,130,049,946,097,819,000 | 38.958848 | 84 | 0.669413 | false |
sato9hara/defragTrees | paper/tests/paper_synthetic2.py | 1 | 3367 | # -*- coding: utf-8 -*-
"""
@author: Satoshi Hara
"""
import sys
import os
sys.path.append(os.path.abspath('./'))
sys.path.append(os.path.abspath('./baselines/'))
sys.path.append(os.path.abspath('../'))
import numpy as np
import paper_sub
from RForest import RForest
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colorbar as colorbar
def plotTZ(filename=None):
t = np.linspace(0, 1, 101)
z = 0.25 + 0.5 / (1 + np.exp(- 20 * (t - 0.5))) + 0.05 * np.cos(t * 2 * np.pi)
cmap = cm.get_cmap('cool')
fig, (ax1, ax2) = plt.subplots(1, 2, gridspec_kw = {'width_ratios':[19, 1]})
poly1 = [[0, 0]]
poly1.extend([[t[i], z[i]] for i in range(t.size)])
poly1.extend([[1, 0], [0, 0]])
poly2 = [[0, 1]]
poly2.extend([[t[i], z[i]] for i in range(t.size)])
poly2.extend([[1, 1], [0, 1]])
poly1 = plt.Polygon(poly1,fc=cmap(0.0))
poly2 = plt.Polygon(poly2,fc=cmap(1.0))
ax1.add_patch(poly1)
ax1.add_patch(poly2)
ax1.set_xlabel('x1', size=22)
ax1.set_ylabel('x2', size=22)
ax1.set_title('True Data', size=28)
colorbar.ColorbarBase(ax2, cmap=cmap, format='%.1f')
ax2.set_ylabel('Output y', size=22)
plt.show()
if not filename is None:
plt.savefig(filename, format="pdf", bbox_inches="tight")
plt.close()
def plotForest(filename=None):
forest = RForest(modeltype='classification')
forest.fit('./result/result_synthetic2/forest/')
X = np.c_[np.kron(np.linspace(0, 1, 201), np.ones(201)), np.kron(np.ones(201), np.linspace(0, 1, 201))]
forest.plot(X, 0, 1, box0=np.array([[0.0, 0.0], [1.0, 1.0]]), filename=filename)
if __name__ == "__main__":
# setting
prefix = 'synthetic2'
seed = 0
num = 1000
dim = 2
# data - boundary
b = 0.9
t = np.linspace(0, 1, 101)
z = 0.25 + 0.5 / (1 + np.exp(- 20 * (t - 0.5))) + 0.05 * np.cos(t * 2 * np.pi)
# data - train
np.random.seed(seed)
Xtr = np.random.rand(num, dim)
ytr = np.zeros(num)
ytr = (Xtr[:, 1] > 0.25 + 0.5 / (1 + np.exp(- 20 * (Xtr[:, 0] - 0.5))) + 0.05 * np.cos(Xtr[:, 0] * 2 * np.pi))
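    # flip each label with probability 1 - b (10% here) to inject class noise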
ytr = np.logical_xor(ytr, np.random.rand(num) > b)
# data - test
Xte = np.random.rand(num, dim)
yte = np.zeros(num)
yte = (Xte[:, 1] > 0.25 + 0.5 / (1 + np.exp(- 20 * (Xte[:, 0] - 0.5))) + 0.05 * np.cos(Xte[:, 0] * 2 * np.pi))
yte = np.logical_xor(yte, np.random.rand(num) > b)
# save
dirname = './result/result_%s' % (prefix,)
if not os.path.exists('./result/'):
os.mkdir('./result/')
if not os.path.exists(dirname):
os.mkdir(dirname)
trfile = '%s/%s_train.csv' % (dirname, prefix)
tefile = '%s/%s_test.csv' % (dirname, prefix)
np.savetxt(trfile, np.c_[Xtr, ytr], delimiter=',')
np.savetxt(tefile, np.c_[Xte, yte], delimiter=',')
# demo_R
Kmax = 10
restart = 20
treenum = 100
M = range(1, 11)
#paper_sub.run(prefix, Kmax, restart, treenum=treenum, modeltype='classification', plot=True, plot_line=[[t, z]])
paper_sub.run(prefix, Kmax, restart, treenum=treenum, modeltype='classification', plot=True, plot_line=[[t, z]], M=M, compare=True)
# plot
plotTZ('%s/%s_true.pdf' % (dirname, prefix))
plotForest('%s/%s_rf_tree05_seed00.pdf' % (dirname, prefix))
| mit | -4,752,114,063,472,154,000 | 32.346535 | 135 | 0.571429 | false |
mbdriscoll/indigo | indigo/backends/backend.py | 1 | 25493 | import logging
import abc, time
import numpy as np
import scipy.sparse as spp
from contextlib import contextmanager
import indigo.operators as op
from indigo.util import profile
log = logging.getLogger(__name__)
class Backend(object):
"""
Provides the routines and data structures necessary to implement
a linear operator chain on different platforms.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, device_id=0):
profile._backend = self
class dndarray(object):
"""
N-dimensional array in device memory.
Parameters
----------
backend : indigo.backends.Backend
Backend instance.
shape : tuple
Array shape, a la numpy.
dtype : numpy.dtype
Datatype.
ld : tuple
Shape of array before slicing, used for ldb/ldc values.
own : bool
True if this object malloc'ed the underlying memory.
data : ?
Handle to underlying memory.
"""
__metaclass__ = abc.ABCMeta
_memory = dict()
def __init__(self, backend, shape, dtype,
ld=None, own=True, data=None, name=''):
assert isinstance(shape, (tuple,list))
self.dtype = dtype
self.shape = shape
self._backend = backend
self._leading_dim = ld or shape[0]
self._own = own
#assert isinstance(backend, Backend), (backend, type(backend))
if data is None:
self._arr = self._malloc(shape, dtype)
self._memory[ id(self._arr) ] = (name, shape, dtype)
else:
self._arr = data
def reshape(self, new_shape):
old_shape = self.shape
old_leading_dim = self._leading_dim
if (-1) in new_shape:
one = new_shape.index(-1)
new_size = -int(np.prod(new_shape))
old_size = self.size
factor = old_size // new_size
assert new_size * factor == old_size, \
"Cannot reshape {} into {}. (size mismatch)".format(old_shape, new_shape)
new_shape = list(new_shape)
new_shape[one] = factor
new_shape = tuple(new_shape)
if new_shape[0] > old_shape[0]:
contig = old_shape[0] == self._leading_dim
assert contig, "Cannot stack non-contiguous columns."
assert np.prod(new_shape) == self.size
# min for Kron -- make new lda
# max for VStack -- preserve original lda
#new_leading_dim = min(new_shape[0], old_leading_dim) # FIXME: need consistent semantics for reshape
#new_leading_dim = old_leading_dim # works with VStack
#new_leading_dim = new_shape[0] # works with Kron
if new_shape[0] < old_shape[0]:
#assert self.contiguous, "Cannot stack vectors of non-contiguous matrix."
new_leading_dim = new_shape[0]
else:
new_leading_dim = old_leading_dim
return self._backend.dndarray( self._backend,
new_shape, dtype=self.dtype, ld=new_leading_dim, own=False, data=self._arr)
@property
def size(self):
return np.prod(self.shape)
@property
def itemsize(self):
return self.dtype.itemsize
@property
def nbytes(self):
return self.size * np.dtype(self.dtype).itemsize
@property
def ndim(self):
return len(self.shape)
@property
def contiguous(self):
if self.ndim == 1:
return True
else:
return self._leading_dim == self.shape[0]
def copy_from(self, arr):
            ''' copy a host array into this device array when both exist '''
assert isinstance(arr, np.ndarray)
if self.size != arr.size:
raise ValueError("size mismatch, expected {} got {}" \
.format(self.shape, arr.shape))
if self.dtype != arr.dtype:
raise TypeError("dtype mismatch, expected {} got {}" \
.format(self.dtype, arr.dtype))
if not arr.flags['F_CONTIGUOUS']:
raise TypeError("order mismatch, expected 'F' got {}" \
.format(arr.flags['F_CONTIGUOUS']))
self._copy_from(arr)
def copy_to(self, arr):
            ''' copy this device array to a host array when both exist '''
assert isinstance(arr, np.ndarray)
if self.size != arr.size:
raise ValueError("size mismatch, expected {} got {}" \
.format(self.shape, arr.shape))
if self.dtype != arr.dtype:
raise TypeError("dtype mismatch, expected {} got {}" \
.format(self.dtype, arr.dtype))
self._copy_to(arr)
def to_host(self):
            ''' copy from device when host array doesn't exist '''
arr = np.ndarray(self.shape, self.dtype, order='F')
self.copy_to(arr)
return arr
@contextmanager
def on_host(self):
arr_h = self.to_host()
yield arr_h
self.copy_from(arr_h)
def copy(self, other=None, name=''):
''' copy array on device'''
if other:
assert isinstance(other, self._backend.dndarray)
self._copy(other)
else:
other = self._backend.zero_array(self.shape, self.dtype, name=name)
other._copy(self)
return other
@classmethod
def to_device(cls, backend, arr, name=''):
            ''' copy to device when device array doesn't exist '''
arr_f = np.require(arr, requirements='F')
d_arr = cls(backend, arr.shape, arr.dtype, name=name)
d_arr.copy_from(arr_f)
return d_arr
def __del__(self):
""" destructor """
if self._own and hasattr(self, '_arr'):
self._memory.pop( id(self._arr) )
self._free()
def __setitem__(self, slc, other):
#FIXME don't ignore slc
            assert not(slc.start or slc.stop), "dndarray setitem can't slice"
self._copy(other)
@abc.abstractmethod
def __getitem__(self, slc):
"""
Slice notation. Slices must be contiguous in memory. Returns a view.
"""
raise NotImplementedError()
@abc.abstractmethod
def _copy_from(self, arr):
""" copy HtoD implementation """
raise NotImplementedError()
@abc.abstractmethod
def _copy_to(self, arr):
""" copy DtoH implementation """
raise NotImplementedError()
@abc.abstractmethod
def _copy(self, arr):
""" copy DtoD implementation """
raise NotImplementedError()
@abc.abstractmethod
def _malloc(self, shape, dtype):
""" malloc implementation """
raise NotImplementedError()
@abc.abstractmethod
def _free(self):
""" malloc implementation """
raise NotImplementedError()
@abc.abstractmethod
def _zero(self):
""" set to zero """
raise NotImplementedError()
@staticmethod
def from_param(obj):
""" convert _arr into ctypes object """
raise NotImplementedError()
def copy_array(self, arr, name=''):
return self.dndarray.to_device(self, arr, name=name)
def zero_array(self, shape, dtype, name=''):
d_arr = self.empty_array(shape, dtype, name=name)
d_arr._zero()
return d_arr
def zeros_like(self, other, name=''):
return self.zero_array(other.shape, other.dtype, name=name)
def empty_array(self, shape, dtype, name=''):
d_arr = self.dndarray(self, shape, dtype, name=name)
return d_arr
def rand_array(self, shape, dtype=np.dtype('complex64'), name=''):
x = np.random.random(shape) + 1j*np.random.random(shape)
x = np.require(x, dtype=np.dtype('complex64'), requirements='F')
x_d = self.copy_array(x, name=name)
return x_d
def get_max_threads(self):
return 1
def barrier(self):
pass
def mem_usage(self):
nbytes = 0
log.info("Memory report:")
table = []
for name, shape, dtype in self.dndarray._memory.values():
n = np.prod(shape) * dtype.itemsize
table.append( (name, n, shape, dtype) )
nbytes += n
for name, n, shape, dtype in sorted(table, key=lambda tup: tup[1]):
if n > 1e6:
log.info(" %40s: % 3.0f MB, %20s, %15s", name, n/1e6, shape, dtype)
return nbytes
@contextmanager
def scratch(self, shape=None, nbytes=None):
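        # usage sketch: `with backend.scratch(shape=(n,)) as tmp: ...` -- serves slices
        # from the preallocated self._scratch pool when one exists, else allocates on the fly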
assert not (shape is not None and nbytes is not None), \
"Specify either shape or nbytes to backend.scratch()."
if nbytes is not None:
shape = (nbytes//np.dtype('complex64').itemsize,)
size = np.prod(shape)
if hasattr(self, '_scratch'):
pos = self._scratch_pos
total = self._scratch.size
assert pos + size <= total, "Not enough scratch memory (wanted %d MB, but only have %d MB available of %d MB total)." % (size/1e6, (total-pos)/1e6, total/1e6)
mem = self._scratch[pos:pos+size].reshape(shape)
self._scratch_pos += size
yield mem
self._scratch_pos -= size
else:
log.debug("dynamically allocating scratch space in shape %s", shape)
mem = self.zero_array(shape, dtype=np.complex64)
yield mem
del mem
# -----------------------------------------------------------------------
# Operator Building Interface
# -----------------------------------------------------------------------
def SpMatrix(self, M, **kwargs):
""" A := M """
assert isinstance(M, spp.spmatrix)
return op.SpMatrix(self, M, **kwargs)
def DenseMatrix(self, M, **kwargs):
""" A := M """
assert isinstance(M, np.ndarray)
assert M.ndim == 2
return op.DenseMatrix(self, M, **kwargs)
def Diag(self, v, **kwargs):
""" A := diag(v) """
v = np.require(v, requirements='F')
if v.ndim > 1:
v = v.flatten(order='A')
dtype = kwargs.get('dtype', np.dtype('complex64'))
M = spp.diags( v, offsets=0 ).astype(dtype)
return self.SpMatrix(M, **kwargs)
def Adjoint(self, A, **kwargs):
""" C := A^H """
return op.Adjoint(self, A, **kwargs)
def KronI(self, c, B, **kwargs):
""" C := I_c (KRON) B """
I = self.Eye(c)
return op.Kron(self, I, B, **kwargs)
def Kron(self, A, B, **kwargs):
""" C := A (KRON) B """
return op.Kron(self, A, B, **kwargs)
def BlockDiag(self, Ms, **kwargs):
return op.BlockDiag(self, *Ms, **kwargs)
def VStack(self, Ms, **kwargs):
return op.VStack(self, *Ms, **kwargs)
def HStack (self, Ms, **kwargs):
return op.HStack(self, *Ms, **kwargs)
def UnscaledFFT(self, shape, dtype, **kwargs):
""" A := FFT{ . } """
return op.UnscaledFFT(self, shape, dtype, **kwargs)
def Eye(self, n, dtype=np.dtype('complex64'), **kwargs):
""" A := I_n """
return op.Eye(self, n, dtype=dtype, **kwargs)
def One(self, shape, dtype=np.dtype('complex64'), **kwargs):
""" A := [1] (matrix of ones) """
return op.One(self, shape, dtype=dtype, **kwargs)
def CopyIn(self, shape, dtype, **kwargs):
return op.CopyIn(self, shape, dtype)
def CopyOut(self, shape, dtype, **kwargs):
return op.CopyOut(self, shape, dtype)
def FFT(self, shape, dtype, **kwargs):
""" Unitary FFT """
n = np.prod(shape)
s = np.ones(n, order='F', dtype=dtype) / np.sqrt(n)
S = self.Diag(s, name='scale')
F = self.UnscaledFFT(shape, dtype, **kwargs)
return S*F
def FFTc(self, ft_shape, dtype, normalize=True, **kwargs):
""" Centered, Unitary FFT """
mod_slice = [ slice(d) for d in ft_shape ]
idx = np.mgrid[mod_slice]
mod = 0
for i in range(len(ft_shape)):
c = ft_shape[i] // 2
mod += (idx[i] - c / 2.0) * (c / ft_shape[i])
mod = np.exp(1j * 2.0 * np.pi * mod).astype(dtype)
M = self.Diag(mod, name='mod')
if normalize:
F = self.FFT(ft_shape, dtype=dtype, **kwargs)
else:
F = self.UnscaledFFT(ft_shape, dtype=dtype, **kwargs)
return M*F*M
def Zpad(self, M, N, mode='center', dtype=np.dtype('complex64'), **kwargs):
slc = []
if mode == 'center':
for m, n in zip(M, N):
slc += [slice(m // 2 + int(np.ceil(-n / 2)),
m // 2 + int(np.ceil( n / 2))), ]
elif mode == 'edge':
for m, n in zip(M, N):
slc.append(slice(n))
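        # build a sparse 0/1 selection matrix: input element j is scattered to output
        # row rows[j], i.e. the N-shaped input lands inside the M-shaped padded volume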
x = np.arange( np.prod(M), dtype=int ).reshape(M, order='F')
rows = x[slc].flatten(order='F')
cols = np.arange(rows.size)
ones = np.ones_like(cols)
shape = np.prod(M), np.prod(N)
M = spp.coo_matrix( (ones, (rows,cols)), shape=shape, dtype=dtype )
return self.SpMatrix(M, **kwargs)
def Crop(self, M, N, dtype=np.dtype('complex64'), **kwargs):
return self.Zpad(N, M, dtype=dtype, **kwargs).H
def Interp(self, N, coord, width, table, dtype=np.dtype('complex64'), **kwargs):
assert len(N) == 3
ndim = coord.shape[0]
npts = np.prod( coord.shape[1:] )
coord = coord.reshape((ndim,-1), order='F')
from indigo.interp import interp_mat
M = interp_mat(npts, N, width, table, coord, 1).astype(dtype)
return self.SpMatrix(M, **kwargs)
def NUFFT(self, M, N, coord, width=3, n=128, oversamp=None, dtype=np.dtype('complex64'), **kwargs):
assert len(M) == 3
assert len(N) == 3
assert M[1:] == coord.shape[1:]
# target 448 x 270 x 640
# 448 x 270 x 640 mkl-batch: 170.83 ms, 237.51 gflop/s back-to-back: 121.76 ms, 333.23 gflop/s
# 1.45 1.30 1.33
# 432 x 280 x 640 mkl-batch: 183.85 ms 220.7 gflop/s back-to-back: 149.62 ms 271.19 gflop/s
# 1.40 1.35 1.33
# 432 x 270 x 640 mkl-batch: 168.62 ms 231.57 gflop/s back-to-back: 118.31 ms 330.05 gflop/s
# 1.40 1.30 1.33
if isinstance(oversamp, tuple):
omin = min(oversamp)
else:
omin = oversamp
oversamp = (omin, omin, omin)
import scipy.signal as signal
from indigo.noncart import rolloff3
ndim = coord.shape[0]
npts = np.prod( coord.shape[1:] )
oN = list(N)
for i in range(3):
oN[i] *= oversamp[i]
oN = tuple(int(on) for on in oN)
Z = self.Zpad(oN, N, dtype=dtype, name='zpad')
F = self.FFTc(oN, dtype=dtype, name='fft')
        beta = np.pi * np.sqrt(((width * 2. / omin) * (omin - 0.5)) ** 2 - 0.8)
kb = signal.kaiser(2 * n + 1, beta)[n:]
G = self.Interp(oN, coord, width, kb, dtype=np.float32, name='interp')
r = rolloff3(omin, width, beta, N)
R = self.Diag(r, name='apod')
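        # the NUFFT as a product of operators: apodization correction (R), zero-pad to the
        # oversampled grid (Z), centered FFT (F), then interpolation onto the samples (G)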
return G*F*Z*R
def Convolution(self, kernel, normalize=True, name='noname'):
F = self.FFTc(kernel.shape, name='%s.convF' % name, normalize=normalize, dtype=np.complex64)
K = self.Diag(F * kernel, name='%s.convK' % name)
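        # convolution as an operator product: transform, scale by the kernel's
        # spectrum (K), then transform back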
return F.H * K * F
# -----------------------------------------------------------------------
# BLAS Routines
# -----------------------------------------------------------------------
def axpby(self, beta, y, alpha, x):
""" y = beta * y + alpha * x """
raise NotImplementedError()
def dot(self, x, y):
""" returns x^T * y """
raise NotImplementedError()
def norm2(self, x):
""" returns ||x||_2"""
raise NotImplementedError()
def scale(self, x, alpha):
""" x *= alpha """
raise NotImplementedError()
def pdot(self, x, y, comm):
xHy = self.dot(x, y)
if comm is not None:
xHy = comm.allreduce( xHy )
return xHy
def pnorm2(self, x, comm):
xTx = self.norm2(x)
if comm is not None:
xTx = comm.allreduce( xTx )
return xTx
def cgemm(self, y, M, x, alpha, beta, forward):
"""
        Perform a dense matrix-matrix multiplication.
"""
raise NotImplementedError()
def csymm(self, y, M, x, alpha, beta, left=True):
"""
        Perform a symmetric dense matrix-matrix multiplication for real symmetric matrices.
"""
raise NotImplementedError()
# -----------------------------------------------------------------------
# FFT Routines
# -----------------------------------------------------------------------
@abc.abstractmethod
def fftn(self, y, x):
"""
        Perform an unscaled multidimensional forward FFT on x.
"""
raise NotImplementedError()
@abc.abstractmethod
def ifftn(self, y, x):
"""
        Perform an unscaled multidimensional inverse FFT on x.
"""
raise NotImplementedError()
def _fft_workspace_size(self, x_shape):
return 0
@abc.abstractmethod
def ccsrmm(self, y, A_shape, A_indx, A_ptr, A_vals, x, alpha=1, beta=0, adjoint=False, exwrite=False):
"""
Computes Y[:] = A * X.
"""
raise NotImplementedError()
@abc.abstractmethod
def cdiamm(self, y, shape, offsets, data, x, alpha=1.0, beta=0.0, adjoint=True):
"""
Computes Y[:] = A * X.
"""
raise NotImplementedError()
@abc.abstractmethod
def onemm(self, y, x, alpha=1, beta=0):
"""
Computes Y[:] = beta * Y + alpha * [1] * X.
"""
raise NotImplementedError()
class csr_matrix(object):
"""
A device-resident sparse matrix in CSR format.
"""
_index_base = 0
def __init__(self, backend, A, name='mat'):
"""
            Create a matrix from the given `scipy.sparse.spmatrix`.
"""
if not isinstance(A, spp.csr_matrix):
A = A.tocsr()
A = self._type_correct(A)
self._backend = backend
self.rowPtrs = backend.copy_array(A.indptr + self._index_base, name=name+".rowPtrs")
self.colInds = backend.copy_array(A.indices + self._index_base, name=name+".colInds")
self.values = backend.copy_array(A.data, name=name+".data")
self.shape = A.shape
self.dtype = A.dtype
# fraction of nonzero rows/columns
try:
from indigo.backends._customcpu import inspect
nzrow, nzcol, self._exwrite = inspect(A.shape[0], A.shape[1], A.indices, A.indptr)
self._row_frac = nzrow / A.shape[0]
self._col_frac = nzcol / A.shape[1]
log.debug("matrix %s has %2d%% nonzero rows and %2d%% nonzero columns",
name, 100*self._row_frac, 100*self._col_frac)
log.debug("matrix %s supports exwrite: %s", name, self._exwrite)
except ImportError:
self._row_frac = 1.0
self._col_frac = 1.0
log.debug("skipping exwrite inspection. Is CustomCPU backend available?")
def forward(self, y, x, alpha=1, beta=0):
""" y[:] = A * x """
assert x.dtype == np.dtype("complex64"), "Bad dtype: expected compelx64, got %s" % x.dtype
assert y.dtype == np.dtype("complex64"), "Bad dtype: expected compelx64, got %s" % y.dtype
assert self.values.dtype == np.dtype("complex64")
self._backend.ccsrmm(y,
self.shape, self.colInds, self.rowPtrs, self.values,
x, alpha=alpha, beta=beta, adjoint=False, exwrite=True)
def adjoint(self, y, x, alpha=1, beta=0):
""" y[:] = A.H * x """
assert x.dtype == np.dtype("complex64"), "Bad dtype: expected compelx64, got %s" % x.dtype
assert y.dtype == np.dtype("complex64"), "Bad dtype: expected compelx64, got %s" % y.dtype
assert self.values.dtype == np.dtype("complex64")
self._backend.ccsrmm(y,
self.shape, self.colInds, self.rowPtrs, self.values,
x, alpha=alpha, beta=beta, adjoint=True, exwrite=self._exwrite)
@property
def nbytes(self):
return self.rowPtrs.nbytes + self.colInds.nbytes + self.values.nbytes
@property
def nnz(self):
return self.values.size
def _type_correct(self, A):
return A.astype(np.complex64)
class dia_matrix(object):
"""
A device-resident sparse matrix in DIA format.
"""
def __init__(self, backend, A, name='mat'):
"""
            Create a matrix from the given `scipy.sparse.spmatrix`.
"""
assert isinstance(A, spp.dia_matrix)
A = A.astype(np.complex64)
self._backend = backend
self.data = backend.copy_array(A.data.T, name=name+".data")
self.offsets = backend.copy_array(A.offsets, name=name+".data")
self.shape = A.shape
self.dtype = A.dtype
self._row_frac = 1
self._col_frac = 1
def forward(self, y, x, alpha=1, beta=0):
""" y[:] = A * x """
self._backend.cdiamm(y, self.shape, self.offsets, self.data,
x, alpha=alpha, beta=beta, adjoint=False)
def adjoint(self, y, x, alpha=1, beta=0):
""" y[:] = A.H * x """
self._backend.cdiamm(y, self.shape, self.offsets, self.data,
x, alpha=alpha, beta=beta, adjoint=True)
@property
def nbytes(self):
return self.offsets.nbytes + self.data.nbytes
@property
def nnz(self):
return self.data.size
# -----------------------------------------------------------------------
# Algorithms
# -----------------------------------------------------------------------
def cg(self, A, b_h, x_h, lamda=0.0, tol=1e-10, maxiter=100, team=None):
"""
Conjugate gradient. Solves for A x = b, where A is positive semi-definite.
Parameters
----------
        A : operator exposing A.eval(y, x); must be positive semi-definite
        b_h : 1D array (host), right-hand side
        x_h : 1D array (host), initial solution; overwritten with the result
        lamda : float, optional, Tikhonov regularization weight
        tol : float, optional, relative residual stopping tolerance
        maxiter : int, optional
"""
x = self.copy_array( x_h, name='x' )
b = self.copy_array( b_h, name='b' )
Ap = x.copy()
# r = b - A(x) - lamda * x
r = b
A.eval(Ap, x)
self.axpby(1, r, -1, Ap)
self.axpby(1, r, -lamda, x)
p = r.copy(name='p')
rr = self.pnorm2(r, team)
r0 = rr
for it in range(maxiter):
profile.extra['it'] = it
with profile("iter"):
A.eval(Ap, p)
self.axpby(1, Ap, lamda, p)
alpha = rr / self.pdot(p, Ap, team)
self.axpby(1, x, alpha, p)
self.axpby(1, r, -alpha, Ap)
r2 = self.pnorm2(r, team)
beta = r2 / rr
self.scale(p, beta)
self.axpby(1, p, 1, r)
rr = r2
resid = np.sqrt(rr / r0)
log.info("iter %d, residual %g", it, resid.real)
if resid < tol:
log.info("cg reached tolerance")
break
else:
log.info("cg reached maxiter")
x.copy_to(x_h)
def apgd(self, gradf, proxg, alpha, x_h, maxiter=100, team=None):
'''Accelerated proximal gradient descent.
Solves for min_x f(x) + g(x)
Parameters
----------
        gradf : callable gradf(out, x); writes the gradient of f at x into out
        proxg : callable proxg(x, alpha); applies the proximal operator of g to x in place
        alpha : Step size
        x_h : 1D array (host), initial solution; overwritten with the result
maxiter : int, optional
'''
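        # a usage sketch (the closures are illustrative, not part of this API):
        #     def gradf(out, x): A.eval(out, x); backend.axpby(1, out, -1, b)  # grad of 0.5*x'Ax - b'x, A PSD
        #     def proxg(x, alpha): backend.max(0, x)                           # e.g. projection onto x >= 0
        #     backend.apgd(gradf, proxg, alpha=1e-2, x_h=x0, maxiter=100)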
x_k = self.copy_array(x_h)
y_k = x_k.copy()
y_k1 = x_k.copy()
x_k1 = x_k.copy()
gf = x_k.copy()
t_k = 1
for it in range(1,maxiter+1):
profile.extra['it'] = it
with profile("iter"):
gradf(gf, y_k)
                x_k.copy(y_k)  # the forward (gradient) step is taken from the extrapolated point y_k
                self.axpby(1, x_k, -alpha, gf)
proxg(x_k, alpha)
t_k1 = (1.0 + np.sqrt(1.0 + 4.0 * t_k**2)) / 2.0
t_ratio = (t_k - 1) / t_k1
self.axpby(0, y_k1, 1+t_ratio, x_k)
self.axpby(1, y_k1, -t_ratio, x_k1)
x_k1.copy(x_k)
                y_k.copy(y_k1)
                t_k = t_k1  # advance the momentum sequence; without this update t_ratio stays 0 and there is no acceleration
log.info("iter %d", it)
x_k.copy_to(x_h)
def max(self, val, arr):
""" Computes elementwise maximum: arr[:] = max(arr, val). """
raise NotImplementedError()
| bsd-3-clause | -2,364,064,529,511,427,600 | 33.637228 | 170 | 0.506453 | false |
yaricom/brainhash | src/experiment_cA5_10_dt_th_al_ah_bl_bh.py | 1 | 2063 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The experiment with 10 Hz/5Hz, wisp, attention, 70, cA 5, delta, theta, alpha low, alpha high, beta low, beta high, batch size = 10 and
balanced data set
@author: yaric
"""
import experiment as ex
import config
from time import time
n_hidden = 5
batch_size = 10
experiment_name = 'cA_%d_%d_dt-th-a_l-a_h-b_l-b_h' % (n_hidden, batch_size) # will be used as parent dir for analyzer results
# The sample records identifiers
signal_ids = ['IO_10_2', 'IO_TXT', 'IO_SKY', 'KS_10_2', 'RO_10_2']
noise_ids = ['noise']
# Setup analyzer configuration
analyzer_config = ex.defaultAnalyzerConfig()
analyzer_config['batch_size'] = batch_size
analyzer_config['learning_rate'] = 0.1
analyzer_config['n_hidden'] = n_hidden
analyzer_config['training_epochs'] = 50000
analyzer_config['encoder'] = 'cA'
analyzer_config['bands'] = 'delta,theta,alpha_l,alpha_h,beta_l,beta_h'
start = time()
#
# Run analyzer
#
print("\nStart analysis with parameters:\n%s\n" % analyzer_config)
print("Start analysis for signal records: %s" % signal_ids)
ex.runEEGAnalyzerWithIDs(ids_list=signal_ids,
experiment_name=experiment_name,
a_config=analyzer_config)
print("Start analysis for noise records: %s" % noise_ids)
ex.runEEGAnalyzerWithIDs(ids_list=noise_ids,
experiment_name=experiment_name,
a_config=analyzer_config)
#
# Run classifiers
#
signal_dir = "%s/%s" % (config.analyzer_out_dir, experiment_name)
noise_dir = "%s/%s/%s" % (config.analyzer_out_dir, experiment_name, noise_ids[0])
out_suffix = experiment_name
print("Run classifiers over analyzed records. \nSignal dir: %s\nNoise dir: %s"
% (signal_dir, noise_dir))
ex.runClassifier(signal_dir=signal_dir,
signal_records=signal_ids,
noise_dir=noise_dir,
out_suffix=out_suffix)
print("\n\nExperiment %s took %.2f seconds.\n"
% (experiment_name, time() - start))
| gpl-3.0 | -1,605,927,457,288,513,500 | 31.746032 | 136 | 0.641299 | false |
Shiimoe/Pimbot | bot.py | 1 | 2261 | import discord
import asyncio
from numpy import random
client = discord.Client()
@client.event
async def on_ready():
print('Logged in as')
print(client.user.name)
print(client.user.id)
print('------')
@client.event
async def on_message(message):
if message.content.startswith('erp with me pimbot'):
await client.send_message(message.channel, '*rubs your milky thighs*')
if message.content.startswith('l-lewd'):
await client.send_message(message.channel, "I'll show you lewd! *sticks finger up butte*")
if message.content.startswith('bully me pimbot'):
await client.send_message(message.channel, "I bet that would turn you on wouldn't it you fairy")
if message.content.startswith('tell me about the jews pimbot'):
await client.send_message(message.channel, '(((they))) are putting oestrogen in the water turning the frogs and our sons gay!')
#if message.content.startswith('!best'):
#myid = '<@201909896357216256>'
#await client.send_message(message.channel, ' : %s is the best ' % myid)
#if message.content.startswith('hello <@343915078074236929>'):
#myid = '<@343915078074236929>'
#await client.send_message(message.channel, myid + ' says hello')
if message.content.startswith('<@343915078074236929>'):
await client.send_message(message.channel, 'h-huh')
if message.content.startswith('tfw no bf'):
await client.send_message(message.channel, "I'll be your bf, i-if you want")
if message.content.startswith('fuck you pimbot'):
await client.send_message(message.channel, 'no u')
if message.content.startswith('nini~'):
await client.send_message(message.channel, 'goodnight qt')
if message.content.startswith('no u'):
await client.send_message(message.channel, 'no me')
if message.content.startswith('bye bye pimbot'):
await client.send_message(message.channel, 'bye bye~')
if message.content.startswith('~github'):
await client.send_message(message.channel, 'fork me or contribute to my development on github: \nhttps://github.com/Shiimoe/Pimbot')
if message.content.startswith(message.content[0] + "-" + message.content[0]):
await client.send_message(message.channel, 'Stop stuttering you gay cunt')
client.run('MzQzOTE1MDc4MDc0MjM2OTI5.DGlHtA.50snhJlQlLsEmm69zh-v8zcKs5Y')
| gpl-3.0 | 2,294,426,036,851,443,000 | 40.109091 | 134 | 0.733304 | false |
thomasorb/orb | orb/utils/fft.py | 1 | 13896 | #!/usr/bin/python
# *-* coding: utf-8 *-*
# Author: Thomas Martin <[email protected]>
# File: fft.py
## Copyright (c) 2010-2020 Thomas Martin <[email protected]>
##
## This file is part of ORB
##
## ORB is free software: you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## ORB is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
##
## You should have received a copy of the GNU General Public License
## along with ORB. If not, see <http://www.gnu.org/licenses/>.
import logging
import time
import sys
import numpy as np
import math
import warnings
import scipy
import scipy.special as ss
from scipy import signal, interpolate, optimize
import gvar
import orb.utils.vector
import orb.utils.spectrum
import orb.utils.stats
import orb.utils.filters
import orb.cutils
import orb.constants
def mod2pi(a):
"""Return the smallest signed modulo 2 pi of any angle in radians
"""
return np.arctan2(np.sin(a), np.cos(a))
def clean_phase(ph):
"""Return a cleaned phase vector (which does not depend on an arbitrary modulo pi)
"""
ph = orb.utils.vector.robust_unwrap(np.copy(ph), 2*np.pi)
if np.any(np.isnan(ph)):
ph.fill(np.nan)
else:
# set the first sample at the smallest positive modulo pi
# value (order 0 is modulo pi)
new_orig = np.fmod(ph[0], np.pi)
while new_orig < 0:
new_orig += np.pi
if np.abs(new_orig) > np.abs(new_orig - np.pi):
new_orig -= np.pi
elif np.abs(new_orig) > np.abs(new_orig + np.pi):
new_orig += np.pi
ph -= ph[0]
ph += new_orig
return ph
def next_power_of_two(n):
"""Return the next power of two greater than n.
:param n: The number from which the next power of two has to be
computed. Can be an array of numbers.
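    e.g. next_power_of_two(1000) -> 1024; next_power_of_two([3, 9]) -> [4, 16].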
"""
return np.array(2.**np.ceil(np.log2(n))).astype(int)
def raw_fft(x, apod=None, inverse=False, return_complex=False,
return_phase=False):
"""
Compute the raw FFT of a vector.
Return the absolute value of the complex vector by default.
:param x: Interferogram.
    :param apod: (Optional) Apodization function used. See
    :py:meth:`utils.gaussian_window` (default None)
:param inverse: (Optional) If True compute the inverse FFT
(default False).
:param return_complex: (Optional) If True, the complex vector is
returned (default False).
:param return_phase: (Optional) If True, the phase is
returned.(default False)
"""
x = np.copy(x)
windows = ['1.1', '1.2', '1.3', '1.4', '1.5',
'1.6', '1.7', '1.8', '1.9', '2.0']
N = x.shape[0]
    # mean subtraction
x -= np.mean(x)
# apodization
if apod in windows:
        x *= gaussian_window(apod, np.linspace(-1., 1., N))
elif apod is not None:
raise Exception("Unknown apodization function try %s"%
str(windows))
# zero padding
zv = np.zeros(N*2, dtype=x.dtype)
zv[int(N/2):int(N/2)+N] = x
    # shift the centerburst to the first sample
zv = np.roll(zv, zv.shape[0]/2)
# FFT
if not inverse:
x_fft = (np.fft.fft(zv))[:N]
else:
x_fft = (np.fft.ifft(zv))[:N]
if return_complex:
return x_fft
elif return_phase:
return np.unwrap(np.angle(x_fft))
else:
return np.abs(x_fft)
def cube_raw_fft(x, apod=None):
"""Compute the raw FFT of a cube (the last axis
beeing the interferogram axis)
:param x: Interferogram cube
:param apod: (Optional) Apodization function used. See
:py:meth:`utils.gaussian_window` (default None)
"""
x = np.copy(x)
N = x.shape[-1]
    # mean subtraction
x = (x.T - np.mean(x, axis=-1)).T
# apodization
if apod is not None:
        x *= gaussian_window(apod, np.linspace(-1., 1., N))
# zero padding
zv_shape = np.array(x.shape)
zv_shape[-1] = N*2
zv = np.zeros(zv_shape)
zv[:,int(N/2):int(N/2)+N] = x
# FFT
return np.abs((np.fft.fft(zv))[::,:N])
def norton_beer_window(fwhm='1.6', n=1000):
"""
Return an extended Norton-Beer window function (see [NAY2007]_).
Returned window is symmetrical.
:param fwhm: FWHM relative to the sinc function. Must be: 1.1,
1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9 or 2.0. (default '1.6')
:param n: Number of points (default 1000)
    .. note:: Coefficients of the extended Norton-Beer
    apodizing functions [NAY2007]_ :
==== ======== ========= ======== ======== ======== ========
FWHM C0 C1 C2 C4 C6 C8
---- -------- --------- -------- -------- -------- --------
1.1 0.701551 -0.639244 0.937693 0.000000 0.000000 0.000000
1.2 0.396430 -0.150902 0.754472 0.000000 0.000000 0.000000
1.3 0.237413 -0.065285 0.827872 0.000000 0.000000 0.000000
1.4 0.153945 -0.141765 0.987820 0.000000 0.000000 0.000000
1.5 0.077112 0.000000 0.703371 0.219517 0.000000 0.000000
1.6 0.039234 0.000000 0.630268 0.234934 0.095563 0.000000
1.7 0.020078 0.000000 0.480667 0.386409 0.112845 0.000000
1.8 0.010172 0.000000 0.344429 0.451817 0.193580 0.000000
1.9 0.004773 0.000000 0.232473 0.464562 0.298191 0.000000
2.0 0.002267 0.000000 0.140412 0.487172 0.256200 0.113948
==== ======== ========= ======== ======== ======== ========
.. [NAY2007] Naylor, D. A., & Tahic, M. K. (2007). Apodizing
functions for Fourier transform spectroscopy. Journal of the
Optical Society of America A.
"""
norton_beer_coeffs = [
[1.1, 0.701551, -0.639244, 0.937693, 0., 0., 0., 0., 0., 0.],
[1.2, 0.396430, -0.150902, 0.754472, 0., 0., 0., 0., 0., 0.],
[1.3, 0.237413, -0.065285, 0.827872, 0., 0., 0., 0., 0., 0.],
[1.4, 0.153945, -0.141765, 0.987820, 0., 0., 0., 0., 0., 0.],
[1.5, 0.077112, 0., 0.703371, 0., 0.219517, 0., 0., 0., 0.],
[1.6, 0.039234, 0., 0.630268, 0., 0.234934, 0., 0.095563, 0., 0.],
[1.7, 0.020078, 0., 0.480667, 0., 0.386409, 0., 0.112845, 0., 0.],
[1.8, 0.010172, 0., 0.344429, 0., 0.451817, 0., 0.193580, 0., 0.],
[1.9, 0.004773, 0., 0.232473, 0., 0.464562, 0., 0.298191, 0., 0.],
[2.0, 0.002267, 0., 0.140412, 0., 0.487172, 0., 0.256200, 0., 0.113948]]
fwhm_list = ['1.1', '1.2', '1.3', '1.4', '1.5',
'1.6', '1.7', '1.8', '1.9', '2.0']
if fwhm in fwhm_list:
fwhm_index = fwhm_list.index(fwhm)
else:
raise Exception("Bad extended Norton-Beer window FWHM. Must be in : " + str(fwhm_list))
x = np.linspace(-1., 1., n)
nb = np.zeros_like(x)
for index in range(9):
nb += norton_beer_coeffs[fwhm_index][index+1]*(1. - x**2)**index
return nb
def apod2width(apod):
"""Return the width of the gaussian window for a given apodization level.
:param apod: Apodization level (must be >= 1.)
The apodization level is the broadening factor of the line (an
apodization level of 2 mean that the line fwhm will be 2 times
wider).
"""
if apod < 1.: raise Exception(
'Apodization level (broadening factor) must be > 1')
return apod - 1. + (gvar.erf(math.pi / 2. * gvar.sqrt(apod - 1.))
* orb.constants.FWHM_SINC_COEFF)
def width2apod(width):
"""This is the inverse of apod2width.
    As the inverse is complicated to compute analytically, it is done via
    minimization.
"""
def diff(apod, width):
return apod2width(apod) - width
if width < 0: raise ValueError('width must be a positive float')
fit = optimize.least_squares(diff, 1, args=(width, ))
if fit.success:
return fit.x[0]
else:
raise Exception('error when inverting apod2width: {}'.format(fit.message))
def apod2sigma(apod, fwhm):
"""Return the broadening of the gaussian-sinc function in the
spectrum for a given apodization level. Unit is that of the fwhm.
:param apod: Apodization level (must be >= 1.)
"""
broadening = 2. * (apod2width(apod) / (math.sqrt(2.) * math.pi)
/ orb.utils.spectrum.compute_line_fwhm_pix(
oversampling_ratio=1))
return broadening * fwhm
def sigma2apod(sigma, fwhm):
"""This is the inverse of apod2sigma.
    As the inverse is complicated to compute analytically, it is done via
    minimization.
"""
def diff(apod, sigma, fwhm):
return apod2sigma(apod, fwhm) - sigma
if sigma < 0: raise ValueError('sigma must be a positive float')
if fwhm <= 0: raise ValueError('fwhm must be a strictly positive float')
fit = optimize.least_squares(diff, 1, args=(sigma, fwhm))
if fit.success:
return fit.x[0]
else:
raise Exception('error when inverting apod2sigma: {}'.format(fit.message))
def gaussian_window(coeff, x):
"""Return a Gaussian apodization function for a given broadening
factor.
:param coeff: FWHM relative to the sinc function. Must be a float > 1.
:param x: Must be an axis defined between -1 and 1 inclusively.
x = np.linspace(-1., 1., n) for a symmetrical window.
"""
coeff = float(coeff)
#x = np.linspace(-1., 1., n)
w = apod2width(coeff)
return np.exp(-x**2 * w**2)
def learner95_window(x):
"""Return the apodization function described in Learner et al.,
J. Opt. Soc. Am. A, 12, (1995).
This function is closely related to the minimum four-term
Blackman-Harris window.
    :param x: Must be an axis defined between -1 and 1 inclusively.
x = np.linspace(-1., 1., n) for a symmetrical window.
"""
return (0.355766
+ 0.487395 * np.cos(math.pi*x)
+ 0.144234 * np.cos(2.*math.pi*x)
+ 0.012605 * np.cos(3.*math.pi*x))
def border_cut_window(n, coeff=0.2):
"""Return a window function with only the edges cut by a nice
gaussian shape function.
:param n: Window length
:param coeff: Border size in percentage of the total length.
"""
window = np.ones(n)
border_length = int(float(n)*coeff)
if border_length <= 1:
window[0] = 0.
window[-1] = 0.
else:
borders = signal.get_window(("gaussian",border_length/3.),
border_length*2+1)
z = int(float(borders.shape[0])/2.)
window[:z] = borders[:z]
window[-z:] = borders[-z:]
return window
def ndft(a, xk, vj):
"""Non-uniform Discret Fourier Tranform
Compute the spectrum from an interferogram. Note that the axis can
be irregularly sampled.
If the spectral axis (output axis) is irregular the result is
exact. But there is no magic: if the input axis (interferogram
sampling) is irregular the output spectrum is not exact because
the projection basis is not orthonormal.
If the interferogram is the addition of multiple regularly sampled
    scans with an OPD shift between each scan, the result will be good
    as long as there are not too many scans added one after the
other. But if the interferogram steps are randomly distributed, it
will be better to use a classic FFT because the resulting noise
will be much lower.
:param a: 1D interferogram
:param xk: 1D sampling steps of the interferogram. Must have the
same size as a and must be relative to the real step length,
i.e. if the sampling is uniform xk = np.arange(a.size).
:param vj: 1D frequency sampling of the output spectrum.
"""
assert a.ndim == 1, 'a must be a 1d vector'
assert vj.ndim == 1, 'vj must be a 1d vector'
assert a.size == xk.size, 'size of a must equal size of xk'
angle = np.inner((-2.j * np.pi * xk / xk.size)[:,None], vj[:,None])
return np.dot(a, np.exp(angle))
def indft(a, x):
"""Inverse Non-uniform Discret Fourier Transform.
Compute the irregularly sampled interferogram from a regularly
sampled spectrum.
:param a: regularly sampled spectrum.
:param x: positions of the interferogram samples. If x =
range(size(a)), this function is equivalent to an idft or a
ifft. Note that the ifft is of course much faster to
compute. This vector may have any length.
"""
return orb.cutils.indft(a.astype(float), x.astype(float))
def spectrum_mean_energy(spectrum):
"""Return the mean energy of a spectrum by channel.
:param spectrum: a 1D spectrum
"""
return orb.cutils.spectrum_mean_energy(spectrum)
def interf_mean_energy(interf):
"""Return the mean energy of an interferogram by step.
:param interf: an interferogram
    .. warning:: The mean of the interferogram is subtracted to
compute only the modulation energy. This is the modulation
energy which must be conserved in the resulting spectrum. Note
that the interferogram transformation function (see
:py:meth:`utils.transform_interferogram`) remove the mean of the
interferogram before computing its FFT.
.. note:: NaNs are set to 0.
"""
return orb.cutils.interf_mean_energy(interf)
def phase_model(sigma, sigmaref, p):
"""Phase model
A simple polynomial. Defining a reference wavenumber in the given
axis is important since, if a 0 is not in the axis, the polynomial
fitting is unstable. This reference is defined in the filterfile.
"""
return np.polynomial.polynomial.polyval(sigma - sigmaref, p)
| gpl-3.0 | -8,758,207,435,317,938,000 | 32.565217 | 95 | 0.610463 | false |
MeerkatLabs/gnucash-reporting | gnucash_reports/commands/reports.py | 1 | 3287 | """
This is the main execution program for the reporting library.
"""
from __future__ import absolute_import
import argparse
import glob
import logging
import os
import simplejson as json
from gnucash_reports.wrapper import initialize
from gnucash_reports.reports import run_report
from gnucash_reports.configuration import configure_application
from gnucash_reports.utilities import load_plugins
from datetime import datetime
from yaml import load
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
logger = logging.getLogger(__name__)
def main():
"""
Execute main application
:return:
"""
load_plugins()
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', dest='configuration', default='core.yaml',
help='core configuration details of the application')
parser.add_argument('-r', '--report', dest='report', default=None,
help='only execute the requested report')
parser.add_argument('-d', '--date', dest='date', default=datetime.today().strftime('%Y-%m-%d'),
                        help='date formatted as %%Y-%%m-%%d')
args = parser.parse_args()
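    # a minimal core.yaml sketch (keys inferred from the lookups below; values are placeholders):
    #     gnucash_file: /path/to/books.gnucash
    #     output_directory: output
    #     report_definitions: reports
    #     global: {}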
with open(args.configuration) as file_pointer:
configuration = load(file_pointer, Loader=Loader)
session = initialize(configuration['gnucash_file'])
output_location = configuration.get('output_directory', 'output')
report_location = configuration.get('report_definitions', 'reports')
global_configuration = configuration.get('global', dict())
global_configuration['date'] = args.date
    configure_application(global_configuration)
if not os.path.exists(output_location):
os.makedirs(output_location)
all_reports = []
if not args.report:
reports_list = glob.glob(os.path.join(report_location, '*.yaml'))
else:
reports_list = [args.report]
for infile in sorted(reports_list):
print 'Processing: %s' % infile
with open(infile) as report_configuration_file:
report_configuration = load(report_configuration_file, Loader=Loader)
result_definition = dict(name=report_configuration.get('page_name', 'Unnamed Page'),
reports=[])
for report_definition in report_configuration['definitions']:
result = run_report(**report_definition)
if result:
result_definition['reports'].append(result)
output_file_name = os.path.split(infile)[-1] + '.json'
with open(os.path.join(output_location, output_file_name), 'w') as output_file:
json.dump(result_definition, output_file)
all_reports.append(dict(name=result_definition.get('name'), file=output_file_name))
session.close()
definition_dictionary = dict(
modification_time=datetime.now().strftime('%c'),
last_updated=datetime.now().strftime('%c'),
reports=all_reports
)
with open(os.path.join(output_location, '__reports.json'), 'w') as all_report_file:
json.dump(definition_dictionary, all_report_file)
if __name__ == '__main__':
main()
| mit | -3,509,458,222,486,998,000 | 31.544554 | 99 | 0.642835 | false |
alexliew/learn_python_the_hard_way | projects/gothonweb/gothonweb/tests/app_tests.py | 1 | 1099 | import os
import unittest
import tempfile
from nose.tools import *
from app import app
from gothonweb.tests.tools import assert_response
app.config['TESTING'] = True
test_app = app.test_client()
def test_index():
# check that we get a 404 on the /hello URL
resp = test_app.get("/hello")
assert_response(resp, status='404')
# test our first GET request to /
resp = test_app.get('/')
assert_response(resp, status='302')
# test our first GET request to /game
resp = test_app.get('/game')
assert_response(resp, status='200')
# make sure default values work for the form
resp = test_app.post('/game')
assert_response(resp, status='302')
# test that we get expected values
data = {'action': 'tell a joke!'}
resp = test_app.post('/game', data=data)
assert_response(resp, status='302')
# class GothonWebTestCase(unittest.TestCase):
# def setUp(self):
# app.app.config['TESTING'] = True
# self.app = app.app.test_client()
# def tearDown(self):
# pass
# if __name__ == '__main__':
# unittest.main()
| mit | 2,481,793,237,079,290,000 | 24.55814 | 49 | 0.636033 | false |
marksweiss/sofine | sofine/runner.py | 1 | 19931 | """
This module is the main driver for calls to plugins from the CLI interface.
It also has all of the scaffolding and wrapper functions required to generically invoke
and run any of the supported plugin methods in the plugin interface for any plugin
using just the plugin name, plugin group and call args.
"""
import sofine.lib.utils.utils as utils
import sofine.lib.utils.conf as conf
from optparse import OptionParser
import sys
def get_data(data, data_source, data_source_group, data_source_args):
"""
* `data` - `dict`. A dict of keys and associated array of dicts of attribute keys and values. May be empty.
Any data collected by this call with append new keys and values to `data`, and append new attribute keys
and values for existing keys into the array of attribute key/attribute value (single-entry) dicts
associated with that key. Also, if this call is invoked from a piped command line call piping to sofine,
that will be detected and `data` will be read from `stdin`, overriding whatever value is passed in for this arg.
* `data_source` - `string`. The name of the plugin being called.
* `data_source_group` - `string`. The name of the plugin group for the plugin being called.
* `data_source_args` - `list`. The args for the plugin call, in `argv` format with alternating elements
referring to argument names and argument values.
Main driver function for retrieving data from a plugin. Calls a plugin's _required_ `get_data` method.
    Takes a single data source name, its plugin group and the list of arguments used to call it.
Can be called directly or from `main` if this module was instantiated from the command line.
This method operates based on the core data aggregation semantics of the library:
* If this is the first call in the chain, data is empty, so just fill it with the return of this call
    * If there is already data, add any new keys retrieved and add attribute key/value pairs associated
    with any new or existing keys
* The set of keys on each call is the union of all previously collected keys
    * The set of attributes associated with each key is the union of all attribute/value
    pairs previously collected for that key
Output looks like this:
{"key_1" : [{"attr_name_1" : value}, {"attr_name_2" : value}, {"attr_name_1, value}],
"key_2" : ...
}
"""
plugin = utils.load_plugin(data_source, data_source_group)
is_valid, parsed_args = plugin.parse_args(data_source_args)
if not is_valid:
        raise ValueError('Invalid value passed in call to {0}. Args passed: {1}'.format(data_source, data_source_args))
new_data = plugin.get_data(data.keys(), parsed_args)
if len(new_data.keys()) > 0:
for k in new_data.keys():
            # Convert returned dict of attributes into a list of individual dicts. This allows all data
            # from all plugins to be added to the output without needing namespacing to prevent attribute
            # keys from overwriting each other. Namespacing can optionally be turned on by the caller.
new_data_list = [{name : val} for name, val in new_data[k].iteritems()]
if k in data:
data[k] += new_data_list
else:
data[k] = new_data_list
return data
def get_namespaced_data(data, data_source, data_source_group, data_source_args):
"""As in `get_data`, but each attribute dict in each array of attribute dicts that is the value of each key
in the data set is prepended with the plugin name and plugin group.
Namespaced output looks like this:
{"key_1" : [{"plugin_group_A::plugin_1::attr_name_1" : value},
{"plugin_group_A::plugin_1::attr_name_2" : value},
{"plugin_group_B::plugin_1::attr_name_1" : value}],
"key_2" : ...
}
"""
    # Remember how many attribute dicts each key already has so that only the
    # attributes contributed by this call are namespaced, not attributes added
    # by previous calls in a piped chain
    prev_counts = {k : len(v) for k, v in data.iteritems()}
    data = get_data(data, data_source, data_source_group, data_source_args)
    # For each attribute dict appended by this call, create the namespaced key and replace
    # the old attribute dict in the list with one mapping the namespaced key to the same value
    for key, attrs in data.iteritems():
        for j in range(prev_counts.get(key, 0), len(attrs)):
            attr = dict(attrs[j])
            attr_key = utils.namespacer(data_source_group, data_source, attr.keys()[0])
            attr_val = attr.values()[0]
            attrs[j] = {attr_key : attr_val}
return data
def _validate_get_data_batch(data_sources, data_source_groups, data_source_args, fn_name):
if len(data_sources) != len(data_source_args) or \
len(data_sources) != len(data_source_groups) or \
len(data_source_groups) != len(data_source_args):
raise ValueError("""Call to runner.{0}() had different lengths for
data_sources (len == {1}),
data source_groups (len == {2}) and
            data_source_args (len == {3})""".format(fn_name, len(data_sources), len(data_source_groups), len(data_source_args)))
def get_data_batch(data, data_sources, data_source_groups, data_source_args):
"""
* `data` - `dict`. A dict of keys and associated array of dicts of attribute keys and values. May be empty.
Any data collected by this call with append new keys and values to `data`, and append new attribute keys
and values for existing keys into the dict associated with that key.
* `data_source` - `list`. A list of names of plugins being called.
* `data_source_group` - `list`. A list of names of plugin groups for the plugins being called.
* `data_source_args` - `list of list`. A list of lists of args for the plugin calls, in argv format with alternating elements
referring to argument names and argument values.
Convenience wrapper for users of sofine as a Python library. This function lets a user pass in
a list of data sources, a list of plugin groups and a list of lists of arguments for each plugin call.
Note that the elements must be in order in each argument: data source name in position 0 must match
data source group in position 0 and the list of args for that call in `data_source_args[0]`.
"""
_validate_get_data_batch(data_sources, data_source_groups, data_source_args, 'get_data_batch')
for j in range(0, len(data_sources)):
data = get_data(data, data_sources[j], data_source_groups[j], data_source_args[j])
return data
def get_namespaced_data_batch(data, data_sources, data_source_groups, data_source_args):
"""As in `get_data_batch`, but each attribute dict in each array of attribute dicts that is the value of each key
in the data set is prepended with the plugin name and plugin group. All plugins called in the batch call will
namespace the attributes they contribute to the final data set returned.
Namespaced output looks like this:
{"key_1" : [{"plugin_group_A::plugin_1::attr_name_1" : value},
{"plugin_group_A::plugin_1::attr_name_2" : value},
{"plugin_group_B::plugin_1::attr_name_1" : value}],
"key_2" : ...
}
"""
_validate_get_data_batch(data_sources, data_source_groups, data_source_args, 'get_namespaced_data_batch')
for j in range(0, len(data_sources)):
data = get_namespaced_data(data, data_sources[j], data_source_groups[j], data_source_args[j])
return data
def _get_schema(get_schema_call, parse_args_call, data_source, data_source_group, args):
schema = None
if not args:
schema = get_schema_call()
else:
is_valid, parsed_args = parse_args_call(args)
if not is_valid:
            raise ValueError('Invalid value passed in call to {0}. Args passed: {1}'.format(data_source, args))
schema = get_schema_call(parsed_args)
return {"schema" : schema}
def get_schema(data_source, data_source_group, args=None):
"""
* `data_source` - `string`. The name of the plugin being called.
* `data_source_group` - `string`. The name of the plugin group for the plugin being called.
* `args` - `any`. This is a bit of a hack, but basically there are use cases that could require args in
order to figure out the schema of available fields. Maybe a plugin wraps access to a data store that allows
arbitary or varying schemas per document retrieved. Or maybe, like the included `standard.file_source`
plugin, it wraps access to a config that can provide an arbitrary list of fields.
This returns the value for a plugin's _optional_ (but highly recommended) `self.schema` attribute.
    This method lets plugin users introspect the plugin to ask what schema fields it provides, that is, what
    set of attribute keys it can add to the attributes dict for each key in data.
Note that the default implementation is provided by `PluginBase` and it returns a properly namespaced list
of attribute keys. All the plugin creator has to do is set the `self.schema` attribute of their plugin to a
list of strings of the attribute keys it can return.
    Not all data sources guarantee they will return all attribute keys for each key in data, and not
all data sources guarantee they will return the same set of attribute keys for each key in data in
one returned data set.
"""
plugin = utils.load_plugin(data_source, data_source_group)
return _get_schema(plugin.get_schema, plugin.parse_args, data_source, data_source_group, args)
def get_namespaced_schema(data_source, data_source_group, args=None):
"""As in `get_schema` except that the schema attribute keys returned are prepended with the `data_source` and
`data_source_group`.
"""
plugin = utils.load_plugin(data_source, data_source_group)
return _get_schema(plugin.get_namespaced_schema, plugin.parse_args, data_source, data_source_group, args)
def parse_args(data_source, data_source_group, data_source_args):
"""
* `data_source` - `string`. The name of the plugin being called.
* `data_source_group` - `string`. The name of the plugin group for the plugin being called.
* `data_source_args` - `list`. The args for the plugin call, in `argv` format with alternating elements
referring to argument names and argument values.
A wrapper which calls a plugin's _required_ `parse_args` method. This method must parse arguments the plugin's `get_data`
call requires, with the arguments in argv format with alternating elements referring to argument
names and argument values.
The method is also responsible for validating arguments and returning a boolean `is_valid` as well as the
parsed (and possibly modified) args.
"""
plugin = utils.load_plugin(data_source, data_source_group)
is_valid, parsed_args = plugin.parse_args(data_source_args)
return {"is_valid" : is_valid, "parsed_args" : parsed_args}
def adds_keys(data_source, data_source_group):
"""
* `data_source` - `string`. The name of the plugin being called.
* `data_source_group` - `string`. The name of the plugin group for the plugin being called.
A wrapper which calls a plugin's _optional_ (but recommended) `adds_keys` method. This introspection method
lets plugin users ask whether a plugin adds its own keys to the `data` output or simply adds key/value
attributes to the dicts being built by sofine for each key in `data`.
"""
plugin = utils.load_plugin(data_source, data_source_group)
adds_keys = plugin.get_adds_keys()
return {"adds_keys" : adds_keys}
def get_plugin_module(data_source, data_source_group):
"""
* `data_source` - `string`. The name of the plugin being called.
* `data_source_group` - `string`. The name of the plugin group for the plugin being called.
Convenience function for clients to get an instance of a plugin module.
This lets plugin implementers expose free functions in the module and have client
code be able to access them.
"""
return utils.load_plugin_module(data_source)
def get_plugin(data_source, data_source_group):
"""
* `data_source` - `string`. The name of the plugin being called.
* `data_source_group` - `string`. The name of the plugin group for the plugin being called.
Convenience function for clients to get an instance of a plugin.
This lets plugin implementers expose free functions in the module and have client
code be able to access them.
"""
return utils.load_plugin(data_source, data_source_group)
def _parse_runner_arg(args, arg_flags):
ret = None
def try_arg_flag(arg_flag):
e = ''
i = -1
try:
i = args.index(arg_flag)
except ValueError:
e = 'Required argument {0} not found in command line argument list passed to runner.main()'.format(arg_flag)
if i == len(args) - 1:
e = 'Value for required argument {0} not found in command line argument list passed to runner.main()'.format(arg_flag)
return e, i
# Try twice if necessary, for each of the two forms of the arg flag
err, idx = try_arg_flag(arg_flags[0])
if err:
err, idx = try_arg_flag(arg_flags[1])
# Flag was found, value for it parsed, and flag and value removed from args
if not err:
ret = args[idx + 1]
del args[idx + 1]
del args[idx]
return err, ret
def _parse_global_call_args(args):
# Default output to JSON
data_format = None
err, data_format = _parse_runner_arg(args, ['--SF-d', '--SF-data-format'])
if err:
data_format = conf.DEFAULT_DATA_FORMAT
return data_format
def _parse_runner_call_args(args):
data_source = None
data_source_group = None
action = None
# Parse for both versions of required flags and raise error if not found
err, data_source = _parse_runner_arg(args, ['--SF-s', '--SF-data-source'])
if err: raise ValueError(err)
err, data_source_group = _parse_runner_arg(args, ['--SF-g','--SF-data-source-group'])
if err: raise ValueError(err)
# For optional argument, don't throw if not found, just set default value
err, action = _parse_runner_arg(args, ['--SF-a', '--SF-action'])
if err:
action = 'get_data'
return data_source, data_source_group, action, args
def _run_action(action, ret, data_source, data_source_group, data_source_args):
if action == 'get_data':
ret = get_data(ret, data_source, data_source_group, data_source_args)
if action == 'get_namespaced_data':
ret = get_namespaced_data(ret, data_source, data_source_group, data_source_args)
elif action == 'get_schema':
ret = get_schema(data_source, data_source_group, data_source_args)
elif action == 'get_namespaced_schema':
ret = get_namespaced_schema(data_source, data_source_group, data_source_args)
elif action == 'adds_keys':
ret = adds_keys(data_source, data_source_group)
elif action == 'parse_args':
ret = parse_args(data_source, data_source_group, data_source_args)
return ret
def main(argv):
"""Entry point if called from the command line. Parses CLI args, validates them and calls run().
The arguments dedicated to this framework are expected to precede the remaining args
(for clarity of reading the entire command) but don't need to. In order to clearly
separate from the args required for the call being run, they are preceded by `--SF_*`.
There is a short form and long form of each command:
    * `[--SF-d|--SF-data-format]` - The data format to be used for all following piped calls to `get_data`
    or `get_namespaced_data`. This argument is optional. It is only evaluated for `get_data`
    and `get_namespaced_data`. If it isn't passed the default data format is JSON.
* `[--SF-s|--SF-data-source]` - The name of the data source being called. This is the
name of the plugin module being called. Required.
* `[--SF-g|--SF-data-source-group`] - The plugin group where the plugin lives. This is
the plugins subdirectory where the plugin module is deployed. Required.
`[--SF-a|--SF-action]` - The plugin action being called. One of five supported actions that must be part of every plugin:
- `get_data` - retrieves available data for the keys passed to it
- `get_namespaced_data` - retrieves available data for the keys passed to it, with the attribute keys associated with each
key prepended with the plugin name and plugin group
- `adds_keys` - returns a JSON object with the attribute `adds_keys` and a
boolean indicating whether the data source adds keys or just gets data for the keys passed to it
- `get_schema` - returns a JSON object with the attribute `schema` and the schema of attributes which
this data source may add for each key
- `get_namespaced_schema` - returns a JSON object with the attribute `schema` and the schema of attributes which
this data source may add for each key, with each attribute prepended with the plugin name and plugin group
- `parse_args` - returns the values parsed for the arguments passed to the call being
made as a JSON object with an attribute `args` and an array of parsed args,
and an attribute `is_valid` with a boolean indicating whether parsing succeeded.
The `[--SF-a|--SF-action]` argument is Optional. If you don't pass it, `get_data` is assumed.
Calls to `get_data` and `get_namespaced_data` can be piped together. You can mix `get_data` and `get_namespaced_data` calls
in a piped expression.
Calls to `adds_keys` and `get_schema` and `parse_args` cannot be piped.
All calls and their arguments must be enclosed in quotes as shown in the examples below.
The complete interface for a call piping two get_data calls together:
PATH/runner.py \'[--SF-s|--SF-data-source] DATA_SOURCE_1 \\
[--SF-g|--SF-data-source-group] DATA_SOURCE_GROUP_1 \\
ARGS | \\
[--SF-s|--SF-data-source] DATA_SOURCE_2 \\
[--SF-g|--SF-data-source-group] DATA_SOURCE_GROUP_2 \\
ARGS\'
An example call piping two get_data calls together:
PATH/runner.py \'--SF-s fidelity --SF-g examples \\
-c CUSTOMER_ID -p PASSWORD -a ACCOUNT_ID -e EMAIL | \\
--SF-s ystockquotelib --SF-g examples\'
An example get_schema call:
PATH/runner.py \'--SF-s fidelity --SF-g examples --SF-a get_schema \\
-c CUSTOMER_ID -p PASSWORD -a ACCOUNT_ID -e EMAIL\'
"""
ret = {}
# Get any global args and each piped data source and set of args to call it from the CLI
# CLI syntax is split on pipes
calls = ' '.join(argv).split('|')
if len(calls):
# Parse global args, which appear before any calls. Right now only output format
# is only global arg, and it will be applied to all actions, even when that makes less sense
global_arg_call = calls[0].strip().split()
data_format = _parse_global_call_args(global_arg_call)
data_format_plugin = utils.load_plugin_module(data_format)
# If input passed from stdin, set initial data in chain of calls to that.
# Thus supports composing sofine piped chains with preceding outer piped
# command line statements that include sofine pipes within them
if utils.has_stdin():
ret = sys.stdin.read()
ret = data_format_plugin.deserialize(ret)
for call in calls:
call = call.strip().split()
data_source, data_source_group, action, data_source_args = \
_parse_runner_call_args(call)
ret = _run_action(action, ret, data_source, data_source_group, data_source_args)
print data_format_plugin.serialize(ret)
if __name__ == '__main__':
# Client passes in a statement of one or more piped calls to
# data sources enclosed in quotes. Convert to list here because
# code in main() and run() expects an argument list
argv = sys.argv[1]
argv = argv.split()
main(argv)
| mit | -5,049,273,920,547,250,000 | 46.007075 | 130 | 0.6956 | false |
arnavd96/Cinemiezer | myvenv/lib/python3.4/site-packages/music21/alpha/trecento/exceldiff.py | 1 | 2662 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from music21.ext import xlrd
#sys.path.append('/mit/cuthbert/www/music21')
if len(sys.argv) != 3:
raise Exception("Need two arguments to diff!")
if (sys.argv[1].count(':') == 1):
(book1name, sheetname1) = sys.argv[1].split(':')
if (book1name.count('.xls') == 0):
book1name += ".xls"
else:
raise ("First name must be in form filename:sheetname")
if (sys.argv[2].count(':') == 1):
(book2name, sheetname2) = sys.argv[2].split(':')
else:
(book2name, sheetname2) = (sys.argv[2], sheetname1)
if (book2name.count('.xls') == 0):
book2name += ".xls"
book1 = xlrd.open_workbook(book1name)
book2 = xlrd.open_workbook(book2name)
sheet1 = book1.sheet_by_name(sheetname1)
sheet2 = book2.sheet_by_name(sheetname2)
totalRows1 = sheet1.nrows
totalRows2 = sheet2.nrows
extraRows = 0
longsheet = 0
if (totalRows1 > totalRows2):
longsheet = 1
extraRows = (totalRows1 - totalRows2)
minRows = totalRows2
elif (totalRows1 < totalRows2):
longsheet = 2
extraRows = (totalRows2 - totalRows1)
minRows = totalRows1
else:
minRows = totalRows1 # doesnt matter which
for i in range(0, minRows):
rowvalues1 = sheet1.row_values(i)
rowvalues2 = sheet2.row_values(i)
longrow = 0
totalCells1 = len(rowvalues1)
totalCells2 = len(rowvalues2)
extraCells = 0
if (totalCells1 > totalCells2):
longrow = 1
extraCells = (totalCells1 - totalCells2)
minCells = totalCells2
elif (totalCells1 > totalCells2):
longrow = 2
extraCells = (totalCells2 - totalCells1)
minCells = totalCells1
else:
minCells = totalCells1 # doesnt matter which
for j in range(0, minCells):
if (rowvalues1[j] != rowvalues2[j]):
print("%3d,%2s--%34s : %34s" % (i+1,xlrd.colname(j),
unicode(rowvalues1[j]).encode('utf-8')[:34],
unicode(rowvalues2[j]).encode('utf-8')[:34]))
if (extraCells > 0):
print("%3d extra cells in row %3d in" % (extraCells, i+1),)
if (longrow == 1): print(book1name + ":" + sheetname1)
elif (longrow == 2): print(book2name + ":" + sheetname2)
else: raise Exception("What? longrow was not set!")
if (extraRows > 0):
print("%3d extra rows in" % extraRows,)
if (longsheet == 1): print(book1name + ":" + sheetname1)
elif (longsheet == 2): print(book2name + ":" + sheetname2)
else: raise Exception("What? longsheet was not set!")
#------------------------------------------------------------------------------
# eof
| mit | -3,328,757,419,221,253,000 | 28.910112 | 92 | 0.586777 | false |
dirkhusemann/rezzme | RezzMe/launchers/linux2.py | 1 | 5118 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
# Copyright (c) Contributors, http://opensimulator.org/
# See CONTRIBUTORS.TXT for a full list of copyright holders.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the OpenSim Project nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE DEVELOPERS ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import subprocess
import urllib
import PyQt4.QtCore
import RezzMe.exceptions
import RezzMe.launchers.hippo
class PlatformLauncher(object):
def __init__(self):
self._clientsDefault = {'hippo' : 'hippo_opensim_viewer',
'secondlife': 'secondlife'}
self._clients = {}
for c in self._clientsDefault:
for bin in os.environ['PATH'].split(':'):
t = '%s/%s' % (bin, self._clientsDefault[c])
if os.path.exists(t):
self._clients[c] = t
break
def _gClients(self):
return self._clients
Clients = property(fget = _gClients)
def _gClientPattern(self):
return 'client executable (*)'
ClientPattern = property(fget = _gClientPattern)
def HippoDefaultGrids(self, path):
hippoHome = os.path.dirname(os.path.realpath(path))
defaultGrids = '%s/app_settings/default_grids.xml' % hippoHome
if os.path.exists(defaultGrids):
logging.debug("launchers.linux2: found hippo's default_grids.xml at %s", defaultGrids)
return defaultGrids
logging.debug("launchers.linux2: trying to find hippo's default_grids.xml via locate...")
defaultGrids = subprocess.Popen(['locate', 'app_settings/default_grids.xml'], stdout = subprocess.PIPE).communicate()[0].rstrip()
if defaultGrids:
for p in defaultGrids.split():
if 'hippo' in p.lower():
logging.debug("launchers.linux2: found hippo's default_grids.xml at %s", p)
return p
return None
def Launch(self, avatar, password, gridInfo, clientName, client, location, purge):
# fix ' character appearing in irish names
avatar = urllib.unquote(avatar)
clientArgs = [ ]
clientArgs += ['-loginuri', gridInfo['login']]
clientArgs += ['-multiple']
keys = gridInfo.keys()
if 'welcome' in keys: clientArgs += ['-loginpage', gridInfo['welcome']]
if 'economy' in keys: clientArgs += ['-helperuri', gridInfo['economy']]
if purge:
clientArgs += ['--purge']
# mirror clientArgs into logArgs to avoid capturing passwords into
# log files
logArgs = clientArgs[:]
if avatar and password:
clientArgs += ['-login']
avatar = avatar.replace('(', '\(')
avatar = avatar.replace(')', '\)')
clientArgs += map(lambda x: "%s" % x, avatar.split())
logArgs = clientArgs[:]
clientArgs += [password]
logArgs += ["'**********'"]
if 'hippo' in clientName.lower() or 'hippo' in client.lower():
userGridXml = os.path.expanduser('~/.hippo_opensim_viewer/user_settings/grid_info.xml')
defaultGridXml = self.HippoDefaultGrids(client)
gridnick = RezzMe.launchers.hippo.HippoGridInfoFix(gridInfo, userGridXml, defaultGridXml)
clientArgs += ['-grid', gridnick]
logArgs += ['-grid', gridnick]
# has to come last
if location:
clientArgs += [location]
logArgs += [location]
# all systems go: start client
logging.debug('launchers.linux2: client %s %s', client, ' '.join(logArgs))
# os.execvp(client, clientArgs)
PyQt4.QtCore.QProcess.startDetached(client, clientArgs)
| bsd-3-clause | 8,639,283,520,196,367,000 | 39.619048 | 137 | 0.635795 | false |
rueckstiess/mtools | mtools/mlogfilter/filters/fast_filter.py | 1 | 1215 | from .base_filter import BaseFilter
class FastFilter(BaseFilter):
"""
FastFilter class.
Accept only lines that have a duration that is shorter than the specified
parameter in ms.
"""
filterArgs = [
('--fast', {'action': 'store', 'nargs': '?', 'default': False,
'type': int,
'help': ('only output lines with query times shorter '
'than FAST ms (default 1000)')})
]
def __init__(self, mlogfilter):
BaseFilter.__init__(self, mlogfilter)
if ('fast' in self.mlogfilter.args and
self.mlogfilter.args['fast'] is not False):
self.active = True
if self.mlogfilter.args['fast'] is None:
self.fastms = 1000
else:
self.fastms = self.mlogfilter.args['fast']
def accept(self, logevent):
"""
Process line.
Overwrite BaseFilter.accept() and return True if the provided
logevent should be accepted (causing output), or False if not.
"""
if self.active and logevent.duration is not None:
return logevent.duration <= self.fastms
return False
| apache-2.0 | -160,321,035,544,508,300 | 30.973684 | 77 | 0.553909 | false |
marrow/mongo | marrow/mongo/core/field/oid.py | 1 | 1116 | # encoding: utf-8
from __future__ import unicode_literals
from bson import ObjectId as OID
from collections import MutableMapping
from datetime import datetime, timedelta
from .base import Field
from ....schema import Attribute
from ....schema.compat import unicode
class ObjectId(Field):
__foreign__ = 'objectId'
__disallowed_operators__ = {'#array'}
default = Attribute()
def __fixup__(self, document):
super(ObjectId, self).__fixup__(document)
try: # Assign a default if one has not been given.
self.default
except AttributeError:
if self.__name__ == '_id': # But only if we're actually the primary key.
self.default = lambda: OID() # pylint:disable=unnecessary-lambda
def to_foreign(self, obj, name, value): # pylint:disable=unused-argument
if isinstance(value, OID):
return value
if isinstance(value, datetime):
return OID.from_datetime(value)
if isinstance(value, timedelta):
return OID.from_datetime(datetime.utcnow() + value)
if isinstance(value, MutableMapping) and '_id' in value:
return OID(value['_id'])
return OID(unicode(value))
| mit | 7,631,403,063,071,038,000 | 25.571429 | 76 | 0.701613 | false |
majdigital/bigworldgraph | backend/tests/mock_nlp.py | 1 | 1055 | class MockTokenizer:
@staticmethod
def tokenize(sentence_data):
return sentence_data.split(" ") # Yes, yes, very sophisticated
class MockTagger:
def __init__(self, naive_tag_rule):
assert callable(naive_tag_rule)
self.naive_tag_rule = naive_tag_rule
def tag(self, tokenized_sentence):
return [self.naive_tag_rule(token.lower(), tokenized_sentence) for token in tokenized_sentence]
class MockParser:
@staticmethod
def raw_parse(sentence_data):
tokens = sentence_data.split(" ")
return {
"root": {
"address": 0
},
"nodes": {
node_id: {
"address": node_id,
"word": "ROOT" if node_id == 0 else tokens[node_id - 1],
"rel": None if node_id == 0 else node_id - 1,
"deps": {
"rel": node_id + 1
}
}
for node_id in range(len(tokens) + 1)
}
}
| mit | 3,103,457,414,177,886,000 | 28.305556 | 103 | 0.483412 | false |
stoic1979/careermaker | server.py | 1 | 9828 | from flask import Flask, render_template, request, redirect, jsonify, make_response
from models import User, Candidate, \
Company, Vacancy, JobCategory, Skill, db, app
import md5
from flask_pymongo import PyMongo
import traceback
import os
import jwt
import datetime
from functools import wraps
import json
from scraper.config import *
from scraper.db import Mdb
app = Flask(__name__)
mongo = PyMongo(app)
from flask_admin import Admin
from flask_admin.contrib.sqla import ModelView
from flask_login import LoginManager, UserMixin, login_user, login_required, logout_user, current_user
from bson import ObjectId
mdb = Mdb()
######################################################
# #
# Since mongodb's _id of each record was not getting #
# json encoded, so this custom JSONEncoder is needed #
# #
######################################################
class JSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, ObjectId):
return str(o)
return json.JSONEncoder.default(self, o)
# Setup login manager
login_manager = LoginManager()
login_manager.init_app(app)
class ChildView(ModelView):
column_display_pk = True
column_hide_backrefs = False
column_list = ('category', 'title', 'created_at')
admin = Admin(app, name='CareerMaker Admin', template_mode='bootstrap3')
admin.add_view(ModelView(Candidate, db.session))
admin.add_view(ModelView(Company, db.session))
admin.add_view(ModelView(Vacancy, db.session))
admin.add_view(ModelView(JobCategory, db.session))
# admin.add_view(ModelView(Skill, db.session))
# admin.add_view(ChildVinameew(Skill, db.session))
admin.add_view(ModelView(User, db.session))
@app.route("/")
@login_required
def index():
templateData = {'title': 'Home Page'}
return render_template("index.html", **templateData )
@login_manager.unauthorized_handler
def unauthorized_callback():
return redirect('/login')
@app.route("/api_demo")
def candidate_form():
templateData = {'title' : 'Home Page'}
return render_template("api_demo.html", **templateData )
@app.route("/find_company_data", methods=['POST'])
def find_company_data():
ret = {"err": 0}
try:
ret = []
print "find_company_data() ::", request.form
login()
cname = request.form['cname']
# ret['Company Name'] = cname
collection_android = mdb.db["job_vacancy_android"]
collection_python = mdb.db["job_vacancy_python"]
result = collection_android.find({"title": cname})
print result
# ret.append(result)
for data in result:
ret.append(data)
result = collection_python.find({"title": cname})
print result
# ret.append(result)
for data in result:
ret.append(data)
# testing code
print JSONEncoder().encode({'job_vacancy %s ': ret})
# mdb.retrieve_data(cname)
except Exception as exp:
print "find_company_data() :: Got exception: %s" % exp
print(traceback.format_exc())
# return json.dumps(ret)
return JSONEncoder().encode({'job_vacancy': ret})
@app.route("/save_candidate", methods=['POST'])
def save_candidate():
try:
print "save_candidate(): :", request.form
user_id = request.form['user_id']
name = request.form['name']
email = request.form['email']
pswd = request.form['pswd']
age = request.form['age']
phone = request.form['phone']
address = request.form['address']
gender = request.form['gender']
encodepassword = md5.new(pswd).hexdigest()
# save candidate in db
candidate = Candidate(user_id, name, email, encodepassword, age, phone, address, gender)
db.session.add(candidate)
db.session.commit()
except Exception as exp:
print "save_candidate(): : Got Exception: %s" % exp
print(traceback.format_exc())
return "Candidate Data Saved"
@app.route("/save_company", methods=['POST'])
def save_company():
try:
print "save_company() :: ", request.form
user_id = request.form['user_id']
name = request.form['name']
website = request.form['website']
email = request.form['email']
pswd = request.form['pswd']
mobile = request.form['mobile']
telno = request.form['telno']
address = request.form['address']
city = request.form['city']
state = request.form['state']
country = request.form['country']
pin = request.form['pin']
encodepswd = md5.new(pswd).hexdigest()
# saving company in db
company = Company(user_id, name, website, email, encodepswd, mobile, telno, address, city, state, country, pin)
db.session.add(company)
db.session.commit()
except Exception as exp:
print "save_company(): : Got Exception: %s" % exp
print (traceback.format_exc())
return "Company Saved"
@app.route("/save_vacancy", methods=['POST'])
def save_vacancy():
try:
comp_id = request.form['comp_id']
cand_id = request.form['cand_id']
post_date = request.form['post_date']
expiry_date = request.form['expiry_date']
sal_min = request.form['sal_min']
sal_max = request.form['sal_max']
fulltime = request.form['fulltime']
# saving vacancy in db
vacancy = Vacancy(comp_id, cand_id, post_date, expiry_date, sal_min, sal_max, fulltime)
db.session.add(vacancy)
db.session.commit()
except Exception as exp:
print "save_vacancy() :: Got Exception: %s" % exp
print (traceback.format_exc())
return "Vacancy saved"
@app.route("/save_JobCategory", methods=['POST'])
def save_JobCategory():
try:
title = request.form['indextitle']
# savin Job Category in db
jbcategory = JobCategory(title)
db.session.add(jbcategory)
db.session.commit()
except Exception as exp:
print "save_JobCategory() :: Got Exception: %s" % exp
print (traceback.format_exc())
return "Save Job Category"
@app.route("/save_skill", methods=['POST'])
def save_skill():
try:
cat_id = request.form['cat_id']
title = request.form['title']
# saving skill in db
skill = Skill(cat_id, title)
db.session.add(skill)
db.session.commit()
except Exception as exp:
print "save_skill() :: Got Excetion: %s" % exp
print(traceback.format_exc())
return "Save Skill"
@app.route("/search", methods=['POST'])
def search():
try:
print "search() :: %s", request.form
except Exception as exp:
print "search() :: Got Exception: %s" % exp
print (traceback.format_exc())
return "Job Search"
@app.route("/user_register", methods=['POST'])
def user_register():
try:
print "user_register() :: ", request.form
username = request.form['username']
pswd = request.form['pswd']
encodepswd = md5.new(pswd).hexdigest()
user = User(username, encodepswd)
db.session.add(user)
db.session.commit()
except Exception as exp:
print "user_register() :: Got Exception: %s" % exp
print(traceback.format_exc())
return "user Register Successfully"
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == "GET":
print "login GET"
templateData = {'title' : 'Login To Career Maker'}
return render_template("index.html", **templateData)
else:
username = request.form['username']
pswd = request.form['pswd']
encodepswd = md5.new(pswd).hexdigest()
user = User.query.filter_by(username=username).filter_by(pswd=encodepswd).first()
if not user:
print "The username and Password is invalid"
return "Invalid Username and Password"
else:
print "login is successfull"
templateData = {'title' : 'Home Page'}
return render_template("index.html", **templateData)
"""
# token authentication
app.config['secretkey'] = 'some-strong+secret#key'
def token_required(f):
@wraps(f)
def decorated(*args, **kwargs):
token = request.args.get('token')
# ensure that token is specified in the request
if not token:
return jsonify({'message': 'Missing token!'})
# ensure that token is valid
try:
data = jwt.decode(token, app.config['secretkey'])
except:
return jsonify({'message': 'Invalid token!'})
return f(*args, **kwargs)
return decorated
@app.route('/unprotected')
def unprotected():
return 'unprotected'
@app.route('/protected')
@token_required
def protected():
return 'protected'
@app.route('/login')
def login():
auth = request.authorization
if auth and auth.password == 'password':
expiry = datetime.datetime.utcnow() + datetime.timedelta(minutes=30)
token = jwt.encode({'user': auth.username, 'exp': expiry}, app.config['secretkey'], algorithm='HS256')
return jsonify({'token': token.decode('UTF-8')})
return make_response('Could not verify!', 401, {'WWW-Authenticate': 'Basic realm="Login Required"'})
"""
@app.route("/logout")
@login_required
def logout():
logout_user()
return redirect("/")
#################################################################
# #
# Main Server #
# #
#################################################################
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port, debug=True, threaded=True)
| gpl-3.0 | 804,700,029,827,939,800 | 29.521739 | 119 | 0.597477 | false |
jastination/software-engineering-excercise-repository | seer_python/interviewstreet/LuckyNumber.py | 1 | 1265 | '''
Created on May 25, 2012
@author: jjhuang
'''
def getAllPrimeNumber(N):
ret = []
for n in range(2, N + 1):
isPrime = True
for i in range(2, n//2 + 1):
if(n % i == 0):
isPrime = False
break
if(isPrime):
ret.append(n)
return ret
def buildCache(N):
table1 = []
table2 = []
for x in range(N):
a = 0
b = 0
while(x > 0):
m = x % 10
a += m
b += m * m
x //= 10
table1.append(a)
table2.append(b)
return table1,table2
if __name__ == '__main__':
#T = int(input())
primeTable = set(getAllPrimeNumber(1500))
# for t in range(T):
#A,B = [int(x) for x in input().split(" ")]
A,B = 1,1000000000
# cnt = 0
# n = A
# while(n<=B):
# a = 0
# b = 0
# nn = n
# while(nn > 0):
# d = nn % MOD
# a += table1[d]
# b += table2[d]
# nn //= MOD
# if(a in primeTable and b in primeTable):
# cnt += 1
# n += 1
# print(cnt)
| mit | 6,946,488,043,172,524,000 | 16.071429 | 49 | 0.355731 | false |
DXCanas/kolibri | kolibri/core/content/test/test_data_migrations.py | 1 | 3147 | import uuid
from kolibri.core.auth.test.migrationtestcase import TestMigrations
from kolibri.core.content.models import ChannelMetadata as RealChannelMetadata
from kolibri.core.content.models import ContentNode as RealContentNode
class ChannelFieldsTestCase(TestMigrations):
migrate_from = '0011_auto_20180907_1017'
migrate_to = '0012_auto_20180910_1702'
app = 'content'
def setUp(self):
self.file_size = 10
super(ChannelFieldsTestCase, self).setUp()
def setUpBeforeMigration(self, apps):
ChannelMetadata = apps.get_model('content', 'ChannelMetadata')
ContentNode = apps.get_model('content', 'ContentNode')
LocalFile = apps.get_model('content', 'LocalFile')
File = apps.get_model('content', 'File')
Language = apps.get_model('content', 'Language')
channel_id = uuid.uuid4().hex
Language.objects.create(id='es', lang_code='es')
Language.objects.create(id='en', lang_code='en')
root = ContentNode.objects.create(id=uuid.uuid4(),
title='test',
content_id=uuid.uuid4(),
channel_id=channel_id,
lft=1,
rght=12,
tree_id=1,
level=1,
available=True,
lang_id='es')
l1 = LocalFile.objects.create(id=uuid.uuid4().hex, available=True, file_size=self.file_size)
File.objects.create(id=uuid.uuid4().hex, available=True, contentnode=root, local_file=l1)
# unavailable objects which should not be included in calculations
child = ContentNode.objects.create(id=uuid.uuid4(),
title='test',
content_id=uuid.uuid4(),
channel_id=channel_id,
lft=1,
rght=12,
tree_id=1,
level=2,
available=False,
lang_id='en',
parent=root)
l2 = LocalFile.objects.create(id=uuid.uuid4().hex, available=False, file_size=self.file_size)
File.objects.create(id=uuid.uuid4().hex, available=False, contentnode=child, local_file=l2)
ChannelMetadata.objects.create(
id=channel_id,
name='test',
root=root
)
def test_calculated_fields(self):
channel = RealChannelMetadata.objects.get()
self.assertEqual(channel.published_size, self.file_size)
self.assertEqual(channel.total_resource_count, RealContentNode.objects.filter(available=True).count())
self.assertListEqual(list(channel.included_languages.values_list('id', flat=True)), ['es'])
| mit | 4,337,543,030,775,321,600 | 48.171875 | 110 | 0.512552 | false |
steder/maroonmpi | subunit/python/subunit/tests/test_subunit_tags.py | 1 | 2324 | #
# subunit: extensions to python unittest to get test results from subprocesses.
# Copyright (C) 2005 Robert Collins <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""Tests for subunit.tag_stream."""
import unittest
from StringIO import StringIO
import subunit
class TestSubUnitTags(unittest.TestCase):
def setUp(self):
self.original = StringIO()
self.filtered = StringIO()
def test_add_tag(self):
self.original.write("tags: foo\n")
self.original.write("test: test\n")
self.original.write("tags: bar -quux\n")
self.original.write("success: test\n")
self.original.seek(0)
result = subunit.tag_stream(self.original, self.filtered, ["quux"])
self.assertEqual([
"tags: quux",
"tags: foo",
"test: test",
"tags: bar",
"success: test",
],
self.filtered.getvalue().splitlines())
def test_remove_tag(self):
self.original.write("tags: foo\n")
self.original.write("test: test\n")
self.original.write("tags: bar -quux\n")
self.original.write("success: test\n")
self.original.seek(0)
result = subunit.tag_stream(self.original, self.filtered, ["-bar"])
self.assertEqual([
"tags: -bar",
"tags: foo",
"test: test",
"tags: -quux",
"success: test",
],
self.filtered.getvalue().splitlines())
def test_suite():
loader = subunit.tests.TestUtil.TestLoader()
result = loader.loadTestsFromName(__name__)
return result
| gpl-2.0 | -8,039,500,400,970,692,000 | 32.2 | 80 | 0.63296 | false |
erzel/vitess | test/vtctld2_web_status_test.py | 1 | 9662 | #!/usr/bin/env python
"""A vtctld2 webdriver test that tests the different views of status page."""
import logging
import os
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
import unittest
from vtproto import vttest_pb2
from vttest import environment as vttest_environment
from vttest import local_database
from vttest import mysql_flavor
import environment
import utils
def setUpModule():
try:
if utils.options.xvfb:
try:
# This will be killed automatically by utils.kill_sub_processes()
utils.run_bg(['Xvfb', ':15', '-ac'])
os.environ['DISPLAY'] = ':15'
except OSError as err:
# Despite running in background, utils.run_bg() will throw immediately
# if the Xvfb binary is not found.
logging.error(
"Can't start Xvfb (will try local DISPLAY instead): %s", err)
except:
tearDownModule()
raise
def tearDownModule():
utils.required_teardown()
if utils.options.skip_teardown:
return
utils.remove_tmp_files()
class TestVtctldWeb(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Set up two keyspaces: one unsharded, one with two shards."""
if os.environ.get('CI') == 'true' and os.environ.get('TRAVIS') == 'true':
username = os.environ['SAUCE_USERNAME']
access_key = os.environ['SAUCE_ACCESS_KEY']
capabilities = {}
capabilities['tunnel-identifier'] = os.environ['TRAVIS_JOB_NUMBER']
capabilities['build'] = os.environ['TRAVIS_BUILD_NUMBER']
capabilities['platform'] = 'Linux'
capabilities['browserName'] = 'chrome'
hub_url = '%s:%s@localhost:4445' % (username, access_key)
cls.driver = webdriver.Remote(
desired_capabilities=capabilities,
command_executor='http://%s/wd/hub' % hub_url)
else:
os.environ['webdriver.chrome.driver'] = os.path.join(
os.environ['VTROOT'], 'dist')
# Only testing against Chrome for now
cls.driver = webdriver.Chrome()
topology = vttest_pb2.VTTestTopology()
topology.cells.append('test')
topology.cells.append('test2')
keyspace = topology.keyspaces.add(name='test_keyspace')
keyspace.replica_count = 2
keyspace.rdonly_count = 2
keyspace.shards.add(name='-80')
keyspace.shards.add(name='80-')
keyspace2 = topology.keyspaces.add(name='test_keyspace2')
keyspace2.shards.add(name='0')
keyspace2.replica_count = 2
keyspace2.rdonly_count = 1
port = environment.reserve_ports(1)
vttest_environment.base_port = port
mysql_flavor.set_mysql_flavor(None)
cls.db = local_database.LocalDatabase(
topology, '', False, None,
os.path.join(os.environ['VTTOP'], 'web/vtctld2/dist'),
os.path.join(os.environ['VTTOP'], 'test/vttest_schema/default'))
cls.db.setup()
cls.vtctld_addr = 'http://localhost:%d' % cls.db.config()['port']
utils.pause('Paused test after vtcombo was started.\n'
'For manual testing, connect to vtctld: %s' % cls.vtctld_addr)
@classmethod
def tearDownClass(cls):
cls.db.teardown()
cls.driver.quit()
def _get_dropdown_options(self, group):
status_content = self.driver.find_element_by_tag_name('vt-status')
dropdown = status_content.find_element_by_id(group)
return [op.text for op in
dropdown.find_elements_by_tag_name('option')]
def _get_dropdown_selection(self, group):
status_content = self.driver.find_element_by_tag_name('vt-status')
dropdown = status_content.find_element_by_id(group)
return dropdown.find_element_by_tag_name('label').text
def _change_dropdown_option(self, dropdown_id, dropdown_value):
status_content = self.driver.find_element_by_tag_name('vt-status')
dropdown = status_content.find_element_by_id(dropdown_id)
dropdown.click()
options = dropdown.find_elements_by_tag_name('li')
for op in options:
if op.text == dropdown_value:
logging.info('dropdown %s: option %s clicked', dropdown_id, op.text)
op.click()
break
def _check_dropdowns(self, keyspaces, selected_keyspace, cells, selected_cell,
types, selected_type, metrics, selected_metric):
"""Checking that all dropdowns have the correct options and selection."""
keyspace_options = self._get_dropdown_options('keyspace')
keyspace_selected = self._get_dropdown_selection('keyspace')
logging.info('Keyspace options: %s Keyspace selected: %s',
', '.join(keyspace_options), keyspace_selected)
self.assertListEqual(keyspaces, keyspace_options)
self.assertEqual(selected_keyspace, keyspace_selected)
cell_options = self._get_dropdown_options('cell')
cell_selected = self._get_dropdown_selection('cell')
logging.info('Cell options: %s Cell Selected: %s',
', '.join(cell_options), cell_selected)
self.assertListEqual(cells, cell_options)
self.assertEqual(selected_cell, cell_selected)
type_options = self._get_dropdown_options('type')
type_selected = self._get_dropdown_selection('type')
logging.info('Type options: %s Type Selected: %s',
', '.join(cell_options), cell_selected)
self.assertListEqual(types, type_options)
self.assertEqual(selected_type, type_selected)
metric_options = self._get_dropdown_options('metric')
metric_selected = self._get_dropdown_selection('metric')
logging.info('metric options: %s metric Selected: %s',
', '.join(metric_options), metric_selected)
self.assertListEqual(metrics, metric_options)
self.assertEqual(selected_metric, metric_selected)
def _check_heatmaps(self, selected_keyspace):
"""Checking that the view has the correct number of heatmaps drawn."""
status_content = self.driver.find_element_by_tag_name('vt-status')
keyspaces = status_content.find_elements_by_tag_name('vt-heatmap')
logging.info('Number of keyspaces found: %d', len(keyspaces))
if selected_keyspace == 'all':
available_keyspaces = self._get_dropdown_options('keyspace')
self.assertEqual(len(keyspaces), len(available_keyspaces)-1)
for ks in keyspaces:
heading = ks.find_element_by_id('keyspaceName')
logging.info('Keyspace name: %s', heading.text)
try:
ks.find_element_by_id(heading.text)
except NoSuchElementException:
self.fail('Cannot get keyspace')
self.assertIn(heading.text, available_keyspaces)
else:
self.assertEquals(len(keyspaces), 1)
heading = keyspaces[0].find_element_by_id('keyspaceName')
logging.info('Keyspace name: %s', heading.text)
try:
keyspaces[0].find_element_by_id(heading.text)
except NoSuchElementException:
self.fail('Cannot get keyspace')
self.assertEquals(heading.text, selected_keyspace)
def _check_new_view(
self, keyspaces, selected_keyspace, cells, selected_cell, types,
selected_type, metrics, selected_metric):
"""Checking the dropdowns and heatmaps for each newly routed view."""
logging.info('Testing realtime stats view')
self._check_dropdowns(keyspaces, selected_keyspace, cells, selected_cell,
types, selected_type, metrics, selected_metric)
self._check_heatmaps(selected_keyspace)
def test_realtime_stats(self):
logging.info('Testing realtime stats view')
# Navigate to the status page from initial app.
# TODO(thompsonja): Fix this once direct navigation works (after adding
# support for web-dir2 flag)
self.driver.get(self.vtctld_addr)
status_button = self.driver.find_element_by_partial_link_text('Status')
status_button.click()
wait = WebDriverWait(self.driver, 10)
wait.until(expected_conditions.visibility_of_element_located(
(By.TAG_NAME, 'vt-status')))
test_cases = [
(None, None, 'all', 'all', 'all'),
('type', 'REPLICA', 'all', 'all', 'REPLICA'),
('cell', 'test2', 'all', 'test2', 'REPLICA'),
('keyspace', 'test_keyspace', 'test_keyspace', 'test2', 'REPLICA'),
('cell', 'all', 'test_keyspace', 'all', 'REPLICA'),
('type', 'all', 'test_keyspace', 'all', 'all'),
('cell', 'test2', 'test_keyspace', 'test2', 'all'),
('keyspace', 'all', 'all', 'test2', 'all'),
]
for (dropdown_id, dropdown_val, keyspace, cell, tablet_type) in test_cases:
logging.info('Routing to new %s-%s-%s view', keyspace, cell, tablet_type)
if dropdown_id and dropdown_val:
self._change_dropdown_option(dropdown_id, dropdown_val)
tablet_type_options = ['all', 'MASTER', 'REPLICA', 'RDONLY']
if cell == 'test2':
tablet_type_options = ['all', 'REPLICA', 'RDONLY']
self._check_new_view(keyspaces=['all', 'test_keyspace', 'test_keyspace2'],
selected_keyspace=keyspace,
cells=['all', 'test', 'test2'],
selected_cell=cell,
types=tablet_type_options,
selected_type=tablet_type,
metrics=['lag', 'qps', 'health'],
selected_metric='health'
)
def add_test_options(parser):
parser.add_option(
'--no-xvfb', action='store_false', dest='xvfb', default=True,
help='Use local DISPLAY instead of headless Xvfb mode.')
if __name__ == '__main__':
utils.main(test_options=add_test_options)
| bsd-3-clause | -6,158,431,562,120,354,000 | 39.767932 | 80 | 0.65297 | false |
liampauling/flumine | flumine/strategy/runnercontext.py | 1 | 1835 | import logging
import datetime
from typing import Optional
logger = logging.getLogger(__name__)
class RunnerContext:
"""Runner context at strategy level"""
def __init__(self, selection_id: int):
self.selection_id = selection_id
self.invested = False
self.datetime_last_placed = None
self.datetime_last_reset = None
self.trades = []
self.live_trades = []
def place(self, trade_id) -> None:
self.invested = True
self.datetime_last_placed = datetime.datetime.utcnow()
if trade_id not in self.trades:
self.trades.append(trade_id)
if trade_id not in self.live_trades:
self.live_trades.append(trade_id)
def reset(self, trade_id) -> None:
self.datetime_last_reset = datetime.datetime.utcnow()
try:
self.live_trades.remove(trade_id)
except ValueError:
logger.warning(
"Trade '%s' not present in RunnerContext live_trades on reset"
% trade_id
)
@property
def executable_orders(self) -> bool:
if self.live_trades:
return True
else:
return False
@property
def trade_count(self) -> int:
return len(self.trades)
@property
def live_trade_count(self) -> int:
return len(self.live_trades)
@property
def placed_elapsed_seconds(self) -> Optional[float]:
if self.datetime_last_placed:
return (
datetime.datetime.utcnow() - self.datetime_last_placed
).total_seconds()
@property
def reset_elapsed_seconds(self) -> Optional[float]:
if self.datetime_last_reset:
return (
datetime.datetime.utcnow() - self.datetime_last_reset
).total_seconds()
| mit | 4,729,211,881,912,501,000 | 27.671875 | 78 | 0.585286 | false |
katadh/ngdl | ngdl.py | 1 | 9612 | import re
import nltk
import ngdl_classes
import global_vars
import ngdl_parse
import ngdl_write
def start_dialog(output_file="test.txt"):
if not global_vars.initialized:
global_vars.init()
else:
reset_global_vars()
output = open(output_file, "w")
print "Welcome to the natural language game creation program for general game playing!"
#print "First we'll work on defining the game environment"
board_size_dialog()
player_num_dialog()
game_pieces_dialog()
player_move_dialog()
goal_dialog()
terminal_dialog()
ngdl_write.write_gdl_file(output)
output.close()
def reset_global_vars():
global_vars.write_queue = [["noop", []], ["goals", []], ["terminal", []], ["distinct_cells", []], ["successors", [50]]]
global_vars.game = ngdl_classes.Game()
def board_size_dialog():
in_board_size = raw_input("What size would you like your board to be?: ")
valid_input = re.search("([0-9]+)\s?(by|x|X)\s?([0-9]+)", in_board_size)
while not valid_input:
print "Sorry, I can't understand that input yet, can you try again?"
in_board_size = raw_input("What size would you like your game to be?: ")
valid_input = re.search("([0-9]+)\s?(by|x|X)\s?([0-9]+)", in_board_size)
board_size = (valid_input.group(1), valid_input.group(3))
#confirmation = raw_input("To confirm, there will be " + board_size[0] + " columns and " + board_size[1] + " rows?: ")
global_vars.game.board = ngdl_classes.Board((int(board_size[0]), int(board_size[1])))
global_vars.write_queue.append(["board" , []])
def player_num_dialog():
in_player_num = raw_input("How many players does your game have?: ")
valid_input = re.search("[0-9]+", in_player_num)
while not valid_input:
print "Sorry, I can't understand that input yet, can you try again?"
in_player_num = raw_input("How many players does your game have?: ")
valid_input = re.search("[0-9]+", in_player_num)
num_players = int(valid_input.group())
for p in range(1,num_players+1):
global_vars.game.players.append(ngdl_classes.Player("player" + str(p)))
global_vars.write_queue.append(["players", []])
def game_pieces_dialog():
for player in global_vars.game.players:
in_piece_names = raw_input("What types of pieces does " + player.name + " have?: ")
pieces = re.findall("([0-9]*)\s|^([^\W\d]+)", in_piece_names)
for p in pieces:
global_vars.game.pieces[p[1]] = ngdl_classes.Piece(p[1])
player.pieces.append(p[1])
on_board_response = raw_input("Do any of " + player.name + "'s pieces start on the board?: ")
on_board_response = on_board_response.lower()
if not re.match("[no|n]", on_board_response):
for p in pieces:
if p[0] == "" or int(p[0]) > 1:
p_positions = raw_input("What are the starting positions <col, row> of the " +
p[1] + " that start on the board? (enter to skip): ")
else:
p_positions = raw_input("What is the starting position <col, row> of the " +
p[1] + " if it starts on the board? (enter to skip): ")
positions = re.findall("([0-9]+),\s?([0-9]+)", p_positions)
if positions:
for pos in positions:
global_vars.game.board.starting_positions[(int(pos[0]), int(pos[1]))] = player.name + " " + piece.name
def player_move_dialog():
move_conditions = raw_input("What can a player do on their turn?: ")
parse_trees = ngdl_parse.parse(move_conditions, 2)
nltk_tree = parse_trees[0]
tree = translate_tree(nltk_tree)
conditions = process_condition(tree)
action = tree.find_closest_node("ACTION")
while action.children:
index = [child.name for child in action.children].index("ACTION")
action = action[index]
if action.value == "drop":
drop_response = raw_input("By 'drop', do you mean dropping a piece like in Connect-4, or placing a piece like in Shogi?: ")
drop_response.lower()
if re.match("[connect\-4|drop]", drop_response):
global_vars.write_queue.append(["drop_occupant_conditions", [[conditions]]])
global_vars.write_queue.append(["perpetuate_untouched_cells", [["drop"]]])
else:
global_vars.write_queue.append(["place_occupant_conditions", [[conditions]]])
global_vars.write_queue.append(["perpetuate_untouched_cells", [["place"]]])
elif action.value in ["place", "mark"]:
global_vars.write_queue.append(["place_occupant_conditions", [[conditions]]])
global_vars.write_queue.append(["perpetuate_untouched_cells", [["place"]]])
#def piece_move_dialog():
def goal_dialog():
win_conditions = raw_input("How does a player win?: ")
parse_trees = ngdl_parse.parse(win_conditions, 1)
nltk_tree = parse_trees[0]
tree = translate_tree(nltk_tree)
#result = tree.find_closest_node("RESULT")
conditions_tree = tree.find_closest_node("COND")
conditions = process_condition(conditions_tree)
global_vars.write_queue.append(["win_conditions", [[conditions], ""]])
def terminal_dialog():
game_end_conditions = raw_input("Aside from a player winning, how does the game end?: ")
parse_trees = ngdl_parse.parse(game_end_conditions, 1)
nltk_tree = parse_trees[0]
tree = translate_tree(nltk_tree)
conditions_tree = tree.find_closest_node("COND")
conditions = process_condition(conditions_tree)
global_vars.write_queue.append(["game_end_conditions", [[conditions]]])
def process_result(result):
return
def process_conditions(conds):
conditions = []
if "OR" in [child.name for child in conds.children]:
conditions.append("OR")
for child in conds.children:
if child.name == "COND":
conditions.append(process_condition(child))
elif "AND" in [child.name for child in conds.children]:
conditions.append("AND")
for child in conds.children:
if child.name == "COND":
conditions.append(process_condition(child))
else:
conditions.append("COND")
conditions.append(process_condition(conds))
return conditions
def process_condition(cond_node):
for leaf in cond_node.leaves():
if leaf.value in cond_dictionary:
cond_definition = cond_dictionary[leaf.value]
slot_values = []
for slot in cond_definition[0]:
slot_node = leaf.find_closest_node(slot[0])
if not slot_node:
if len(slot) == 2:
slot_values.append(slot[1])
else:
print "Slot fill error1!"
elif cond_node not in slot_node.ancestors():
if len(slot) == 2:
slot_values.append(slot[1])
else:
print "Slot fill error2!"
elif slot_node.name == "PLAYER":
slot_values.append(process_player(slot_node))
elif slot_node.name == "BOARD_PART":
slot_values.append(process_board_part(slot_node))
elif slot_node.name == "PIECE":
slot_values.append(process_piece(slot_node))
else:
slot_values.append(slot_node.value)
if cond_definition[-1]:
global_vars.write_queue.append([cond_definition[2], slot_values])
else:
global_vars.write_queue.append([cond_definition[2], []])
return cond_definition[1].format(*slot_values)
def process_player(player_node):
return "?player"
def process_board_part(board_part_node):
square_equivalents = ["cell"]
board_part = board_part_node
while board_part.children:
index = [child.name for child in board_part.children].index("BOARD_PART")
board_part = board_part[index]
if board_part.value in square_equivalents:
return "square"
else:
return board_part.value
def process_piece(piece_node):
piece = piece_node
while piece.children:
index = [child.name for child in piece.children].index("PIECE")
piece = piece[index]
if piece.value == "piece":
return "?piece"
else:
return piece.value
def translate_tree(nltk_tree):
if nltk_tree.height() == 2:
tree = ngdl_classes.Tree(nltk_tree.node)
tree.value = nltk_tree[0]
return tree
tree = ngdl_classes.Tree(nltk_tree.node)
for subtree in nltk_tree:
if type(subtree) == str:
tree.value = subtree
else:
tree.children.append(translate_tree(subtree))
for subtree in tree.children:
subtree.parent = tree
return tree
cond_dictionary = {"empty": [[["BOARD_PART"], ["NUM", "?col"], ["NUM", "?row"]], "(empty {0} {1} {2})", "board_part_empty", False],
"open": [[["BOARD_PART"], ["NUM", "?col"], ["NUM", "?row"]], "(open {0} {1} {2})", "board_part_open", False],
"full": [[["BOARD_PART"], ["NUM", "?col"], ["NUM", "?row"]], "(full {0} {1} {2})", "board_part_full", False],
"in-a-row": [[["NUM"], ["PLAYER", "?player"], ["PIECE", "?piece"]], "({0}_in_a_row {1} {2})", "x_in_a_row", True]
}
| gpl-3.0 | 578,165,373,975,564,700 | 38.073171 | 139 | 0.577507 | false |
Dicotomix/DicotomixNewSrv | server.py | 1 | 12287 | import asyncio
import struct
import dictionary
import datetime
import tests
from collections import *
from os import listdir
from os.path import isfile, join
from enum import Enum
from dicotomix import Dicotomix, Direction, NotFoundException, OrderException
import unidecode
import sys
import numpy as np
ENABLE_TESTS = False
ENABLE_NGRAMS_LETTER = True
ENABLE_ELAG = False
grams = {}
spelling_buffer = []
default_letters = []
def _boundPrefix(left, right):
k = 0
for i in range(min(len(left),len(right))):
if left[i] != right[i]:
break
k += 1
return k
class _StateID(Enum):
HEADER = 0
LEN = 1
STR = 2
class _NetworkState:
def __init__(self):
self.header = None
self.len = None
self.str = None
def state(self):
if self.header == None:
return _StateID.HEADER
elif self.len == None:
return _StateID.LEN
else:
return _StateID.STR
DATA_PATH = "data/"
class Server(asyncio.Protocol):
def __init__(self):
self.dicotomix = None
self.words = None
self.buffer = []
self.state = _NetworkState()
self.spelling = False
self.users = []
self.login = None
self.logFile = None
def _log(self, header, message):
if self.logFile == None:
return
self.logFile.write('{:%Y-%m-%d %H:%M:%S}|{}|{}\n'.format(
datetime.datetime.now(),
header,
message
))
def connection_made(self, transport):
self.transport = transport
self.address = transport.get_extra_info('peername')
print('Connection accepted: {}'.format(*self.address))
def data_received(self, data):
self.buffer += data
while self.consume_buffer():
pass
def consume_buffer(self):
if self.state.state() == _StateID.HEADER and len(self.buffer) >= 1:
self.state.header = self.buffer[0]
self._log('NET', 'header:{}'.format(self.state.header))
return True
elif self.state.state() == _StateID.LEN and len(self.buffer) >= 3:
self.state.len = struct.unpack('>h', bytes(self.buffer[1 : 3]))[0]
self._log('NET', 'len:{}'.format(self.state.len))
return True
elif self.state.state() == _StateID.STR and len(self.buffer) >= 3 + self.state.len:
self.state.str = bytes(self.buffer[3 : 3 + self.state.len]).decode('utf-8')
self._log('NET', 'str:{}'.format(self.state.str))
self.process()
self.buffer = self.buffer[3 + self.state.len : ]
self.state = _NetworkState()
return True
return False
def process(self):
global spelling_buffer, grams, default_letters
left = None
word = None
right = None
try:
if self.state.header == 1:
self._log('DIC', 'restart')
left, word, right = self.dicotomix.nextWord(Direction.START, self.spelling)
print("ICI: ",len(self.dicotomix._words))
elif self.state.header == 2:
self._log('DIC', 'go_left')
left, word, right = self.dicotomix.nextWord(Direction.LEFT, self.spelling)
elif self.state.header == 3:
self._log('DIC', 'go_right')
left, word, right = self.dicotomix.nextWord(Direction.RIGHT, self.spelling)
elif self.state.header == 4:
self._log('DIC', 'discard')
left, word, right = self.dicotomix.discard()
elif self.state.header == 5: # spelling mode
self.dicotomix.toggleSpelling()
self.spelling = not self.spelling
spelling_buffer = []
if self.spelling:
default_letters = self.dicotomix._words
self._log('DIC', 'start_spelling')
else:
self.dicotomix._letters = default_letters[:]
self._EPSILON2 = self._FIRST_EPSILON2
self._log('DIC', 'stop_selling')
return
elif self.state.header == 6: # send users list
onlyfiles = [f for f in listdir(DATA_PATH) if isfile(join(DATA_PATH, f))]
for f in onlyfiles:
name, ext = f.split('.')
if ext == 'data':
self.users.append(name)
self.users.append("[new]")
data = '\n'.join(self.users).encode('utf8')
self.transport.write(struct.pack('>h', len(data)))
self.transport.write(struct.pack('>h', 0))
self.transport.write(data)
return
elif self.state.header == 7: # get user name
if self.login != None:
return
if self.state.str not in self.users:
print('Create user ' + self.state.str)
open(DATA_PATH + self.state.str + '.data', 'a').close()
addenda = ''
if ENABLE_ELAG == True:
addenda = '_elag'
self.login = self.state.str
words, letters = dictionary.loadDictionary2(
DATA_PATH + 'new_lexique'+addenda+'.csv',
DATA_PATH + self.login + '.data'
)
self.words = words
self.logFile = open(DATA_PATH + self.login + '.log', 'a')
self._log('DIC', 'connected:{}'.format(self.login))
# extract (cumulative frequency, word) from the whole dictionary
feed_words = dictionary.computeFeed(words)
feed_letters = dictionary.computeFeed(letters)
#for w in feed_words[:100]:
#print(w)
self.dicotomix = Dicotomix(feed_words, feed_letters)
if ENABLE_TESTS:
tests.testAll(Dicotomix(feed_words), feed_words, self.words)
if ENABLE_NGRAMS_LETTER:
grams = tests.ngram_letter(Dicotomix(feed_words), feed_words, self.words)
return
elif self.state.header == 8: # custom word
if self.spelling or len(self.state.str) == 0:
return
self._log('DIC', 'add_word:{}'.format(self.state.str))
freq = 1000.
normalized = dictionary.normalize(self.state.str)
add = False
if normalized not in self.words:
self.words[normalized] = [freq, [self.state.str]]
add = True
elif self.state.str not in self.words[normalized][1]:
self.words[normalized][0] += freq
self.words[normalized][1].append(self.state.str)
add = True
if add:
file = open(DATA_PATH + self.login + '.data', 'a')
file.write('{}|{}|{}\n'.format(
self.state.str,
normalized,
freq
))
file.close()
self.words = OrderedDict(sorted(
self.words.items(),
key = lambda x: x[0]
))
feed_words = dictionary.computeFeed(self.words)
self.dicotomix.reinit(feed_words)
else:
self._log('DIC', 'already_exists')
return
elif self.state.header == 9: #validate letter in spelling mode
spelling_buffer.append(self.state.str)
print(spelling_buffer)
H = 0.0
for (i,w) in enumerate(self.dicotomix._words[1:]):
print(w[1],self.dicotomix._wordLength(i))
H += self.dicotomix._wordLength(i)*np.log(self.dicotomix._wordLength(i))
H /= -np.log(26)
print("Old H: ", H)
the_end = ''.join(spelling_buffer[-4:])
if the_end in grams:
our_distro = grams[the_end]
default_val = 1
print(our_distro)
print(default_val)
new_letters = [[0.0,'a']]
for f,l in self.dicotomix._words[1:]:
if l in our_distro:
new_letters.append([our_distro[l]*1000,l])
else:
new_letters.append([default_val,l])
to_print = new_letters[:]
to_print.sort(reverse=True, key=lambda x: x[0])
for a in to_print:
print(a[1], a[0])
the_sum = 0.0
for i in range(len(new_letters)):
the_sum += new_letters[i][0]
new_letters[i][0] = the_sum
for i in range(len(new_letters)):
new_letters[i][0] /= the_sum
for i in range(len(new_letters)):
new_letters[i] = (new_letters[i][0],new_letters[i][1])
#for f,l in new_letters:
#print(f,l)
self.dicotomix._words = new_letters[:]
H = 0.0
for (i,w) in enumerate(self.dicotomix._words[1:]):
print(w[1],self.dicotomix._wordLength(i))
H += self.dicotomix._wordLength(i)*np.log(self.dicotomix._wordLength(i))
H /= -np.log(26)
self.dicotomix._EPSILON2 = 1-H
print("New H: ", H)
else:
self.dicotomix._words = default_letters[:]
return
except NotFoundException:
self._log('DIC', 'not_found_exception')
if self.spelling:
self._log('DIC', 'auto_restart')
left, word, right = self.dicotomix.nextWord(Direction.START)
else:
self._log('DIC', 'auto_spelling')
dummy = 'a'.encode('utf8')
self.transport.write(struct.pack('>h', len(dummy)))
self.transport.write(struct.pack('>h', -1)) # ask UI to start spelling mode
self.transport.write(dummy)
return
except OrderException:
self._log('NET', 'order_exception')
return
except AttributeError:
self._log('NET', 'attribute_error')
return
self._log('DIC', 'words:{}:{}:{}'.format(left, word, right))
prefix = _boundPrefix(left, right)
self._log('DIC', 'prefix:{}'.format(prefix))
if not self.spelling:
if word != 'a' and word != '.':
words = filter(lambda x: len(x) > 1, self.words[word][1])
else:
words = self.words[word][1]
else:
words = filter(lambda x: x[0] != '[', self.words[word][1])
if self.spelling:
print(spelling_buffer)
to_send = list(words)
canonique = ''
for k in to_send:
if len(k) != 1:
continue
canonique = unidecode.unidecode(k)
break
i_can = 0
for (i,k) in enumerate(to_send):
if k == canonique:
i_can = i
to_send[0],to_send[i_can] = to_send[i_can],to_send[0]
data = '\n'.join(to_send)
data = data.encode('utf8')
self.transport.write(struct.pack('>h', len(data)))
self.transport.write(struct.pack('>h', prefix))
self.transport.write(data)
def connection_lost(self, error):
if self.logFile != None:
self._log('NET', 'disconnected:{}'.format(self.login))
self.logFile.close()
if error:
print('ERROR: {}'.format(error))
else:
print('Closing connection')
super().connection_lost(error)
exit(0)
| mit | -5,168,988,374,299,193,000 | 35.032258 | 96 | 0.482787 | false |
geodynamics/citcoms | visual/Mayavi2/original_plugins/Citcoms_Hdf2Vtk.py | 1 | 22127 | #!/usr/bin/env python
# Script to generate VTK files from CitcomS HDF files
# author: Martin Weier
# Copyright (C) 2006 California Institute of Technology
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#import scipy
import sys
from datetime import datetime
from getopt import getopt, GetoptError
from pprint import *
from math import *
import tables #For HDF support
import numpy
import pyvtk
# defaults
path = "./example0.h5"
vtk_path = "./vtk_output"
vtkfile = "%s.%d.vtk"
initial = 0
timesteps= None
create_topo = False
create_bottom = False
create_surface = False
create_ascii = False
nx = None
ny = None
nz = None
nx_redu=None
ny_redu=None
nz_redu=None
el_nx_redu = None
el_ny_redu = None
el_nz_redu = None
radius_inner = None
radius_outer = None
nproc_surf = None
#File handle to the HDF file
f = None
#####################
polygons3d = [] # arrays containing connectivity information
polygons2d = []
counter=0 #Counts iterations of citcom2vtk
def print_help():
print "Program to convert CitcomS HDF to Vtk files.\n"
print "-p, --path [path to hdf] \n\t Specify input file."
print "-o, --output [output filename] \n\t Specify the path to the folder for output files."
print ("-i, --initial [initial timestep] \n\t Specify initial timestep to export. If not \n \
\t specified script starts exporting from timestep 0.")
print "-t, --timestep [max timestep] \n\t Specify to which timestep you want to export. If not\n \
\t specified export all all timestep starting from intial timestep."
print "-x, --nx_reduce [nx] \n\t Set new nx to reduce output grid."
print "-y, --ny_reduce [ny] \n\t Set new ny to reduce output grid."
print "-z, --nz_reduce [nz] \n\t Set new nz to reduce output grid."
print "-b, --bottom \n\t Set to export Bottom information to Vtk file."
print "-s, --surface \n\t Set to export Surface information to Vtk file."
print "-c, --createtopo \n\t Set to create topography information in bottom and surface Vtk file."
print "-a, --ascii \n\t Create Vtk ASCII encoded files instead if binary."
print "-h, --help, -? \n\t Print this help."
#Iterator for CitcomDataRepresentation(yxz) to VTK(xyz)
def vtk_iter(nx,ny,nz):
for i in xrange(nx):
for j in xrange(ny):
for k in xrange(nz):
yield k + i * nz + j * nz * nx
#Reduces the CitcomS grid
def reduce_iter(n,nredu):
i=0
n_f=float(n)
nredu_f=float(nredu)
fl=(n_f-1)/nredu_f
redu = 0
for i in xrange(nredu+1):
yield int(round(redu))
redu = redu + fl
#Transform Vectors in Spherical to Cartesian Coordinates 2d
#def velocity2cart2d(vel_colat, vel_lon,x , y):
# x1 = vel_colat*cos(x)*cos(y)-vel_lon*sin(y)
# y1 = vel_colat*cos(x)*sin(y)+vel_lon*cos(y)
# z1 = -vel_colat*sin(x)
# return x1,y1,z1
#Converts Spherical to Carthesian Coordinates 2d
#def RTF2XYZ2d(vel_colat, vel_lon):
# x1 = sin(vel_colat)*cos(vel_lon)
# y1 = sin(vel_colat)*sin(vel_lon)
# z1 = cos(vel_colat)
# return x1,y1,z1
#Transform Vectors in Spherical to Cartesian Coordinates
def velocity2cart(vel_colat,vel_long,r, x, y, z):
x1 = r*sin(x)*cos(y)+vel_colat*cos(x)*cos(y)-vel_long*sin(y)
y1 = r*sin(x)*sin(y)+vel_colat*cos(x)*sin(y)+vel_long*cos(y)
z1 = r*cos(x)-vel_colat*sin(x)
return x1, y1, z1
#Converts Spherical to Cartesian Coordinates
def RTF2XYZ(thet, phi, r):
x = r * sin(thet) * cos(phi)
y = r * sin(thet) * sin(phi)
z = r * cos(thet)
return x, y, z
#Reads Citcom Files and creates a VTK File
def citcom2vtk(t):
print "Timestep:",t
benchmarkstr = ""
#Assign create_bottom and create_surface to bottom and surface
#to make them valid in methods namespace
bottom = create_bottom
surface = create_surface
ordered_points = [] #reset Sequences for points
ordered_temperature = []
ordered_velocity = []
ordered_visc = []
#Surface and Bottom Points
#Initialize empty sequences
surf_vec = []
botm_vec = []
surf_topo = []
surf_hflux = []
botm_topo = []
botm_hflux = []
surf_points = []
botm_points = []
for capnr in xrange(nproc_surf):
###Benchmark Point 1 Start##
#start = datetime.now()
############################
print "Processing cap",capnr+1,"of",nproc_surf
cap = f.root._f_getChild("cap%02d" % capnr)
#Information from hdf
#This information needs to be read only once
hdf_coords = cap.coord[:]
hdf_velocity = cap.velocity[t]
hdf_temperature = cap.temperature[t]
hdf_viscosity = cap.viscosity[t]
###Benchmark Point 1 Stop##
#delta = datetime.now() - start
#benchmarkstr += "%.5lf," % (delta.seconds + float(delta.microseconds)/1e6)
###Benchmark Point 2 Start##
#start = datetime.now()
############################
#Create Iterator to change data representation
nx_redu_iter = reduce_iter(nx,nx_redu)
ny_redu_iter = reduce_iter(ny,ny_redu)
nz_redu_iter = reduce_iter(nz,nz_redu)
#vtk_i = vtk_iter(el_nx_redu,el_ny_redu,el_nz_redu)
# read citcom data - zxy (z fastest)
for j in xrange(el_ny_redu):
j_redu = ny_redu_iter.next()
nx_redu_iter = reduce_iter(nx,nx_redu)
for i in xrange(el_nx_redu):
i_redu = nx_redu_iter.next()
nz_redu_iter = reduce_iter(nz,nz_redu)
for k in xrange(el_nz_redu):
k_redu = nz_redu_iter.next()
colat, lon, r = map(float,hdf_coords[i_redu,j_redu,k_redu])
x_coord, y_coord, z_coord = RTF2XYZ(colat,lon,r)
ordered_points.append((x_coord,y_coord,z_coord))
ordered_temperature.append(float(hdf_temperature[i_redu,j_redu,k_redu]))
ordered_visc.append(float(hdf_viscosity[i_redu,j_redu,k_redu]))
vel_colat, vel_lon , vel_r = map(float,hdf_velocity[i_redu,j_redu,k_redu])
x_velo, y_velo, z_velo = velocity2cart(vel_colat,vel_lon,vel_r, colat,lon , r)
ordered_velocity.append((x_velo,y_velo,z_velo))
##Delete Objects for GC
del hdf_coords
del hdf_velocity
del hdf_temperature
del hdf_viscosity
###Benchmark Point 2 Stop##
#delta = datetime.now() - start
#benchmarkstr += "%.5lf," % (delta.seconds + float(delta.microseconds)/1e6)
###Benchmark Point 3 Start##
#start = datetime.now()
############################
#Bottom Information from hdf
if bottom == True:
try:
hdf_bottom_coord = cap.botm.coord[:]
hdf_bottom_heatflux = cap.botm.heatflux[t]
hdf_bottom_topography = cap.botm.topography[t]
hdf_bottom_velocity = cap.botm.velocity[t]
except:
print "\tCould not find bottom information in file.\n \
Set create bottom to false"
bottom = False
#Surface Information from hdf
if surface==True:
try:
hdf_surface_coord = cap.surf.coord[:]
hdf_surface_heatflux = cap.surf.heatflux[t]
hdf_surface_topography = cap.surf.topography[t]
hdf_surface_velocity = cap.surf.velocity[t]
except:
print "\tCould not find surface information in file.\n \
Set create surface to false"
surface = False
###Benchmark Point 3 Stop##
#delta = datetime.now() - start
#benchmarkstr += "%.5lf," % (delta.seconds + float(delta.microseconds)/1e6)
###Benchmark Point 4 Start##
#start = datetime.now()
############################
#Compute surface/bottom topography mean
if create_topo:
surf_mean=0.0
botm_mean=0.0
if surface:
for i in xrange(nx):
surf_mean += numpy.mean(hdf_surface_topography[i])
surf_mean = surf_mean/ny
if bottom:
for i in xrange(nx):
botm_mean += numpy.mean(hdf_bottom_topography[i])
botm_mean = botm_mean/nx
###Benchmark Point 4 Stop##
#delta = datetime.now() - start
#benchmarkstr += "%.5lf," % (delta.seconds + float(delta.microseconds)/1e6)
###Benchmark Point 5 Start##
#start = datetime.now()
############################
#Read Surface and Bottom Data
if bottom==True or surface == True:
for i in xrange(nx):
for j in xrange(ny):
if bottom==True:
#Bottom Coordinates
if create_topo==True:
colat, lon = hdf_bottom_coord[i,j]
x,y,z = RTF2XYZ(colat,lon,radius_inner+float( (hdf_bottom_topography[i,j]-botm_mean)*(10**21)/(6371000**2/10**(-6))/(3300*10)/1000 ))
botm_points.append((x,y,z))
else:
colat, lon = hdf_bottom_coord[i,j]
x,y,z = RTF2XYZ(colat, lon,radius_inner)
botm_points.append((x,y,z))
#Bottom Heatflux
botm_hflux.append(float(hdf_bottom_heatflux[i,j]))
#Bottom Velocity
vel_colat, vel_lon = map(float,hdf_bottom_velocity[i,j])
x,y,z = velocity2cart(vel_colat,vel_lon, radius_inner, colat, lon, radius_inner)
botm_vec.append((x,y,z))
if surface==True:
#Surface Information
if create_topo==True:
colat,lon = hdf_surface_coord[i,j]
#637100 = Earth radius, 33000 = ?
x,y,z = RTF2XYZ(colat,lon,radius_outer+float( (hdf_surface_topography[i,j]-surf_mean)*(10**21)/(6371000**2/10**(-6))/(3300*10)/1000 ))
surf_points.append((x,y,z))
else:
colat, lon = hdf_surface_coord[i,j]
x,y,z = RTF2XYZ(colat, lon,radius_outer)
surf_points.append((x,y,z))
#Surface Heatflux
surf_hflux.append(float(hdf_surface_heatflux[i,j]))
#Surface Velocity
vel_colat, vel_lon = map(float,hdf_surface_velocity[i,j])
x,y,z = velocity2cart(vel_colat,vel_lon, radius_outer, colat, lon, radius_outer)
surf_vec.append((x,y,z))
#del variables for GC
if bottom==True:
del hdf_bottom_coord
del hdf_bottom_heatflux
del hdf_bottom_velocity
if surface==True:
del hdf_surface_coord
del hdf_surface_heatflux
del hdf_surface_velocity
###Benchmark Point 5 Stop##
#delta = datetime.now() - start
#benchmarkstr += "%.5lf," % (delta.seconds + float(delta.microseconds)/1e6)
###Benchmark Point 6 Start##
#start = datetime.now()
############################
##################################################################
#Create Connectivity info
if counter==0:
#For 3d Data
i=1 #Counts X Direction
j=1 #Counts Y Direction
k=1 #Counts Z Direction
for n in xrange((el_nx_redu*el_ny_redu*el_nz_redu)-(el_nz_redu*el_nx_redu)):
if (i%el_nz_redu)==0: #X-Values!!!
j+=1 #Count Y-Values
if (j%el_nx_redu)==0:
k+=1 #Count Z-Values
if i%el_nz_redu!=0 and j%el_nx_redu!=0: #Check if Box can be created
#Get Vertnumbers
n0 = n+(capnr*(el_nx_redu*el_ny_redu*el_nz_redu))
n1 = n0+el_nz_redu
n2 = n1+el_nz_redu*el_nx_redu
n3 = n0+el_nz_redu*el_nx_redu
n4 = n0+1
n5 = n4+el_nz_redu
n6 = n5+el_nz_redu*el_nx_redu
n7 = n4+el_nz_redu*el_nx_redu
#Created Polygon Box
polygons3d.append([n0,n1,n2,n3,n4,n5,n6,n7]) #Hexahedron VTK Representation
i+=1
if bottom==True or surface==True:
#Connectivity for 2d-Data
i=1
for n in xrange((nx)*(ny) - ny):
if i%ny!=0 :
n0 = n+(capnr*((nx)*(ny)))
n1 = n0+1
n2 = n0+ny
n3 = n2+1
polygons2d.append([n0,n1,n2,n3])
i+=1
###Benchmark Point 6 Stop##
#delta = datetime.now() - start
#benchmarkstr += "%.5lf\n" % (delta.seconds + float(delta.microseconds)/1e6)
#print benchmarkstr
#################################################################
#Write Data to VTK
#benchmarkstr = "\n\nIO:\n"
###Benchmark Point 7 Start##
#start = datetime.now()
############################
print 'Writing data to vtk...'
#Surface Points
if surface==True:
struct_coords = pyvtk.UnstructuredGrid(surf_points, pixel=polygons2d)
#topo_scal = pyvtk.Scalars(surf_topo,'Surface Topography', lookup_table='default')
hflux_scal = pyvtk.Scalars(surf_hflux,'Surface Heatflux',lookup_table='default')
vel_vec = pyvtk.Vectors(surf_vec,'Surface Velocity Vectors')
##
tempdata = pyvtk.PointData(hflux_scal,vel_vec)
data = pyvtk.VtkData(struct_coords, tempdata,'CitcomS Output %s Timestep %s' % ('surface info',t))
if create_ascii:
data.tofile(vtk_path + (vtkfile % ('surface',t)),)
else:
data.tofile(vtk_path + (vtkfile % ('surface',t)),'binary')
print "Written Surface information to file"
###Benchmark Point 7 Stop##
#delta = datetime.now() - start
#benchmarkstr += "%.5lf," % (delta.seconds + float(delta.microseconds)/1e6)
###Benchmark Point 8 Start##
#start = datetime.now()
############################
if bottom==True:
#Bottom Points
struct_coords = pyvtk.UnstructuredGrid(botm_points, pixel=polygons2d)
#topo_scal = pyvtk.Scalars(botm_topo,'Bottom Topography','default')
hflux_scal = pyvtk.Scalars(botm_hflux,'Bottom Heatflux','default')
vel_vec = pyvtk.Vectors(botm_vec,'Bottom Velocity Vectors')
##
tempdata = pyvtk.PointData(hflux_scal,vel_vec)
data = pyvtk.VtkData(struct_coords, tempdata, 'CitcomS Output %s Timestep %s' % ('Bottom info',t))
if create_ascii:
data.tofile(vtk_path + (vtkfile % ('bottom',t)))
else:
data.tofile(vtk_path + (vtkfile % ('bottom',t)),'binary')
print "Written Bottom information to file"
###Benchmark Point 8 Stop##
#delta = datetime.now() - start
#benchmarkstr += "%.5lf," % (delta.seconds + float(delta.microseconds)/1e6)
###Benchmark Point 9 Start##
#start = datetime.now()
#General Data
struct_coords = pyvtk.UnstructuredGrid(ordered_points,hexahedron=polygons3d)
vel_vec = pyvtk.Vectors(ordered_velocity, 'Velocity Vectors')
temp_scal = pyvtk.Scalars(ordered_temperature,'Temperature Scalars','default')
visc_scal = pyvtk.Scalars(ordered_visc,'Viscosity Scalars','default')
##
tempdata = pyvtk.PointData(temp_scal,visc_scal,vel_vec)
data = pyvtk.VtkData(struct_coords, tempdata, 'CitcomS Output %s Timestep:%d NX:%d NY:%d NZ:%d Radius_Inner:%f' % (path,t,el_nx_redu,el_ny_redu,el_nz_redu,radius_inner))
############################
if create_ascii:
data.tofile(vtk_path + (vtkfile % ('general',t)))
else:
data.tofile(vtk_path + (vtkfile % ('general',t)),'binary')
print "Written general data to file"
###Benchmark Point 9 Stop##
#delta = datetime.now() - start
#benchmarkstr += "%.5lf\n" % (delta.seconds + float(delta.microseconds)/1e6)
#print benchmarkstr
#print "\n"
# parse command line parameters
def initialize():
global path
global vtk_path
global initial
global timesteps
global create_topo
global create_bottom
global create_surface
global create_ascii
global nx
global ny
global nz
global nx_redu
global ny_redu
global nz_redu
global el_nx_redu
global el_ny_redu
global el_nz_redu
global radius_inner
global radius_outer
global nproc_surf
global f
try:
opts, args = getopt(sys.argv[1:], "p:o:i:t:x:y:z:bscah?", ['path=','output=','timestep=','x=','y=','z=','bottom','surface','createtopo','ascii', 'help','?'])
except GetoptError, msg:
print "Error: %s" % msg
sys.exit(1)
if len(opts)<=1:
print_help()
sys.exit(0)
for opt,arg in opts:
if opt in ('-p','--path'):
path = arg
if opt in ('-o','--output'):
vtk_path = arg
if opt in ('-i','--initial'):
try:
initial = int(arg)
except ValueError:
print "Initial is not a number."
sys.exit(1)
if opt in ('-t','--timestep'):
try:
timesteps = int(arg)
except ValueError:
print "Timestep is not a number."
sys.exit(1)
if opt in ('-x','--nx_reduce'):
try:
nx_redu = int(arg)
except ValueError:
print "NX is not a number."
if opt in ('-y','--ny_reduce'):
try:
ny_redu = int(arg)
except ValueError:
print "NY is not a number."
if opt in ('-z','--nz_reduce'):
try:
nz_redu = int(arg)
except ValueError:
print "NZ is not a number."
if opt in ('-b','--bottom'):
create_bottom = True
if opt in ('-s','--surface'):
create_surface = True
if opt in ('-c','--createtopo'):
create_topo = True
if opt in ('-a','--ascii'):
create_ascii = True
if opt in ('-h','--help'):
print_help()
sys.exit(0)
if opt == '-?':
print_help()
sys.exit(0)
f = tables.openFile(path,'r')
nx = int(f.root.input._v_attrs.nodex)
ny = int(f.root.input._v_attrs.nodey)
nz = int(f.root.input._v_attrs.nodez)
#If not defined as argument read from hdf
hdf_timesteps = int(f.root.time.nrows)
if timesteps==None or timesteps>hdf_timesteps:
timesteps = hdf_timesteps
if nx_redu==None:
nx_redu = nx-1
if ny_redu==None:
ny_redu = ny-1
if nz_redu==None:
nz_redu = nz-1
if nx_redu>=nx:
nx_redu=nx-1
if ny_redu>=ny:
ny_redu=ny-1
if nz_redu>=nz:
nz_redu=nz-1
el_nx_redu = nx_redu+1
el_ny_redu = ny_redu+1
el_nz_redu = nz_redu+1
radius_inner = float(f.root.input._v_attrs.radius_inner)
radius_outer = float(f.root.input._v_attrs.radius_outer)
nproc_surf = int(f.root.input._v_attrs.nproc_surf)
###############################################################################
def citcoms_hdf2vtk():
global counter
#Call initialize to get and set input params
initialize()
d1 = datetime.now()
print "Converting Hdf to Vtk"
print "Initial:",initial, "Timesteps:",timesteps
print "NX:",el_nx_redu, "NY:",el_ny_redu, "NZ:", el_nz_redu
print "Create Bottom: ",create_bottom, " Create Surface: ", create_surface
print "Create Topography: ", create_topo
for t in xrange(initial,timesteps):
start = datetime.now()
citcom2vtk(t)
counter+=1
delta = datetime.now() - start
print "\t%.3lf sec" % (delta.seconds + float(delta.microseconds)/1e6)
d2 = datetime.now()
f.close()
print "Total: %d seconds" % (d2 - d1).seconds
###############################################################################
if __name__ == '__main__':
citcoms_hdf2vtk()
| gpl-2.0 | -704,908,123,187,786,500 | 34.011076 | 173 | 0.519953 | false |
inveniosoftware/invenio-communities | tests/records/collections/test_collections.py | 1 | 3019 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2021 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Community module tests."""
import pytest
from flask import url_for
from invenio_accounts.testutils import login_user_via_session
@pytest.mark.skip()
def test_simple_flow(db, es_clear, community, accepted_community_record, client, community_owner):
"""Test basic operations on collections."""
comid, community = community
collections_list_url = url_for(
'invenio_communities_collections.collections_list',
pid_value=comid.pid_value)
# list
resp = client.get(collections_list_url)
assert resp.status_code == 200
assert resp.json == {}
# create
collection_data = {
'id': 'test',
'title': 'Test collection',
'description': 'Test collection description',
}
resp = client.post(collections_list_url, json=collection_data)
assert resp.status_code == 401
login_user_via_session(client, user=community_owner)
resp = client.post(collections_list_url, json=collection_data)
assert resp.status_code == 201
created_resp_json = resp.json
collection_item_url = created_resp_json['links']['self']
assert created_resp_json == {
'title': collection_data['title'],
'description': collection_data['description'],
'links': {
'self': '/communities/{}/collections/test'.format(comid.pid_value)
},
}
# read
resp = client.get(collection_item_url)
assert resp.status_code == 200
assert resp.json == created_resp_json
# update
resp = client.put(collection_item_url, json={
'title': 'New test title',
# NOTE: removes description
})
assert resp.status_code == 200
assert resp.json == {
'title': 'New test title',
'description': None,
'links': {'self': collection_item_url},
}
# get record collections
community_record_url = url_for(
'invenio_communities_collections.records',
pid_value=comid.pid_value,
record_pid=accepted_community_record.record.pid.pid_value
)
resp = client.get(community_record_url)
assert resp.status_code == 200
assert '_collections' not in resp.json
# update record collections
resp = client.put(community_record_url, json={
'collections': [{'id': 'test'}]
})
assert resp.status_code == 200
assert resp.json['_collections'] == [{'id': 'test'}]
# delete
resp = client.delete(collection_item_url)
assert resp.status_code == 204
resp = client.get(collection_item_url)
assert resp.status_code == 404
@pytest.mark.skip()
def test_permissions(db, es_clear, community, accepted_community_record, client, community_owner, authenticated_user, record_owner):
"""Test collection permissions."""
# TODO: write tests for permissions
pass
| mit | 7,395,856,467,491,684,000 | 29.806122 | 132 | 0.650547 | false |
jorgb/airs | gui/images/progress_2.py | 1 | 5705 | #----------------------------------------------------------------------
# This file was generated by D:\personal\src\airs\gui\images\make_images.py
#
from wx import ImageFromStream, BitmapFromImage, EmptyIcon
import cStringIO, zlib
def getData():
return zlib.decompress(
'x\xda\x01\xbe\x06A\xf9\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00 \x00\
\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\
\x08d\x88\x00\x00\x06uIDATX\x85\xc5\x97\xddo[w\x19\xc7?\xe7\xf8\xe4\xc5\x8e\
\x9d\xd4\xb1-\xe7\xa5M_\x92&\xcd\xb0*\xdc4\x15[RR\xd2 \r\xba\xa6\xdb\x05$\
\xdb?\x00\xbdc\x80V\x86@\x0ciH \xc4\xc4\xd8\r\x82\xbf\x00D/\x80v \xd1TZ_Q\
\xd6&\xf1\xe2\x15\x12\xd2.\xdbl\'Y\x1a\xec\xc6ul\x1f\x9fs\x1e.R\x1fl\xc7M_\
\xb8\xe0\x91\x9e\x0b\x9f\xdf\xe3\xe7\xfb\xfd=o\xbf\xdfO\xb1\xc4\x10\xfe\x8f\
\xa2=\xae\xa1iYO\xec\xdc\xa1\xaa\xff;\x81\xa7\x01~\x12\xd9\x96\xa2a\x9aX\xdb\
\x10\xc8f\xb3\xac\xad\xada\x18F\xd5u\xd3\xb2\x1e\xb9\x81\xaa\x110L\x93d2I"\
\x91 \x99J\xd2\xecm\xa6\xb5\xb5\x15\xaf\xd7\x8b\xe6p`Z\x16\x86ap\xf5\xeaU&\'\
\'\x19\x1d\x1d%\x14\n\xd9!\x7f\x92\xa8m!`\x98&\x0b\x0b\x0b\x9c={\x96\xeb\xd7\
\xae\x93J\xa5\xd8\xb1c\x07\x03\x83\x03\x9c:u\x8a\xfd\xfb\xf7S\xa3id\x0b\x05n\
\xdd\xba\xc5\xc5\x89\x8b\xf4\xf5\xf5\x11\n\x85l`\x87\xaa\x96\x91(\xfd\xbeE,1\
\xa4\xa8\x05S\x97\xc4r\\\xce\xbc\xfe\x9a\xec\xdc\xd5.(\xd8\xda\xb1{\x97\x9cy\
\xfd5I,\xc7%_\xc8I\xbe\x90\x93\x0f\xa2\x11\xf9\xd3\xb9?Jb9.\x05S\xb7\xb5\xe8\
ki%!\xff\xba=/\xf7\xd2)\xd1\x8d\xbc\x94b\x15\xb5\x8c@6\xbf!\xe7\xde\xfd\xb3<\
\xfb\xdc\x17\xca\xc0\x8b\xdaw\xf8\x90\x9c\xff\xcb9\xc9\xe9Y)\x98\xba\xe8F^rz\
V\xf2\x85\\\x19\x81\x82\xa9\xcb\xf2gK\xf2\xc3\x1f\xfd@\xbe>\xf65\xf9\xfd\x1f\
~\'\xe9\xccz\x19\xc1\xa2j\xa5a\xd2u\x9dD<\xce\xea\xeaj\xd5|\xad\xae\xae\x12\
\x8f\xc5(\x14\n8\x1c\x0e\x14E\xc1\xe1pT\xcd\xfd\xd2\xd2\x12\xe7\xcf\x9dgn~\
\x8e\xe6\xe6f\x8e\x1d;F}}\xfd\xf65\xa0\xaa*N\x97\x0bM\xab(\r\x05\x10\xa8\xad\
\xad\xc5\xd3\xd8\xb8e\xbd2\xf7"B0\x18\xe4\xc4\x0b\'\xe8\xfag\x17\xc3\xc7\x87\
q:\x9dU\xed\xb5R\xd6\xb5\xb5\xb5\x84B!\xf6uv\xb2\xf8\xc9"\xb9l~s\xe1\xc1\xac\
\xec\xee\xeef\xcf\x9e=\xa8\xaa\xbam\xc5+\x8a\x82\xdf\xef\xe7\xf4\xe9\xd3\xa4\
R)\xda\xdb\xdbip\xb9\xaa\xdaj\xa5\xccUU\xa5\xbd\xbd\x9dW^y\x99\x8dL\x86[\xff\
\xb8E.\x97\xc5\xe5l\xa0\xb7\xb7\x97\xb1\xf11\x0e\x1c8\x80ZQ\xe5\x95;+\x92\
\x08\x04\x02\x04\x02\x01{\xadZw(\x05S\x97\xe2\x82eYd2\x19\x96\x97\x97\x99\
\x9d\x9de~~\x9eT*E \x10 \x1c\x0e\x13\x0e\x87\xf1z\xbd\xb6\x83\xca(T\x02TJ5{\
\xa5`\xea\x02\x90\xcb\xe5\x98\x99\x99\xe1\xca\x95+\xdc\xbd{\x17\x9f\xcfG8\
\x1c\xa6\xa7\xa7\x07\xb7\xdb\x8d\xc7\xe3A\xd3\xb4\xaa \xa5\xdf\xb6#U\x9c\xaa\
5\x9af\x7f\xd3\x00\x0c\xc3\xe0\xd2\xa5K\xbc\xf3\xabw\x98\x9e\x9e\xe6~\xe6>\
\x1e\xb7\x87\xde\xde^\xde\xfc\xc9\x9b\xf4\xf7\xf7\xa3(JU\xc0jip<\xa8\x91\xa2\
\x8d\x88077\xc7\xf5k\xd7\x008>2\xc2\xae]\xbb\xd0\x1c\x0e(\x98\xba\xdcY\xbc-\
\xdf<\xfd\x8d\xaa\xbd\xff\xeaw\xbe%\xf7\xd2){\xb8<\x8d&\x96\xe3\xf2\xbd\xef\
\x9f\x91\xf6\x9dm\xb2{O\x87\xfc\xec\xe7?\x95\xcf\xee\xaeH\xc1\xd4E\x13\x11\
\xee\xdd\xbb\xc7\xdc\xdc\x9c\xddn\xa52=5M&\x93\xc1\xe5r=u\xee766\xb8s\xfb\
\x0e\xf1x\x02\x80\x8f\xee|d\x1f`\x1alV\xac\xd3\xe9\xdc\x02\x0e\x94\x01\x97\
\x86\xb9\x14\xb8\x18\xf6\x87\x91jnnf\xf0\xe8 \xb1X\x8c\xba\xba:\x06\x8f\x0e\
\xdasAS\x14\x85\xb6\xb66\x06\x06\x07\x88D"6K\x80\x96\x96 #_\x1e\xc1\xeb\xf5>\
\xf2\xa4\xab\x96\xfb\xa2x<\x1e\xc6\xc7\xc7\xe9\xef\xef\x07\xa0\xb3\xb3\x13\
\xb7\xdb\xbd9\x88\x00\x9a\x9a\x9a\x18\x1d\x1d\xe5\xd3O>err\x92\xb5\xb55\xfc~\
?C\xc7\x86\x18\x1d\x1dE+\xa9\xdaj\x91\xa8\xf6\xbb\xb2\xef\xbd^/\x87\x0f\x1f\
\xdebo\xb7\xa1eY\xc4b1\xe6\xe7\xe7I&\x93466\xb2{\xf7n\x82\xc1`Y\x0b>,\xcc\
\x8f+\xa5>\xca\xe6@QD\x84l6K4\x1a\xe5\xc3h\x94\xf4\xfd\xfbtww\xd3\xd7\xd7\
\x87\xcf\xe7\xb3\xc7\xf0\xe3\xf4\xbd\x88`Y\x16\x8a\xa2\xa0>\xe4~\xa8U\xeeBD\
\x88D"\xbc\xfd\xcb\xb7\x99\x99\x99!\x97\xcb\xd1\xd1\xd1\xc1\xf8\xcb\xe3\x8c\
\x8d\x8d\xe1\xf3\xf9\xb6\x84\xb1Z\xeeM\xd3djj\x8a\xe8\xec,]\xdd\xfb\t\x7f>\
\x8c\xdb\xed.#\xe9P\xd5\xcd9Pz!YZI\xc8\xb7\xbf\xfb\xaa\xf8\xfc\xcde\xf3`\xe8\
\xd8\x17\xe5\xc2\xc5\xbf\x89n\xe4\x1f\xd9\xf79=+\xef\xfe\xf5\xbc<7\xf0\xac\
\xb4\xb4\x06\xe5P_X~\xf3\xdb_\x97\xdd\t\x8a\xaa\x96\x86\x0f \x9dN\xf3\xf1\
\xe2\xc7\xdc\xcf\xa4\xcbv\x1a\x8f\xc7YYYADp\xa8\xaa\x9d\xaat:M.\x97+\xb3]__g\
bb\x82\xc8\x07\x11\x96\x97W\x98\x9a\x9afr\xf2}\x12\x89xY\xda\xec.(%\xe1v\xbb\
\t\x04\x02hZ\ry\xa5`\xcf\x06\xbf\xdf\x8f\xcf\xe7CQ\x14\xf2\xbaN$\x12\xe1\xf2\
\xe5\xcb\xa4R)B\xa1\x10CCC\xf6\xc9WSSCSS\x13\xf5u\xf5\xe4\xf5\x1cu\xb5\xf5\
\xa8\xaa\x8a\xc7\xd3X\x86eZ\xd6\xd6\x1a\xf0\xf9|\x9c\x1c=\xc9\xe2\xe2"\xd3\
\xd3\xd3\x98\xa6Ikk+\xcf\x7f\xe5yzzz\x10\x11\x16\x16\x16x\xeb\x17o111A.\x9fc\
\xef\x9e\xbd\xa4\xd7\xd7y\xf1\xa5\x97\xf0z\xbd\xb8\\.N\x9e<It6J,\x16c\xe7\
\xce\x9d\x9cx\xe1\xab\xf6IZZ\x03\xf6}\xa0T\x06\x06\x06\x00\xb8q\xe3\x06\x1b\
\x1b\x1bt\xee\xdb\xc7\x97\x86\x87ikkC\xd7ufff\xb8y\xf3&kk\xff\x06 \x1a\xfd\
\x90\x0b\x17&\x18<z\x14\xaf\xd7\x8b\xaa\xaa\xf4\xf6\xf6\xf2\xc6\x8f\xdf \x16\
\x8b\x11\x0c\x06\xd9\xbbw\xaf}\x93*\x9d\x9cU\xdf\x05\r\r\r\x0c\x0f\x0fs\xe8\
\xd0!D\x84\x86\x86\x06\x9cN\xa7}"Z\x96\x85i\x9a\xff\xb5wo\x1d\xd7\x0e\x87\
\x83\xae\xae.:;;Q\x14e\xf3\xfeXZ\xfd\x0fd\x0b\x81\xa2\x91\xa6i\xf8\xfd~D\xc4\
\x06v\xa8*uuu\x1c<x\x90#G\x8el\x16\xe0\x83\x14\x8c\x8c\x1c\xa7\xa5\xa5e\x8b\
\x9f\xd2\xffV\x82\x03(\xd5^\xc7\x8f\x9an\x86ap\xfd\xef\xd7\xb9\xf4\xde%\x92\
\xc9$\x9f{\xe6\x19\x8e\x8f\x8c\xd0\xd1\xd1\xb1\xedt\xac\xf60\xa9J\xa0\x9aT:5\
\x0c\x83t:\x8di\x9a\xb8\\\xae\xb2\x14U\x82U\xdbyQ\xfe\x03\x08Vc\xb3\xdaJZ\
\xa8\x00\x00\x00\x00IEND\xaeB`\x82ek`#' )
def getBitmap():
return BitmapFromImage(getImage())
def getImage():
stream = cStringIO.StringIO(getData())
return ImageFromStream(stream)
| gpl-2.0 | 4,576,252,422,895,433,000 | 65.117647 | 77 | 0.697108 | false |
fanglinfang/myuw | myuw/logger/session_log.py | 1 | 1051 | from myuw.dao.affiliation import get_base_campus
from myuw.dao.enrollment import get_current_quarter_enrollment
from myuw.dao.gws import is_grad_student, is_undergrad_student
import logging
import json
import hashlib
logger = logging.getLogger('session')
def log_session(netid, session_key, request):
if session_key is None:
session_key = ''
session_hash = hashlib.md5(session_key).hexdigest()
log_entry = {'netid': netid,
'session_key': session_hash,
'class_level': None,
'is_grad': None,
'campus': None}
try:
level = get_current_quarter_enrollment(request).class_level
log_entry['class_level'] = level
is_mobile = request.is_mobile or request.is_tablet
log_entry['is_mobile'] = bool(is_mobile)
except AttributeError:
pass
log_entry['is_grad'] = is_grad_student()
log_entry['is_ugrad'] = is_undergrad_student()
log_entry['campus'] = get_base_campus(request)
logger.info(json.dumps(log_entry))
| apache-2.0 | 1,625,579,248,412,522,000 | 31.84375 | 67 | 0.642245 | false |
shiblon/pytour | 3/tutorials/generators3.py | 1 | 2336 | # vim:tw=50
"""Generators for Refactoring
Now that we know how to make our own generators,
let's do some refactoring to make use of this idea
and clean up the code a bit. We'll start by
splitting out the |clean_lines| function, which
basically just skips blank lines and comments,
stripping unnecessary space.
This notion of converting one iterator into
another is prevalent in Python. As one rather
common example, the |enumerate| builtin converts
an iterable over items into an iterable over
|(index,item)| pairs. You built something similar
earlier.
Generators make refactoring sequence operations
really easy, even operations that need to remember
something about past elements. Without them,
separating functionality like this would be hard
or sometimes even impossible.
Exercises
- Look carefully at "clean_lines" and make sure
you understand how it works.
- Use "enumerate" to get line numbers with the
data, and emit that line number in the
ValueError message. Note that in string
formatting, {0} means "the first argument". You
can put any number in there, so long as it
matches the position of what you pass to
|format|. So, you could use |{2}| for the line
number if you want.
"""
__doc__ = """Refactoring functionality.
Changes: we now clean out comments and blank lines
in a different function, and the error message for
bad dates has the line number in it.
"""
def clean_lines(lines):
for line in lines:
line = line.strip()
if not line or line.startswith('#'):
continue
yield line
def parsed_measurements(lines):
last_date = ""
# TODO:
# Use 'enumerate(clean_lines(lines))' to get
# (number, line) pairs. Use the number in the
# exception message to show on what line the
# error occurred.
for line in clean_lines(lines):
date, measurement = line.split()
if date <= last_date:
raise ValueError("Non-increasing: {0} -> {1}".format(
last_date, date))
last_date = date
yield date, measurement
if __name__ == '__main__':
_assert_equal([('2012-10-10', '5.4'), ('2012-10-11', '5.3')],
list(parsed_measurements(['2012-10-10 5.4',
'2012-10-11 5.3'])))
_assert_raises(ValueError, lambda x: list(parsed_measurements(x)),
['2012-10-10 5.4', '2012-10-09 5.3'])
| apache-2.0 | -6,395,722,228,013,602,000 | 29.736842 | 68 | 0.689212 | false |
anish/buildbot | master/buildbot/test/unit/test_reporters_notifier.py | 1 | 14563 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import copy
import sys
from mock import Mock
from twisted.internet import defer
from twisted.trial import unittest
from buildbot import config
from buildbot.process.results import CANCELLED
from buildbot.process.results import EXCEPTION
from buildbot.process.results import FAILURE
from buildbot.process.results import SUCCESS
from buildbot.process.results import WARNINGS
from buildbot.reporters.notifier import NotifierBase
from buildbot.test.fake import fakedb
from buildbot.test.fake import fakemaster
from buildbot.test.util.config import ConfigErrorsMixin
from buildbot.test.util.misc import TestReactorMixin
from buildbot.test.util.notifier import NotifierTestMixin
py_27 = sys.version_info[0] > 2 or (sys.version_info[0] == 2
and sys.version_info[1] >= 7)
class TestMailNotifier(ConfigErrorsMixin, TestReactorMixin,
unittest.TestCase, NotifierTestMixin):
def setUp(self):
self.setUpTestReactor()
self.master = fakemaster.make_master(self, wantData=True, wantDb=True,
wantMq=True)
@defer.inlineCallbacks
def setupNotifier(self, *args, **kwargs):
mn = NotifierBase(*args, **kwargs)
mn.sendMessage = Mock(spec=mn.sendMessage)
mn.sendMessage.return_value = "<message>"
yield mn.setServiceParent(self.master)
yield mn.startService()
return mn
def test_init_enforces_tags_and_builders_are_mutually_exclusive(self):
with self.assertRaises(config.ConfigErrors):
NotifierBase(tags=['fast', 'slow'], builders=['a', 'b'])
def test_init_warns_notifier_mode_all_in_iter(self):
with self.assertRaisesConfigError(
"mode 'all' is not valid in an iterator and must be passed in as a separate string"):
NotifierBase(mode=['all'])
@defer.inlineCallbacks
def test_buildsetComplete_sends_message(self):
_, builds = yield self.setupBuildResults(SUCCESS)
mn = yield self.setupNotifier(buildSetSummary=True,
mode=("failing", "passing", "warnings"),
builders=["Builder1", "Builder2"])
mn.buildMessage = Mock()
yield mn.buildsetComplete('buildset.98.complete',
dict(bsid=98))
mn.buildMessage.assert_called_with(
"whole buildset",
builds, SUCCESS)
self.assertEqual(mn.buildMessage.call_count, 1)
@defer.inlineCallbacks
def test_buildsetComplete_doesnt_send_message(self):
_, builds = yield self.setupBuildResults(SUCCESS)
# disable passing...
mn = yield self.setupNotifier(buildSetSummary=True,
mode=("failing", "warnings"),
builders=["Builder1", "Builder2"])
mn.buildMessage = Mock()
yield mn.buildsetComplete('buildset.98.complete',
dict(bsid=98))
self.assertFalse(mn.buildMessage.called)
@defer.inlineCallbacks
def test_isMessageNeeded_ignores_unspecified_tags(self):
_, builds = yield self.setupBuildResults(SUCCESS)
build = builds[0]
# force tags
build['builder']['tags'] = ['slow']
mn = yield self.setupNotifier(tags=["fast"])
self.assertFalse(mn.isMessageNeeded(build))
@defer.inlineCallbacks
def test_isMessageNeeded_tags(self):
_, builds = yield self.setupBuildResults(SUCCESS)
build = builds[0]
# force tags
build['builder']['tags'] = ['fast']
mn = yield self.setupNotifier(tags=["fast"])
self.assertTrue(mn.isMessageNeeded(build))
@defer.inlineCallbacks
def test_isMessageNeeded_schedulers_sends_mail(self):
_, builds = yield self.setupBuildResults(SUCCESS)
build = builds[0]
# force tags
mn = yield self.setupNotifier(schedulers=['checkin'])
self.assertTrue(mn.isMessageNeeded(build))
@defer.inlineCallbacks
def test_isMessageNeeded_schedulers_doesnt_send_mail(self):
_, builds = yield self.setupBuildResults(SUCCESS)
build = builds[0]
# force tags
mn = yield self.setupNotifier(schedulers=['some-random-scheduler'])
self.assertFalse(mn.isMessageNeeded(build))
@defer.inlineCallbacks
def test_isMessageNeeded_branches_sends_mail(self):
_, builds = yield self.setupBuildResults(SUCCESS)
build = builds[0]
# force tags
mn = yield self.setupNotifier(branches=['master'])
self.assertTrue(mn.isMessageNeeded(build))
@defer.inlineCallbacks
def test_isMessageNeeded_branches_doesnt_send_mail(self):
_, builds = yield self.setupBuildResults(SUCCESS)
build = builds[0]
# force tags
mn = yield self.setupNotifier(branches=['some-random-branch'])
self.assertFalse(mn.isMessageNeeded(build))
@defer.inlineCallbacks
def run_simple_test_sends_message_for_mode(self, mode, result, shouldSend=True):
_, builds = yield self.setupBuildResults(result)
mn = yield self.setupNotifier(mode=mode)
self.assertEqual(mn.isMessageNeeded(builds[0]), shouldSend)
def run_simple_test_ignores_message_for_mode(self, mode, result):
return self.run_simple_test_sends_message_for_mode(mode, result, False)
def test_isMessageNeeded_mode_all_for_success(self):
return self.run_simple_test_sends_message_for_mode("all", SUCCESS)
def test_isMessageNeeded_mode_all_for_failure(self):
return self.run_simple_test_sends_message_for_mode("all", FAILURE)
def test_isMessageNeeded_mode_all_for_warnings(self):
return self.run_simple_test_sends_message_for_mode("all", WARNINGS)
def test_isMessageNeeded_mode_all_for_exception(self):
return self.run_simple_test_sends_message_for_mode("all", EXCEPTION)
def test_isMessageNeeded_mode_all_for_cancelled(self):
return self.run_simple_test_sends_message_for_mode("all", CANCELLED)
def test_isMessageNeeded_mode_failing_for_success(self):
return self.run_simple_test_ignores_message_for_mode("failing", SUCCESS)
def test_isMessageNeeded_mode_failing_for_failure(self):
return self.run_simple_test_sends_message_for_mode("failing", FAILURE)
def test_isMessageNeeded_mode_failing_for_warnings(self):
return self.run_simple_test_ignores_message_for_mode("failing", WARNINGS)
def test_isMessageNeeded_mode_failing_for_exception(self):
return self.run_simple_test_ignores_message_for_mode("failing", EXCEPTION)
def test_isMessageNeeded_mode_exception_for_success(self):
return self.run_simple_test_ignores_message_for_mode("exception", SUCCESS)
def test_isMessageNeeded_mode_exception_for_failure(self):
return self.run_simple_test_ignores_message_for_mode("exception", FAILURE)
def test_isMessageNeeded_mode_exception_for_warnings(self):
return self.run_simple_test_ignores_message_for_mode("exception", WARNINGS)
def test_isMessageNeeded_mode_exception_for_exception(self):
return self.run_simple_test_sends_message_for_mode("exception", EXCEPTION)
def test_isMessageNeeded_mode_warnings_for_success(self):
return self.run_simple_test_ignores_message_for_mode("warnings", SUCCESS)
def test_isMessageNeeded_mode_warnings_for_failure(self):
return self.run_simple_test_sends_message_for_mode("warnings", FAILURE)
def test_isMessageNeeded_mode_warnings_for_warnings(self):
return self.run_simple_test_sends_message_for_mode("warnings", WARNINGS)
def test_isMessageNeeded_mode_warnings_for_exception(self):
return self.run_simple_test_ignores_message_for_mode("warnings", EXCEPTION)
def test_isMessageNeeded_mode_passing_for_success(self):
return self.run_simple_test_sends_message_for_mode("passing", SUCCESS)
def test_isMessageNeeded_mode_passing_for_failure(self):
return self.run_simple_test_ignores_message_for_mode("passing", FAILURE)
def test_isMessageNeeded_mode_passing_for_warnings(self):
return self.run_simple_test_ignores_message_for_mode("passing", WARNINGS)
def test_isMessageNeeded_mode_passing_for_exception(self):
return self.run_simple_test_ignores_message_for_mode("passing", EXCEPTION)
@defer.inlineCallbacks
def run_sends_message_for_problems(self, mode, results1, results2, shouldSend=True):
_, builds = yield self.setupBuildResults(results2)
mn = yield self.setupNotifier(mode=mode)
build = builds[0]
if results1 is not None:
build['prev_build'] = copy.deepcopy(builds[0])
build['prev_build']['results'] = results1
else:
build['prev_build'] = None
self.assertEqual(mn.isMessageNeeded(builds[0]), shouldSend)
def test_isMessageNeeded_mode_problem_sends_on_problem(self):
return self.run_sends_message_for_problems("problem", SUCCESS, FAILURE, True)
def test_isMessageNeeded_mode_problem_ignores_successful_build(self):
return self.run_sends_message_for_problems("problem", SUCCESS, SUCCESS, False)
def test_isMessageNeeded_mode_problem_ignores_two_failed_builds_in_sequence(self):
return self.run_sends_message_for_problems("problem", FAILURE, FAILURE, False)
def test_isMessageNeeded_mode_change_sends_on_change(self):
return self.run_sends_message_for_problems("change", FAILURE, SUCCESS, True)
def test_isMessageNeeded_mode_change_sends_on_failure(self):
return self.run_sends_message_for_problems("change", SUCCESS, FAILURE, True)
def test_isMessageNeeded_mode_change_ignores_first_build(self):
return self.run_sends_message_for_problems("change", None, FAILURE, False)
def test_isMessageNeeded_mode_change_ignores_first_build2(self):
return self.run_sends_message_for_problems("change", None, SUCCESS, False)
def test_isMessageNeeded_mode_change_ignores_same_result_in_sequence(self):
return self.run_sends_message_for_problems("change", SUCCESS, SUCCESS, False)
def test_isMessageNeeded_mode_change_ignores_same_result_in_sequence2(self):
return self.run_sends_message_for_problems("change", FAILURE, FAILURE, False)
@defer.inlineCallbacks
def setupBuildMessage(self, **mnKwargs):
_, builds = yield self.setupBuildResults(SUCCESS)
mn = yield self.setupNotifier(**mnKwargs)
mn.messageFormatter = Mock(spec=mn.messageFormatter)
mn.messageFormatter.formatMessageForBuildResults.return_value = {"body": "body", "type": "text",
"subject": "subject"}
yield mn.buildMessage("mybldr", builds, SUCCESS)
return (mn, builds)
@defer.inlineCallbacks
def test_buildMessage_nominal(self):
mn, builds = yield self.setupBuildMessage(mode=("change",))
build = builds[0]
mn.messageFormatter.formatMessageForBuildResults.assert_called_with(
('change',), 'mybldr', build['buildset'], build, self.master,
None, ['me@foo'])
self.assertEqual(mn.sendMessage.call_count, 1)
mn.sendMessage.assert_called_with('body', 'subject', 'text', 'mybldr', SUCCESS, builds,
['me@foo'], [], [])
@defer.inlineCallbacks
def test_buildMessage_addLogs(self):
mn, builds = yield self.setupBuildMessage(mode=("change",), addLogs=True)
self.assertEqual(mn.sendMessage.call_count, 1)
# make sure the logs are send
self.assertEqual(mn.sendMessage.call_args[0][8][0]['logid'], 60)
# make sure the log has content
self.assertIn(
"log with", mn.sendMessage.call_args[0][8][0]['content']['content'])
@defer.inlineCallbacks
def test_buildMessage_addPatch(self):
mn, builds = yield self.setupBuildMessage(mode=("change",), addPatch=True)
self.assertEqual(mn.sendMessage.call_count, 1)
# make sure the patch are sent
self.assertEqual(mn.sendMessage.call_args[0][7],
[{'author': 'him@foo',
'body': b'hello, world',
'comment': 'foo',
'level': 3,
'patchid': 99,
'subdir': '/foo'}])
@defer.inlineCallbacks
def test_buildMessage_addPatchNoPatch(self):
SourceStamp = fakedb.SourceStamp
class NoPatchSourcestamp(SourceStamp):
def __init__(self, id, patchid):
super().__init__(id=id)
self.patch(fakedb, 'SourceStamp', NoPatchSourcestamp)
mn, builds = yield self.setupBuildMessage(mode=("change",), addPatch=True)
self.assertEqual(mn.sendMessage.call_count, 1)
# make sure no patches are sent
self.assertEqual(mn.sendMessage.call_args[0][7], [])
@defer.inlineCallbacks
def test_workerMissingSendMessage(self):
mn = yield self.setupNotifier(watchedWorkers=['myworker'])
yield mn.workerMissing('worker.98.complete',
dict(name='myworker',
notify=["[email protected]"],
workerinfo=dict(admin="myadmin"),
last_connection="yesterday"))
self.assertEqual(mn.sendMessage.call_count, 1)
text = mn.sendMessage.call_args[0][0]
recipients = mn.sendMessage.call_args[1]['users']
self.assertEqual(recipients, ['[email protected]'])
self.assertIn(
b"has noticed that the worker named myworker went away", text)
| gpl-2.0 | 6,238,227,632,631,262,000 | 40.847701 | 104 | 0.661334 | false |
igorsobreira/eizzek | tests/unit/test_register.py | 1 | 4055 | import re
from unittest import TestCase
from eizzek.lib.registry import PluginRegistry, SessionPluginRegistry
class PluginRegistryTest(TestCase):
def setUp(self):
self.registry = PluginRegistry()
def ping(**kwargs):
return ''
self.ping = ping
self.regex = r'^ping (.+)$'
self.compiled_regex = re.compile(self.regex)
def test_register_plugin(self):
assert len(self.registry.plugins) == 0
self.registry.register(self.ping.__name__, self.regex, self.ping)
assert len(self.registry.plugins) == 1
assert self.registry.plugins['ping'] == (self.compiled_regex, self.ping)
def other_ping(**kw):
return ''
# ignore duplicates
self.registry.register('ping', self.regex, other_ping)
assert len(self.registry.plugins) == 1
assert self.registry.plugins['ping'] == (self.compiled_regex, self.ping)
def test_unregister_plugin_by_name(self):
assert len(self.registry.plugins) == 0
self.registry.register(self.ping.__name__, self.regex, self.ping)
assert len(self.registry.plugins) == 1
self.registry.unregister('ping') # by name
assert len(self.registry.plugins) == 0
def test_unregister_plugin_by_callable(self):
self.registry.register(self.ping.__name__, self.regex, self.ping)
assert len(self.registry.plugins) == 1
self.registry.unregister(self.ping) # by callable, using __name__ attribute
assert len(self.registry.plugins) == 0
def test_clear(self):
self.registry.register(self.ping.__name__, self.regex, self.ping)
assert len(self.registry.plugins) == 1
self.registry.clear()
assert len(self.registry.plugins) == 0
class SessionPluginRegistryTest(TestCase):
def setUp(self):
self.session_registry = SessionPluginRegistry()
class TranslateSessionPlugin(object):
name = 'translate'
regex = r'^translate (?P<from>\w+) (?P<to>\w+)$'
self.translate = TranslateSessionPlugin
def test_register_plugin(self):
assert 0 == len(self.session_registry.plugins)
self.session_registry.register(self.translate)
assert 1 == len(self.session_registry.plugins)
assert (re.compile(self.translate.regex), self.translate) == \
self.session_registry.plugins[self.translate.name]
class OtherTranslatePlugin(object):
name = 'translate'
regex = r'*'
# ignore duplicates
self.session_registry.register(OtherTranslatePlugin)
assert 1 == len(self.session_registry.plugins)
assert (re.compile(self.translate.regex), self.translate) == \
self.session_registry.plugins[self.translate.name]
def test_unregister_plugin_by_name(self):
assert 0 == len(self.session_registry.plugins)
self.session_registry.register(self.translate)
assert 1 == len(self.session_registry.plugins)
self.session_registry.unregister(self.translate.name)
assert 0 == len(self.session_registry.plugins)
def test_unregister_plugin_by_callable(self):
self.session_registry.register(self.translate)
assert 1 == len(self.session_registry.plugins)
self.session_registry.unregister(self.translate) # by callable, using ``name`` attribute
assert 0 == len(self.session_registry.plugins)
def test_clear(self):
self.session_registry.register(self.translate)
assert 1 == len(self.session_registry.plugins)
self.session_registry.clear()
assert 0 == len(self.session_registry.plugins)
| mit | -8,068,151,602,769,320,000 | 29.037037 | 100 | 0.590136 | false |
monsta-hd/ml-mnist | ml_mnist/tests/test_model_selection.py | 1 | 5910 | import numpy as np
from unittest import skip
from ml_mnist.model_selection import TrainTestSplitter as TTS
class TestSplit(object):
def setUp(self):
self.y = np.array([1, 1, 1, 2, 2, 3, 3,
1, 1, 2, 2, 2, 3, 3,
1, 1, 2, 2, 3, 3, 3])
def test_split_no_shuffle(self):
"""Ensure order is preserved if no shuffle."""
tts = TTS(shuffle=False)
for train_ratio in np.arange(0.15, 0.96, 0.1):
train, test = tts.split(self.y, train_ratio=train_ratio, stratify=False)
np.testing.assert_allclose(self.y, np.concatenate((self.y[train], self.y[test])))
@skip("use this test for `random.Random` rng")
def test_split_random_seed(self):
"""
Ensure for fixed (known) `random_seed` TTS instances produce
the same outputs (for the same inputs).
"""
for random_seed, stratify, (train_ref, test_ref) in (
(42, False, (
np.array([6, 8, 9, 15, 7, 3, 17, 14, 11, 16]),
np.array([2, 19, 18, 1, 20, 10, 12, 4, 5, 0, 13])
)),
(1337, False, (
np.array([ 8, 20, 9, 18, 1, 4, 0, 7, 14, 16]),
np.array([17, 3, 15, 11, 5, 13, 2, 19, 6, 10, 12])
)),
(42, True, (
np.array([12, 6, 4, 2, 3, 5, 1, 0, 9]),
np.array([20, 13, 16, 17, 18, 15, 8, 19, 10, 14, 7, 11])
)),
(1337, True, (
np.array([6, 5, 1, 12, 0, 3, 2, 4, 9]),
np.array([14, 19, 17, 13, 10, 7, 16, 11, 8, 18, 15, 20])
)),
):
tts = TTS(shuffle=True, random_seed=random_seed)
train, test = tts.split(self.y, train_ratio=0.5, stratify=stratify)
np.testing.assert_allclose(train_ref, train)
np.testing.assert_allclose(test_ref, test)
def test_split_random_seed_2(self):
"""
Ensure different TTS instances with the same `random_seed` produce
the same outputs (for the same inputs).
"""
for train_ratio in (0.25, 0.5, 0.75):
for random_seed in np.random.randint(0, 1337, 25):
for stratify in (False, True):
for i in xrange(2):
if i == 0:
tts1 = TTS(shuffle=True, random_seed=random_seed)
train1, test1 = tts1.split(self.y, train_ratio=train_ratio, stratify=stratify)
if i == 1:
tts2 = TTS(shuffle=True, random_seed=random_seed)
train2, test2 = tts2.split(self.y, train_ratio=train_ratio, stratify=stratify)
np.testing.assert_allclose(train1, train1)
np.testing.assert_allclose(test1, test1)
def test_split_random_seed_3(self):
"""
Ensure same TTS instance always produce
the same outputs (for the same inputs).
"""
for train_ratio in (0.25, 0.5, 0.75):
for random_seed in np.random.randint(0, 1337, 25):
for stratify in (False, True):
for i in xrange(2):
if i == 0:
tts = TTS(shuffle=True, random_seed=random_seed)
train1, test1 = tts.split(self.y, train_ratio=train_ratio, stratify=stratify)
if i == 1:
train2, test2 = tts.split(self.y, train_ratio=train_ratio, stratify=stratify)
np.testing.assert_allclose(train1, train1)
np.testing.assert_allclose(test1, test1)
def test_split_stratification_no_shuffle(self):
"""Ensure stratification is preserved if no shuffle."""
tts = TTS(shuffle=False)
train, test = tts.split(self.y, train_ratio=4./7., stratify=True)
assert np.count_nonzero(train == 1) ==\
np.count_nonzero(train == 2) == np.count_nonzero(train == 3)
assert np.count_nonzero(test == 1) == \
np.count_nonzero(test == 2) == np.count_nonzero(test == 3)
def test_split_stratification_random(self):
"""Ensure stratification is preserved even with shuffling."""
for random_seed in np.random.randint(0, 1337, 100):
tts = TTS(shuffle=True, random_seed=random_seed)
train, test = tts.split(self.y, train_ratio=4./7., stratify=True)
assert np.count_nonzero(train == 1) == \
np.count_nonzero(train == 2) == np.count_nonzero(train == 3)
assert np.count_nonzero(test == 1) == \
np.count_nonzero(test == 2) == np.count_nonzero(test == 3)
def test_make_k_folds_no_shuffle(self):
"""Ensure order is preserved if no shuffle (for different number of folds)."""
for n_folds in xrange(2, 100):
tts = TTS(shuffle=False)
folds = list(tts.make_k_folds(self.y, n_folds=n_folds, stratify=False))
np.testing.assert_allclose(np.arange(len(self.y)), np.concatenate(folds))
def test_make_k_folds_stratification_no_shuffle(self):
"""Ensure stratification is preserved if no shuffle."""
tts = TTS(shuffle=False)
for fold in tts.make_k_folds(self.y, n_folds=7, stratify=True):
np.testing.assert_allclose(np.sort(self.y[fold]), np.array([1, 2, 3]))
def test_make_k_folds_stratification_random(self):
"""Ensure stratification is preserved even with shuffling."""
for random_seed in np.random.randint(0, 1337, 100):
tts = TTS(shuffle=True, random_seed=random_seed)
for fold in tts.make_k_folds(self.y, n_folds=7, stratify=True):
np.testing.assert_allclose(np.sort(self.y[fold]), np.array([1, 2, 3]))
| mit | -991,270,284,764,097,200 | 47.442623 | 106 | 0.528426 | false |
be-cloud-be/horizon-addons | horizon/school_documentation/source/_extensions/github_link.py | 1 | 3522 | import inspect
import importlib
import os.path
from urlparse import urlunsplit
"""
* adds github_link(mode) context variable: provides URL (in relevant mode) of
current document on github
* if sphinx.ext.linkcode is enabled, automatically generates github linkcode
links (by setting config.linkcode_resolve)
Settings
========
* ``github_user``, username/organisation under which the project lives
* ``github_project``, name of the project on github
* (optional) ``version``, github branch to link to (default: master)
Notes
=====
* provided ``linkcode_resolve`` only supports Python domain
* generates https github links
* explicitly imports ``openerp``, so useless for anyone else
"""
def setup(app):
app.add_config_value('github_user', None, 'env')
app.add_config_value('github_project', None, 'env')
app.connect('html-page-context', add_doc_link)
def linkcode_resolve(domain, info):
""" Resolves provided object to corresponding github URL
"""
# TODO: js?
if domain != 'py':
return None
if not (app.config.github_user and app.config.github_project):
return None
module, fullname = info['module'], info['fullname']
# TODO: attributes/properties don't have modules, maybe try to look
# them up based on their cached host object?
if not module:
return None
obj = importlib.import_module(module)
for item in fullname.split('.'):
obj = getattr(obj, item, None)
if obj is None:
return None
# get original from decorated methods
try: obj = getattr(obj, '_orig')
except AttributeError: pass
try:
obj_source_path = inspect.getsourcefile(obj)
_, line = inspect.getsourcelines(obj)
except (TypeError, IOError):
# obj doesn't have a module, or something
return None
import openerp
# FIXME: make finding project root project-independent
project_root = os.path.join(os.path.dirname(openerp.__file__), '..')
return make_github_link(
app,
os.path.relpath(obj_source_path, project_root),
line)
app.config.linkcode_resolve = linkcode_resolve
def make_github_link(app, path, line=None, mode="blob"):
config = app.config
urlpath = "/{user}/{project}/{mode}/{branch}/{path}".format(
user=config.github_user,
project=config.github_project,
branch=config.version or 'master',
path=path,
mode=mode,
)
return urlunsplit((
'https',
'github.com',
urlpath,
'',
'' if line is None else 'L%d' % line
))
def add_doc_link(app, pagename, templatename, context, doctree):
""" Add github_link function linking to the current page on github """
if not app.config.github_user and app.config.github_project:
return
# FIXME: find other way to recover current document's source suffix
# in Sphinx 1.3 it's possible to have mutliple source suffixes and that
# may be useful in the future
source_suffix = app.config.source_suffix
source_suffix = source_suffix if isinstance(source_suffix, basestring) else source_suffix[0]
# FIXME: odoo/odoo has a doc/ prefix which is incorrect for this
# project, how to unify? Add new setting?
context['github_link'] = lambda mode='edit': make_github_link(
app, '%s%s' % (pagename, source_suffix), mode=mode)
| agpl-3.0 | -8,153,371,989,408,569,000 | 32.542857 | 96 | 0.636286 | false |
cavestruz/L500analysis | plotting/profiles/T_Vr_evolution/Vr_evolution/plot_Vrall_nu_binned_r500c.py | 1 | 3207 | from L500analysis.data_io.get_cluster_data import GetClusterData
from L500analysis.utils.utils import aexp2redshift
from L500analysis.plotting.tools.figure_formatting import *
from L500analysis.plotting.profiles.tools.profiles_percentile \
import *
from L500analysis.plotting.profiles.tools.select_profiles \
import nu_cut, prune_dict
from L500analysis.utils.constants import linear_rbins
from derived_field_functions import *
color = matplotlib.cm.afmhot_r
matplotlib.rcParams['legend.handlelength'] = 0
matplotlib.rcParams['legend.numpoints'] = 1
matplotlib.rcParams['legend.fontsize'] = 12
aexps = [1.0,0.9,0.8,0.7,0.6,0.5,0.45,0.4,0.35]
nu_threshold = {0:[1,1.7],1:[1.7,2.3],2:[2.3, 2.7]} # 1, 1.7, 2.3, 2.7
nu_threshold_key = 2
nu_label = r"%0.1f$\leq\nu_{500c}\leq$%0.1f"%(nu_threshold[nu_threshold_key][0],
nu_threshold[nu_threshold_key][1])
db_name = 'L500_NR_0'
db_dir = '/home/babyostrich/Documents/Repos/L500analysis/'
profiles_list = ['r_mid',
'R/R500c',
'vel_gas_rad_avg',
'vel_dark_rad_avg',
'bulk_vel_gas_rad_avg',
'VrVc_ratio_500c',
]
halo_properties_list=['r500c','M_total_500c','nu_500c']
Vratio=r"$\tilde{V}=1-V_r/V_{circ,500c}$"
fVz1=r"$\tilde{V}/\tilde{V}(z=1)$"
pa = PlotAxes(figname='Vall_r500c_nu%01d'%nu_threshold_key,
axes=[[0.15,0.4,0.80,0.55],[0.15,0.15,0.80,0.24]],
axes_labels=[Vratio,fVz1],
ylog=[False,False],
xlabel=r"$R/R_{500c}$",
xlim=(0.2,5),
ylims=[(.81,2.),(0.6,1.39)])
Vr={}
Vplots = [Vr]
clkeys = ['VrVc_ratio_500c']
linestyles = ['-']
for aexp in aexps :
cldata = GetClusterData(aexp=aexp,db_name=db_name,
db_dir=db_dir,
profiles_list=profiles_list,
halo_properties_list=halo_properties_list)
nu_cut_hids = nu_cut(nu=cldata['nu_500c'], threshold=nu_threshold[nu_threshold_key])
for Vplot, key in zip(Vplots,clkeys) :
pruned_profiles = prune_dict(d=cldata[key],k=nu_cut_hids)
Vplot[aexp] = calculate_profiles_mean_variance(pruned_profiles)
pa.axes[Vratio].plot( linear_rbins, Vr[aexp]['mean'],color=color(aexp),ls='-',
label="$z=%3.1f$" % aexp2redshift(aexp))
for aexp in aexps :
for V,ls in zip(Vplots,linestyles) :
fractional_evolution = get_profiles_division_mean_variance(
mean_profile1=V[aexp]['mean'],
var_profile1=V[aexp]['var'],
mean_profile2=V[0.5]['mean'],
var_profile2=V[0.5]['var'],
)
pa.axes[fVz1].plot( linear_rbins, fractional_evolution['mean'],
color=color(aexp),ls=ls)
pa.axes[Vratio].text(0.2,1.9,nu_label)
pa.axes[Vratio].tick_params(labelsize=12)
pa.axes[Vratio].tick_params(labelsize=12)
pa.axes[fVz1].set_yticks(arange(0.6,1.4,0.2))
pa.set_legend(axes_label=Vratio,ncol=3,loc='lower right', frameon=False)
pa.color_legend_texts(axes_label=Vratio)
pa.savefig()
| mit | 24,674,641,612,658,730 | 35.033708 | 90 | 0.586218 | false |
goldsborough/ig | ig/colors.py | 1 | 1072 | import random
def random_color(base, variation):
'''
Returns a random, bounded color value.
Args:
base: Some base color component (between 0 and 255)
variation: The degree of variation (around the color)
Returns:
A random color.
'''
color = base + (2 * random.random() - 1) * variation
return max(8, min(int(color), 256))
class Colors(object):
'''
Aggregates information about the color scheme of the visualization.
'''
def __init__(self, base_colors):
'''
Constructor.
Args:
base_colors: The base colors around which to vary
'''
self.base = list(base_colors)
self.variation = None
self.alpha_min = None
def generate(self):
'''
Generates a color.
Returns:
A new RGBA color value.
'''
rgba = [random_color(color, self.variation) for color in self.base]
rgba.append(max(self.alpha_min, random.random()))
return 'rgba({0})'.format(','.join(map(str, rgba)))
| mit | 1,673,187,914,116,669,000 | 23.363636 | 75 | 0.570896 | false |
NERC-CEH/jules-jasmin | job_runner/job_runner/controllers/jobs.py | 1 | 5375 | """
# Majic
# Copyright (C) 2014 CEH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
import logging
from pylons.controllers.util import abort
from pylons.decorators import jsonify
from job_runner.lib.base import BaseController
from job_runner.utils.constants import *
from job_runner.services.job_service import JobService
from job_runner.model.job_status import JobStatus
from job_runner.services.service_exception import ServiceException
log = logging.getLogger(__name__)
def _validate_namelist(namelist):
"""
Validate that the namelist has a name and that the parameters have names
:param namelist: the name list
"""
if not JSON_MODEL_NAMELIST_NAME in namelist or namelist[JSON_MODEL_NAMELIST_NAME].strip() == '':
abort(400, "Invalid name for one of the name lists")
if not JSON_MODEL_PARAMETERS in namelist:
abort(400, "namelist has no parameters in")
for key in namelist[JSON_MODEL_PARAMETERS]:
if key.strip() == '':
abort(400, "A parameter name can not be blank")
def _validate_namelist_file(namelist_file):
"""
Validate that the namelist file has a filename and contains some namelists
:param namelist_file: the name list file
"""
if not JSON_MODEL_NAMELIST_FILE_FILENAME in namelist_file \
or namelist_file[JSON_MODEL_NAMELIST_FILE_FILENAME].strip() == '':
abort(400, "Invalid filename for one of the namelist files")
if not JSON_MODEL_NAMELISTS in namelist_file or len(namelist_file[JSON_MODEL_NAMELISTS]) == 0:
abort(400, "namelist file has no namelists in")
for namelist in namelist_file[JSON_MODEL_NAMELISTS]:
_validate_namelist(namelist)
class JobsController(BaseController):
"""
Controller for jobs
"""
def __init__(self, job_service=JobService()):
"""
:param job_service: the job service
"""
self._job_service = job_service
@jsonify
def new(self):
"""
Create a new job submission.
"""
json = self._get_json_abort_on_error()
log.debug("New Model with parameters %s" % json)
self._check_field_exists_in_json("code version", json, JSON_MODEL_CODE_VERSION)
if json[JSON_MODEL_CODE_VERSION] not in VALID_CODE_VERSIONS:
abort(400, "Invalid code version")
self._check_field_exists_in_json("model run id", json, JSON_MODEL_RUN_ID, is_int=True)
self._check_field_exists_in_json("user id", json, JSON_USER_ID, is_int=True)
self._check_field_exists_in_json("user name", json, JSON_USER_NAME)
self._check_field_exists_in_json("user email address", json, JSON_USER_EMAIL)
self._check_field_exists_in_json("namelist files", json, JSON_MODEL_NAMELIST_FILES)
if len(json[JSON_MODEL_NAMELIST_FILES]) == 0:
abort(400, "Invalid namelist files")
self._check_field_exists_in_json("land cover", json, JSON_LAND_COVER)
namelist = []
for namelist_file in json[JSON_MODEL_NAMELIST_FILES]:
namelist.append(_validate_namelist_file(namelist_file))
try:
return self._job_service.submit(json)
except ServiceException, ex:
abort(400, ex.message)
@jsonify
def status(self):
"""
Return the statuses of the jobs requested
"""
json = self._get_json_abort_on_error()
log.debug("Status with parameters %s" % json)
queued_jobs_status = self._job_service.queued_jobs_status()
job_statuses = []
for job_id in json:
try:
job_status = JobStatus(int(job_id))
job_status.check(self._job_service, queued_jobs_status)
job_statuses.append(job_status)
except ValueError:
abort(400, "Job ids must all be integers")
return job_statuses
@jsonify
def delete(self):
"""
Delete a model run directory
"""
json = self._get_json_abort_on_error()
log.debug("Delete with parameters %s" % json)
if JSON_MODEL_RUN_ID not in json:
abort(400, "Model run id must be included")
try:
model_run_id = int(json[JSON_MODEL_RUN_ID])
self._job_service.delete(model_run_id)
except ValueError:
abort(400, "Model run id must be an integer")
except ServiceException, ex:
abort(400, ex.message)
except Exception:
log.exception("Unknown error when trying to delete model run directory")
abort(400, "Unknown error when trying to delete model run directory")
| gpl-2.0 | 8,434,452,970,082,947,000 | 33.455128 | 100 | 0.641302 | false |
parantapa/pbdset | pbdset.py | 1 | 14786 | # encoding: utf-8
# pylint: disable=too-many-instance-attributes
# pylint: disable=attribute-defined-outside-init
#
# ____________________/\
# \______ \______ )/______
# | ___/| | _// ___/
# | | | | \\___ \
# |____| |______ /____ >
# \/ \/
# ________ __ __
# \______ \ _____ _/ |______ ______ _____/ |_
# | | \\__ \\ __\__ \ / ___// __ \ __\
# | ` \/ __ \| | / __ \_\___ \\ ___/| |
# /_______ (____ /__| (____ /____ >\___ >__|
# \/ \/ \/ \/ \/
"""
Read and write PB's Dataset files.
"""
import sys
import os.path
import struct
import lmdb
# Import compression/decompression functions
from zlib import compress as zlib_comp, decompress as zlib_decomp
from lz4 import compress as _lz4_comp, decompress as lz4_decomp
from backports.lzma import compress as _xz_comp, \
decompress as _xz_decomp, \
CHECK_NONE
from zstd import ZstdCompressor, ZstdDecompressor
def lz4_comp(data, _):
return _lz4_comp(data)
def xz_comp(data, level):
return _xz_comp(data, preset=level, check=CHECK_NONE)
def xz_decomp(data):
return _xz_decomp(data)
_zcomp = {}
_zdecomp = ZstdDecompressor()
def zstd_comp(data, level):
if level not in _zcomp:
_zcomp[level] = ZstdCompressor(level=level,
write_checksum=False,
write_content_size=True)
return _zcomp[level].compress(data)
def zstd_decomp(data):
return _zdecomp.decompress(data)
# We serialize using msgpack
from msgpack import packb as _packb, unpackb as _unpackb
def pack(x):
return _packb(x, use_bin_type=True)
def unpack(x, default=None):
if x is None:
return default
else:
return _unpackb(x, encoding="utf-8")
# Setup the checksum function
from zlib import adler32
def checksum(data):
return adler32(data) & 0xffffffff
COMP_TABLE = {
"none": (lambda data, level: data, lambda comp: comp),
"zlib": (zlib_comp, zlib_decomp),
"lz4": (lz4_comp, lz4_decomp),
"xz": (xz_comp, xz_decomp),
"zstd": (zstd_comp, zstd_decomp)
}
VERSION = 0.1
class Closes(object): # pylint: disable=too-few-public-methods
"""
Runs close() on context exiting and garbage collection.
"""
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close()
def close(self):
raise NotImplementedError()
class DataStore(Closes):
"""
An abstraction layer over underlying data store.
"""
META_KEYS = frozenset([
"version", "block_length", "length", "comp_format", "comp_level"
])
DEFAULT_PARAMS = {
"map_size": 2 ** 40,
"subdir": False,
"readonly": True,
"metasync": False,
"sync": False,
"mode": 0o644,
"readahead": False,
"meminit": False,
"max_dbs": 2,
"lock": False,
}
def __init__(self, fname, write=False, create=False):
self.closed = True
self.fname = fname
self.write = write
self.create = create
_exists = os.path.exists(fname)
if create and _exists:
raise IOError("File '%s' already exists" % fname)
if not create and not _exists:
raise IOError("File '%s' doesn't exist" % fname)
params = dict(DataStore.DEFAULT_PARAMS)
params["readonly"] = not write
self.env = lmdb.open(self.fname, **params)
try:
self.meta_db = self.env.open_db("meta", create=create)
self.block_db = self.env.open_db("block", create=create)
self.txn = self.env.begin(write=write, buffers=True)
self.closed = False
except Exception:
self.env.close()
raise
def close(self):
if not self.closed:
self.txn.commit()
if self.write:
self.env.sync(True)
self.env.close()
self.closed = True
def get(self, i):
ib = struct.pack(">I", i)
return self.txn.get(ib, db=self.block_db)
def put(self, i, block):
ib = struct.pack(">I", i)
self.txn.put(ib, block, db=self.block_db)
def __getattr__(self, key):
if key not in DataStore.META_KEYS:
raise AttributeError("Unknown attribute: '%s'" % key)
value = self.txn.get(key, db=self.meta_db)
if value is None:
return None
else:
return unpack(value)
def __setattr__(self, key, value):
if key in DataStore.META_KEYS:
self.txn.put(key, pack(value), db=self.meta_db)
else:
self.__dict__[key] = value
def comp_block(block_raw, comp_fn, comp_level):
"""
    Compress the block and prepend an 8 byte header: little-endian
    uint32 raw length followed by little-endian uint32 adler32 checksum.
"""
block_chksum = checksum(block_raw)
block_comp = comp_fn(block_raw, comp_level)
header = struct.pack("<II", len(block_raw), block_chksum)
block_hcomp = header + block_comp
return block_hcomp
def decomp_block(block_hcomp, decomp_fn):
"""
Decompress the block.
"""
len_block_raw, stored_chksum = struct.unpack_from("<II", block_hcomp)
block_comp = buffer(block_hcomp, 8, len(block_hcomp) - 8)
block_raw = decomp_fn(block_comp)
block_chksum = checksum(block_raw)
if len(block_raw) != len_block_raw:
raise IOError("Size mismatch: %d != %d"
% (len(block_raw), len_block_raw))
if block_chksum != stored_chksum:
raise IOError("Checksum mismatch: %0x != %0x"
% (block_chksum, stored_chksum))
return block_raw
class DatasetReader(Closes):
"""
Read entries from a dataset file.
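    Example (a sketch; ``data.dset`` is an assumed existing file)::
        with DatasetReader("data.dset") as ds:
            print len(ds)         # number of entries
            first = ds[0]         # single entry
            head = ds[:10]        # slice -> list of entries
            some = ds[[3, 7, 9]]  # fancy indexing -> list of entries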
"""
def __init__(self, fname):
self.closed = True
self.store = DataStore(fname)
try:
if self.store.version != VERSION:
raise IOError("Invalid version: %d" % self.store.version)
self.block_length = self.store.block_length
self.length = self.store.length
self.comp_format = self.store.comp_format
self.comp_level = self.store.comp_level
try:
_, self.decomp_fn = COMP_TABLE[self.comp_format]
except KeyError:
raise IOError("Unknown compression: %s" % self.comp_format)
self.closed = False
except Exception:
self.store.close()
raise
# number of blocks already present in the dataset
self.num_blocks = self.length // self.block_length
self.num_blocks += bool(self.length % self.block_length)
# NOTE: Only used by get_idx
# get_idxs and get_slice use their own local block storage
self.cur_block_idx = -1
self.cur_block = None
def close(self):
if not self.closed:
self.store.close()
self.closed = True
def load_block(self, i):
"""
Load a block from the given file.
"""
block_hcomp = self.store.get(i)
if block_hcomp is None:
raise IOError("Block %d not in store" % i)
try:
block_raw = decomp_block(block_hcomp, self.decomp_fn)
except IOError as e:
raise IOError("Block %d: %s", (i, e)), None, sys.exc_info()[2]
return unpack(block_raw)
def __len__(self):
return self.length
def get_idx(self, n):
"""
Get the value at given idx.
"""
n = (self.length + n) if n < 0 else n
if n < 0 or n >= self.length:
raise IndexError("Index out of range")
i = n // self.block_length
j = n % self.block_length
if self.cur_block_idx != i:
self.cur_block = self.load_block(i)
self.cur_block_idx = i
return unpack(self.cur_block[j])
def get_slice(self, *args):
"""
Return iterable for the given range.
"""
_block_length = self.block_length
start, stop, step = slice(*args).indices(self.length)
        # Find the number of items in the slice (len() works on xrange)
        n = len(xrange(start, stop, step))
if n <= 0:
return
# Check if begin and end indexes are in range
if start < 0 or start >= self.length:
raise IndexError("Index out of range")
end = start + (n - 1) * step
if end < 0 or end >= self.length:
raise IndexError("Index out of range")
# Do the actual loop
# This doesn't use the class's cur_block
cur_block_idx = -1
cur_block = None
for n in xrange(start, stop, step):
i = n // _block_length
j = n % _block_length
if cur_block_idx != i:
cur_block = self.load_block(i)
cur_block_idx = i
yield unpack(cur_block[j])
def get_idxs(self, ns):
"""
Get the values at given idxs.
NOTE: if the indexes are not sorted,
performance may be really slow.
"""
_block_length = self.block_length
cur_block_idx = -1
cur_block = None
for n in ns:
n = (self.length + n) if n < 0 else n
if n < 0 or n >= self.length:
raise IndexError("Index out of range")
i = n // _block_length
j = n % _block_length
if cur_block_idx != i:
cur_block = self.load_block(i)
cur_block_idx = i
yield unpack(cur_block[j])
def __iter__(self):
for i in xrange(self.num_blocks):
cur_block = self.load_block(i)
for item in cur_block:
yield unpack(item)
def __getitem__(self, key):
if isinstance(key, slice):
return list(self.get_slice(key.start, key.stop, key.step))
elif isinstance(key, (list, tuple)):
return list(self.get_idxs(key))
else:
return self.get_idx(key)
class DatasetWriter(Closes):
"""
Writes a dataset object to a file.
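    Example (a sketch; ``data.dset`` is an assumed output path that
    does not exist yet)::
        with DatasetWriter("data.dset", block_length=1000) as ds:
            ds.append({"id": 0})
            ds.extend({"id": i} for i in xrange(1, 100))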
"""
def __init__(self, fname, create=True, block_length=1,
comp_format="lz4", comp_level=6):
self.closed = True
# Check the parameters
block_length = int(block_length)
if block_length < 1:
raise ValueError("Block length must be at-least 1")
if comp_format not in COMP_TABLE:
raise IOError("Unknown compression: %s" % comp_format)
comp_level = int(comp_level)
if not 1 <= comp_level <= 9:
raise ValueError("Invalid compression level: %d" % comp_level)
self.fname = fname
self.store = DataStore(fname, write=True, create=create)
try:
if create:
self.block_length = block_length
self.length = 0
self.comp_format = comp_format
self.comp_level = comp_level
self.write_meta(True)
else:
if self.store.version != VERSION:
raise IOError("Invalid version: %d" % self.store.version)
self.block_length = self.store.block_length
self.length = self.store.length
self.comp_format = self.store.comp_format
self.comp_level = self.store.comp_level
self.comp_fn, self.decomp_fn = COMP_TABLE[self.comp_format]
self.closed = False
        except Exception:
self.store.close()
raise
# number of blocks already present in the dataset
self.num_blocks = self.length // self.block_length
self.num_blocks += bool(self.length % self.block_length)
if self.length % self.block_length == 0:
self.cur_block = []
else:
            self.cur_block = self.load_block(self.num_blocks - 1)
self.num_blocks -= 1
def write_meta(self, full=False):
"""
Write meta information.
"""
if full:
self.store.version = VERSION
self.store.block_length = self.block_length
self.store.comp_format = self.comp_format
self.store.comp_level = self.comp_level
self.store.length = self.length
def load_block(self, i):
"""
Load a block from the given file.
"""
block_hcomp = self.store.get(i)
if block_hcomp is None:
raise IOError("Block %d not in store" % i)
try:
block_raw = decomp_block(block_hcomp, self.decomp_fn)
except IOError as e:
raise IOError("Block %d: %s", (i, e)), None, sys.exc_info()[2]
return unpack(block_raw)
def dump_block(self, i, block):
"""
Write the block to the store.
"""
block_raw = pack(block)
block_hcomp = comp_block(block_raw, self.comp_fn, self.comp_level)
self.store.put(i, block_hcomp)
self.write_meta()
def flush(self, force=False):
"""
Flush the current block to output file.
"""
if len(self.cur_block) != self.block_length and not force:
raise ValueError("Cant flush unfilled block without forcing")
if not self.cur_block:
return
self.dump_block(self.num_blocks, self.cur_block)
self.num_blocks += 1
self.cur_block = []
def close(self):
if not self.closed:
self.flush(force=True)
self.store.close()
self.closed = True
def append(self, obj):
"""
Append the object to database.
"""
self.cur_block.append(pack(obj))
self.length += 1
if len(self.cur_block) == self.block_length:
self.flush()
def extend(self, iterable):
for item in iterable:
self.cur_block.append(pack(item))
self.length += 1
if len(self.cur_block) == self.block_length:
self.flush()
def open(fname, mode="r", block_length=None, comp_format="lz4", comp_level=6):
# pylint: disable=redefined-builtin
"""
Open a dataset for reading or writing.
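    Example round trip (a sketch; ``data.dset`` is an assumed filename
    that does not exist yet)::
        with open("data.dset", "w", block_length=100) as ds:
            ds.extend(xrange(1000))
        with open("data.dset") as ds:
            assert len(ds) == 1000 and ds[0] == 0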
"""
if mode == "r":
return DatasetReader(fname)
elif mode == "w":
if block_length is None:
raise ValueError("Must specify block_length for write mode")
return DatasetWriter(fname, True, block_length, comp_format, comp_level)
elif mode == "a":
return DatasetWriter(fname, False)
else:
raise ValueError("Invalid mode '%s'" % mode)
| mit | 649,836,582,525,200,100 | 27.822612 | 80 | 0.532328 | false |
cactusbin/nyt | matplotlib/lib/matplotlib/backend_bases.py | 1 | 106941 | """
Abstract base classes define the primitives that renderers and
graphics contexts must implement to serve as a matplotlib backend
:class:`RendererBase`
An abstract base class to handle drawing/rendering operations.
:class:`FigureCanvasBase`
The abstraction layer that separates the
:class:`matplotlib.figure.Figure` from the backend specific
details like a user interface drawing area
:class:`GraphicsContextBase`
An abstract base class that provides color, line styles, etc...
:class:`Event`
The base class for all of the matplotlib event
    handling. Derived classes such as :class:`KeyEvent` and
:class:`MouseEvent` store the meta data like keys and buttons
pressed, x and y locations in pixel and
:class:`~matplotlib.axes.Axes` coordinates.
:class:`ShowBase`
The base class for the Show class of each interactive backend;
the 'show' callable is then set to Show.__call__, inherited from
ShowBase.
"""
from __future__ import division, print_function
import os
import warnings
import time
import io
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.colors as colors
import matplotlib.transforms as transforms
import matplotlib.widgets as widgets
#import matplotlib.path as path
from matplotlib import rcParams
from matplotlib import is_interactive
from matplotlib import get_backend
from matplotlib._pylab_helpers import Gcf
from matplotlib.transforms import Bbox, TransformedBbox, Affine2D
import matplotlib.tight_bbox as tight_bbox
import matplotlib.textpath as textpath
from matplotlib.path import Path
from matplotlib.cbook import mplDeprecation
try:
from PIL import Image
_has_pil = True
except ImportError:
_has_pil = False
_backend_d = {}
def register_backend(format, backend_class):
    """Register *backend_class* as the handler for files of type *format*."""
    _backend_d[format] = backend_class
class ShowBase(object):
"""
Simple base class to generate a show() callable in backends.
Subclass must override mainloop() method.
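    Example (a sketch of the pattern interactive backends follow)::
        class Show(ShowBase):
            def mainloop(self):
                pass  # enter the GUI toolkit's blocking event loop here
        show = Show()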
"""
def __call__(self, block=None):
"""
Show all figures. If *block* is not None, then
it is a boolean that overrides all other factors
determining whether show blocks by calling mainloop().
The other factors are:
it does not block if run inside "ipython --pylab";
it does not block in interactive mode.
"""
managers = Gcf.get_all_fig_managers()
if not managers:
return
for manager in managers:
manager.show()
if block is not None:
if block:
self.mainloop()
return
else:
return
# Hack: determine at runtime whether we are
# inside ipython in pylab mode.
from matplotlib import pyplot
try:
ipython_pylab = not pyplot.show._needmain
# IPython versions >= 0.10 tack the _needmain
# attribute onto pyplot.show, and always set
# it to False, when in --pylab mode.
ipython_pylab = ipython_pylab and get_backend() != 'WebAgg'
# TODO: The above is a hack to get the WebAgg backend
# working with `ipython --pylab` until proper integration
# is implemented.
except AttributeError:
ipython_pylab = False
# Leave the following as a separate step in case we
# want to control this behavior with an rcParam.
if ipython_pylab:
return
if not is_interactive() or get_backend() == 'WebAgg':
self.mainloop()
def mainloop(self):
pass
class RendererBase:
"""An abstract base class to handle drawing/rendering operations.
The following methods *must* be implemented in the backend:
* :meth:`draw_path`
* :meth:`draw_image`
* :meth:`draw_text`
* :meth:`get_text_width_height_descent`
The following methods *should* be implemented in the backend for
optimization reasons:
* :meth:`draw_markers`
* :meth:`draw_path_collection`
* :meth:`draw_quad_mesh`
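    A minimal concrete renderer follows this pattern (a sketch only; the
    ``pass`` bodies stand in for backend specific drawing code)::
        class RendererTemplate(RendererBase):
            def draw_path(self, gc, path, transform, rgbFace=None):
                pass  # emit *path* in the output format
            def draw_image(self, gc, x, y, im):
                pass
            def draw_text(self, gc, x, y, s, prop, angle, ismath=False,
                          mtext=None):
                pass
            def get_text_width_height_descent(self, s, prop, ismath):
                return 1, 1, 1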
"""
def __init__(self):
self._texmanager = None
self._text2path = textpath.TextToPath()
def open_group(self, s, gid=None):
"""
Open a grouping element with label *s*. If *gid* is given, use
        *gid* as the id of the group. This is currently only used by
:mod:`~matplotlib.backends.backend_svg`.
"""
pass
def close_group(self, s):
"""
        Close a grouping element with label *s*.
        This is currently only used by :mod:`~matplotlib.backends.backend_svg`.
"""
pass
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a :class:`~matplotlib.path.Path` instance using the
given affine transform.
"""
raise NotImplementedError
def draw_markers(self, gc, marker_path, marker_trans, path,
trans, rgbFace=None):
"""
Draws a marker at each of the vertices in path. This includes
all vertices, including control points on curves. To avoid
that behavior, those vertices should be removed before calling
this function.
*gc*
the :class:`GraphicsContextBase` instance
*marker_trans*
is an affine transform applied to the marker.
*trans*
is an affine transform applied to the path.
This provides a fallback implementation of draw_markers that
makes multiple calls to :meth:`draw_path`. Some backends may
want to override this method in order to draw the marker only
once and reuse it multiple times.
"""
for vertices, codes in path.iter_segments(trans, simplify=False):
if len(vertices):
x, y = vertices[-2:]
self.draw_path(gc, marker_path,
marker_trans +
transforms.Affine2D().translate(x, y),
rgbFace)
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
"""
Draws a collection of paths selecting drawing properties from
the lists *facecolors*, *edgecolors*, *linewidths*,
*linestyles* and *antialiaseds*. *offsets* is a list of
offsets to apply to each of the paths. The offsets in
*offsets* are first transformed by *offsetTrans* before being
applied. *offset_position* may be either "screen" or "data"
depending on the space that the offsets are in.
This provides a fallback implementation of
:meth:`draw_path_collection` that makes multiple calls to
:meth:`draw_path`. Some backends may want to override this in
order to render each set of path data only once, and then
reference that path multiple times with the different offsets,
colors, styles etc. The generator methods
:meth:`_iter_collection_raw_paths` and
:meth:`_iter_collection` are provided to help with (and
standardize) the implementation across backends. It is highly
recommended to use those generators, so that changes to the
behavior of :meth:`draw_path_collection` can be made globally.
"""
path_ids = []
for path, transform in self._iter_collection_raw_paths(
master_transform, paths, all_transforms):
path_ids.append((path, transform))
for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
gc, master_transform, all_transforms, path_ids, offsets,
offsetTrans, facecolors, edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
path, transform = path_id
transform = transforms.Affine2D(
transform.get_matrix()).translate(xo, yo)
self.draw_path(gc0, path, transform, rgbFace)
def draw_quad_mesh(self, gc, master_transform, meshWidth, meshHeight,
coordinates, offsets, offsetTrans, facecolors,
antialiased, edgecolors):
"""
This provides a fallback implementation of
:meth:`draw_quad_mesh` that generates paths and then calls
:meth:`draw_path_collection`.
"""
from matplotlib.collections import QuadMesh
paths = QuadMesh.convert_mesh_to_paths(
meshWidth, meshHeight, coordinates)
if edgecolors is None:
edgecolors = facecolors
linewidths = np.array([gc.get_linewidth()], np.float_)
return self.draw_path_collection(
gc, master_transform, paths, [], offsets, offsetTrans, facecolors,
edgecolors, linewidths, [], [antialiased], [None], 'screen')
def draw_gouraud_triangle(self, gc, points, colors, transform):
"""
Draw a Gouraud-shaded triangle.
*points* is a 3x2 array of (x, y) points for the triangle.
*colors* is a 3x4 array of RGBA colors for each point of the
triangle.
*transform* is an affine transform to apply to the points.
"""
raise NotImplementedError
def draw_gouraud_triangles(self, gc, triangles_array, colors_array,
transform):
"""
Draws a series of Gouraud triangles.
        *points* is a Nx3x2 array of (x, y) points for the triangles.
*colors* is a Nx3x4 array of RGBA colors for each point of the
triangles.
*transform* is an affine transform to apply to the points.
"""
transform = transform.frozen()
for tri, col in zip(triangles_array, colors_array):
self.draw_gouraud_triangle(gc, tri, col, transform)
def _iter_collection_raw_paths(self, master_transform, paths,
all_transforms):
"""
This is a helper method (along with :meth:`_iter_collection`) to make
        it easier to write a space-efficient :meth:`draw_path_collection`
implementation in a backend.
This method yields all of the base path/transform
combinations, given a master transform, a list of paths and
list of transforms.
The arguments should be exactly what is passed in to
:meth:`draw_path_collection`.
The backend should take each yielded path and transform and
create an object that can be referenced (reused) later.
"""
Npaths = len(paths)
Ntransforms = len(all_transforms)
N = max(Npaths, Ntransforms)
if Npaths == 0:
return
transform = transforms.IdentityTransform()
for i in xrange(N):
path = paths[i % Npaths]
if Ntransforms:
transform = all_transforms[i % Ntransforms]
yield path, transform + master_transform
def _iter_collection(self, gc, master_transform, all_transforms,
path_ids, offsets, offsetTrans, facecolors,
edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
"""
This is a helper method (along with
:meth:`_iter_collection_raw_paths`) to make it easier to write
        a space-efficient :meth:`draw_path_collection` implementation in a
backend.
This method yields all of the path, offset and graphics
context combinations to draw the path collection. The caller
should already have looped over the results of
:meth:`_iter_collection_raw_paths` to draw this collection.
The arguments should be the same as that passed into
:meth:`draw_path_collection`, with the exception of
*path_ids*, which is a list of arbitrary objects that the
backend will use to reference one of the paths created in the
:meth:`_iter_collection_raw_paths` stage.
Each yielded result is of the form::
xo, yo, path_id, gc, rgbFace
where *xo*, *yo* is an offset; *path_id* is one of the elements of
*path_ids*; *gc* is a graphics context and *rgbFace* is a color to
use for filling the path.
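        A backend built on these helpers therefore typically implements
        :meth:`draw_path_collection` like this (a sketch; compare the
        default implementation above)::
            path_ids = list(self._iter_collection_raw_paths(
                master_transform, paths, all_transforms))
            for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
                    gc, master_transform, all_transforms, path_ids,
                    offsets, offsetTrans, facecolors, edgecolors,
                    linewidths, linestyles, antialiaseds, urls,
                    offset_position):
                pass  # draw the cached object *path_id* at (xo, yo)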
"""
Ntransforms = len(all_transforms)
Npaths = len(path_ids)
Noffsets = len(offsets)
N = max(Npaths, Noffsets)
Nfacecolors = len(facecolors)
Nedgecolors = len(edgecolors)
Nlinewidths = len(linewidths)
Nlinestyles = len(linestyles)
Naa = len(antialiaseds)
Nurls = len(urls)
if (Nfacecolors == 0 and Nedgecolors == 0) or Npaths == 0:
return
if Noffsets:
toffsets = offsetTrans.transform(offsets)
gc0 = self.new_gc()
gc0.copy_properties(gc)
if Nfacecolors == 0:
rgbFace = None
if Nedgecolors == 0:
gc0.set_linewidth(0.0)
xo, yo = 0, 0
for i in xrange(N):
path_id = path_ids[i % Npaths]
if Noffsets:
xo, yo = toffsets[i % Noffsets]
if offset_position == 'data':
if Ntransforms:
transform = (all_transforms[i % Ntransforms] +
master_transform)
else:
transform = master_transform
xo, yo = transform.transform_point((xo, yo))
xp, yp = transform.transform_point((0, 0))
xo = -(xp - xo)
yo = -(yp - yo)
if not (np.isfinite(xo) and np.isfinite(yo)):
continue
if Nfacecolors:
rgbFace = facecolors[i % Nfacecolors]
if Nedgecolors:
if Nlinewidths:
gc0.set_linewidth(linewidths[i % Nlinewidths])
if Nlinestyles:
gc0.set_dashes(*linestyles[i % Nlinestyles])
fg = edgecolors[i % Nedgecolors]
if len(fg) == 4:
if fg[3] == 0.0:
gc0.set_linewidth(0)
else:
gc0.set_foreground(fg)
else:
gc0.set_foreground(fg)
if rgbFace is not None and len(rgbFace) == 4:
if rgbFace[3] == 0:
rgbFace = None
gc0.set_antialiased(antialiaseds[i % Naa])
if Nurls:
gc0.set_url(urls[i % Nurls])
yield xo, yo, path_id, gc0, rgbFace
gc0.restore()
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to :meth:`draw_image`.
Allows a backend to have images at a different resolution to other
artists.
"""
return 1.0
def draw_image(self, gc, x, y, im):
"""
Draw the image instance into the current axes;
*gc*
a GraphicsContext containing clipping information
*x*
is the distance in pixels from the left hand side of the canvas.
*y*
the distance from the origin. That is, if origin is
upper, y is the distance from top. If origin is lower, y
is the distance from bottom
*im*
the :class:`matplotlib._image.Image` instance
"""
raise NotImplementedError
def option_image_nocomposite(self):
"""
override this method for renderers that do not necessarily
        want to rescale and composite raster images (like SVG).
"""
return False
def option_scale_image(self):
"""
override this method for renderers that support arbitrary
        scaling of images (most of the vector backends).
"""
return False
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
"""
"""
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath="TeX")
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
"""
Draw the text instance
*gc*
the :class:`GraphicsContextBase` instance
*x*
the x location of the text in display coords
*y*
the y location of the text baseline in display coords
*s*
the text string
*prop*
a :class:`matplotlib.font_manager.FontProperties` instance
*angle*
the rotation angle in degrees
*mtext*
a :class:`matplotlib.text.Text` instance
**backend implementers note**
When you are trying to determine if you have gotten your bounding box
right (which is what enables the text layout/alignment to work
properly), it helps to change the line in text.py::
if 0: bbox_artist(self, renderer)
to if 1, and then the actual bounding box will be plotted along with
your text.
"""
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath)
def _get_text_path_transform(self, x, y, s, prop, angle, ismath):
"""
return the text path and transform
*prop*
font property
*s*
text to be converted
*usetex*
If True, use matplotlib usetex mode.
*ismath*
If True, use mathtext parser. If "TeX", use *usetex* mode.
"""
text2path = self._text2path
fontsize = self.points_to_pixels(prop.get_size_in_points())
if ismath == "TeX":
verts, codes = text2path.get_text_path(prop, s, ismath=False,
usetex=True)
else:
verts, codes = text2path.get_text_path(prop, s, ismath=ismath,
usetex=False)
path = Path(verts, codes)
        angle = np.deg2rad(angle)
if self.flipy():
transform = Affine2D().scale(fontsize / text2path.FONT_SCALE,
fontsize / text2path.FONT_SCALE)
transform = transform.rotate(angle).translate(x, self.height - y)
else:
transform = Affine2D().scale(fontsize / text2path.FONT_SCALE,
fontsize / text2path.FONT_SCALE)
transform = transform.rotate(angle).translate(x, y)
return path, transform
def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath):
"""
draw the text by converting them to paths using textpath module.
*prop*
font property
*s*
text to be converted
*usetex*
If True, use matplotlib usetex mode.
*ismath*
If True, use mathtext parser. If "TeX", use *usetex* mode.
"""
path, transform = self._get_text_path_transform(
x, y, s, prop, angle, ismath)
color = gc.get_rgb()
gc.set_linewidth(0.0)
self.draw_path(gc, path, transform, rgbFace=color)
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height, and the offset from the bottom to the
baseline (descent), in display coords of the string s with
:class:`~matplotlib.font_manager.FontProperties` prop
"""
if ismath == 'TeX':
# todo: handle props
texmanager = self._text2path.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=self)
return w, h, d
dpi = self.points_to_pixels(72)
if ismath:
dims = self._text2path.mathtext_parser.parse(s, dpi, prop)
return dims[0:3] # return width, height, descent
flags = self._text2path._get_hinting_flag()
font = self._text2path._get_font(prop)
size = prop.get_size_in_points()
font.set_size(size, dpi)
# the width and height of unrotated string
font.set_text(s, 0.0, flags=flags)
w, h = font.get_width_height()
d = font.get_descent()
w /= 64.0 # convert from subpixels
h /= 64.0
d /= 64.0
return w, h, d
def flipy(self):
"""
        Return true if small y values correspond to the top of the
        canvas. Used for drawing text (:mod:`matplotlib.text`) and
        images (:mod:`matplotlib.image`) only.
"""
return True
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return 1, 1
def get_texmanager(self):
"""
return the :class:`matplotlib.texmanager.TexManager` instance
"""
if self._texmanager is None:
from matplotlib.texmanager import TexManager
self._texmanager = TexManager()
return self._texmanager
def new_gc(self):
"""
Return an instance of a :class:`GraphicsContextBase`
"""
return GraphicsContextBase()
def points_to_pixels(self, points):
"""
Convert points to display units
*points*
a float or a numpy array of float
return points converted to pixels
You need to override this function (unless your backend
        doesn't have a dpi, e.g., postscript or svg). Some imaging
systems assume some value for pixels per inch::
points to pixels = points * pixels_per_inch/72.0 * dpi/72.0
"""
return points
def strip_math(self, s):
return cbook.strip_math(s)
def start_rasterizing(self):
"""
Used in MixedModeRenderer. Switch to the raster renderer.
"""
pass
def stop_rasterizing(self):
"""
Used in MixedModeRenderer. Switch back to the vector renderer
and draw the contents of the raster renderer as an image on
the vector renderer.
"""
pass
def start_filter(self):
"""
Used in AggRenderer. Switch to a temporary renderer for image
filtering effects.
"""
pass
def stop_filter(self, filter_func):
"""
Used in AggRenderer. Switch back to the original renderer.
The contents of the temporary renderer is processed with the
*filter_func* and is drawn on the original renderer as an
image.
"""
pass
class GraphicsContextBase:
"""
An abstract base class that provides color, line styles, etc...
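    Example (a sketch of how a backend's :meth:`RendererBase.draw_path`
    typically queries the graphics context it is handed)::
        rgba = gc.get_rgb()
        linewidth = self.points_to_pixels(gc.get_linewidth())
        offset, dashes = gc.get_dashes()
        rect = gc.get_clip_rectangle()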
"""
# a mapping from dash styles to suggested offset, dash pairs
dashd = {
'solid': (None, None),
'dashed': (0, (6.0, 6.0)),
'dashdot': (0, (3.0, 5.0, 1.0, 5.0)),
'dotted': (0, (1.0, 3.0)),
}
def __init__(self):
self._alpha = 1.0
self._forced_alpha = False # if True, _alpha overrides A from RGBA
self._antialiased = 1 # use 0,1 not True, False for extension code
self._capstyle = 'butt'
self._cliprect = None
self._clippath = None
self._dashes = None, None
self._joinstyle = 'round'
self._linestyle = 'solid'
self._linewidth = 1
self._rgb = (0.0, 0.0, 0.0, 1.0)
self._orig_color = (0.0, 0.0, 0.0, 1.0)
self._hatch = None
self._url = None
self._gid = None
self._snap = None
self._sketch = None
def copy_properties(self, gc):
'Copy properties from gc to self'
self._alpha = gc._alpha
self._forced_alpha = gc._forced_alpha
self._antialiased = gc._antialiased
self._capstyle = gc._capstyle
self._cliprect = gc._cliprect
self._clippath = gc._clippath
self._dashes = gc._dashes
self._joinstyle = gc._joinstyle
self._linestyle = gc._linestyle
self._linewidth = gc._linewidth
self._rgb = gc._rgb
self._orig_color = gc._orig_color
self._hatch = gc._hatch
self._url = gc._url
self._gid = gc._gid
self._snap = gc._snap
self._sketch = gc._sketch
def restore(self):
"""
Restore the graphics context from the stack - needed only
for backends that save graphics contexts on a stack
"""
pass
def get_alpha(self):
"""
Return the alpha value used for blending - not supported on
all backends
"""
return self._alpha
def get_antialiased(self):
"Return true if the object should try to do antialiased rendering"
return self._antialiased
def get_capstyle(self):
"""
Return the capstyle as a string in ('butt', 'round', 'projecting')
"""
return self._capstyle
def get_clip_rectangle(self):
"""
Return the clip rectangle as a :class:`~matplotlib.transforms.Bbox`
instance
"""
return self._cliprect
def get_clip_path(self):
"""
Return the clip path in the form (path, transform), where path
is a :class:`~matplotlib.path.Path` instance, and transform is
an affine transform to apply to the path before clipping.
"""
if self._clippath is not None:
return self._clippath.get_transformed_path_and_affine()
return None, None
def get_dashes(self):
"""
Return the dash information as an offset dashlist tuple.
        The dash list is an even-length list that gives the ink on, ink
off in pixels.
        See p. 107 of the PostScript `BLUEBOOK
<http://www-cdf.fnal.gov/offline/PostScript/BLUEBOOK.PDF>`_
for more info.
Default value is None
"""
return self._dashes
def get_forced_alpha(self):
"""
Return whether the value given by get_alpha() should be used to
override any other alpha-channel values.
"""
return self._forced_alpha
def get_joinstyle(self):
"""
Return the line join style as one of ('miter', 'round', 'bevel')
"""
return self._joinstyle
    def get_linestyle(self):
"""
Return the linestyle: one of ('solid', 'dashed', 'dashdot',
'dotted').
"""
return self._linestyle
def get_linewidth(self):
"""
Return the line width in points as a scalar
"""
return self._linewidth
def get_rgb(self):
"""
returns a tuple of three or four floats from 0-1.
"""
return self._rgb
def get_url(self):
"""
returns a url if one is set, None otherwise
"""
return self._url
def get_gid(self):
"""
Return the object identifier if one is set, None otherwise.
"""
return self._gid
def get_snap(self):
"""
returns the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
return self._snap
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on all backends.
If ``alpha=None`` (the default), the alpha components of the
foreground and fill colors will be used to set their respective
transparencies (where applicable); otherwise, ``alpha`` will override
them.
"""
if alpha is not None:
self._alpha = alpha
self._forced_alpha = True
else:
self._alpha = 1.0
self._forced_alpha = False
self.set_foreground(self._orig_color)
def set_antialiased(self, b):
"""
True if object should be drawn with antialiased rendering
"""
# use 0, 1 to make life easier on extension code trying to read the gc
if b:
self._antialiased = 1
else:
self._antialiased = 0
def set_capstyle(self, cs):
"""
Set the capstyle as a string in ('butt', 'round', 'projecting')
"""
if cs in ('butt', 'round', 'projecting'):
self._capstyle = cs
else:
raise ValueError('Unrecognized cap style. Found %s' % cs)
def set_clip_rectangle(self, rectangle):
"""
Set the clip rectangle with sequence (left, bottom, width, height)
"""
self._cliprect = rectangle
def set_clip_path(self, path):
"""
Set the clip path and transformation. Path should be a
:class:`~matplotlib.transforms.TransformedPath` instance.
"""
assert path is None or isinstance(path, transforms.TransformedPath)
self._clippath = path
def set_dashes(self, dash_offset, dash_list):
"""
Set the dash style for the gc.
*dash_offset*
is the offset (usually 0).
*dash_list*
specifies the on-off sequence as points.
``(None, None)`` specifies a solid line
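        For example, ``gc.set_dashes(0, (3.0, 1.0))`` requests three
        points of ink followed by a one point gap, repeated.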
"""
if dash_list is not None:
dl = np.asarray(dash_list)
if np.any(dl <= 0.0):
raise ValueError("All values in the dash list must be positive")
self._dashes = dash_offset, dash_list
def set_foreground(self, fg, isRGBA=False):
"""
Set the foreground color. fg can be a MATLAB format string, a
html hex color string, an rgb or rgba unit tuple, or a float between 0
and 1. In the latter case, grayscale is used.
If you know fg is rgba, set ``isRGBA=True`` for efficiency.
"""
self._orig_color = fg
if self._forced_alpha:
self._rgb = colors.colorConverter.to_rgba(fg, self._alpha)
elif isRGBA:
self._rgb = fg
else:
self._rgb = colors.colorConverter.to_rgba(fg)
def set_graylevel(self, frac):
"""
Set the foreground color to be a gray level with *frac*
"""
self._orig_color = frac
self._rgb = (frac, frac, frac, self._alpha)
def set_joinstyle(self, js):
"""
Set the join style to be one of ('miter', 'round', 'bevel')
"""
if js in ('miter', 'round', 'bevel'):
self._joinstyle = js
else:
raise ValueError('Unrecognized join style. Found %s' % js)
def set_linewidth(self, w):
"""
Set the linewidth in points
"""
self._linewidth = w
def set_linestyle(self, style):
"""
Set the linestyle to be one of ('solid', 'dashed', 'dashdot',
'dotted'). One may specify customized dash styles by providing
        a tuple of (offset, dash pairs). For example, the predefined
        linestyles have the following values::
'dashed' : (0, (6.0, 6.0)),
'dashdot' : (0, (3.0, 5.0, 1.0, 5.0)),
'dotted' : (0, (1.0, 3.0)),
"""
if style in self.dashd:
offset, dashes = self.dashd[style]
elif isinstance(style, tuple):
offset, dashes = style
else:
raise ValueError('Unrecognized linestyle: %s' % str(style))
self._linestyle = style
self.set_dashes(offset, dashes)
def set_url(self, url):
"""
Sets the url for links in compatible backends
"""
self._url = url
def set_gid(self, id):
"""
Sets the id.
"""
self._gid = id
def set_snap(self, snap):
"""
Sets the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
self._snap = snap
def set_hatch(self, hatch):
"""
Sets the hatch style for filling
"""
self._hatch = hatch
def get_hatch(self):
"""
Gets the current hatch style
"""
return self._hatch
def get_hatch_path(self, density=6.0):
"""
Returns a Path for the current hatch.
"""
if self._hatch is None:
return None
return Path.hatch(self._hatch, density)
def get_sketch_params(self):
"""
Returns the sketch parameters for the artist.
Returns
-------
sketch_params : tuple or `None`
A 3-tuple with the following elements:
* `scale`: The amplitude of the wiggle perpendicular to the
source line.
* `length`: The length of the wiggle along the line.
* `randomness`: The scale factor by which the length is
shrunken or expanded.
May return `None` if no sketch parameters were set.
"""
return self._sketch
def set_sketch_params(self, scale=None, length=None, randomness=None):
"""
        Sets the sketch parameters.
Parameters
----------
scale : float, optional
The amplitude of the wiggle perpendicular to the source
line, in pixels. If scale is `None`, or not provided, no
sketch filter will be provided.
length : float, optional
The length of the wiggle along the line, in pixels
(default 128.0)
randomness : float, optional
The scale factor by which the length is shrunken or
expanded (default 16.0)
"""
if scale is None:
self._sketch = None
else:
self._sketch = (scale, length or 128.0, randomness or 16.0)
class TimerBase(object):
'''
    A base class for providing timer events, useful for things like animations.
Backends need to implement a few specific methods in order to use their
own timing mechanisms so that the timer events are integrated into their
event loops.
Mandatory functions that must be implemented:
* `_timer_start`: Contains backend-specific code for starting
the timer
* `_timer_stop`: Contains backend-specific code for stopping
the timer
Optional overrides:
* `_timer_set_single_shot`: Code for setting the timer to
single shot operating mode, if supported by the timer
object. If not, the `Timer` class itself will store the flag
and the `_on_timer` method should be overridden to support
such behavior.
* `_timer_set_interval`: Code for setting the interval on the
timer, if there is a method for doing so on the timer
object.
* `_on_timer`: This is the internal function that any timer
object should call, which will handle the task of running
all callbacks that have been set.
Attributes:
* `interval`: The time between timer events in
milliseconds. Default is 1000 ms.
* `single_shot`: Boolean flag indicating whether this timer
should operate as single shot (run once and then
stop). Defaults to `False`.
* `callbacks`: Stores list of (func, args) tuples that will be
called upon timer events. This list can be manipulated
directly, or the functions `add_callback` and
`remove_callback` can be used.
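    Example usage (a sketch; assumes ``fig`` is an existing
    :class:`matplotlib.figure.Figure` managed by an interactive
    backend)::
        def update():
            fig.canvas.draw_idle()
        timer = fig.canvas.new_timer(interval=100)
        timer.add_callback(update)
        timer.start()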
'''
def __init__(self, interval=None, callbacks=None):
        # Initialize empty callbacks list and set up default settings if necessary
if callbacks is None:
self.callbacks = []
else:
self.callbacks = callbacks[:] # Create a copy
if interval is None:
self._interval = 1000
else:
self._interval = interval
self._single = False
# Default attribute for holding the GUI-specific timer object
self._timer = None
def __del__(self):
'Need to stop timer and possibly disconnect timer.'
self._timer_stop()
def start(self, interval=None):
'''
Start the timer object. `interval` is optional and will be used
to reset the timer interval first if provided.
'''
if interval is not None:
self._set_interval(interval)
self._timer_start()
def stop(self):
'''
Stop the timer.
'''
self._timer_stop()
def _timer_start(self):
pass
def _timer_stop(self):
pass
def _get_interval(self):
return self._interval
def _set_interval(self, interval):
# Force to int since none of the backends actually support fractional
# milliseconds, and some error or give warnings.
interval = int(interval)
self._interval = interval
self._timer_set_interval()
interval = property(_get_interval, _set_interval)
def _get_single_shot(self):
return self._single
def _set_single_shot(self, ss=True):
self._single = ss
self._timer_set_single_shot()
single_shot = property(_get_single_shot, _set_single_shot)
def add_callback(self, func, *args, **kwargs):
'''
Register `func` to be called by timer when the event fires. Any
additional arguments provided will be passed to `func`.
'''
self.callbacks.append((func, args, kwargs))
def remove_callback(self, func, *args, **kwargs):
'''
Remove `func` from list of callbacks. `args` and `kwargs` are optional
and used to distinguish between copies of the same function registered
to be called with different arguments.
'''
if args or kwargs:
self.callbacks.remove((func, args, kwargs))
else:
funcs = [c[0] for c in self.callbacks]
if func in funcs:
self.callbacks.pop(funcs.index(func))
def _timer_set_interval(self):
'Used to set interval on underlying timer object.'
pass
def _timer_set_single_shot(self):
'Used to set single shot on underlying timer object.'
pass
def _on_timer(self):
'''
Runs all function that have been registered as callbacks. Functions
can return False (or 0) if they should not be called any more. If there
are no callbacks, the timer is automatically stopped.
'''
        # iterate over a copy, since callbacks may be removed below
        for func, args, kwargs in list(self.callbacks):
ret = func(*args, **kwargs)
# docstring above explains why we use `if ret == False` here,
# instead of `if not ret`.
if ret == False:
self.callbacks.remove((func, args, kwargs))
if len(self.callbacks) == 0:
self.stop()
class Event:
"""
A matplotlib event. Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`. The following attributes
are defined and shown with their default values
*name*
the event name
*canvas*
the FigureCanvas instance generating the event
*guiEvent*
the GUI event that triggered the matplotlib event
"""
def __init__(self, name, canvas, guiEvent=None):
self.name = name
self.canvas = canvas
self.guiEvent = guiEvent
class IdleEvent(Event):
"""
An event triggered by the GUI backend when it is idle -- useful
for passive animation
"""
pass
class DrawEvent(Event):
"""
An event triggered by a draw operation on the canvas
In addition to the :class:`Event` attributes, the following event
attributes are defined:
*renderer*
the :class:`RendererBase` instance for the draw event
"""
def __init__(self, name, canvas, renderer):
Event.__init__(self, name, canvas)
self.renderer = renderer
class ResizeEvent(Event):
"""
An event triggered by a canvas resize
In addition to the :class:`Event` attributes, the following event
attributes are defined:
*width*
width of the canvas in pixels
*height*
height of the canvas in pixels
"""
def __init__(self, name, canvas):
Event.__init__(self, name, canvas)
self.width, self.height = canvas.get_width_height()
class CloseEvent(Event):
"""
    An event triggered by a figure being closed. It defines no
    attributes beyond those of :class:`Event`.
"""
def __init__(self, name, canvas, guiEvent=None):
Event.__init__(self, name, canvas, guiEvent)
class LocationEvent(Event):
"""
An event that has a screen location
The following additional attributes are defined and shown with
their default values.
In addition to the :class:`Event` attributes, the following
event attributes are defined:
*x*
x position - pixels from left of canvas
*y*
y position - pixels from bottom of canvas
*inaxes*
the :class:`~matplotlib.axes.Axes` instance if mouse is over axes
*xdata*
x coord of mouse in data coords
*ydata*
y coord of mouse in data coords
"""
x = None # x position - pixels from left of canvas
    y = None       # y position - pixels from bottom of canvas
    inaxes = None  # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
# the last event that was triggered before this one
lastevent = None
def __init__(self, name, canvas, x, y, guiEvent=None):
"""
*x*, *y* in figure coords, 0,0 = bottom, left
"""
Event.__init__(self, name, canvas, guiEvent=guiEvent)
self.x = x
self.y = y
if x is None or y is None:
# cannot check if event was in axes if no x,y info
self.inaxes = None
self._update_enter_leave()
return
# Find all axes containing the mouse
if self.canvas.mouse_grabber is None:
axes_list = [a for a in self.canvas.figure.get_axes()
if a.in_axes(self)]
else:
axes_list = [self.canvas.mouse_grabber]
if len(axes_list) == 0: # None found
self.inaxes = None
self._update_enter_leave()
return
elif (len(axes_list) > 1): # Overlap, get the highest zorder
axes_list.sort(key=lambda x: x.zorder)
self.inaxes = axes_list[-1] # Use the highest zorder
else: # Just found one hit
self.inaxes = axes_list[0]
try:
trans = self.inaxes.transData.inverted()
xdata, ydata = trans.transform_point((x, y))
except ValueError:
self.xdata = None
self.ydata = None
else:
self.xdata = xdata
self.ydata = ydata
self._update_enter_leave()
def _update_enter_leave(self):
'process the figure/axes enter leave events'
if LocationEvent.lastevent is not None:
last = LocationEvent.lastevent
if last.inaxes != self.inaxes:
# process axes enter/leave events
try:
if last.inaxes is not None:
last.canvas.callbacks.process('axes_leave_event', last)
except:
pass
# See ticket 2901582.
# I think this is a valid exception to the rule
# against catching all exceptions; if anything goes
# wrong, we simply want to move on and process the
# current event.
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
else:
# process a figure enter event
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
LocationEvent.lastevent = self
class MouseEvent(LocationEvent):
"""
A mouse event ('button_press_event',
'button_release_event',
'scroll_event',
'motion_notify_event').
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
*button*
button pressed None, 1, 2, 3, 'up', 'down' (up and down are used
for scroll events)
*key*
the key depressed when the mouse event triggered (see
:class:`KeyEvent`)
*step*
number of scroll steps (positive for 'up', negative for 'down')
Example usage::
def on_press(event):
print('you pressed', event.button, event.xdata, event.ydata)
cid = fig.canvas.mpl_connect('button_press_event', on_press)
"""
x = None # x position - pixels from left of canvas
    y = None         # y position - pixels from bottom of canvas
button = None # button pressed None, 1, 2, 3
dblclick = None # whether or not the event is the result of a double click
    inaxes = None    # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
step = None # scroll steps for scroll events
def __init__(self, name, canvas, x, y, button=None, key=None,
step=0, dblclick=False, guiEvent=None):
"""
x, y in figure coords, 0,0 = bottom, left
button pressed None, 1, 2, 3, 'up', 'down'
"""
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.button = button
self.key = key
self.step = step
self.dblclick = dblclick
def __str__(self):
return ("MPL MouseEvent: xy=(%d,%d) xydata=(%s,%s) button=%d " +
"dblclick=%s inaxes=%s") % (self.x, self.y, self.xdata,
self.ydata, self.button,
self.dblclick, self.inaxes)
class PickEvent(Event):
"""
a pick event, fired when the user picks a location on the canvas
sufficiently close to an artist.
Attrs: all the :class:`Event` attributes plus
*mouseevent*
the :class:`MouseEvent` that generated the pick
*artist*
the :class:`~matplotlib.artist.Artist` picked
other
extra class dependent attrs -- eg a
:class:`~matplotlib.lines.Line2D` pick may define different
extra attributes than a
:class:`~matplotlib.collections.PatchCollection` pick event
Example usage::
line, = ax.plot(rand(100), 'o', picker=5) # 5 points tolerance
def on_pick(event):
thisline = event.artist
xdata, ydata = thisline.get_data()
ind = event.ind
print('on pick line:', zip(xdata[ind], ydata[ind]))
cid = fig.canvas.mpl_connect('pick_event', on_pick)
"""
def __init__(self, name, canvas, mouseevent, artist,
guiEvent=None, **kwargs):
Event.__init__(self, name, canvas, guiEvent)
self.mouseevent = mouseevent
self.artist = artist
self.__dict__.update(kwargs)
class KeyEvent(LocationEvent):
"""
A key event (key press, key release).
Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`.
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
*key*
the key(s) pressed. Could be **None**, a single case sensitive ascii
character ("g", "G", "#", etc.), a special key
("control", "shift", "f1", "up", etc.) or a
combination of the above (e.g., "ctrl+alt+g", "ctrl+alt+G").
.. note::
Modifier keys will be prefixed to the pressed key and will be in the
order "ctrl", "alt", "super". The exception to this rule is when the
pressed key is itself a modifier key, therefore "ctrl+alt" and
"alt+control" can both be valid key values.
Example usage::
def on_key(event):
print('you pressed', event.key, event.xdata, event.ydata)
cid = fig.canvas.mpl_connect('key_press_event', on_key)
"""
def __init__(self, name, canvas, key, x=0, y=0, guiEvent=None):
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.key = key
class FigureCanvasBase(object):
"""
The canvas the figure renders into.
Public attributes
*figure*
A :class:`matplotlib.figure.Figure` instance
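    Example usage (a sketch using the Agg backend's concrete subclass)::
        from matplotlib.figure import Figure
        from matplotlib.backends.backend_agg import FigureCanvasAgg
        fig = Figure()
        canvas = FigureCanvasAgg(fig)
        ax = fig.add_subplot(111)
        ax.plot([1, 2, 3])
        canvas.print_figure('out.png')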
"""
events = [
'resize_event',
'draw_event',
'key_press_event',
'key_release_event',
'button_press_event',
'button_release_event',
'scroll_event',
'motion_notify_event',
'pick_event',
'idle_event',
'figure_enter_event',
'figure_leave_event',
'axes_enter_event',
'axes_leave_event',
'close_event'
]
supports_blit = True
def __init__(self, figure):
figure.set_canvas(self)
self.figure = figure
# a dictionary from event name to a dictionary that maps cid->func
self.callbacks = cbook.CallbackRegistry()
self.widgetlock = widgets.LockDraw()
self._button = None # the button pressed
self._key = None # the key pressed
self._lastx, self._lasty = None, None
self.button_pick_id = self.mpl_connect('button_press_event', self.pick)
self.scroll_pick_id = self.mpl_connect('scroll_event', self.pick)
self.mouse_grabber = None # the axes currently grabbing mouse
self.toolbar = None # NavigationToolbar2 will set me
self._is_saving = False
if False:
## highlight the artists that are hit
self.mpl_connect('motion_notify_event', self.onHilite)
## delete the artists that are clicked on
#self.mpl_disconnect(self.button_pick_id)
#self.mpl_connect('button_press_event',self.onRemove)
def is_saving(self):
"""
Returns `True` when the renderer is in the process of saving
to a file, rather than rendering for an on-screen buffer.
"""
return self._is_saving
def onRemove(self, ev):
"""
Mouse event processor which removes the top artist
        under the cursor. Connect this to the 'button_press_event'
using::
            canvas.mpl_connect('button_press_event', canvas.onRemove)
"""
def sort_artists(artists):
# This depends on stable sort and artists returned
# from get_children in z order.
L = [(h.zorder, h) for h in artists]
L.sort()
return [h for zorder, h in L]
# Find the top artist under the cursor
under = sort_artists(self.figure.hitlist(ev))
h = None
if under:
h = under[-1]
# Try deleting that artist, or its parent if you
# can't delete the artist
while h:
if h.remove():
self.draw_idle()
break
parent = None
for p in under:
if h in p.get_children():
parent = p
break
h = parent
def onHilite(self, ev):
"""
Mouse event processor which highlights the artists
under the cursor. Connect this to the 'motion_notify_event'
using::
canvas.mpl_connect('motion_notify_event',canvas.onHilite)
"""
if not hasattr(self, '_active'):
self._active = dict()
under = self.figure.hitlist(ev)
enter = [a for a in under if a not in self._active]
leave = [a for a in self._active if a not in under]
#print "within:"," ".join([str(x) for x in under])
#print "entering:",[str(a) for a in enter]
#print "leaving:",[str(a) for a in leave]
# On leave restore the captured colour
for a in leave:
if hasattr(a, 'get_color'):
a.set_color(self._active[a])
elif hasattr(a, 'get_edgecolor'):
a.set_edgecolor(self._active[a][0])
a.set_facecolor(self._active[a][1])
del self._active[a]
# On enter, capture the color and repaint the artist
# with the highlight colour. Capturing colour has to
# be done first in case the parent recolouring affects
# the child.
for a in enter:
if hasattr(a, 'get_color'):
self._active[a] = a.get_color()
elif hasattr(a, 'get_edgecolor'):
self._active[a] = (a.get_edgecolor(), a.get_facecolor())
else:
self._active[a] = None
for a in enter:
if hasattr(a, 'get_color'):
a.set_color('red')
elif hasattr(a, 'get_edgecolor'):
a.set_edgecolor('red')
a.set_facecolor('lightblue')
else:
self._active[a] = None
self.draw_idle()
def pick(self, mouseevent):
if not self.widgetlock.locked():
self.figure.pick(mouseevent)
def blit(self, bbox=None):
"""
blit the canvas in bbox (default entire canvas)
"""
pass
def resize(self, w, h):
"""
set the canvas size in pixels
"""
pass
def draw_event(self, renderer):
"""
        This method will call all functions connected to the
'draw_event' with a :class:`DrawEvent`
"""
s = 'draw_event'
event = DrawEvent(s, self, renderer)
self.callbacks.process(s, event)
def resize_event(self):
"""
        This method will call all functions connected to the
'resize_event' with a :class:`ResizeEvent`
"""
s = 'resize_event'
event = ResizeEvent(s, self)
self.callbacks.process(s, event)
def close_event(self, guiEvent=None):
"""
        This method will call all functions connected to the
'close_event' with a :class:`CloseEvent`
"""
s = 'close_event'
try:
event = CloseEvent(s, self, guiEvent=guiEvent)
self.callbacks.process(s, event)
except (TypeError, AttributeError):
pass
# Suppress the TypeError when the python session is being killed.
# It may be that a better solution would be a mechanism to
# disconnect all callbacks upon shutdown.
# AttributeError occurs on OSX with qt4agg upon exiting
# with an open window; 'callbacks' attribute no longer exists.
def key_press_event(self, key, guiEvent=None):
"""
        This method will call all functions connected to the
'key_press_event' with a :class:`KeyEvent`
"""
self._key = key
s = 'key_press_event'
event = KeyEvent(
s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
def key_release_event(self, key, guiEvent=None):
"""
        This method will call all functions connected to the
'key_release_event' with a :class:`KeyEvent`
"""
s = 'key_release_event'
event = KeyEvent(
s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._key = None
def pick_event(self, mouseevent, artist, **kwargs):
"""
This method will be called by artists who are picked and will
        fire off :class:`PickEvent` callbacks to registered listeners.
"""
s = 'pick_event'
event = PickEvent(s, self, mouseevent, artist, **kwargs)
self.callbacks.process(s, event)
def scroll_event(self, x, y, step, guiEvent=None):
"""
Backend derived classes should call this function on any
scroll wheel event. x,y are the canvas coords: 0,0 is lower,
left. button and key are as defined in MouseEvent.
        This method will call all functions connected to the
'scroll_event' with a :class:`MouseEvent` instance.
"""
if step >= 0:
self._button = 'up'
else:
self._button = 'down'
s = 'scroll_event'
mouseevent = MouseEvent(s, self, x, y, self._button, self._key,
step=step, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_press_event(self, x, y, button, dblclick=False, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button press. x,y are the canvas coords: 0,0 is lower, left.
button and key are as defined in :class:`MouseEvent`.
        This method will call all functions connected to the
'button_press_event' with a :class:`MouseEvent` instance.
"""
self._button = button
s = 'button_press_event'
mouseevent = MouseEvent(s, self, x, y, button, self._key,
dblclick=dblclick, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_release_event(self, x, y, button, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button release.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
        This method will call all functions connected to the
'button_release_event' with a :class:`MouseEvent` instance.
"""
s = 'button_release_event'
event = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._button = None
def motion_notify_event(self, x, y, guiEvent=None):
"""
Backend derived classes should call this function on any
motion-notify-event.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
        This method will call all functions connected to the
'motion_notify_event' with a :class:`MouseEvent` instance.
"""
self._lastx, self._lasty = x, y
s = 'motion_notify_event'
event = MouseEvent(s, self, x, y, self._button, self._key,
guiEvent=guiEvent)
self.callbacks.process(s, event)
def leave_notify_event(self, guiEvent=None):
"""
Backend derived classes should call this function when leaving
canvas
*guiEvent*
the native UI event that generated the mpl event
"""
self.callbacks.process('figure_leave_event', LocationEvent.lastevent)
LocationEvent.lastevent = None
self._lastx, self._lasty = None, None
def enter_notify_event(self, guiEvent=None, xy=None):
"""
Backend derived classes should call this function when entering
canvas
*guiEvent*
the native UI event that generated the mpl event
*xy*
the coordinate location of the pointer when the canvas is
entered
"""
if xy is not None:
x, y = xy
self._lastx, self._lasty = x, y
event = Event('figure_enter_event', self, guiEvent)
self.callbacks.process('figure_enter_event', event)
def idle_event(self, guiEvent=None):
"""Called when GUI is idle."""
s = 'idle_event'
event = IdleEvent(s, self, guiEvent=guiEvent)
self.callbacks.process(s, event)
def grab_mouse(self, ax):
"""
Set the child axes which are currently grabbing the mouse events.
Usually called by the widgets themselves.
It is an error to call this if the mouse is already grabbed by
another axes.
"""
if self.mouse_grabber not in (None, ax):
            raise RuntimeError('two different axes attempted to grab the mouse input')
self.mouse_grabber = ax
def release_mouse(self, ax):
"""
Release the mouse grab held by the axes, ax.
Usually called by the widgets.
        It is OK to call this even if *ax* doesn't currently have the
        mouse grab.
"""
if self.mouse_grabber is ax:
self.mouse_grabber = None
def draw(self, *args, **kwargs):
"""
Render the :class:`~matplotlib.figure.Figure`
"""
pass
def draw_idle(self, *args, **kwargs):
"""
        :meth:`draw` only if idle; defaults to draw but backends can override
"""
self.draw(*args, **kwargs)
def draw_cursor(self, event):
"""
Draw a cursor in the event.axes if inaxes is not None. Use
native GUI drawing for efficiency if possible
"""
pass
def get_width_height(self):
"""
Return the figure width and height in points or pixels
(depending on the backend), truncated to integers
"""
return int(self.figure.bbox.width), int(self.figure.bbox.height)
filetypes = {
'eps': 'Encapsulated Postscript',
'pdf': 'Portable Document Format',
'pgf': 'LaTeX PGF Figure',
'png': 'Portable Network Graphics',
'ps': 'Postscript',
'raw': 'Raw RGBA bitmap',
'rgba': 'Raw RGBA bitmap',
'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics'}
# All of these print_* functions do a lazy import because
# a) otherwise we'd have cyclical imports, since all of these
# classes inherit from FigureCanvasBase
# b) so we don't import a bunch of stuff the user may never use
    # TODO: these print_* throw ImportError when called from
# compare_images_decorator (decorators.py line 112)
# if the backend has not already been loaded earlier on. Simple trigger:
# >>> import matplotlib.tests.test_spines
# >>> list(matplotlib.tests.test_spines.test_spines_axes_positions())[0][0]()
def print_eps(self, *args, **kwargs):
from backends.backend_ps import FigureCanvasPS # lazy import
ps = self.switch_backends(FigureCanvasPS)
return ps.print_eps(*args, **kwargs)
def print_pdf(self, *args, **kwargs):
from backends.backend_pdf import FigureCanvasPdf # lazy import
pdf = self.switch_backends(FigureCanvasPdf)
return pdf.print_pdf(*args, **kwargs)
def print_pgf(self, *args, **kwargs):
from backends.backend_pgf import FigureCanvasPgf # lazy import
pgf = self.switch_backends(FigureCanvasPgf)
return pgf.print_pgf(*args, **kwargs)
def print_png(self, *args, **kwargs):
from backends.backend_agg import FigureCanvasAgg # lazy import
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_png(*args, **kwargs)
def print_ps(self, *args, **kwargs):
from backends.backend_ps import FigureCanvasPS # lazy import
ps = self.switch_backends(FigureCanvasPS)
return ps.print_ps(*args, **kwargs)
def print_raw(self, *args, **kwargs):
from backends.backend_agg import FigureCanvasAgg # lazy import
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_raw(*args, **kwargs)
print_bmp = print_rgba = print_raw
def print_svg(self, *args, **kwargs):
from backends.backend_svg import FigureCanvasSVG # lazy import
svg = self.switch_backends(FigureCanvasSVG)
return svg.print_svg(*args, **kwargs)
def print_svgz(self, *args, **kwargs):
from backends.backend_svg import FigureCanvasSVG # lazy import
svg = self.switch_backends(FigureCanvasSVG)
return svg.print_svgz(*args, **kwargs)
if _has_pil:
filetypes['jpg'] = 'Joint Photographic Experts Group'
filetypes['jpeg'] = filetypes['jpg']
def print_jpg(self, filename_or_obj, *args, **kwargs):
"""
Supported kwargs:
*quality*: The image quality, on a scale from 1 (worst) to
95 (best). The default is 95, if not given in the
matplotlibrc file in the savefig.jpeg_quality parameter.
Values above 95 should be avoided; 100 completely
disables the JPEG quantization stage.
*optimize*: If present, indicates that the encoder should
make an extra pass over the image in order to select
optimal encoder settings.
*progressive*: If present, indicates that this image
should be stored as a progressive JPEG file.
"""
from backends.backend_agg import FigureCanvasAgg # lazy import
agg = self.switch_backends(FigureCanvasAgg)
buf, size = agg.print_to_buffer()
if kwargs.pop("dryrun", False):
return
image = Image.frombuffer('RGBA', size, buf, 'raw', 'RGBA', 0, 1)
options = cbook.restrict_dict(kwargs, ['quality', 'optimize',
'progressive'])
if 'quality' not in options:
options['quality'] = rcParams['savefig.jpeg_quality']
return image.save(filename_or_obj, format='jpeg', **options)
print_jpeg = print_jpg
filetypes['tif'] = filetypes['tiff'] = 'Tagged Image File Format'
def print_tif(self, filename_or_obj, *args, **kwargs):
from backends.backend_agg import FigureCanvasAgg # lazy import
agg = self.switch_backends(FigureCanvasAgg)
buf, size = agg.print_to_buffer()
if kwargs.pop("dryrun", False):
return
image = Image.frombuffer('RGBA', size, buf, 'raw', 'RGBA', 0, 1)
dpi = (self.figure.dpi, self.figure.dpi)
return image.save(filename_or_obj, format='tiff',
dpi=dpi)
print_tiff = print_tif
def get_supported_filetypes(self):
"""Return dict of savefig file formats supported by this backend"""
return self.filetypes
def get_supported_filetypes_grouped(self):
"""Return a dict of savefig file formats supported by this backend,
where the keys are a file type name, such as 'Joint Photographic
Experts Group', and the values are a list of filename extensions used
for that filetype, such as ['jpg', 'jpeg']."""
groupings = {}
for ext, name in self.filetypes.iteritems():
groupings.setdefault(name, []).append(ext)
groupings[name].sort()
return groupings
def _get_print_method(self, format):
method_name = 'print_%s' % format
# check for registered backends
if format in _backend_d:
backend_class = _backend_d[format]
def _print_method(*args, **kwargs):
backend = self.switch_backends(backend_class)
print_method = getattr(backend, method_name)
return print_method(*args, **kwargs)
return _print_method
formats = self.get_supported_filetypes()
if (format not in formats or not hasattr(self, method_name)):
formats = sorted(formats)
raise ValueError(
'Format "%s" is not supported.\n'
'Supported formats: '
'%s.' % (format, ', '.join(formats)))
return getattr(self, method_name)
def print_figure(self, filename, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', format=None, **kwargs):
"""
Render the figure to hardcopy. Set the figure patch face and edge
colors. This is useful because some of the GUIs have a gray figure
face color background and you'll probably want to override this on
hardcopy.
Arguments are:
*filename*
can also be a file object on image backends
*dpi*
the dots per inch to save the figure in; if None, use savefig.dpi
*facecolor*
the facecolor of the figure
*edgecolor*
the edgecolor of the figure
        *orientation*
            'landscape' | 'portrait' (not supported on all backends;
            currently only applies to PostScript printing)
*format*
when set, forcibly set the file format to save to
*bbox_inches*
Bbox in inches. Only the given portion of the figure is
saved. If 'tight', try to figure out the tight bbox of
the figure. If None, use savefig.bbox
*pad_inches*
Amount of padding around the figure when bbox_inches is
'tight'. If None, use savefig.pad_inches
*bbox_extra_artists*
A list of extra artists that will be considered when the
tight bbox is calculated.
"""
if format is None:
# get format from filename, or from backend's default filetype
if cbook.is_string_like(filename):
format = os.path.splitext(filename)[1][1:]
if format is None or format == '':
format = self.get_default_filetype()
if cbook.is_string_like(filename):
filename = filename.rstrip('.') + '.' + format
format = format.lower()
print_method = self._get_print_method(format)
if dpi is None:
dpi = rcParams['savefig.dpi']
origDPI = self.figure.dpi
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.dpi = dpi
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
bbox_inches = kwargs.pop("bbox_inches", None)
if bbox_inches is None:
bbox_inches = rcParams['savefig.bbox']
if bbox_inches:
# call adjust_bbox to save only the given area
if bbox_inches == "tight":
# when bbox_inches == "tight", it saves the figure
# twice. The first save command is just to estimate
# the bounding box of the figure. A stringIO object is
# used as a temporary file object, but it causes a
# problem for some backends (ps backend with
# usetex=True) if they expect a filename, not a
# file-like object. As I think it is best to change
# the backend to support file-like object, i'm going
# to leave it as it is. However, a better solution
# than stringIO seems to be needed. -JJL
#result = getattr(self, method_name)
result = print_method(
io.BytesIO(),
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
orientation=orientation,
dryrun=True,
**kwargs)
renderer = self.figure._cachedRenderer
bbox_inches = self.figure.get_tightbbox(renderer)
bbox_artists = kwargs.pop("bbox_extra_artists", None)
if bbox_artists is None:
bbox_artists = self.figure.get_default_bbox_extra_artists()
bbox_filtered = []
for a in bbox_artists:
bbox = a.get_window_extent(renderer)
if a.get_clip_on():
clip_box = a.get_clip_box()
if clip_box is not None:
bbox = Bbox.intersection(bbox, clip_box)
clip_path = a.get_clip_path()
if clip_path is not None and bbox is not None:
clip_path = clip_path.get_fully_transformed_path()
bbox = Bbox.intersection(bbox,
clip_path.get_extents())
if bbox is not None and (bbox.width != 0 or
bbox.height != 0):
bbox_filtered.append(bbox)
if bbox_filtered:
_bbox = Bbox.union(bbox_filtered)
trans = Affine2D().scale(1.0 / self.figure.dpi)
bbox_extra = TransformedBbox(_bbox, trans)
bbox_inches = Bbox.union([bbox_inches, bbox_extra])
pad = kwargs.pop("pad_inches", None)
if pad is None:
pad = rcParams['savefig.pad_inches']
bbox_inches = bbox_inches.padded(pad)
restore_bbox = tight_bbox.adjust_bbox(self.figure, format,
bbox_inches)
_bbox_inches_restore = (bbox_inches, restore_bbox)
else:
_bbox_inches_restore = None
self._is_saving = True
try:
#result = getattr(self, method_name)(
result = print_method(
filename,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
orientation=orientation,
bbox_inches_restore=_bbox_inches_restore,
**kwargs)
finally:
if bbox_inches and restore_bbox:
restore_bbox()
self.figure.dpi = origDPI
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
self.figure.set_canvas(self)
self._is_saving = False
#self.figure.canvas.draw() ## seems superfluous
return result
def get_default_filetype(self):
"""
Get the default savefig file format as specified in rcParam
``savefig.format``. Returned string excludes period. Overridden
in backends that only support a single file type.
"""
return rcParams['savefig.format']
def get_window_title(self):
"""
Get the title text of the window containing the figure.
Return None if there is no window (eg, a PS backend).
"""
if hasattr(self, "manager"):
return self.manager.get_window_title()
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect if there is no window (eg, a PS backend).
"""
if hasattr(self, "manager"):
self.manager.set_window_title(title)
def get_default_filename(self):
"""
Return a string, which includes extension, suitable for use as
a default filename.
"""
default_filename = self.get_window_title() or 'image'
default_filename = default_filename.lower().replace(' ', '_')
return default_filename + '.' + self.get_default_filetype()
def switch_backends(self, FigureCanvasClass):
"""
Instantiate an instance of FigureCanvasClass
This is used for backend switching, eg, to instantiate a
FigureCanvasPS from a FigureCanvasGTK. Note, deep copying is
not done, so any changes to one of the instances (eg, setting
figure size or line props), will be reflected in the other
"""
newCanvas = FigureCanvasClass(self.figure)
newCanvas._is_saving = self._is_saving
return newCanvas
def mpl_connect(self, s, func):
"""
Connect event with string *s* to *func*. The signature of *func* is::
def func(event)
where event is a :class:`matplotlib.backend_bases.Event`. The
following events are recognized
- 'button_press_event'
- 'button_release_event'
- 'draw_event'
- 'key_press_event'
- 'key_release_event'
- 'motion_notify_event'
- 'pick_event'
- 'resize_event'
- 'scroll_event'
- 'figure_enter_event',
- 'figure_leave_event',
- 'axes_enter_event',
- 'axes_leave_event'
- 'close_event'
For the location events (button and key press/release), if the
mouse is over the axes, the variable ``event.inaxes`` will be
        set to the :class:`~matplotlib.axes.Axes` instance the event is
over, and additionally, the variables ``event.xdata`` and
``event.ydata`` will be defined. This is the mouse location
in data coords. See
:class:`~matplotlib.backend_bases.KeyEvent` and
:class:`~matplotlib.backend_bases.MouseEvent` for more info.
Return value is a connection id that can be used with
:meth:`~matplotlib.backend_bases.Event.mpl_disconnect`.
Example usage::
def on_press(event):
print('you pressed', event.button, event.xdata, event.ydata)
cid = canvas.mpl_connect('button_press_event', on_press)
"""
return self.callbacks.connect(s, func)
def mpl_disconnect(self, cid):
"""
Disconnect callback id cid
Example usage::
cid = canvas.mpl_connect('button_press_event', on_press)
#...later
canvas.mpl_disconnect(cid)
"""
return self.callbacks.disconnect(cid)
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of
:class:`backend_bases.Timer`. This is useful for getting periodic
events through the backend's native event loop. Implemented only for
backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerBase(*args, **kwargs)
def flush_events(self):
"""
Flush the GUI events for the figure. Implemented only for
backends with GUIs.
"""
raise NotImplementedError
def start_event_loop(self, timeout):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This is implemented only for backends with GUIs.
"""
raise NotImplementedError
def stop_event_loop(self):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
This is implemented only for backends with GUIs.
"""
raise NotImplementedError
def start_event_loop_default(self, timeout=0):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This function provides default event loop functionality based
on time.sleep that is meant to be used until event loop
functions for each of the GUI backends can be written. As
        such, it raises a deprecation warning.
Call signature::
          start_event_loop_default(self, timeout=0)
This call blocks until a callback function triggers
stop_event_loop() or *timeout* is reached. If *timeout* is
<=0, never timeout.
"""
str = "Using default event loop until function specific"
str += " to this GUI is implemented"
warnings.warn(str, mplDeprecation)
if timeout <= 0:
timeout = np.inf
timestep = 0.01
counter = 0
self._looping = True
while self._looping and counter * timestep < timeout:
self.flush_events()
time.sleep(timestep)
counter += 1
def stop_event_loop_default(self):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
Call signature::
stop_event_loop_default(self)
"""
self._looping = False
def key_press_handler(event, canvas, toolbar=None):
"""
Implement the default mpl key bindings for the canvas and toolbar
described at :ref:`key-event-handling`
*event*
a :class:`KeyEvent` instance
*canvas*
a :class:`FigureCanvasBase` instance
*toolbar*
a :class:`NavigationToolbar2` instance
"""
# these bindings happen whether you are over an axes or not
if event.key is None:
return
# Load key-mappings from your matplotlibrc file.
fullscreen_keys = rcParams['keymap.fullscreen']
home_keys = rcParams['keymap.home']
back_keys = rcParams['keymap.back']
forward_keys = rcParams['keymap.forward']
pan_keys = rcParams['keymap.pan']
zoom_keys = rcParams['keymap.zoom']
save_keys = rcParams['keymap.save']
quit_keys = rcParams['keymap.quit']
grid_keys = rcParams['keymap.grid']
toggle_yscale_keys = rcParams['keymap.yscale']
toggle_xscale_keys = rcParams['keymap.xscale']
    all_keys = rcParams['keymap.all_axes']
# toggle fullscreen mode (default key 'f')
if event.key in fullscreen_keys:
canvas.manager.full_screen_toggle()
    # quit the figure (default key 'ctrl+w')
if event.key in quit_keys:
Gcf.destroy_fig(canvas.figure)
if toolbar is not None:
# home or reset mnemonic (default key 'h', 'home' and 'r')
if event.key in home_keys:
toolbar.home()
# forward / backward keys to enable left handed quick navigation
# (default key for backward: 'left', 'backspace' and 'c')
elif event.key in back_keys:
toolbar.back()
# (default key for forward: 'right' and 'v')
elif event.key in forward_keys:
toolbar.forward()
# pan mnemonic (default key 'p')
elif event.key in pan_keys:
toolbar.pan()
# zoom mnemonic (default key 'o')
elif event.key in zoom_keys:
toolbar.zoom()
# saving current figure (default key 's')
elif event.key in save_keys:
toolbar.save_figure()
if event.inaxes is None:
return
# these bindings require the mouse to be over an axes to trigger
# switching on/off a grid in current axes (default key 'g')
if event.key in grid_keys:
event.inaxes.grid()
canvas.draw()
    # toggle scaling of y-axes between 'log' and 'linear' (default key 'l')
elif event.key in toggle_yscale_keys:
ax = event.inaxes
scale = ax.get_yscale()
if scale == 'log':
ax.set_yscale('linear')
ax.figure.canvas.draw()
elif scale == 'linear':
ax.set_yscale('log')
ax.figure.canvas.draw()
    # toggle scaling of x-axes between 'log' and 'linear' (default key 'k')
elif event.key in toggle_xscale_keys:
ax = event.inaxes
scalex = ax.get_xscale()
if scalex == 'log':
ax.set_xscale('linear')
ax.figure.canvas.draw()
elif scalex == 'linear':
ax.set_xscale('log')
ax.figure.canvas.draw()
    elif (event.key.isdigit() and event.key != '0') or event.key in all_keys:
        # keys in 'all_keys' enable all axes (default key 'a');
        # otherwise, if the key is a number, enable only the axes with
        # that index when the event was raised inside it
        if not (event.key in all_keys):
n = int(event.key) - 1
for i, a in enumerate(canvas.figure.get_axes()):
# consider axes, in which the event was raised
# FIXME: Why only this axes?
if event.x is not None and event.y is not None \
and a.in_axes(event):
                if event.key in all_keys:
a.set_navigate(True)
else:
a.set_navigate(i == n)
class NonGuiException(Exception):
pass
class FigureManagerBase:
"""
Helper class for pyplot mode, wraps everything up into a neat bundle
    Public attributes:
*canvas*
A :class:`FigureCanvasBase` instance
*num*
The figure number
"""
def __init__(self, canvas, num):
self.canvas = canvas
canvas.manager = self # store a pointer to parent
self.num = num
self.key_press_handler_id = self.canvas.mpl_connect('key_press_event',
self.key_press)
"""
The returned id from connecting the default key handler via
        :meth:`FigureCanvasBase.mpl_connect`.
To disable default key press handling::
manager, canvas = figure.canvas.manager, figure.canvas
canvas.mpl_disconnect(manager.key_press_handler_id)
"""
def show(self):
"""
For GUI backends, show the figure window and redraw.
For non-GUI backends, raise an exception to be caught
by :meth:`~matplotlib.figure.Figure.show`, for an
optional warning.
"""
raise NonGuiException()
def destroy(self):
pass
def full_screen_toggle(self):
pass
def resize(self, w, h):
""""For gui backends, resize the window (in pixels)."""
pass
def key_press(self, event):
"""
Implement the default mpl key bindings defined at
:ref:`key-event-handling`
"""
key_press_handler(event, self.canvas, self.canvas.toolbar)
def show_popup(self, msg):
"""
Display message in a popup -- GUI only
"""
pass
def get_window_title(self):
"""
Get the title text of the window containing the figure.
Return None for non-GUI backends (eg, a PS backend).
"""
return 'image'
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect for non-GUI backends (eg, a PS backend).
"""
pass
class Cursors:
# this class is only used as a simple namespace
HAND, POINTER, SELECT_REGION, MOVE = range(4)
cursors = Cursors()
class NavigationToolbar2(object):
"""
Base class for the navigation cursor, version 2
backends must implement a canvas that handles connections for
'button_press_event' and 'button_release_event'. See
:meth:`FigureCanvasBase.mpl_connect` for more information
They must also define
:meth:`save_figure`
save the current figure
:meth:`set_cursor`
if you want the pointer icon to change
:meth:`_init_toolbar`
create your toolbar widget
:meth:`draw_rubberband` (optional)
draw the zoom to rect "rubberband" rectangle
:meth:`press` (optional)
whenever a mouse button is pressed, you'll be notified with
the event
:meth:`release` (optional)
whenever a mouse button is released, you'll be notified with
the event
:meth:`dynamic_update` (optional)
dynamically update the window while navigating
:meth:`set_message` (optional)
display message
:meth:`set_history_buttons` (optional)
you can change the history back / forward buttons to
indicate disabled / enabled state.
That's it, we'll do the rest!
"""
# list of toolitems to add to the toolbar, format is:
# (
# text, # the text of the button (often not visible to users)
# tooltip_text, # the tooltip shown on hover (where possible)
# image_file, # name of the image for the button (without the extension)
# name_of_method, # name of the method in NavigationToolbar2 to call
# )
toolitems = (
('Home', 'Reset original view', 'home', 'home'),
('Back', 'Back to previous view', 'back', 'back'),
('Forward', 'Forward to next view', 'forward', 'forward'),
(None, None, None, None),
('Pan', 'Pan axes with left mouse, zoom with right', 'move', 'pan'),
('Zoom', 'Zoom to rectangle', 'zoom_to_rect', 'zoom'),
(None, None, None, None),
('Subplots', 'Configure subplots', 'subplots', 'configure_subplots'),
('Save', 'Save the figure', 'filesave', 'save_figure'),
)
def __init__(self, canvas):
self.canvas = canvas
canvas.toolbar = self
# a dict from axes index to a list of view limits
self._views = cbook.Stack()
self._positions = cbook.Stack() # stack of subplot positions
self._xypress = None # the location and axis info at the time
# of the press
self._idPress = None
self._idRelease = None
self._active = None
self._lastCursor = None
self._init_toolbar()
self._idDrag = self.canvas.mpl_connect(
'motion_notify_event', self.mouse_move)
self._ids_zoom = []
self._zoom_mode = None
self._button_pressed = None # determined by the button pressed
# at start
self.mode = '' # a mode string for the status bar
self.set_history_buttons()
def set_message(self, s):
"""Display a message on toolbar or in status bar"""
pass
def back(self, *args):
"""move back up the view lim stack"""
self._views.back()
self._positions.back()
self.set_history_buttons()
self._update_view()
def dynamic_update(self):
pass
def draw_rubberband(self, event, x0, y0, x1, y1):
"""Draw a rectangle rubberband to indicate zoom limits"""
pass
def forward(self, *args):
"""Move forward in the view lim stack"""
self._views.forward()
self._positions.forward()
self.set_history_buttons()
self._update_view()
def home(self, *args):
"""Restore the original view"""
self._views.home()
self._positions.home()
self.set_history_buttons()
self._update_view()
def _init_toolbar(self):
"""
This is where you actually build the GUI widgets (called by
__init__). The icons ``home.xpm``, ``back.xpm``, ``forward.xpm``,
``hand.xpm``, ``zoom_to_rect.xpm`` and ``filesave.xpm`` are standard
across backends (there are ppm versions in CVS also).
You just need to set the callbacks
home : self.home
back : self.back
forward : self.forward
hand : self.pan
zoom_to_rect : self.zoom
filesave : self.save_figure
You only need to define the last one - the others are in the base
class implementation.
"""
raise NotImplementedError
def mouse_move(self, event):
if not event.inaxes or not self._active:
if self._lastCursor != cursors.POINTER:
self.set_cursor(cursors.POINTER)
self._lastCursor = cursors.POINTER
else:
if self._active == 'ZOOM':
if self._lastCursor != cursors.SELECT_REGION:
self.set_cursor(cursors.SELECT_REGION)
self._lastCursor = cursors.SELECT_REGION
elif (self._active == 'PAN' and
self._lastCursor != cursors.MOVE):
self.set_cursor(cursors.MOVE)
self._lastCursor = cursors.MOVE
if event.inaxes and event.inaxes.get_navigate():
try:
s = event.inaxes.format_coord(event.xdata, event.ydata)
except (ValueError, OverflowError):
pass
else:
if len(self.mode):
self.set_message('%s, %s' % (self.mode, s))
else:
self.set_message(s)
else:
self.set_message(self.mode)
def pan(self, *args):
"""Activate the pan/zoom tool. pan with left button, zoom with right"""
# set the pointer icon and button press funcs to the
# appropriate callbacks
if self._active == 'PAN':
self._active = None
else:
self._active = 'PAN'
if self._idPress is not None:
self._idPress = self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect(
'button_press_event', self.press_pan)
self._idRelease = self.canvas.mpl_connect(
'button_release_event', self.release_pan)
self.mode = 'pan/zoom'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def press(self, event):
"""Called whenver a mouse button is pressed."""
pass
def press_pan(self, event):
"""the press mouse button in pan/zoom mode callback"""
if event.button == 1:
self._button_pressed = 1
elif event.button == 3:
self._button_pressed = 3
else:
self._button_pressed = None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty():
self.push_current()
self._xypress = []
for i, a in enumerate(self.canvas.figure.get_axes()):
if (x is not None and y is not None and a.in_axes(event) and
a.get_navigate() and a.can_pan()):
a.start_pan(x, y, event.button)
self._xypress.append((a, i))
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag = self.canvas.mpl_connect('motion_notify_event',
self.drag_pan)
self.press(event)
def press_zoom(self, event):
"""the press mouse button in zoom to rect mode callback"""
# If we're already in the middle of a zoom, pressing another
# button works to "cancel"
if self._ids_zoom != []:
for zoom_id in self._ids_zoom:
self.canvas.mpl_disconnect(zoom_id)
self.release(event)
self.draw()
self._xypress = None
self._button_pressed = None
self._ids_zoom = []
return
if event.button == 1:
self._button_pressed = 1
elif event.button == 3:
self._button_pressed = 3
else:
self._button_pressed = None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty():
self.push_current()
self._xypress = []
for i, a in enumerate(self.canvas.figure.get_axes()):
if (x is not None and y is not None and a.in_axes(event) and
a.get_navigate() and a.can_zoom()):
self._xypress.append((x, y, a, i, a.viewLim.frozen(),
a.transData.frozen()))
id1 = self.canvas.mpl_connect('motion_notify_event', self.drag_zoom)
id2 = self.canvas.mpl_connect('key_press_event',
self._switch_on_zoom_mode)
id3 = self.canvas.mpl_connect('key_release_event',
self._switch_off_zoom_mode)
self._ids_zoom = id1, id2, id3
self._zoom_mode = event.key
self.press(event)
def _switch_on_zoom_mode(self, event):
self._zoom_mode = event.key
self.mouse_move(event)
def _switch_off_zoom_mode(self, event):
self._zoom_mode = None
self.mouse_move(event)
def push_current(self):
"""push the current view limits and position onto the stack"""
lims = []
pos = []
for a in self.canvas.figure.get_axes():
xmin, xmax = a.get_xlim()
ymin, ymax = a.get_ylim()
lims.append((xmin, xmax, ymin, ymax))
# Store both the original and modified positions
pos.append((
a.get_position(True).frozen(),
a.get_position().frozen()))
self._views.push(lims)
self._positions.push(pos)
self.set_history_buttons()
def release(self, event):
"""this will be called whenever mouse button is released"""
pass
def release_pan(self, event):
"""the release mouse button callback in pan/zoom mode"""
if self._button_pressed is None:
return
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag = self.canvas.mpl_connect(
'motion_notify_event', self.mouse_move)
for a, ind in self._xypress:
a.end_pan()
if not self._xypress:
return
self._xypress = []
self._button_pressed = None
self.push_current()
self.release(event)
self.draw()
def drag_pan(self, event):
"""the drag callback in pan/zoom mode"""
for a, ind in self._xypress:
#safer to use the recorded button at the press than current button:
            #multiple buttons can get pressed during motion...
a.drag_pan(self._button_pressed, event.key, event.x, event.y)
self.dynamic_update()
def drag_zoom(self, event):
"""the drag callback in zoom mode"""
if self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans = self._xypress[0]
# adjust x, last, y, last
x1, y1, x2, y2 = a.bbox.extents
x, lastx = max(min(x, lastx), x1), min(max(x, lastx), x2)
y, lasty = max(min(y, lasty), y1), min(max(y, lasty), y2)
if self._zoom_mode == "x":
x1, y1, x2, y2 = a.bbox.extents
y, lasty = y1, y2
elif self._zoom_mode == "y":
x1, y1, x2, y2 = a.bbox.extents
x, lastx = x1, x2
self.draw_rubberband(event, x, y, lastx, lasty)
def release_zoom(self, event):
"""the release mouse button callback in zoom to rect mode"""
for zoom_id in self._ids_zoom:
self.canvas.mpl_disconnect(zoom_id)
self._ids_zoom = []
if not self._xypress:
return
last_a = []
for cur_xypress in self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans = cur_xypress
# ignore singular clicks - 5 pixels is a threshold
if abs(x - lastx) < 5 or abs(y - lasty) < 5:
self._xypress = None
self.release(event)
self.draw()
return
x0, y0, x1, y1 = lim.extents
# zoom to rect
inverse = a.transData.inverted()
lastx, lasty = inverse.transform_point((lastx, lasty))
x, y = inverse.transform_point((x, y))
Xmin, Xmax = a.get_xlim()
Ymin, Ymax = a.get_ylim()
# detect twinx,y axes and avoid double zooming
twinx, twiny = False, False
if last_a:
for la in last_a:
if a.get_shared_x_axes().joined(a, la):
twinx = True
if a.get_shared_y_axes().joined(a, la):
twiny = True
last_a.append(a)
if twinx:
x0, x1 = Xmin, Xmax
else:
if Xmin < Xmax:
if x < lastx:
x0, x1 = x, lastx
else:
x0, x1 = lastx, x
if x0 < Xmin:
x0 = Xmin
if x1 > Xmax:
x1 = Xmax
else:
if x > lastx:
x0, x1 = x, lastx
else:
x0, x1 = lastx, x
if x0 > Xmin:
x0 = Xmin
if x1 < Xmax:
x1 = Xmax
if twiny:
y0, y1 = Ymin, Ymax
else:
if Ymin < Ymax:
if y < lasty:
y0, y1 = y, lasty
else:
y0, y1 = lasty, y
if y0 < Ymin:
y0 = Ymin
if y1 > Ymax:
y1 = Ymax
else:
if y > lasty:
y0, y1 = y, lasty
else:
y0, y1 = lasty, y
if y0 > Ymin:
y0 = Ymin
if y1 < Ymax:
y1 = Ymax
if self._button_pressed == 1:
if self._zoom_mode == "x":
a.set_xlim((x0, x1))
elif self._zoom_mode == "y":
a.set_ylim((y0, y1))
else:
a.set_xlim((x0, x1))
a.set_ylim((y0, y1))
elif self._button_pressed == 3:
if a.get_xscale() == 'log':
alpha = np.log(Xmax / Xmin) / np.log(x1 / x0)
rx1 = pow(Xmin / x0, alpha) * Xmin
rx2 = pow(Xmax / x0, alpha) * Xmin
else:
alpha = (Xmax - Xmin) / (x1 - x0)
rx1 = alpha * (Xmin - x0) + Xmin
rx2 = alpha * (Xmax - x0) + Xmin
if a.get_yscale() == 'log':
alpha = np.log(Ymax / Ymin) / np.log(y1 / y0)
ry1 = pow(Ymin / y0, alpha) * Ymin
ry2 = pow(Ymax / y0, alpha) * Ymin
else:
alpha = (Ymax - Ymin) / (y1 - y0)
ry1 = alpha * (Ymin - y0) + Ymin
ry2 = alpha * (Ymax - y0) + Ymin
if self._zoom_mode == "x":
a.set_xlim((rx1, rx2))
elif self._zoom_mode == "y":
a.set_ylim((ry1, ry2))
else:
a.set_xlim((rx1, rx2))
a.set_ylim((ry1, ry2))
self.draw()
self._xypress = None
self._button_pressed = None
self._zoom_mode = None
self.push_current()
self.release(event)
def draw(self):
"""Redraw the canvases, update the locators"""
for a in self.canvas.figure.get_axes():
xaxis = getattr(a, 'xaxis', None)
yaxis = getattr(a, 'yaxis', None)
locators = []
if xaxis is not None:
locators.append(xaxis.get_major_locator())
locators.append(xaxis.get_minor_locator())
if yaxis is not None:
locators.append(yaxis.get_major_locator())
locators.append(yaxis.get_minor_locator())
for loc in locators:
loc.refresh()
self.canvas.draw()
def _update_view(self):
"""Update the viewlim and position from the view and
position stack for each axes
"""
lims = self._views()
if lims is None:
return
pos = self._positions()
if pos is None:
return
for i, a in enumerate(self.canvas.figure.get_axes()):
xmin, xmax, ymin, ymax = lims[i]
a.set_xlim((xmin, xmax))
a.set_ylim((ymin, ymax))
# Restore both the original and modified positions
a.set_position(pos[i][0], 'original')
a.set_position(pos[i][1], 'active')
self.draw()
def save_figure(self, *args):
"""Save the current figure"""
raise NotImplementedError
def set_cursor(self, cursor):
"""
Set the current cursor to one of the :class:`Cursors`
enums values
"""
pass
def update(self):
"""Reset the axes stack"""
self._views.clear()
self._positions.clear()
self.set_history_buttons()
def zoom(self, *args):
"""Activate zoom to rect mode"""
if self._active == 'ZOOM':
self._active = None
else:
self._active = 'ZOOM'
if self._idPress is not None:
self._idPress = self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect('button_press_event',
self.press_zoom)
self._idRelease = self.canvas.mpl_connect('button_release_event',
self.release_zoom)
self.mode = 'zoom rect'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def set_history_buttons(self):
"""Enable or disable back/forward button"""
pass
| unlicense | 6,469,178,272,311,308,000 | 32.419063 | 81 | 0.562338 | false |
artdent/jgments | src/com/google/jgments/extract.py | 1 | 13331 | #!/usr/bin/python
#
# Copyright 2010 Google Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tool to create Java classes from Pygments lexers and token lists.
This tool aims to support most descendants of pygments.lexers.RegexLexer
that do not override get_tokens_unprocessed to do something fancy.
It requires a patched version of pygments that accepts the _record_only
attribute on RegexLexer instances.
Limitations:
- The using() token action does not support passing extra kwargs to
the LanguageDefinition constructor.
- Regex modes other than multiline are not supported.
- The translation between Python and Java regular expressions is ad-hoc
and imperfect. It might contain as-yet-undetected bugs.
The highest level interface to this module consists of the Write* functions
that output Java source code. The remaining non-private functions form the
slightly lower-level programmatic interface.
The command-line interface is usable standalone or via the Google build system.
"""
import fnmatch
import os
import re
import sys
# Suppress warnings about unusual import order.
# pylint: disable-msg=C6204,C6205,W0611
try:
import google3
from google3.third_party.java_src.jgments.java.com.google.jgments import (
lexers, youstillhavetwoproblems)
from google3.pyglib import iterlib
from google3.pyglib import resources
except ImportError:
google3 = None
import lexers
import youstillhavetwoproblems
from stubs import iterlib
from stubs import resources
import mako.template
# Import pygments after the lexers module does its monkeypatching.
import pygments.formatters.html
import pygments.lexer
import pygments.token
assert pygments.lexer.bygroups.__module__ == 'lexers', 'Pygments was not monkeypatched!'
_DEFAULT_PACKAGE = 'com.google.jgments.syntax'
if google3:
_DEFAULT_BASEDIR = 'third_party/java_src/jgments/java'
_TEMPLATES_DIR = 'google3/%s/com/google/jgments' % _DEFAULT_BASEDIR
else:
_DEFAULT_BASEDIR = 'build/java'
_TEMPLATES_DIR = 'java/com/google/jgments'
def _EscapeForString(s):
"""Escape string contents for safe inclusion in a double-quoted string."""
return s.replace('\\', '\\\\').replace('"', r'\"')
def _JavaLexerName(lexer_cls_name):
"""Return the name in Java of the given lexer class name."""
assert '.' not in lexer_cls_name, \
'Lexer class name must not refer to the enclosing module.'
return lexer_cls_name + 'Syntax'
class _ProcessedTokenMatcher(object):
"""Translates token matcher tuples into Java syntax."""
def __init__(self, matcher_tuple, lexer):
"""Parses and converts a token matcher tuple.
Args:
matcher_tuple: a tuple of the form (regex, token action, state action),
with the last member being optional. This is the sort of tuple contained
in the _tokens dictionary of a preprocessed RegexLexer subclass.
lexer: the RegexLexer instance being processed.
"""
if len(matcher_tuple) == 3:
regex, token_action, state_action = matcher_tuple
elif len(matcher_tuple) == 2:
regex, token_action = matcher_tuple
state_action = None
else:
raise RuntimeError('Wrong number of args in token matcher tuple %s'
% matcher_tuple)
self._lexer = lexer
self.regex = self._ProcessRegex(regex)
self.token_action = self._ProcessTokenAction(token_action)
self.state_action = self._ProcessStateAction(state_action)
def __str__(self):
return 'ProcessedTokenMatcher<%r, %r, %r>' % (
self.regex, self.token_action, self.state_action)
def _TokenRef(self, token):
"""Formats a Pygments token as a reference to a member of a Java enum."""
return 'Token.' + _FormatToken(token)
def _ProcessTokenAction(self, action):
"""Convert a token action into Java syntax."""
if isinstance(action, pygments.lexer._TokenType):
return 'TokenActions.singleToken(%s)' % (self._TokenRef(action,))
elif isinstance(action, pygments.lexer.RegexLexerMeta):
return '%s.INSTANCE' % _JavaLexerName(action.name)
elif isinstance(action, tuple):
fn, args = action
if fn == 'using':
assert len(args) == 1
return self._ProcessUsing(args[0])
elif fn == 'bygroups':
return self._ProcessBygroups(args)
raise RuntimeError('Unknown token action %s' % action)
def _ProcessUsing(self, delegate):
if delegate == pygments.lexer.this:
return 'USING_THIS'
else:
return '%s.USING_THIS' % _JavaLexerName(delegate.name)
def _ProcessBygroups(self, args):
if iterlib.All(isinstance(arg, pygments.token._TokenType)
for arg in args):
# Simple case: avoid the extra indirection when the action
# for all groups is to yield a single token.
args = [self._TokenRef(arg) for arg in args]
else:
args = [self._ProcessTokenAction(arg) for arg in args]
# Capitalize "byGroups" per the Java convention.
return 'TokenActions.byGroups(%s)' % ', '.join(args)
def _ProcessLexerName(self, lexer_name):
if lexer_name not in lexers.ALL:
raise RuntimeError('No lexer available for %s' % lexer_name)
return '"%s"' % lexer_name
def _ProcessStateAction(self, action):
"""Converts a state transition action into Java syntax."""
if not action:
return 'StateActions.NOOP'
elif isinstance(action, tuple):
if len(action) == 1:
# A 1-item tuple is the same as just performing the action itself.
return self._ProcessStateAction(action[0])
return 'StateActions.multiple(%s)' % ', '.join(
self._ProcessStateAction(sub_action) for sub_action in action)
elif isinstance(action, int):
return 'StateActions.pop(%d)' % -action
elif action == '#pop':
return 'StateActions.pop(1)'
elif action == '#push':
return 'StateActions.DUPLICATE_TOP'
elif isinstance(action, str):
return 'StateActions.push(State.%s)' % _FormatState(action)
raise RuntimeError('Unknown action %s' % action)
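  # e.g. (sketch): the Pygments action ('#pop', '#pop') translates to
  # StateActions.multiple(StateActions.pop(1), StateActions.pop(1)).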
def _ProcessRegex(self, regex):
"""Converts a regular expression to java syntax."""
return _EscapeForString(youstillhavetwoproblems.to_java(regex))
class _RecordingLexerMeta(pygments.lexer.RegexLexerMeta):
def _process_regex(cls, regex, rflags):
return regex
def _process_token(cls, token):
return token
def ExtractStates(lexer_cls):
"""Extracts the state dictionary from a pygments lexer class."""
class RecordingLexer(lexer_cls):
__metaclass__ = _RecordingLexerMeta
# Instantiating the lexer takes the tokens attribute, preprocesses it,
# and produces a _tokens attribute that we can munge.
lexer = RecordingLexer()
states = {}
for state, matchers in lexer._tokens.items():
states[_FormatState(state)] = [
_ProcessedTokenMatcher(matcher_tuple, lexer)
for matcher_tuple in matchers]
return states
def _GlobToRegex(glob):
"""Converts a shell glob to a regular expression."""
# fnmatch.translate adds '$' or '\Z(?ms)' (on python >= 2.6)
# to the end of the regex.
return fnmatch.translate(glob).rstrip('$').replace(r'\Z(?ms)', '')
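# e.g. (sketch): _GlobToRegex('*.py') yields roughly '.*\\.py', ready to be
# OR-ed together by ConvertFilenames below.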
def ConvertFilenames(filenames):
"""Converts a list of file name globs into single regex."""
# The regexes returned by fnmatch.translate are simple enough that the
# escaping used for token regexes is not necessary here.
return _EscapeForString('(%s)$' % (
'|'.join(_GlobToRegex(glob) for glob in filenames)))
def AllTokens():
"""Retrieves all descendants of pygments.token.Token."""
def Traverse(token):
for tok in token.subtypes:
yield tok
for sub_token in Traverse(tok):
yield sub_token
return sorted(Traverse(pygments.token.Token))
def _FormatToken(token):
"""Converts a Pythonic token name into a Java-friendly constant name."""
  assert str(token).startswith('Token.'), 'Expected token, found %s' % token
return str(token)[len('Token.'):].replace('.', '_').upper()
def _FormatState(state):
return state.replace('-', '_').upper()
def WriteTokens(config):
"""Converts the list of Pygments tokens into a Java enum.
Args:
config: an OutputConfiguration object.
"""
outfile = config.OutputFile('Token')
all_tokens = AllTokens()
tokens = [_FormatToken(token) for token in all_tokens]
short_names = [pygments.formatters.html._get_ttype_class(token)
for token in all_tokens]
template = mako.template.Template(
resources.GetResource(os.path.join(_TEMPLATES_DIR, 'tokens.mako')))
outfile.write(template.render(tokens=tokens, short_names=short_names,
package=config.package))
def WriteLexerList(config):
"""Writes the Java class containing the list of all lexers.
Args:
config: an OutputConfiguration object.
"""
outfile = config.OutputFile('Lexers')
template = mako.template.Template(
resources.GetResource(os.path.join(_TEMPLATES_DIR, 'lexers.mako')))
lexer_list = dict((name, _JavaLexerName(name)) for name in lexers.ALL)
outfile.write(template.render(lexers=lexer_list, package=config.package))
def WriteLexer(config, name):
"""Converts a Pygments lexer into a Java lexer.
Args:
config: an OutputConfiguration object.
name: the short name of the lexer (e.g. "Css" or "Python"),
usable as an index into ALL_LEXERS.
"""
try:
lexer_cls = lexers.ALL[name]
except KeyError:
raise RuntimeError('Unknown lexer "%s"' % name)
class_name = _JavaLexerName(name)
outfile = config.OutputFile(class_name)
states = ExtractStates(lexer_cls)
filenames = ConvertFilenames(lexer_cls.filenames)
template = mako.template.Template(
resources.GetResource(os.path.join(_TEMPLATES_DIR, 'lexer.mako')))
outfile.write(template.render_unicode(
states=states, lexer_name=class_name, origin=lexer_cls,
package=config.package, filenames=filenames).encode('utf-8'))
class OutputConfiguration(object):
"""Configuration object describing where to write files.
Attributes:
package: package name for generated java files.
basedir: directory to prepend to the package path.
outfile: open file to write to. If None, a path will be derived
from the other arguments.
"""
def __init__(self, package=_DEFAULT_PACKAGE, basedir=_DEFAULT_BASEDIR,
outfile=None):
self.package = package
self.basedir = basedir
self.outfile = outfile
self._written = False
def OutputFile(self, class_name):
"""Returns an open file for writing the given class."""
if self.outfile:
if self._written:
raise RuntimeError(
'Attempted to write multiple classes to the same open file.')
self._written = True
return self.outfile
return self._CreateParentsAndOpen(self._FilePath(class_name))
def _CreateParentsAndOpen(self, path):
"""Opens a file for writing, recursively creating subdirs if needed."""
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
return open(path, 'w')
def _FilePath(self, class_name):
return os.path.join(self.basedir, self.package.replace('.', '/'),
class_name + '.java')
def main():
if len(sys.argv) == 2:
# With one argument, write a single module (either a lexer
# or the token list) to stdout.
config = OutputConfiguration(outfile=sys.stdout)
if sys.argv[-1] == 'Tokens':
WriteTokens(config)
elif sys.argv[-1] == 'Lexers':
WriteLexerList(config)
else:
WriteLexer(config, sys.argv[-1])
elif len(sys.argv) == 1:
# With no arguments, write all modules to the default output paths.
config = OutputConfiguration()
WriteTokens(config)
for lexer_name in lexers.ALL:
WriteLexer(config, lexer_name)
WriteLexerList(config)
else:
    raise RuntimeError('Unknown command line: %s' % sys.argv)
if __name__ == '__main__':
main()
| bsd-2-clause | 4,275,517,555,670,912,500 | 34.836022 | 88 | 0.702123 | false |
mazvv/travelcrm | travelcrm/views/contracts.py | 1 | 7715 | # -*-coding: utf-8-*-
import logging
from pyramid.view import view_config, view_defaults
from pyramid.httpexceptions import HTTPFound
from . import BaseView
from ..models import DBSession
from ..models.contract import Contract
from ..lib.bl.contracts import get_contract_copy
from ..lib.bl.subscriptions import subscribe_resource
from ..lib.utils.common_utils import translate as _
from ..forms.contracts import (
ContractForm,
ContractSearchForm,
ContractAssignForm,
)
from ..lib.events.resources import (
ResourceCreated,
ResourceChanged,
ResourceDeleted,
)
log = logging.getLogger(__name__)
@view_defaults(
context='..resources.contracts.ContractsResource',
)
class ContractsView(BaseView):
@view_config(
request_method='GET',
renderer='travelcrm:templates/contracts/index.mako',
permission='view'
)
def index(self):
return {
'title': self._get_title(),
}
@view_config(
name='list',
xhr='True',
request_method='POST',
renderer='json',
permission='view'
)
def _list(self):
form = ContractSearchForm(self.request, self.context)
form.validate()
qb = form.submit()
return {
'total': qb.get_count(),
'rows': qb.get_serialized()
}
@view_config(
name='view',
request_method='GET',
renderer='travelcrm:templates/contracts/form.mako',
permission='view'
)
def view(self):
if self.request.params.get('rid'):
resource_id = self.request.params.get('rid')
contract = Contract.by_resource_id(resource_id)
return HTTPFound(
location=self.request.resource_url(
self.context, 'view', query={'id': contract.id}
)
)
result = self.edit()
result.update({
'title': self._get_title(_(u'View')),
'readonly': True,
})
return result
@view_config(
name='add',
request_method='GET',
renderer='travelcrm:templates/contracts/form.mako',
permission='add'
)
def add(self):
return {
'title': self._get_title(_(u'Add')),
}
@view_config(
name='add',
request_method='POST',
renderer='json',
permission='add'
)
def _add(self):
form = ContractForm(self.request)
if form.validate():
contract = form.submit()
DBSession.add(contract)
DBSession.flush()
event = ResourceCreated(self.request, contract)
event.registry()
return {
'success_message': _(u'Saved'),
'response': contract.id
}
else:
return {
'error_message': _(u'Please, check errors'),
'errors': form.errors
}
@view_config(
name='edit',
request_method='GET',
renderer='travelcrm:templates/contracts/form.mako',
permission='edit'
)
def edit(self):
contract = Contract.get(self.request.params.get('id'))
return {
'item': contract,
'title': self._get_title(_(u'Edit')),
}
@view_config(
name='edit',
request_method='POST',
renderer='json',
permission='edit'
)
def _edit(self):
contract = Contract.get(self.request.params.get('id'))
form = ContractForm(self.request)
if form.validate():
form.submit(contract)
event = ResourceChanged(self.request, contract)
event.registry()
return {
'success_message': _(u'Saved'),
'response': contract.id
}
else:
return {
'error_message': _(u'Please, check errors'),
'errors': form.errors
}
@view_config(
name='copy',
request_method='GET',
renderer='travelcrm:templates/contracts/form.mako',
permission='add'
)
def copy(self):
contract = get_contract_copy(
self.request.params.get('id'), self.request
)
return {
'action': self.request.path_url,
'item': contract,
'title': self._get_title(_(u'Copy')),
}
@view_config(
name='copy',
request_method='POST',
renderer='json',
permission='add'
)
def _copy(self):
return self._add()
@view_config(
name='details',
request_method='GET',
renderer='travelcrm:templates/contracts/details.mako',
permission='view'
)
def details(self):
contract = Contract.get(self.request.params.get('id'))
return {
'item': contract,
}
@view_config(
name='delete',
request_method='GET',
renderer='travelcrm:templates/contracts/delete.mako',
permission='delete'
)
def delete(self):
return {
'title': self._get_title(_(u'Delete')),
'id': self.request.params.get('id')
}
@view_config(
name='delete',
request_method='POST',
renderer='json',
permission='delete'
)
def _delete(self):
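        # Delete every requested contract in one transaction; if any
        # deletion fails, roll back and report a generic error.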
errors = False
ids = self.request.params.getall('id')
if ids:
try:
items = DBSession.query(Contract).filter(
Contract.id.in_(ids)
)
for item in items:
DBSession.delete(item)
event = ResourceDeleted(self.request, item)
event.registry()
DBSession.flush()
            except Exception:
                errors = True
                DBSession.rollback()
if errors:
return {
'error_message': _(
                    u'Some objects could not be deleted'
),
}
return {'success_message': _(u'Deleted')}
@view_config(
name='assign',
request_method='GET',
renderer='travelcrm:templates/contracts/assign.mako',
permission='assign'
)
def assign(self):
return {
'id': self.request.params.get('id'),
'title': self._get_title(_(u'Assign Maintainer')),
}
@view_config(
name='assign',
request_method='POST',
renderer='json',
permission='assign'
)
def _assign(self):
form = ContractAssignForm(self.request)
if form.validate():
form.submit(self.request.params.getall('id'))
return {
'success_message': _(u'Assigned'),
}
else:
return {
'error_message': _(u'Please, check errors'),
'errors': form.errors
}
@view_config(
name='subscribe',
request_method='GET',
renderer='travelcrm:templates/contracts/subscribe.mako',
permission='view'
)
def subscribe(self):
return {
'id': self.request.params.get('id'),
'title': self._get_title(_(u'Subscribe')),
}
@view_config(
name='subscribe',
request_method='POST',
renderer='json',
permission='view'
)
def _subscribe(self):
ids = self.request.params.getall('id')
for id in ids:
contract = Contract.get(id)
subscribe_resource(self.request, contract.resource)
return {
'success_message': _(u'Subscribed'),
}
| gpl-3.0 | 532,814,128,972,921,660 | 25.788194 | 67 | 0.516397 | false |