repo_name stringlengths 5–100 | ref stringlengths 12–67 | path stringlengths 4–244 | copies stringlengths 1–8 | content stringlengths 0–1.05M ⌀ |
---|---|---|---|---|
MichaelNedzelsky/intellij-community | refs/heads/master | python/testData/fillParagraph/enter_after.py | 83 | # Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod
# tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim
# veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea
# commodo consequat. Duis aute irure dolor in reprehenderit in voluptate
# velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat
# cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id
# est laborum. |
dhomeier/astropy | refs/heads/wcs-datfix-unwarn | astropy/wcs/tests/extension/__init__.py | 12133432 | |
mcella/django | refs/heads/master | django/utils/__init__.py | 12133432 | |
plumdog/django_migration_testcase | refs/heads/master | tests/test_second_app/south_migrations/__init__.py | 12133432 | |
maljac/odoomrp-wip | refs/heads/8.0 | procurement_sale_forecast/wizard/make_procurement.py | 24 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, api
class MakeProcurement(models.TransientModel):
_inherit = 'make.procurement'
@api.multi
def make_procurement(self):
result = super(MakeProcurement, self).make_procurement()
forecast_line_obj = self.env['procurement.sale.forecast.line']
context = self.env.context
if context.get('active_model') == 'procurement.sale.forecast.line':
forecast_line_id = context['active_id']
procurement_id = result['res_id']
forecast_line = forecast_line_obj.browse(forecast_line_id)
forecast_line.procurement_id = procurement_id
return result
|
c4goldsw/shogun | refs/heads/develop | applications/ocr/Ai.py | 26 | # File : $HeadURL$
# Version: $Id$
from modshogun import RealFeatures, MulticlassLabels
from modshogun import GaussianKernel
from modshogun import GMNPSVM
import numpy as np
import gzip as gz
import pickle as pkl
import common as com
class Ai:
def __init__(self):
self.x = None
self.y = None
self.x_test = None
self.y_test = None
self.svm = None
def load_train_data(self, x_fname, y_fname):
Ai.__init__(self)
self.x = np.loadtxt(x_fname)
self.y = np.loadtxt(y_fname) - 1.0
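# labels on disk appear to be 1-based; shift them to 0-based for
# shogun's MulticlassLabels (classify() below undoes the shift)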
self.x_test = self.x
self.y_test = self.y
def _svm_new(self, kernel_width, c, epsilon):
if self.x is None or self.y is None:
raise Exception("No training data loaded.")
x = RealFeatures(self.x)
y = MulticlassLabels(self.y)
self.svm = GMNPSVM(c, GaussianKernel(x, x, kernel_width), y)
self.svm.set_epsilon(epsilon)
def write_svm(self):
gz_stream = gz.open(com.TRAIN_SVM_FNAME_GZ, 'wb', 9)
pkl.dump(self.svm, gz_stream)
gz_stream.close()
def read_svm(self):
gz_stream = gz.open(com.TRAIN_SVM_FNAME_GZ, 'rb')
self.svm = pkl.load(gz_stream)
gz_stream.close()
def enable_validation(self, train_frac):
x = self.x
y = self.y
idx = np.arange(len(y))
np.random.shuffle(idx)
train_idx = idx[:int(np.floor(train_frac*len(y)))]
test_idx = idx[int(np.ceil(train_frac*len(y))):]
self.x = x[:,train_idx]
self.y = y[train_idx]
self.x_test = x[:,test_idx]
self.y_test = y[test_idx]
def train(self, kernel_width, c, epsilon):
self._svm_new(kernel_width, c, epsilon)
x = RealFeatures(self.x)
self.svm.io.enable_progress()
self.svm.train(x)
self.svm.io.disable_progress()
def load_classifier(self): self.read_svm()
def classify(self, matrix):
cl = self.svm.apply(
RealFeatures(
np.reshape(matrix, newshape=(com.FEATURE_DIM, 1),
order='F')
)
).get_label(0)
return int(cl + 1.0) % 10
def get_test_error(self):
self.svm.io.enable_progress()
l = self.svm.apply(RealFeatures(self.x_test)).get_labels()
self.svm.io.disable_progress()
return 1.0 - np.mean(l == self.y_test)
|
nuclear-wizard/moose | refs/heads/devel | python/chigger/utils/AxisOptions.py | 15 | #pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import vtk
import mooseutils
from .Options import Options
VTK_NOTATION_ENUM = [
vtk.vtkAxis.STANDARD_NOTATION,
vtk.vtkAxis.SCIENTIFIC_NOTATION,
vtk.vtkAxis.FIXED_NOTATION,
vtk.vtkAxis.PRINTF_NOTATION
]
def get_options():
"""
Returns options for vtkAxis objects.
"""
opt = Options()
opt.add('num_ticks', 5, "The number of tick marks to place on the axis.", vtype=int)
opt.add('lim', "The axis extents.", vtype=list)
opt.add('font_color', [1, 1, 1], "The color of the axis, ticks, and labels.")
opt.add('title', "The axis label.", vtype=str)
opt.add('font_size', "The axis title and label font sizes, in points.", vtype=int)
opt.add('title_font_size', "The axis title font size, in points.", vtype=int)
opt.add('tick_font_size', "The axis tick label font size, in points.", vtype=int)
opt.add('grid', True, "Show/hide the grid lines for this axis.")
opt.add('grid_color', [0.25, 0.25, 0.25], "The color for the grid lines.")
opt.add('precision', "The axis numeric precision.", vtype=int)
opt.add('notation', "The type of notation; leave empty to let VTK decide.", vtype=str,
allow=['standard', 'scientific', 'fixed', 'printf'])
opt.add('ticks_visible', True, "Control visibility of tickmarks on colorbar axis.")
opt.add('axis_visible', True, "Control visibility of axis line on colorbar axis.")
opt.add('labels_visible', True, "Control visibility of the numeric labels.")
opt.add('axis_position', 'left', "Set the axis position (left, right, top, bottom)", vtype=str,
allow=['left', 'right', 'top', 'bottom'])
opt.add('axis_point1', [0, 0], 'Starting location of axis, in absolute viewport coordinates.')
opt.add('axis_point2', [0, 0], 'Ending location of axis, in absolute viewport coordinates.')
opt.add('axis_scale', 1, "The axis scaling factor.", vtype=float)
opt.add('axis_factor', 0, "Offset the axis by adding a factor.", vtype=float)
opt.add('axis_opacity', 1, "The vtkAxis opacity.", vtype=float)
opt.add('zero_tol', 1e-10, "Tolerance for considering limits to be the same.")
return opt
def set_options(vtkaxis, opt):
"""
Set the options for vtkAxis object.
"""
# Visibility
vtkaxis.SetTicksVisible(opt['ticks_visible'])
vtkaxis.SetAxisVisible(opt['axis_visible'])
vtkaxis.SetLabelsVisible(opt['labels_visible'])
# Opacity
if opt.isOptionValid('axis_opacity'):
opacity = opt['axis_opacity']
vtkaxis.SetOpacity(opacity)
vtkaxis.GetTitleProperties().SetOpacity(opacity)
vtkaxis.GetLabelProperties().SetOpacity(opacity)
# Ticks
if opt.isOptionValid('num_ticks'):
vtkaxis.SetNumberOfTicks(opt['num_ticks'])
# Limits
if opt.isOptionValid('lim'):
lim = opt['lim']
if abs(lim[1] - lim[0]) < opt['zero_tol']:
vtkaxis.SetBehavior(vtk.vtkAxis.CUSTOM)
vtkaxis.SetRange(0, 1)
pos = vtk.vtkDoubleArray()
pos.SetNumberOfTuples(2)
pos.SetValue(0, 0)
pos.SetValue(1, 1)
labels = vtk.vtkStringArray()
labels.SetNumberOfTuples(2)
labels.SetValue(0, str(lim[0]))
labels.SetValue(1, str(lim[1]))
vtkaxis.SetCustomTickPositions(pos, labels)
else:
vtkaxis.SetCustomTickPositions(None, None)
vtkaxis.SetBehavior(vtk.vtkAxis.FIXED)
scale = opt['axis_scale']
factor = opt['axis_factor']
vtkaxis.SetRange(lim[0] * scale + factor, lim[1] * scale + factor)
vtkaxis.RecalculateTickSpacing()
else:
vtkaxis.SetBehavior(vtk.vtkAxis.AUTO)
vtkaxis.SetCustomTickPositions(None, None)
# Color
if opt.isOptionValid('font_color'):
clr = opt['font_color']
vtkaxis.GetTitleProperties().SetColor(*clr)
vtkaxis.GetLabelProperties().SetColor(*clr)
vtkaxis.GetPen().SetColorF(*clr)
# Axis title
if opt.isOptionValid('title'):
vtkaxis.SetTitle(opt['title'])
# Font sizes
if opt.isOptionValid('font_size'):
vtkaxis.GetTitleProperties().SetFontSize(opt['font_size'])
vtkaxis.GetLabelProperties().SetFontSize(opt['font_size'])
if opt.isOptionValid('title_font_size'):
vtkaxis.GetTitleProperties().SetFontSize(opt['title_font_size'])
if opt.isOptionValid('tick_font_size'):
vtkaxis.GetLabelProperties().SetFontSize(opt['tick_font_size'])
# Precision/notation
if opt.isOptionValid('notation'):
notation = opt['notation'].upper()
vtk_notation = getattr(vtk.vtkAxis, notation + '_NOTATION')
vtkaxis.SetNotation(vtk_notation)
if opt.isOptionValid('precision'):
if vtkaxis.GetNotation() in VTK_NOTATION_ENUM[1:3]:
vtkaxis.SetPrecision(opt['precision'])
else:
mooseutils.mooseWarning("When using 'precision' option, 'notation' option has to be "
"set to either 'scientific' or 'fixed'.")
# Grid lines
vtkaxis.SetGridVisible(opt['grid'])
vtkaxis.GetGridPen().SetColorF(opt['grid_color'])
# Set the position and points
if opt.isOptionValid('axis_position'):
pos = {'left':vtk.vtkAxis.LEFT, 'right':vtk.vtkAxis.RIGHT, 'top':vtk.vtkAxis.TOP,
'bottom':vtk.vtkAxis.BOTTOM}
vtkaxis.SetPosition(pos[opt['axis_position']])
if opt.isOptionValid('axis_point1'):
vtkaxis.SetPoint1(*opt['axis_point1'])
if opt.isOptionValid('axis_point2'):
vtkaxis.SetPoint2(*opt['axis_point2'])
|
szibis/ansible-modules-core | refs/heads/devel | utilities/helper/accelerate.py | 90 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, James Cammarata <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: accelerate
short_description: Enable accelerated mode on remote node
description:
- This modules launches an ephemeral I(accelerate) daemon on the remote node which
Ansible can use to communicate with nodes at high speed.
- The daemon listens on a configurable port for a configurable amount of time.
- Fireball mode is AES encrypted.
version_added: "1.3"
options:
port:
description:
- TCP port for the socket connection
required: false
default: 5099
aliases: []
timeout:
description:
- The number of seconds the socket will wait for data. If none is received when the timeout value is reached, the connection will be closed.
required: false
default: 300
aliases: []
minutes:
description:
- The I(accelerate) listener daemon is started on nodes and will stay around for
this number of minutes before turning itself off.
required: false
default: 30
ipv6:
description:
- The listener daemon on the remote host will bind to the ipv6 localhost socket
if this parameter is set to true.
required: false
default: false
multi_key:
description:
- When enabled, the daemon will open a local socket file which can be used by future daemon executions to
upload a new key to the already running daemon, so that multiple users can connect using different keys.
This access still requires an ssh connection as the uid for which the daemon is currently running.
required: false
default: no
version_added: "1.6"
notes:
- See the advanced playbooks chapter for more about using accelerated mode.
requirements:
- "python >= 2.6"
- "python-keyczar"
author: "James Cammarata (@jimi-c)"
'''
EXAMPLES = '''
# To use accelerate mode, simply add "accelerate: true" to your play. The initial
# key exchange and starting up of the daemon will occur over SSH, but all commands and
# subsequent actions will be conducted over the raw socket connection using AES encryption
- hosts: devservers
accelerate: true
tasks:
- command: /usr/bin/anything
'''
import base64
import errno
import getpass
import json
import os
import os.path
import pwd
import signal
import socket
import struct
import sys
import syslog
import tempfile
import time
import traceback
import SocketServer
from datetime import datetime
from threading import Thread, Lock
# import module snippets
# we must import this here at the top so we can use get_module_path()
from ansible.module_utils.basic import *
syslog.openlog('ansible-%s' % os.path.basename(__file__))
# the chunk size to read and send, assuming mtu 1500 and
# leaving room for base64 (+33%) encoding and header (100 bytes)
# 4 * (975/3) + 100 = 1400
# which leaves room for the TCP/IP header
CHUNK_SIZE=10240
# FIXME: this all should be moved to module_common, as it's
# pretty much a copy from the callbacks/util code
DEBUG_LEVEL=0
def log(msg, cap=0):
global DEBUG_LEVEL
if DEBUG_LEVEL >= cap:
syslog.syslog(syslog.LOG_NOTICE|syslog.LOG_DAEMON, msg)
def v(msg):
log(msg, cap=1)
def vv(msg):
log(msg, cap=2)
def vvv(msg):
log(msg, cap=3)
def vvvv(msg):
log(msg, cap=4)
HAS_KEYCZAR = False
try:
from keyczar.keys import AesKey
HAS_KEYCZAR = True
except ImportError:
pass
SOCKET_FILE = os.path.join(get_module_path(), '.ansible-accelerate', ".local.socket")
def get_pid_location(module):
"""
Try to find a pid directory in the common locations, falling
back to the user's home directory if no others exist
"""
for dir in ['/var/run', '/var/lib/run', '/run', os.path.expanduser("~/")]:
try:
if os.path.isdir(dir) and os.access(dir, os.R_OK|os.W_OK):
return os.path.join(dir, '.accelerate.pid')
except:
pass
module.fail_json(msg="couldn't find any valid directory to use for the accelerate pid file")
# NOTE: this shares a fair amount of code in common with async_wrapper, if async_wrapper were a new module we could move
# this into utils.module_common and probably should anyway
def daemonize_self(module, password, port, minutes, pid_file):
# daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
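# (classic double fork: the first fork lets the parent exit so the
# child can setsid() into its own session; the second fork ensures
# the daemon can never reacquire a controlling terminal)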
try:
pid = os.fork()
if pid > 0:
vvv("exiting pid %s" % pid)
# exit first parent
module.exit_json(msg="daemonized accelerate on port %s for %s minutes with pid %s" % (port, minutes, str(pid)))
except OSError, e:
log("fork #1 failed: %d (%s)" % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(022)
# do second fork
try:
pid = os.fork()
if pid > 0:
log("daemon pid %s, writing %s" % (pid, pid_file))
pid_file = open(pid_file, "w")
pid_file.write("%s" % pid)
pid_file.close()
vvv("pid file written")
sys.exit(0)
except OSError, e:
log("fork #2 failed: %d (%s)" % (e.errno, e.strerror))
sys.exit(1)
dev_null = file('/dev/null','rw')
os.dup2(dev_null.fileno(), sys.stdin.fileno())
os.dup2(dev_null.fileno(), sys.stdout.fileno())
os.dup2(dev_null.fileno(), sys.stderr.fileno())
log("daemonizing successful")
class LocalSocketThread(Thread):
server = None
terminated = False
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, Verbose=None):
self.server = kwargs.get('server')
Thread.__init__(self, group, target, name, args, kwargs, Verbose)
def run(self):
try:
if os.path.exists(SOCKET_FILE):
os.remove(SOCKET_FILE)
else:
dir = os.path.dirname(SOCKET_FILE)
if os.path.exists(dir):
if not os.path.isdir(dir):
log("The socket file path (%s) exists, but is not a directory. No local connections will be available" % dir)
return
else:
# make sure the directory is accessible only to this
# user, as socket files derive their permissions from
# the directory that contains them
os.chmod(dir, 0700)
elif not os.path.exists(dir):
os.makedirs(dir, 0700)
except OSError:
pass
self.s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.s.bind(SOCKET_FILE)
self.s.listen(5)
while not self.terminated:
try:
conn, addr = self.s.accept()
vv("received local connection")
data = ""
while "\n" not in data:
data += conn.recv(2048)
try:
try:
new_key = AesKey.Read(data.strip())
found = False
for key in self.server.key_list:
try:
new_key.Decrypt(key.Encrypt("foo"))
found = True
break
except:
pass
if not found:
vv("adding new key to the key list")
self.server.key_list.append(new_key)
conn.sendall("OK\n")
else:
vv("key already exists in the key list, ignoring")
conn.sendall("EXISTS\n")
# update the last event time so the server doesn't
# shut down sooner than expected for new clients
try:
self.server.last_event_lock.acquire()
self.server.last_event = datetime.now()
finally:
self.server.last_event_lock.release()
except Exception, e:
vv("key loaded locally was invalid, ignoring (%s)" % e)
conn.sendall("BADKEY\n")
finally:
try:
conn.close()
except:
pass
except:
pass
def terminate(self):
self.terminated = True
self.s.shutdown(socket.SHUT_RDWR)
self.s.close()
class ThreadWithReturnValue(Thread):
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, Verbose=None):
Thread.__init__(self, group, target, name, args, kwargs, Verbose)
self._return = None
def run(self):
if self._Thread__target is not None:
self._return = self._Thread__target(*self._Thread__args,
**self._Thread__kwargs)
def join(self,timeout=None):
Thread.join(self, timeout=timeout)
return self._return
class ThreadedTCPServer(SocketServer.ThreadingTCPServer):
key_list = []
last_event = datetime.now()
last_event_lock = Lock()
def __init__(self, server_address, RequestHandlerClass, module, password, timeout, use_ipv6=False):
self.module = module
self.key_list.append(AesKey.Read(password))
self.allow_reuse_address = True
self.timeout = timeout
if use_ipv6:
self.address_family = socket.AF_INET6
if self.module.params.get('multi_key', False):
vv("starting thread to handle local connections for multiple keys")
self.local_thread = LocalSocketThread(kwargs=dict(server=self))
self.local_thread.start()
SocketServer.ThreadingTCPServer.__init__(self, server_address, RequestHandlerClass)
def shutdown(self):
self.local_thread.terminate()
self.running = False
SocketServer.ThreadingTCPServer.shutdown(self)
class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
# the key to use for this connection
active_key = None
def send_data(self, data):
try:
self.server.last_event_lock.acquire()
self.server.last_event = datetime.now()
finally:
self.server.last_event_lock.release()
packed_len = struct.pack('!Q', len(data))
return self.request.sendall(packed_len + data)
def recv_data(self):
header_len = 8 # size of a packed unsigned long long
data = ""
vvvv("in recv_data(), waiting for the header")
while len(data) < header_len:
try:
d = self.request.recv(header_len - len(data))
if not d:
vvv("received nothing, bailing out")
return None
data += d
except:
# probably got a connection reset
vvvv("exception received while waiting for recv(), returning None")
return None
vvvv("in recv_data(), got the header, unpacking")
data_len = struct.unpack('!Q',data[:header_len])[0]
data = data[header_len:]
vvvv("data received so far (expecting %d): %d" % (data_len,len(data)))
while len(data) < data_len:
try:
d = self.request.recv(data_len - len(data))
if not d:
vvv("received nothing, bailing out")
return None
data += d
vvvv("data received so far (expecting %d): %d" % (data_len,len(data)))
except:
# probably got a connection reset
vvvv("exception received while waiting for recv(), returning None")
return None
vvvv("received all of the data, returning")
try:
self.server.last_event_lock.acquire()
self.server.last_event = datetime.now()
finally:
self.server.last_event_lock.release()
return data
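# Wire format used by send_data()/recv_data() above: an 8-byte
# big-endian length header (struct format '!Q') followed by the
# encrypted payload, so the reader first drains exactly 8 bytes and
# then reads data_len payload bytes before handing the data back.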
def handle(self):
try:
while True:
vvvv("waiting for data")
data = self.recv_data()
if not data:
vvvv("received nothing back from recv_data(), breaking out")
break
vvvv("got data, decrypting")
if not self.active_key:
for key in self.server.key_list:
try:
data = key.Decrypt(data)
self.active_key = key
break
except:
pass
else:
vv("bad decrypt, exiting the connection handler")
return
else:
try:
data = self.active_key.Decrypt(data)
except:
vv("bad decrypt, exiting the connection handler")
return
vvvv("decryption done, loading json from the data")
data = json.loads(data)
mode = data['mode']
response = {}
last_pong = datetime.now()
if mode == 'command':
vvvv("received a command request, running it")
twrv = ThreadWithReturnValue(target=self.command, args=(data,))
twrv.start()
response = None
while twrv.is_alive():
if (datetime.now() - last_pong).seconds >= 15:
last_pong = datetime.now()
vvvv("command still running, sending keepalive packet")
data2 = json.dumps(dict(pong=True))
data2 = self.active_key.Encrypt(data2)
self.send_data(data2)
time.sleep(0.1)
response = twrv._return
vvvv("thread is done, response from join was %s" % response)
elif mode == 'put':
vvvv("received a put request, putting it")
response = self.put(data)
elif mode == 'fetch':
vvvv("received a fetch request, getting it")
response = self.fetch(data)
elif mode == 'validate_user':
vvvv("received a request to validate the user id")
response = self.validate_user(data)
vvvv("response result is %s" % str(response))
json_response = json.dumps(response)
vvvv("dumped json is %s" % json_response)
data2 = self.active_key.Encrypt(json_response)
vvvv("sending the response back to the controller")
self.send_data(data2)
vvvv("done sending the response")
if mode == 'validate_user' and response.get('rc') == 1:
vvvv("detected a uid mismatch, shutting down")
self.server.shutdown()
except:
tb = traceback.format_exc()
log("encountered an unhandled exception in the handle() function")
log("error was:\n%s" % tb)
if self.active_key:
data2 = json.dumps(dict(rc=1, failed=True, msg="unhandled error in the handle() function"))
data2 = self.active_key.Encrypt(data2)
self.send_data(data2)
def validate_user(self, data):
if 'username' not in data:
return dict(failed=True, msg='No username specified')
vvvv("validating we're running as %s" % data['username'])
# get the current uid
c_uid = os.getuid()
try:
# the target uid
t_uid = pwd.getpwnam(data['username']).pw_uid
except:
vvvv("could not find user %s" % data['username'])
return dict(failed=True, msg='could not find user %s' % data['username'])
# and return rc=0 for success, rc=1 for failure
if c_uid == t_uid:
return dict(rc=0)
else:
return dict(rc=1)
def command(self, data):
if 'cmd' not in data:
return dict(failed=True, msg='internal error: cmd is required')
if 'tmp_path' not in data:
return dict(failed=True, msg='internal error: tmp_path is required')
vvvv("executing: %s" % data['cmd'])
use_unsafe_shell = False
executable = data.get('executable')
if executable:
use_unsafe_shell = True
rc, stdout, stderr = self.server.module.run_command(data['cmd'], executable=executable, use_unsafe_shell=use_unsafe_shell, close_fds=True)
if stdout is None:
stdout = ''
if stderr is None:
stderr = ''
vvvv("got stdout: %s" % stdout)
vvvv("got stderr: %s" % stderr)
return dict(rc=rc, stdout=stdout, stderr=stderr)
def fetch(self, data):
if 'in_path' not in data:
return dict(failed=True, msg='internal error: in_path is required')
try:
fd = file(data['in_path'], 'rb')
fstat = os.stat(data['in_path'])
vvv("FETCH file is %d bytes" % fstat.st_size)
while fd.tell() < fstat.st_size:
data = fd.read(CHUNK_SIZE)
last = False
if fd.tell() >= fstat.st_size:
last = True
data = dict(data=base64.b64encode(data), last=last)
data = json.dumps(data)
data = self.active_key.Encrypt(data)
if self.send_data(data):
return dict(failed=True, stderr="failed to send data")
response = self.recv_data()
if not response:
log("failed to get a response, aborting")
return dict(failed=True, stderr="Failed to get a response from %s" % self.host)
response = self.active_key.Decrypt(response)
response = json.loads(response)
if response.get('failed',False):
log("got a failed response from the master")
return dict(failed=True, stderr="Master reported failure, aborting transfer")
except Exception, e:
fd.close()
tb = traceback.format_exc()
log("failed to fetch the file: %s" % tb)
return dict(failed=True, stderr="Could not fetch the file: %s" % str(e))
fd.close()
return dict()
def put(self, data):
if 'data' not in data:
return dict(failed=True, msg='internal error: data is required')
if 'out_path' not in data:
return dict(failed=True, msg='internal error: out_path is required')
final_path = None
if 'user' in data and data.get('user') != getpass.getuser():
vvv("the target user doesn't match this user, we'll move the file into place via sudo")
tmp_path = os.path.expanduser('~/.ansible/tmp/')
if not os.path.exists(tmp_path):
try:
os.makedirs(tmp_path, 0700)
except:
return dict(failed=True, msg='could not create a temporary directory at %s' % tmp_path)
(fd,out_path) = tempfile.mkstemp(prefix='ansible.', dir=tmp_path)
out_fd = os.fdopen(fd, 'w', 0)
final_path = data['out_path']
else:
out_path = data['out_path']
out_fd = open(out_path, 'w')
try:
bytes=0
while True:
out = base64.b64decode(data['data'])
bytes += len(out)
out_fd.write(out)
response = json.dumps(dict())
response = self.active_key.Encrypt(response)
self.send_data(response)
if data['last']:
break
data = self.recv_data()
if not data:
raise ""
data = self.active_key.Decrypt(data)
data = json.loads(data)
except:
out_fd.close()
tb = traceback.format_exc()
log("failed to put the file: %s" % tb)
return dict(failed=True, stdout="Could not write the file")
vvvv("wrote %d bytes" % bytes)
out_fd.close()
if final_path:
vvv("moving %s to %s" % (out_path, final_path))
self.server.module.atomic_move(out_path, final_path)
return dict()
def daemonize(module, password, port, timeout, minutes, use_ipv6, pid_file):
try:
daemonize_self(module, password, port, minutes, pid_file)
def timer_handler(signum, _):
try:
try:
server.last_event_lock.acquire()
td = datetime.now() - server.last_event
# older python timedelta objects don't have total_seconds(),
# so we use the formula from the docs to calculate it
total_seconds = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
if total_seconds >= minutes * 60:
log("server has been idle longer than the timeout, shutting down")
server.running = False
server.shutdown()
else:
# reschedule the check
vvvv("daemon idle for %d seconds (timeout=%d)" % (total_seconds,minutes*60))
signal.alarm(30)
except:
pass
finally:
server.last_event_lock.release()
signal.signal(signal.SIGALRM, timer_handler)
signal.alarm(30)
tries = 5
while tries > 0:
try:
if use_ipv6:
address = ("::", port)
else:
address = ("0.0.0.0", port)
server = ThreadedTCPServer(address, ThreadedTCPRequestHandler, module, password, timeout, use_ipv6=use_ipv6)
server.allow_reuse_address = True
break
except Exception, e:
vv("Failed to create the TCP server (tries left = %d) (error: %s) " % (tries,e))
tries -= 1
time.sleep(0.2)
if tries == 0:
vv("Maximum number of attempts to create the TCP server reached, bailing out")
raise Exception("max # of attempts to serve reached")
# run the server in a separate thread to make signal handling work
server_thread = Thread(target=server.serve_forever, kwargs=dict(poll_interval=0.1))
server_thread.start()
server.running = True
v("serving!")
while server.running:
time.sleep(1)
# wait for the thread to exit fully
server_thread.join()
v("server thread terminated, exiting!")
sys.exit(0)
except Exception, e:
tb = traceback.format_exc()
log("exception caught, exiting accelerated mode: %s\n%s" % (e, tb))
sys.exit(0)
def main():
global DEBUG_LEVEL
module = AnsibleModule(
argument_spec = dict(
port=dict(required=False, default=5099),
ipv6=dict(required=False, default=False, type='bool'),
multi_key=dict(required=False, default=False, type='bool'),
timeout=dict(required=False, default=300),
password=dict(required=True),
minutes=dict(required=False, default=30),
debug=dict(required=False, default=0, type='int')
),
supports_check_mode=True
)
password = base64.b64decode(module.params['password'])
port = int(module.params['port'])
timeout = int(module.params['timeout'])
minutes = int(module.params['minutes'])
debug = int(module.params['debug'])
ipv6 = module.params['ipv6']
multi_key = module.params['multi_key']
if not HAS_KEYCZAR:
module.fail_json(msg="keyczar is not installed (on the remote side)")
DEBUG_LEVEL=debug
pid_file = get_pid_location(module)
daemon_pid = None
daemon_running = False
if os.path.exists(pid_file):
try:
daemon_pid = int(open(pid_file).read())
try:
# sending signal 0 doesn't do anything to the
# process, other than tell the calling program
# whether other signals can be sent
os.kill(daemon_pid, 0)
except OSError, e:
if e.errno == errno.EPERM:
# no permissions means the pid is probably
# running, but as a different user, so fail
module.fail_json(msg="the accelerate daemon appears to be running as a different user that this user cannot access (pid=%d)" % daemon_pid)
else:
daemon_running = True
except ValueError:
# invalid pid file, unlink it - otherwise we don't care
try:
os.unlink(pid_file)
except:
pass
if daemon_running and multi_key:
# try to connect to the file socket for the daemon if it exists
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
try:
s.connect(SOCKET_FILE)
s.sendall(password + '\n')
data = ""
while '\n' not in data:
data += s.recv(2048)
res = data.strip()
except:
module.fail_json(msg="failed to connect to the local socket file")
finally:
try:
s.close()
except:
pass
if res in ("OK", "EXISTS"):
module.exit_json(msg="transferred new key to the existing daemon")
else:
module.fail_json(msg="could not transfer new key: %s" % data.strip())
else:
# try to start up the daemon
daemonize(module, password, port, timeout, minutes, ipv6, pid_file)
main()
|
TheLoneRanger14/Decaf.v2 | refs/heads/master | roms/seabios/scripts/layoutrom.py | 7 | #!/usr/bin/env python
# Script to analyze code and arrange ld sections.
#
# Copyright (C) 2008-2014 Kevin O'Connor <[email protected]>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import operator
import sys
# LD script headers/trailers
COMMONHEADER = """
/* DO NOT EDIT! This is an autogenerated file. See scripts/layoutrom.py. */
OUTPUT_FORMAT("elf32-i386")
OUTPUT_ARCH("i386")
SECTIONS
{
"""
COMMONTRAILER = """
/* Discard regular data sections to force a link error if
* code attempts to access data not marked with VAR16 (or other
* appropriate macro)
*/
/DISCARD/ : {
*(.text*) *(.data*) *(.bss*) *(.rodata*)
*(COMMON) *(.discard*) *(.eh_frame) *(.note*)
}
}
"""
######################################################################
# Determine section locations
######################################################################
# Align 'pos' to 'alignbytes' offset
def alignpos(pos, alignbytes):
mask = alignbytes - 1
return (pos + mask) & ~mask
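# e.g. alignpos(0x1234, 0x10) == 0x1240: adding the mask 0xf and then
# clearing the low bits rounds up to the next 16-byte boundary (an
# already-aligned 0x1240 is returned unchanged).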
# Determine the final addresses for a list of sections that end at an
# address.
def setSectionsStart(sections, endaddr, minalign=1, segoffset=0):
totspace = 0
for section in sections:
if section.align > minalign:
minalign = section.align
totspace = alignpos(totspace, section.align) + section.size
startaddr = int((endaddr - totspace) / minalign) * minalign
curaddr = startaddr
for section in sections:
curaddr = alignpos(curaddr, section.align)
section.finalloc = curaddr
section.finalsegloc = curaddr - segoffset
curaddr += section.size
return startaddr, minalign
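# Worked example (illustrative only): two sections of (size 0x30,
# align 0x10) and (size 0x8, align 0x4) ending at 0x10000 need
# totspace 0x38, so the block starts at 0xffc0 and the sections are
# placed at 0xffc0 and 0xfff0 respectively.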
# The 16bit code can't exceed 64K of space.
BUILD_BIOS_ADDR = 0xf0000
BUILD_BIOS_SIZE = 0x10000
BUILD_ROM_START = 0xc0000
BUILD_LOWRAM_END = 0xa0000
# Space to reserve in f-segment for dynamic allocations
BUILD_MIN_BIOSTABLE = 2048
# Layout the 16bit code. This ensures sections with fixed offset
# requirements are placed in the correct location. It also places the
# 16bit code as high as possible in the f-segment.
def fitSections(sections, fillsections):
# fixedsections = [(addr, section), ...]
fixedsections = []
for section in sections:
if section.name.startswith('.fixedaddr.'):
addr = int(section.name[11:], 16)
section.finalloc = addr + BUILD_BIOS_ADDR
section.finalsegloc = addr
fixedsections.append((addr, section))
if section.align != 1:
print("Error: Fixed section %s has non-zero alignment (%d)" % (
section.name, section.align))
sys.exit(1)
fixedsections.sort(key=operator.itemgetter(0))
firstfixed = fixedsections[0][0]
# Find freespace in fixed address area
# fixedAddr = [(freespace, section), ...]
fixedAddr = []
for i in range(len(fixedsections)):
fixedsectioninfo = fixedsections[i]
addr, section = fixedsectioninfo
if i == len(fixedsections) - 1:
nextaddr = BUILD_BIOS_SIZE
else:
nextaddr = fixedsections[i+1][0]
avail = nextaddr - addr - section.size
fixedAddr.append((avail, section))
fixedAddr.sort(key=operator.itemgetter(0))
# Attempt to fit other sections into fixed area
canrelocate = [(section.size, section.align, section.name, section)
for section in fillsections]
canrelocate.sort()
canrelocate = [section for size, align, name, section in canrelocate]
totalused = 0
for freespace, fixedsection in fixedAddr:
addpos = fixedsection.finalsegloc + fixedsection.size
totalused += fixedsection.size
nextfixedaddr = addpos + freespace
# print("Filling section %x uses %d, next=%x, available=%d" % (
# fixedsection.finalloc, fixedsection.size, nextfixedaddr, freespace))
while 1:
canfit = None
for fitsection in canrelocate:
if addpos + fitsection.size > nextfixedaddr:
# Can't fit and nothing else will fit.
break
fitnextaddr = alignpos(addpos, fitsection.align) + fitsection.size
# print("Test %s - %x vs %x" % (
# fitsection.name, fitnextaddr, nextfixedaddr))
if fitnextaddr > nextfixedaddr:
# This item can't fit.
continue
canfit = (fitnextaddr, fitsection)
if canfit is None:
break
# Found a section that can fit.
fitnextaddr, fitsection = canfit
canrelocate.remove(fitsection)
fitsection.finalloc = addpos + BUILD_BIOS_ADDR
fitsection.finalsegloc = addpos
addpos = fitnextaddr
totalused += fitsection.size
# print(" Adding %s (size %d align %d) pos=%x avail=%d" % (
# fitsection[2], fitsection[0], fitsection[1]
# , fitnextaddr, nextfixedaddr - fitnextaddr))
# Report stats
total = BUILD_BIOS_SIZE-firstfixed
slack = total - totalused
print ("Fixed space: 0x%x-0x%x total: %d slack: %d"
" Percent slack: %.1f%%" % (
firstfixed, BUILD_BIOS_SIZE, total, slack,
(float(slack) / total) * 100.0))
return firstfixed + BUILD_BIOS_ADDR
# Return the subset of sections with a given category
def getSectionsCategory(sections, category):
return [section for section in sections if section.category == category]
# Return the subset of sections with a given fileid
def getSectionsFileid(sections, fileid):
return [section for section in sections if section.fileid == fileid]
# Return the subset of sections with a given name prefix
def getSectionsPrefix(sections, prefix):
return [section for section in sections
if section.name.startswith(prefix)]
# The sections (and associated information) to be placed in output rom
class LayoutInfo:
sections = None
genreloc = None
sec32init_start = sec32init_end = sec32init_align = None
sec32low_start = sec32low_end = None
zonelow_base = final_sec32low_start = None
zonefseg_start = zonefseg_end = None
final_readonly_start = None
varlowsyms = entrysym = None
# Determine final memory addresses for sections
def doLayout(sections, config, genreloc):
li = LayoutInfo()
li.sections = sections
li.genreloc = genreloc
# Determine 16bit positions
sections16 = getSectionsCategory(sections, '16')
textsections = getSectionsPrefix(sections16, '.text.')
rodatasections = getSectionsPrefix(sections16, '.rodata')
datasections = getSectionsPrefix(sections16, '.data16.')
fixedsections = getSectionsCategory(sections, 'fixed')
firstfixed = fitSections(fixedsections, textsections)
remsections = [s for s in textsections+rodatasections+datasections
if s.finalloc is None]
sec16_start, sec16_align = setSectionsStart(
remsections, firstfixed, segoffset=BUILD_BIOS_ADDR)
# Determine 32seg positions
sections32seg = getSectionsCategory(sections, '32seg')
textsections = getSectionsPrefix(sections32seg, '.text.')
rodatasections = getSectionsPrefix(sections32seg, '.rodata')
datasections = getSectionsPrefix(sections32seg, '.data32seg.')
sec32seg_start, sec32seg_align = setSectionsStart(
textsections + rodatasections + datasections, sec16_start
, segoffset=BUILD_BIOS_ADDR)
# Determine 32bit "fseg memory" data positions
sections32textfseg = getSectionsCategory(sections, '32textfseg')
sec32textfseg_start, sec32textfseg_align = setSectionsStart(
sections32textfseg, sec32seg_start, 16)
sections32fseg = getSectionsCategory(sections, '32fseg')
sec32fseg_start, sec32fseg_align = setSectionsStart(
sections32fseg, sec32textfseg_start, 16
, segoffset=BUILD_BIOS_ADDR)
# Determine 32flat runtime positions
sections32flat = getSectionsCategory(sections, '32flat')
textsections = getSectionsPrefix(sections32flat, '.text.')
rodatasections = getSectionsPrefix(sections32flat, '.rodata')
datasections = getSectionsPrefix(sections32flat, '.data.')
bsssections = getSectionsPrefix(sections32flat, '.bss.')
sec32flat_start, sec32flat_align = setSectionsStart(
textsections + rodatasections + datasections + bsssections
, sec32fseg_start, 16)
# Determine 32flat init positions
sections32init = getSectionsCategory(sections, '32init')
init32_textsections = getSectionsPrefix(sections32init, '.text.')
init32_rodatasections = getSectionsPrefix(sections32init, '.rodata')
init32_datasections = getSectionsPrefix(sections32init, '.data.')
init32_bsssections = getSectionsPrefix(sections32init, '.bss.')
sec32init_start, sec32init_align = setSectionsStart(
init32_textsections + init32_rodatasections
+ init32_datasections + init32_bsssections
, sec32flat_start, 16)
# Determine location of ZoneFSeg memory.
zonefseg_end = sec32flat_start
if not genreloc:
zonefseg_end = sec32init_start
zonefseg_start = BUILD_BIOS_ADDR
if zonefseg_start + BUILD_MIN_BIOSTABLE > zonefseg_end:
# Not enough ZoneFSeg space - force a minimum space.
zonefseg_end = sec32fseg_start
zonefseg_start = zonefseg_end - BUILD_MIN_BIOSTABLE
sec32flat_start, sec32flat_align = setSectionsStart(
textsections + rodatasections + datasections + bsssections
, zonefseg_start, 16)
sec32init_start, sec32init_align = setSectionsStart(
init32_textsections + init32_rodatasections
+ init32_datasections + init32_bsssections
, sec32flat_start, 16)
li.sec32init_start = sec32init_start
li.sec32init_end = sec32flat_start
li.sec32init_align = sec32init_align
final_readonly_start = min(BUILD_BIOS_ADDR, sec32flat_start)
if not genreloc:
final_readonly_start = min(BUILD_BIOS_ADDR, sec32init_start)
li.zonefseg_start = zonefseg_start
li.zonefseg_end = zonefseg_end
li.final_readonly_start = final_readonly_start
# Determine "low memory" data positions
sections32low = getSectionsCategory(sections, '32low')
sec32low_end = sec32init_start
if config.get('CONFIG_MALLOC_UPPERMEMORY'):
final_sec32low_end = final_readonly_start
zonelow_base = final_sec32low_end - 64*1024
zonelow_base = max(BUILD_ROM_START, alignpos(zonelow_base, 2*1024))
else:
final_sec32low_end = BUILD_LOWRAM_END
zonelow_base = final_sec32low_end - 64*1024
relocdelta = final_sec32low_end - sec32low_end
li.sec32low_start, li.sec32low_align = setSectionsStart(
sections32low, sec32low_end, 16
, segoffset=zonelow_base - relocdelta)
li.sec32low_end = sec32low_end
li.zonelow_base = zonelow_base
li.final_sec32low_start = li.sec32low_start + relocdelta
# Print statistics
size16 = BUILD_BIOS_ADDR + BUILD_BIOS_SIZE - sec16_start
size32seg = sec16_start - sec32seg_start
size32textfseg = sec32seg_start - sec32textfseg_start
size32fseg = sec32textfseg_start - sec32fseg_start
size32flat = sec32fseg_start - sec32flat_start
size32init = sec32flat_start - sec32init_start
sizelow = li.sec32low_end - li.sec32low_start
print("16bit size: %d" % size16)
print("32bit segmented size: %d" % size32seg)
print("32bit flat size: %d" % (size32flat + size32textfseg))
print("32bit flat init size: %d" % size32init)
print("Lowmem size: %d" % sizelow)
print("f-segment var size: %d" % size32fseg)
return li
######################################################################
# Linker script output
######################################################################
# Write LD script includes for the given cross references
def outXRefs(sections, useseg=0, exportsyms=[], forcedelta=0):
xrefs = dict([(symbol.name, symbol) for symbol in exportsyms])
out = ""
for section in sections:
for reloc in section.relocs:
symbol = reloc.symbol
if (symbol.section is not None
and (symbol.section.fileid != section.fileid
or symbol.name != reloc.symbolname)):
xrefs[reloc.symbolname] = symbol
for symbolname, symbol in xrefs.items():
loc = symbol.section.finalloc
if useseg:
loc = symbol.section.finalsegloc
out += "%s = 0x%x ;\n" % (symbolname, loc + forcedelta + symbol.offset)
return out
# Write LD script includes for the given sections
def outSections(sections, useseg=0):
out = ""
for section in sections:
loc = section.finalloc
if useseg:
loc = section.finalsegloc
out += "%s 0x%x : { *(%s) }\n" % (section.name, loc, section.name)
return out
# Write LD script includes for the given sections using relative offsets
def outRelSections(sections, startsym, useseg=0):
sections = [(section.finalloc, section) for section in sections
if section.finalloc is not None]
sections.sort(key=operator.itemgetter(0))
out = ""
for addr, section in sections:
loc = section.finalloc
if useseg:
loc = section.finalsegloc
out += ". = ( 0x%x - %s ) ;\n" % (loc, startsym)
if section.name in ('.rodata.str1.1', '.rodata'):
out += "_rodata%s = . ;\n" % (section.fileid,)
out += "*%s.*(%s)\n" % (section.fileid, section.name)
return out
# Build linker script output for a list of relocations.
def strRelocs(outname, outrel, relocs):
relocs.sort()
return (" %s_start = ABSOLUTE(.) ;\n" % (outname,)
+ "".join(["LONG(0x%x - %s)\n" % (pos, outrel)
for pos in relocs])
+ " %s_end = ABSOLUTE(.) ;\n" % (outname,))
# Find relocations to the given sections
def getRelocs(sections, tosection, type=None):
return [section.finalloc + reloc.offset
for section in sections
for reloc in section.relocs
if (reloc.symbol.section in tosection
and (type is None or reloc.type == type))]
# Output the linker scripts for all required sections.
def writeLinkerScripts(li, out16, out32seg, out32flat):
# Write 16bit linker script
filesections16 = getSectionsFileid(li.sections, '16')
out = outXRefs(filesections16, useseg=1) + """
zonelow_base = 0x%x ;
_zonelow_seg = 0x%x ;
%s
""" % (li.zonelow_base,
int(li.zonelow_base / 16),
outSections(filesections16, useseg=1))
outfile = open(out16, 'w')
outfile.write(COMMONHEADER + out + COMMONTRAILER)
outfile.close()
# Write 32seg linker script
filesections32seg = getSectionsFileid(li.sections, '32seg')
out = (outXRefs(filesections32seg, useseg=1)
+ outSections(filesections32seg, useseg=1))
outfile = open(out32seg, 'w')
outfile.write(COMMONHEADER + out + COMMONTRAILER)
outfile.close()
# Write 32flat linker script
sec32all_start = li.sec32low_start
relocstr = ""
if li.genreloc:
# Generate relocations
initsections = dict([
(s, 1) for s in getSectionsCategory(li.sections, '32init')])
noninitsections = dict([(s, 1) for s in li.sections
if s not in initsections])
absrelocs = getRelocs(initsections, initsections, type='R_386_32')
relrelocs = getRelocs(initsections, noninitsections, type='R_386_PC32')
initrelocs = getRelocs(noninitsections, initsections)
relocstr = (strRelocs("_reloc_abs", "code32init_start", absrelocs)
+ strRelocs("_reloc_rel", "code32init_start", relrelocs)
+ strRelocs("_reloc_init", "code32flat_start", initrelocs))
numrelocs = len(absrelocs + relrelocs + initrelocs)
sec32all_start -= numrelocs * 4
filesections32flat = getSectionsFileid(li.sections, '32flat')
out = outXRefs([], exportsyms=li.varlowsyms
, forcedelta=li.final_sec32low_start-li.sec32low_start)
out += outXRefs(filesections32flat, exportsyms=[li.entrysym]) + """
_reloc_min_align = 0x%x ;
zonefseg_start = 0x%x ;
zonefseg_end = 0x%x ;
zonelow_base = 0x%x ;
final_varlow_start = 0x%x ;
final_readonly_start = 0x%x ;
varlow_start = 0x%x ;
varlow_end = 0x%x ;
code32init_start = 0x%x ;
code32init_end = 0x%x ;
code32flat_start = 0x%x ;
.text code32flat_start : {
%s
%s
code32flat_end = ABSOLUTE(.) ;
} :text
""" % (li.sec32init_align,
li.zonefseg_start,
li.zonefseg_end,
li.zonelow_base,
li.final_sec32low_start,
li.final_readonly_start,
li.sec32low_start,
li.sec32low_end,
li.sec32init_start,
li.sec32init_end,
sec32all_start,
relocstr,
outRelSections(li.sections, 'code32flat_start'))
out = COMMONHEADER + out + COMMONTRAILER + """
ENTRY(%s)
PHDRS
{
text PT_LOAD AT ( code32flat_start ) ;
}
""" % (li.entrysym.name,)
outfile = open(out32flat, 'w')
outfile.write(out)
outfile.close()
######################################################################
# Detection of unused sections and init sections
######################################################################
# Visit all sections reachable from a given set of start sections
def findReachable(anchorsections, checkreloc, data):
anchorsections = dict([(section, []) for section in anchorsections])
pending = list(anchorsections)
while pending:
section = pending.pop()
for reloc in section.relocs:
chain = anchorsections[section] + [section.name]
if not checkreloc(reloc, section, data, chain):
continue
nextsection = reloc.symbol.section
if nextsection not in anchorsections:
anchorsections[nextsection] = chain
pending.append(nextsection)
return anchorsections
# Find "runtime" sections (ie, not init only sections).
def checkRuntime(reloc, rsection, data, chain):
section = reloc.symbol.section
if section is None or '.init.' in section.name:
return 0
if '.data.varinit.' in section.name:
print("ERROR: %s is VARVERIFY32INIT but used from %s" % (
section.name, chain))
sys.exit(1)
return 1
# Find and keep the section associated with a symbol (if available).
def checkKeepSym(reloc, syms, fileid, isxref):
symbolname = reloc.symbolname
mustbecfunc = symbolname.startswith('_cfunc')
if mustbecfunc:
symprefix = '_cfunc' + fileid + '_'
if not symbolname.startswith(symprefix):
return 0
symbolname = symbolname[len(symprefix):]
symbol = syms.get(symbolname)
if (symbol is None or symbol.section is None
or symbol.section.name.startswith('.discard.')):
return 0
isdestcfunc = (symbol.section.name.startswith('.text.')
and not symbol.section.name.startswith('.text.asm.'))
if ((mustbecfunc and not isdestcfunc)
or (not mustbecfunc and isdestcfunc and isxref)):
return 0
reloc.symbol = symbol
return 1
# Resolve a relocation and check if it should be kept in the final binary.
def checkKeep(reloc, section, symbols, chain):
ret = checkKeepSym(reloc, symbols[section.fileid], section.fileid, 0)
if ret:
return ret
# Not in primary sections - it may be a cross 16/32 reference
for fileid in ('16', '32seg', '32flat'):
if fileid != section.fileid:
ret = checkKeepSym(reloc, symbols[fileid], fileid, 1)
if ret:
return ret
return 0
######################################################################
# Startup and input parsing
######################################################################
class Section:
name = size = align = fileid = relocs = None
finalloc = finalsegloc = category = None
class Reloc:
offset = type = symbolname = symbol = None
class Symbol:
name = offset = section = None
# Read in output from objdump
def parseObjDump(file, fileid):
# sections = [section, ...]
sections = []
sectionmap = {}
# symbols[symbolname] = symbol
symbols = {}
state = None
for line in file.readlines():
line = line.rstrip()
if line == 'Sections:':
state = 'section'
continue
if line == 'SYMBOL TABLE:':
state = 'symbol'
continue
if line.startswith('RELOCATION RECORDS FOR ['):
sectionname = line[24:-2]
if sectionname.startswith('.debug_'):
# Skip debugging sections (to reduce parsing time)
state = None
continue
state = 'reloc'
relocsection = sectionmap[sectionname]
continue
if state == 'section':
try:
idx, name, size, vma, lma, fileoff, align = line.split()
if align[:3] != '2**':
continue
section = Section()
section.name = name
section.size = int(size, 16)
section.align = 2**int(align[3:])
section.fileid = fileid
section.relocs = []
sections.append(section)
sectionmap[name] = section
except ValueError:
pass
continue
if state == 'symbol':
try:
parts = line[17:].split()
if len(parts) == 3:
sectionname, size, name = parts
elif len(parts) == 4 and parts[2] == '.hidden':
sectionname, size, hidden, name = parts
else:
continue
symbol = Symbol()
symbol.size = int(size, 16)
symbol.offset = int(line[:8], 16)
symbol.name = name
symbol.section = sectionmap.get(sectionname)
symbols[name] = symbol
except ValueError:
pass
continue
if state == 'reloc':
try:
off, type, symbolname = line.split()
reloc = Reloc()
reloc.offset = int(off, 16)
reloc.type = type
reloc.symbolname = symbolname
reloc.symbol = symbols.get(symbolname)
if reloc.symbol is None:
# Some binutils (2.20.1) give section name instead
# of a symbol - create a dummy symbol.
reloc.symbol = symbol = Symbol()
symbol.size = 0
symbol.offset = 0
symbol.name = symbolname
symbol.section = sectionmap.get(symbolname)
symbols[symbolname] = symbol
relocsection.relocs.append(reloc)
except ValueError:
pass
return sections, symbols
# Parser for constants in simple C header files.
def scanconfig(file):
f = open(file, 'r')
opts = {}
for l in f.readlines():
parts = l.split()
if len(parts) != 3:
continue
if parts[0] != '#define':
continue
value = parts[2]
if value.isdigit() or (value.startswith('0x') and value[2:].isdigit()):
value = int(value, 0)
opts[parts[1]] = value
return opts
def main():
# Get output name
in16, in32seg, in32flat, cfgfile, out16, out32seg, out32flat = sys.argv[1:]
# Read in the objdump information
infile16 = open(in16, 'r')
infile32seg = open(in32seg, 'r')
infile32flat = open(in32flat, 'r')
# infoX = (sections, symbols)
info16 = parseObjDump(infile16, '16')
info32seg = parseObjDump(infile32seg, '32seg')
info32flat = parseObjDump(infile32flat, '32flat')
# Read kconfig config file
config = scanconfig(cfgfile)
# Figure out which sections to keep.
allsections = info16[0] + info32seg[0] + info32flat[0]
symbols = {'16': info16[1], '32seg': info32seg[1], '32flat': info32flat[1]}
if config.get('CONFIG_COREBOOT'):
entrysym = symbols['16'].get('entry_elf')
elif config.get('CONFIG_CSM'):
entrysym = symbols['16'].get('entry_csm')
else:
entrysym = symbols['16'].get('reset_vector')
anchorsections = [entrysym.section] + [
section for section in allsections
if section.name.startswith('.fixedaddr.')]
keepsections = findReachable(anchorsections, checkKeep, symbols)
sections = [section for section in allsections if section in keepsections]
# Separate 32bit flat into runtime, init, and special variable parts
anchorsections = [
section for section in sections
if ('.data.varlow.' in section.name or '.data.varfseg.' in section.name
or '.fixedaddr.' in section.name or '.runtime.' in section.name)]
runtimesections = findReachable(anchorsections, checkRuntime, None)
for section in sections:
if section.name.startswith('.data.varlow.'):
section.category = '32low'
elif section.name.startswith('.data.varfseg.'):
section.category = '32fseg'
elif section.name.startswith('.text.32fseg.'):
section.category = '32textfseg'
elif section.name.startswith('.fixedaddr.'):
section.category = 'fixed'
elif section.fileid == '32flat' and section not in runtimesections:
section.category = '32init'
else:
section.category = section.fileid
# Determine the final memory locations of each kept section.
genreloc = '_reloc_abs_start' in symbols['32flat']
li = doLayout(sections, config, genreloc)
# Exported symbols
li.varlowsyms = [symbol for symbol in symbols['32flat'].values()
if (symbol.section is not None
and symbol.section.finalloc is not None
and '.data.varlow.' in symbol.section.name
and symbol.name != symbol.section.name)]
li.entrysym = entrysym
# Write out linker script files.
writeLinkerScripts(li, out16, out32seg, out32flat)
if __name__ == '__main__':
main()
|
gijs/AutobahnPython | refs/heads/master | autobahn/autobahn/prefixmap.py | 15 | ###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
class PrefixMap:
"""
Provides a two-way mapping between CURIEs (Compact URI Expressions) and
full URIs. See http://www.w3.org/TR/curie/.
"""
def __init__(self):
self.index = {}
self.rindex = {}
## add a couple of well-known prefixes
##
#self.set("owl", "http://www.w3.org/2002/07/owl#")
#self.set("rdf", "http://www.w3.org/1999/02/22-rdf-syntax-ns#")
#self.set("rdfs", "http://www.w3.org/2000/01/rdf-schema#")
#self.set("rdfa", "http://www.w3.org/ns/rdfa#")
#self.set("xhv", "http://www.w3.org/1999/xhtml/vocab#")
#self.set("xml", "http://www.w3.org/XML/1998/namespace")
#self.set("xsd", "http://www.w3.org/2001/XMLSchema#")
def get(self, prefix):
"""
Returns the URI for the prefix or None if prefix has no mapped URI.
:param prefix: Prefix to map.
:type prefix: str
:returns: str -- Mapped URI for prefix or None.
"""
return self.index.get(prefix, None)
def set(self, prefix, uri):
"""
Set mapping of prefix to URI.
:param prefix: Prefix to be mapped.
:type prefix: str
:param uri: URI the prefix is to be mapped to.
:type uri: str
"""
self.index[prefix] = uri
self.rindex[uri] = prefix
def setDefault(self, uri):
"""
Set default URI mapping of empty prefix (prefix of length 0).
:param uri: URI the empty prefix to be mapped to (i.e. :label should map to uri:label).
:type uri: str
"""
self.set("", uri)
def remove(self, prefix):
"""
Remove mapping of prefix to URI.
:param prefix: Prefix for which mapping should be removed.
:type prefix: str
"""
uri = self.index.get(prefix, None)
if uri:
del self.index[prefix]
del self.rindex[uri]
def resolve(self, curie):
"""
Resolve given CURIE to full URI.
:param curie: CURIE (i.e. "rdf:label").
:type curie: str
:returns: str -- Full URI for CURIE or None.
"""
i = curie.find(":")
if i > 0:
prefix = curie[:i]
if self.index.has_key(prefix):
return self.index[prefix] + curie[i+1:]
return None
def resolveOrPass(self, curieOrUri):
"""
Resolve given CURIE/URI and return string verbatim if cannot be resolved.
:param curieOrUri: CURIE or URI.
:type curieOrUri: str
:returns: str -- Full URI for CURIE or original string.
"""
u = self.resolve(curieOrUri)
if u:
return u
else:
return curieOrUri
def shrink(self, uri):
"""
Shrink given URI to CURIE. If no appropriate prefix mapping is available,
return original URI.
:param uri: URI to shrink.
:type uri: str
:returns str -- CURIE or original URI.
"""
for i in xrange(len(uri), 1, -1):
u = uri[:i]
p = self.rindex.get(u, None)
if p:
return p + ":" + uri[i:]
return uri
if __name__ == '__main__':
m = PrefixMap()
print m.resolve("http://www.w3.org/1999/02/22-rdf-syntax-ns#label")
print m.resolve("rdf:label")
print m.resolve("foobar:label")
print m.shrink("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
print m.shrink("http://www.w3.org/1999/02/22-rdf-syntax-ns#label")
print m.shrink("http://foobar.org#label")
|
fabioz/Pydev | refs/heads/master | plugins/org.python.pydev.jython/Lib/mutex.py | 243 | """Mutual exclusion -- for use with module sched
A mutex has two pieces of state -- a 'locked' bit and a queue.
When the mutex is not locked, the queue is empty.
Otherwise, the queue contains 0 or more (function, argument) pairs
representing functions (or methods) waiting to acquire the lock.
When the mutex is unlocked while the queue is not empty,
the first queue entry is removed and its function(argument) pair called,
implying it now has the lock.
Of course, no multi-threading is implied -- hence the funny interface
for lock, where a function is called once the lock is acquired.
"""
from warnings import warnpy3k
warnpy3k("the mutex module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
from collections import deque
class mutex:
def __init__(self):
"""Create a new mutex -- initially unlocked."""
self.locked = False
self.queue = deque()
def test(self):
"""Test the locked bit of the mutex."""
return self.locked
def testandset(self):
"""Atomic test-and-set -- grab the lock if it is not set,
return True if it succeeded."""
if not self.locked:
self.locked = True
return True
else:
return False
def lock(self, function, argument):
"""Lock a mutex, call the function with supplied argument
when it is acquired. If the mutex is already locked, place
function and argument in the queue."""
if self.testandset():
function(argument)
else:
self.queue.append((function, argument))
def unlock(self):
"""Unlock a mutex. If the queue is not empty, call the next
function with its argument."""
if self.queue:
function, argument = self.queue.popleft()
function(argument)
else:
self.locked = False
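# --- Illustrative usage sketch (added for clarity; not part of the original
# module). It shows the callback protocol described in the module docstring:
# the second lock() call is queued until the first holder calls unlock().
if __name__ == '__main__':
    m = mutex()
    def first(arg):
        print 'first acquired the lock with', arg
    def second(arg):
        print 'second acquired the lock with', arg
    m.lock(first, 1)    # lock is free: first(1) runs immediately
    m.lock(second, 2)   # lock is held: (second, 2) is queued
    m.unlock()          # dequeues and calls second(2)
    m.unlock()          # queue empty: the locked bit is cleared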
|
saisrisathya/whatsapps | refs/heads/master | yowsup/layers/protocol_groups/protocolentities/test_iq_groups.py | 61 | from yowsup.layers.protocol_iq.protocolentities.test_iq import IqProtocolEntityTest
class GroupsIqProtocolEntityTest(IqProtocolEntityTest):
pass
|
4022321818/2015cd_midterm2 | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/sre_parse.py | 630 | #
# Secret Labs' Regular Expression Engine
#
# convert re-style regular expression to sre pattern
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# XXX: show string offset and offending character for all errors
import sys
from sre_constants import *
from _sre import MAXREPEAT
SPECIAL_CHARS = ".\\[{()*+?^$|"
REPEAT_CHARS = "*+?{"
DIGITS = set("0123456789")
OCTDIGITS = set("01234567")
HEXDIGITS = set("0123456789abcdefABCDEF")
WHITESPACE = set(" \t\n\r\v\f")
ESCAPES = {
r"\a": (LITERAL, ord("\a")),
r"\b": (LITERAL, ord("\b")),
r"\f": (LITERAL, ord("\f")),
r"\n": (LITERAL, ord("\n")),
r"\r": (LITERAL, ord("\r")),
r"\t": (LITERAL, ord("\t")),
r"\v": (LITERAL, ord("\v")),
r"\\": (LITERAL, ord("\\"))
}
CATEGORIES = {
r"\A": (AT, AT_BEGINNING_STRING), # start of string
r"\b": (AT, AT_BOUNDARY),
r"\B": (AT, AT_NON_BOUNDARY),
r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
r"\Z": (AT, AT_END_STRING), # end of string
}
FLAGS = {
# standard flags
"i": SRE_FLAG_IGNORECASE,
"L": SRE_FLAG_LOCALE,
"m": SRE_FLAG_MULTILINE,
"s": SRE_FLAG_DOTALL,
"x": SRE_FLAG_VERBOSE,
# extensions
"a": SRE_FLAG_ASCII,
"t": SRE_FLAG_TEMPLATE,
"u": SRE_FLAG_UNICODE,
}
class Pattern:
# master pattern object. keeps track of global attributes
def __init__(self):
self.flags = 0
self.open = []
self.groups = 1
self.groupdict = {}
def opengroup(self, name=None):
gid = self.groups
self.groups = gid + 1
if name is not None:
ogid = self.groupdict.get(name, None)
if ogid is not None:
raise error("redefinition of group name %s as group %d; "
"was group %d" % (repr(name), gid, ogid))
self.groupdict[name] = gid
self.open.append(gid)
return gid
def closegroup(self, gid):
self.open.remove(gid)
def checkgroup(self, gid):
return gid < self.groups and gid not in self.open
class SubPattern:
# a subpattern, in intermediate form
def __init__(self, pattern, data=None):
self.pattern = pattern
if data is None:
data = []
self.data = data
self.width = None
def __iter__(self):
return iter(self.data)
def dump(self, level=0):
nl = 1
seqtypes = (tuple, list)
for op, av in self.data:
print(level*" " + op, end=' '); nl = 0
if op == "in":
# member sublanguage
print(); nl = 1
for op, a in av:
print((level+1)*" " + op, a)
elif op == "branch":
print(); nl = 1
i = 0
for a in av[1]:
if i > 0:
print(level*" " + "or")
a.dump(level+1); nl = 1
i = i + 1
elif isinstance(av, seqtypes):
for a in av:
if isinstance(a, SubPattern):
if not nl: print()
a.dump(level+1); nl = 1
else:
print(a, end=' ') ; nl = 0
else:
print(av, end=' ') ; nl = 0
if not nl: print()
def __repr__(self):
return repr(self.data)
def __len__(self):
return len(self.data)
def __delitem__(self, index):
del self.data[index]
def __getitem__(self, index):
if isinstance(index, slice):
return SubPattern(self.pattern, self.data[index])
return self.data[index]
def __setitem__(self, index, code):
self.data[index] = code
def insert(self, index, code):
self.data.insert(index, code)
def append(self, code):
self.data.append(code)
def getwidth(self):
# determine the width (min, max) for this subpattern
if self.width:
return self.width
lo = hi = 0
UNITCODES = (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY)
REPEATCODES = (MIN_REPEAT, MAX_REPEAT)
for op, av in self.data:
if op is BRANCH:
i = sys.maxsize
j = 0
for av in av[1]:
l, h = av.getwidth()
i = min(i, l)
j = max(j, h)
lo = lo + i
hi = hi + j
elif op is CALL:
i, j = av.getwidth()
lo = lo + i
hi = hi + j
elif op is SUBPATTERN:
i, j = av[1].getwidth()
lo = lo + i
hi = hi + j
elif op in REPEATCODES:
i, j = av[2].getwidth()
lo = lo + int(i) * av[0]
hi = hi + int(j) * av[1]
elif op in UNITCODES:
lo = lo + 1
hi = hi + 1
elif op == SUCCESS:
break
self.width = int(min(lo, sys.maxsize)), int(min(hi, sys.maxsize))
return self.width
class Tokenizer:
def __init__(self, string):
self.istext = isinstance(string, str)
self.string = string
self.index = 0
self.__next()
def __next(self):
if self.index >= len(self.string):
self.next = None
return
char = self.string[self.index:self.index+1]
# Special case for the str8, since indexing returns a integer
# XXX This is only needed for test_bug_926075 in test_re.py
if char and not self.istext:
char = chr(char[0])
if char == "\\":
try:
c = self.string[self.index + 1]
except IndexError:
raise error("bogus escape (end of line)")
if not self.istext:
c = chr(c)
char = char + c
self.index = self.index + len(char)
self.next = char
def match(self, char, skip=1):
if char == self.next:
if skip:
self.__next()
return 1
return 0
def get(self):
this = self.next
self.__next()
return this
def getwhile(self, n, charset):
result = ''
for _ in range(n):
c = self.next
if c not in charset:
break
result += c
self.__next()
return result
def tell(self):
return self.index, self.next
def seek(self, index):
self.index, self.next = index
def isident(char):
return "a" <= char <= "z" or "A" <= char <= "Z" or char == "_"
def isdigit(char):
return "0" <= char <= "9"
def isname(name):
# check that group name is a valid string
if not isident(name[0]):
return False
for char in name[1:]:
if not isident(char) and not isdigit(char):
return False
return True
def _class_escape(source, escape):
# handle escape code inside character class
code = ESCAPES.get(escape)
if code:
return code
code = CATEGORIES.get(escape)
if code and code[0] == IN:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape (exactly two digits)
escape += source.getwhile(2, HEXDIGITS)
if len(escape) != 4:
raise ValueError
return LITERAL, int(escape[2:], 16) & 0xff
elif c == "u" and source.istext:
# unicode escape (exactly four digits)
escape += source.getwhile(4, HEXDIGITS)
if len(escape) != 6:
raise ValueError
return LITERAL, int(escape[2:], 16)
elif c == "U" and source.istext:
# unicode escape (exactly eight digits)
escape += source.getwhile(8, HEXDIGITS)
if len(escape) != 10:
raise ValueError
c = int(escape[2:], 16)
chr(c) # raise ValueError for invalid code
return LITERAL, c
elif c in OCTDIGITS:
# octal escape (up to three digits)
escape += source.getwhile(2, OCTDIGITS)
return LITERAL, int(escape[1:], 8) & 0xff
elif c in DIGITS:
raise ValueError
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error("bogus escape: %s" % repr(escape))
def _escape(source, escape, state):
# handle escape code in expression
code = CATEGORIES.get(escape)
if code:
return code
code = ESCAPES.get(escape)
if code:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape
escape += source.getwhile(2, HEXDIGITS)
if len(escape) != 4:
raise ValueError
return LITERAL, int(escape[2:], 16) & 0xff
elif c == "u" and source.istext:
# unicode escape (exactly four digits)
escape += source.getwhile(4, HEXDIGITS)
if len(escape) != 6:
raise ValueError
return LITERAL, int(escape[2:], 16)
elif c == "U" and source.istext:
# unicode escape (exactly eight digits)
escape += source.getwhile(8, HEXDIGITS)
if len(escape) != 10:
raise ValueError
c = int(escape[2:], 16)
chr(c) # raise ValueError for invalid code
return LITERAL, c
elif c == "0":
# octal escape
escape += source.getwhile(2, OCTDIGITS)
return LITERAL, int(escape[1:], 8) & 0xff
elif c in DIGITS:
# octal escape *or* decimal group reference (sigh)
if source.next in DIGITS:
escape = escape + source.get()
if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
source.next in OCTDIGITS):
# got three octal digits; this is an octal escape
escape = escape + source.get()
return LITERAL, int(escape[1:], 8) & 0xff
# not an octal escape, so this is a group reference
group = int(escape[1:])
if group < state.groups:
if not state.checkgroup(group):
raise error("cannot refer to open group")
return GROUPREF, group
raise ValueError
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error("bogus escape: %s" % repr(escape))
def _parse_sub(source, state, nested=1):
# parse an alternation: a|b|c
items = []
itemsappend = items.append
sourcematch = source.match
while 1:
itemsappend(_parse(source, state))
if sourcematch("|"):
continue
if not nested:
break
if not source.next or sourcematch(")", 0):
break
else:
raise error("pattern not properly closed")
if len(items) == 1:
return items[0]
subpattern = SubPattern(state)
subpatternappend = subpattern.append
# check if all items share a common prefix
while 1:
prefix = None
for item in items:
if not item:
break
if prefix is None:
prefix = item[0]
elif item[0] != prefix:
break
else:
# all subitems start with a common "prefix".
# move it out of the branch
for item in items:
del item[0]
subpatternappend(prefix)
continue # check next one
break
# check if the branch can be replaced by a character set
for item in items:
if len(item) != 1 or item[0][0] != LITERAL:
break
else:
# we can store this as a character set instead of a
# branch (the compiler may optimize this even more)
set = []
setappend = set.append
for item in items:
setappend(item[0])
subpatternappend((IN, set))
return subpattern
subpattern.append((BRANCH, (None, items)))
return subpattern
def _parse_sub_cond(source, state, condgroup):
item_yes = _parse(source, state)
if source.match("|"):
item_no = _parse(source, state)
if source.match("|"):
raise error("conditional backref with more than two branches")
else:
item_no = None
if source.next and not source.match(")", 0):
raise error("pattern not properly closed")
subpattern = SubPattern(state)
subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no)))
return subpattern
_PATTERNENDERS = set("|)")
_ASSERTCHARS = set("=!<")
_LOOKBEHINDASSERTCHARS = set("=!")
_REPEATCODES = set([MIN_REPEAT, MAX_REPEAT])
def _parse(source, state):
# parse a simple pattern
subpattern = SubPattern(state)
# precompute constants into local variables
subpatternappend = subpattern.append
sourceget = source.get
sourcematch = source.match
_len = len
PATTERNENDERS = _PATTERNENDERS
ASSERTCHARS = _ASSERTCHARS
LOOKBEHINDASSERTCHARS = _LOOKBEHINDASSERTCHARS
REPEATCODES = _REPEATCODES
while 1:
if source.next in PATTERNENDERS:
break # end of subpattern
this = sourceget()
if this is None:
break # end of pattern
if state.flags & SRE_FLAG_VERBOSE:
# skip whitespace and comments
if this in WHITESPACE:
continue
if this == "#":
while 1:
this = sourceget()
if this in (None, "\n"):
break
continue
if this and this[0] not in SPECIAL_CHARS:
subpatternappend((LITERAL, ord(this)))
elif this == "[":
# character set
set = []
setappend = set.append
## if sourcematch(":"):
## pass # handle character classes
if sourcematch("^"):
setappend((NEGATE, None))
# check remaining characters
start = set[:]
while 1:
this = sourceget()
if this == "]" and set != start:
break
elif this and this[0] == "\\":
code1 = _class_escape(source, this)
elif this:
code1 = LITERAL, ord(this)
else:
raise error("unexpected end of regular expression")
if sourcematch("-"):
# potential range
this = sourceget()
if this == "]":
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
setappend((LITERAL, ord("-")))
break
elif this:
if this[0] == "\\":
code2 = _class_escape(source, this)
else:
code2 = LITERAL, ord(this)
if code1[0] != LITERAL or code2[0] != LITERAL:
raise error("bad character range")
lo = code1[1]
hi = code2[1]
if hi < lo:
raise error("bad character range")
setappend((RANGE, (lo, hi)))
else:
raise error("unexpected end of regular expression")
else:
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
# XXX: <fl> should move set optimization to compiler!
if _len(set)==1 and set[0][0] is LITERAL:
subpatternappend(set[0]) # optimization
elif _len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL:
subpatternappend((NOT_LITERAL, set[1][1])) # optimization
else:
# XXX: <fl> should add charmap optimization here
subpatternappend((IN, set))
elif this and this[0] in REPEAT_CHARS:
# repeat previous item
if this == "?":
min, max = 0, 1
elif this == "*":
min, max = 0, MAXREPEAT
elif this == "+":
min, max = 1, MAXREPEAT
elif this == "{":
if source.next == "}":
subpatternappend((LITERAL, ord(this)))
continue
here = source.tell()
min, max = 0, MAXREPEAT
lo = hi = ""
while source.next in DIGITS:
lo = lo + source.get()
if sourcematch(","):
while source.next in DIGITS:
hi = hi + sourceget()
else:
hi = lo
if not sourcematch("}"):
subpatternappend((LITERAL, ord(this)))
source.seek(here)
continue
if lo:
min = int(lo)
if min >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if hi:
max = int(hi)
if max >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if max < min:
raise error("bad repeat interval")
else:
raise error("not supported")
# figure out which item to repeat
if subpattern:
item = subpattern[-1:]
else:
item = None
if not item or (_len(item) == 1 and item[0][0] == AT):
raise error("nothing to repeat")
if item[0][0] in REPEATCODES:
raise error("multiple repeat")
if sourcematch("?"):
subpattern[-1] = (MIN_REPEAT, (min, max, item))
else:
subpattern[-1] = (MAX_REPEAT, (min, max, item))
elif this == ".":
subpatternappend((ANY, None))
elif this == "(":
group = 1
name = None
condgroup = None
if sourcematch("?"):
group = 0
# options
if sourcematch("P"):
# python extensions
if sourcematch("<"):
# named group: skip forward to end of name
name = ""
while 1:
char = sourceget()
if char is None:
raise error("unterminated name")
if char == ">":
break
name = name + char
group = 1
if not name:
raise error("missing group name")
if not isname(name):
raise error("bad character in group name")
elif sourcematch("="):
# named backreference
name = ""
while 1:
char = sourceget()
if char is None:
raise error("unterminated name")
if char == ")":
break
name = name + char
if not name:
raise error("missing group name")
if not isname(name):
raise error("bad character in group name")
gid = state.groupdict.get(name)
if gid is None:
raise error("unknown group name")
subpatternappend((GROUPREF, gid))
continue
else:
char = sourceget()
if char is None:
raise error("unexpected end of pattern")
raise error("unknown specifier: ?P%s" % char)
elif sourcematch(":"):
# non-capturing group
group = 2
elif sourcematch("#"):
# comment
while 1:
if source.next is None or source.next == ")":
break
sourceget()
if not sourcematch(")"):
raise error("unbalanced parenthesis")
continue
elif source.next in ASSERTCHARS:
# lookahead assertions
char = sourceget()
dir = 1
if char == "<":
if source.next not in LOOKBEHINDASSERTCHARS:
raise error("syntax error")
dir = -1 # lookbehind
char = sourceget()
p = _parse_sub(source, state)
if not sourcematch(")"):
raise error("unbalanced parenthesis")
if char == "=":
subpatternappend((ASSERT, (dir, p)))
else:
subpatternappend((ASSERT_NOT, (dir, p)))
continue
elif sourcematch("("):
# conditional backreference group
condname = ""
while 1:
char = sourceget()
if char is None:
raise error("unterminated name")
if char == ")":
break
condname = condname + char
group = 2
if not condname:
raise error("missing group name")
if isname(condname):
condgroup = state.groupdict.get(condname)
if condgroup is None:
raise error("unknown group name")
else:
try:
condgroup = int(condname)
except ValueError:
raise error("bad character in group name")
else:
# flags
if source.next not in FLAGS:
raise error("unexpected end of pattern")
while source.next in FLAGS:
state.flags = state.flags | FLAGS[sourceget()]
if group:
# parse group contents
if group == 2:
# anonymous group
group = None
else:
group = state.opengroup(name)
if condgroup:
p = _parse_sub_cond(source, state, condgroup)
else:
p = _parse_sub(source, state)
if not sourcematch(")"):
raise error("unbalanced parenthesis")
if group is not None:
state.closegroup(group)
subpatternappend((SUBPATTERN, (group, p)))
else:
while 1:
char = sourceget()
if char is None:
raise error("unexpected end of pattern")
if char == ")":
break
raise error("unknown extension")
elif this == "^":
subpatternappend((AT, AT_BEGINNING))
elif this == "$":
subpattern.append((AT, AT_END))
elif this and this[0] == "\\":
code = _escape(source, this, state)
subpatternappend(code)
else:
raise error("parser error")
return subpattern
def fix_flags(src, flags):
# Check and fix flags according to the type of pattern (str or bytes)
if isinstance(src, str):
if not flags & SRE_FLAG_ASCII:
flags |= SRE_FLAG_UNICODE
elif flags & SRE_FLAG_UNICODE:
raise ValueError("ASCII and UNICODE flags are incompatible")
else:
if flags & SRE_FLAG_UNICODE:
raise ValueError("can't use UNICODE flag with a bytes pattern")
return flags
def parse(str, flags=0, pattern=None):
# parse 're' pattern into list of (opcode, argument) tuples
source = Tokenizer(str)
if pattern is None:
pattern = Pattern()
pattern.flags = flags
pattern.str = str
p = _parse_sub(source, pattern, 0)
p.pattern.flags = fix_flags(str, p.pattern.flags)
tail = source.get()
if tail == ")":
raise error("unbalanced parenthesis")
elif tail:
raise error("bogus characters at end of regular expression")
if flags & SRE_FLAG_DEBUG:
p.dump()
if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE:
# the VERBOSE flag was switched on inside the pattern. to be
# on the safe side, we'll parse the whole thing again...
return parse(str, p.pattern.flags)
return p
def parse_template(source, pattern):
# parse 're' replacement string into list of literals and
# group references
s = Tokenizer(source)
sget = s.get
p = []
a = p.append
def literal(literal, p=p, pappend=a):
if p and p[-1][0] is LITERAL:
p[-1] = LITERAL, p[-1][1] + literal
else:
pappend((LITERAL, literal))
# the tokenizer decodes bytes input as latin-1, so characters can be
# rebuilt with chr() for both str and bytes replacement strings
makechar = chr
while 1:
this = sget()
if this is None:
break # end of replacement string
if this and this[0] == "\\":
# group
c = this[1:2]
if c == "g":
name = ""
if s.match("<"):
while 1:
char = sget()
if char is None:
raise error("unterminated group name")
if char == ">":
break
name = name + char
if not name:
raise error("missing group name")
try:
index = int(name)
if index < 0:
raise error("negative group number")
except ValueError:
if not isname(name):
raise error("bad character in group name")
try:
index = pattern.groupindex[name]
except KeyError:
raise IndexError("unknown group name")
a((MARK, index))
elif c == "0":
if s.next in OCTDIGITS:
this = this + sget()
if s.next in OCTDIGITS:
this = this + sget()
literal(makechar(int(this[1:], 8) & 0xff))
elif c in DIGITS:
isoctal = False
if s.next in DIGITS:
this = this + sget()
if (c in OCTDIGITS and this[2] in OCTDIGITS and
s.next in OCTDIGITS):
this = this + sget()
isoctal = True
literal(makechar(int(this[1:], 8) & 0xff))
if not isoctal:
a((MARK, int(this[1:])))
else:
try:
this = makechar(ESCAPES[this][1])
except KeyError:
pass
literal(this)
else:
literal(this)
# convert template to groups and literals lists
i = 0
groups = []
groupsappend = groups.append
literals = [None] * len(p)
if isinstance(source, str):
encode = lambda x: x
else:
# The tokenizer implicitly decodes bytes objects as latin-1, we must
# therefore re-encode the final representation.
encode = lambda x: x.encode('latin-1')
for c, s in p:
if c is MARK:
groupsappend((i, s))
# literal[i] is already None
else:
literals[i] = encode(s)
i = i + 1
return groups, literals
def expand_template(template, match):
g = match.group
sep = match.string[:0]
groups, literals = template
literals = literals[:]
try:
for index, group in groups:
literals[index] = s = g(group)
if s is None:
raise error("unmatched group")
except IndexError:
raise error("invalid group reference")
return sep.join(literals)
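# --- Illustrative usage sketch (added for clarity; not part of the original
# module): parse() turns a pattern string into a SubPattern tree of
# (opcode, argument) pairs, which sre_compile later lowers to byte code.
if __name__ == '__main__':
    tree = parse(r"a(b|c)*\d")
    tree.dump()             # prints the nested opcode tree
    print(tree.getwidth())  # (min, max) match width; min is 2 here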
|
osynge/pmpman | refs/heads/master | pmpmanager/__init__.py | 1 | import pkg_resources
import sys
# If there is a conflicting non-egg module,
# i.e. an older standard system module installed,
# then replace it with this requirement
def replace_dist(requirement):
try:
return pkg_resources.require(requirement)
except pkg_resources.VersionConflict:
e = sys.exc_info()[1]
dist=e.args[0]
req=e.args[1]
if dist.key == req.key and not dist.location.endswith('.egg'):
del pkg_resources.working_set.by_key[dist.key]
# We assume there is no need to adjust sys.path
# and the associated pkg_resources.working_set.entries
return pkg_resources.require(requirement)
replace_dist("SQLALchemy >= 0.6.3")
import cli as cli
|
OS2World/APP-INTERNET-torpak_2 | refs/heads/master | Lib/calendar.py | 3 | """Calendar printing functions
Note when comparing these calendars to the ones printed by cal(1): By
default, these calendars have Monday as the first day of the week, and
Sunday as the last (the European convention). Use setfirstweekday() to
set the first day of the week (0=Monday, 6=Sunday)."""
import datetime
__all__ = ["error","setfirstweekday","firstweekday","isleap",
"leapdays","weekday","monthrange","monthcalendar",
"prmonth","month","prcal","calendar","timegm",
"month_name", "month_abbr", "day_name", "day_abbr"]
# Exception raised for bad input (with string parameter for details)
error = ValueError
# Constants for months referenced later
January = 1
February = 2
# Number of days per month (except for February in leap years)
mdays = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
# This module used to have hard-coded lists of day and month names, as
# English strings. The classes following emulate a read-only version of
# that, but supply localized names. Note that the values are computed
# fresh on each call, in case the user changes locale between calls.
class _localized_month:
def __init__(self, format):
self.format = format
def __getitem__(self, i):
data = [datetime.date(2001, j, 1).strftime(self.format)
for j in range(1, 13)]
data.insert(0, "")
return data[i]
def __len__(self):
return 13
class _localized_day:
def __init__(self, format):
self.format = format
def __getitem__(self, i):
# January 1, 2001, was a Monday.
data = [datetime.date(2001, 1, j+1).strftime(self.format)
for j in range(7)]
return data[i]
def __len__(self):
return 7
# Full and abbreviated names of weekdays
day_name = _localized_day('%A')
day_abbr = _localized_day('%a')
# Full and abbreviated names of months (1-based arrays!!!)
month_name = _localized_month('%B')
month_abbr = _localized_month('%b')
# Constants for weekdays
(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY) = range(7)
_firstweekday = 0 # 0 = Monday, 6 = Sunday
def firstweekday():
return _firstweekday
def setfirstweekday(weekday):
"""Set weekday (Monday=0, Sunday=6) to start each week."""
global _firstweekday
if not MONDAY <= weekday <= SUNDAY:
raise ValueError, \
'bad weekday number; must be 0 (Monday) to 6 (Sunday)'
_firstweekday = weekday
def isleap(year):
"""Return 1 for leap years, 0 for non-leap years."""
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def leapdays(y1, y2):
"""Return number of leap years in range [y1, y2).
Assume y1 <= y2."""
y1 -= 1
y2 -= 1
return (y2//4 - y1//4) - (y2//100 - y1//100) + (y2//400 - y1//400)
def weekday(year, month, day):
"""Return weekday (0-6 ~ Mon-Sun) for year (1970-...), month (1-12),
day (1-31)."""
return datetime.date(year, month, day).weekday()
def monthrange(year, month):
"""Return weekday (0-6 ~ Mon-Sun) and number of days (28-31) for
year, month."""
if not 1 <= month <= 12:
raise ValueError, 'bad month number'
day1 = weekday(year, month, 1)
ndays = mdays[month] + (month == February and isleap(year))
return day1, ndays
def monthcalendar(year, month):
"""Return a matrix representing a month's calendar.
Each row represents a week; days outside this month are zero."""
day1, ndays = monthrange(year, month)
rows = []
r7 = range(7)
day = (_firstweekday - day1 + 6) % 7 - 5 # for leading 0's in first week
while day <= ndays:
row = [0, 0, 0, 0, 0, 0, 0]
for i in r7:
if 1 <= day <= ndays: row[i] = day
day = day + 1
rows.append(row)
return rows
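# Illustrative example (added for clarity; not part of the original module):
# with the default Monday-first convention, monthcalendar(2004, 2) returns
# five rows of seven ints, zero-filled outside the month; Feb 1, 2004 was a
# Sunday, so the first row is [0, 0, 0, 0, 0, 0, 1].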
def prweek(theweek, width):
"""Print a single week (no newline)."""
print week(theweek, width),
def week(theweek, width):
"""Returns a single week in a string (no newline)."""
days = []
for day in theweek:
if day == 0:
s = ''
else:
s = '%2i' % day # right-align single-digit days
days.append(s.center(width))
return ' '.join(days)
def weekheader(width):
"""Return a header for a week."""
if width >= 9:
names = day_name
else:
names = day_abbr
days = []
for i in range(_firstweekday, _firstweekday + 7):
days.append(names[i%7][:width].center(width))
return ' '.join(days)
def prmonth(theyear, themonth, w=0, l=0):
"""Print a month's calendar."""
print month(theyear, themonth, w, l),
def month(theyear, themonth, w=0, l=0):
"""Return a month's calendar string (multi-line)."""
w = max(2, w)
l = max(1, l)
s = ((month_name[themonth] + ' ' + `theyear`).center(
7 * (w + 1) - 1).rstrip() +
'\n' * l + weekheader(w).rstrip() + '\n' * l)
for aweek in monthcalendar(theyear, themonth):
s = s + week(aweek, w).rstrip() + '\n' * l
return s[:-l] + '\n'
# Spacing of month columns for 3-column year calendar
_colwidth = 7*3 - 1 # Amount printed by prweek()
_spacing = 6 # Number of spaces between columns
def format3c(a, b, c, colwidth=_colwidth, spacing=_spacing):
"""Prints 3-column formatting for year calendars"""
print format3cstring(a, b, c, colwidth, spacing)
def format3cstring(a, b, c, colwidth=_colwidth, spacing=_spacing):
"""Returns a string formatted from 3 strings, centered within 3 columns."""
return (a.center(colwidth) + ' ' * spacing + b.center(colwidth) +
' ' * spacing + c.center(colwidth))
def prcal(year, w=0, l=0, c=_spacing):
"""Print a year's calendar."""
print calendar(year, w, l, c),
def calendar(year, w=0, l=0, c=_spacing):
"""Returns a year's calendar as a multi-line string."""
w = max(2, w)
l = max(1, l)
c = max(2, c)
colwidth = (w + 1) * 7 - 1
s = `year`.center(colwidth * 3 + c * 2).rstrip() + '\n' * l
header = weekheader(w)
header = format3cstring(header, header, header, colwidth, c).rstrip()
for q in range(January, January+12, 3):
s = (s + '\n' * l +
format3cstring(month_name[q], month_name[q+1], month_name[q+2],
colwidth, c).rstrip() +
'\n' * l + header + '\n' * l)
data = []
height = 0
for amonth in range(q, q + 3):
cal = monthcalendar(year, amonth)
if len(cal) > height:
height = len(cal)
data.append(cal)
for i in range(height):
weeks = []
for cal in data:
if i >= len(cal):
weeks.append('')
else:
weeks.append(week(cal[i], w))
s = s + format3cstring(weeks[0], weeks[1], weeks[2],
colwidth, c).rstrip() + '\n' * l
return s[:-l] + '\n'
EPOCH = 1970
_EPOCH_ORD = datetime.date(EPOCH, 1, 1).toordinal()
def timegm(tuple):
"""Unrelated but handy function to calculate Unix timestamp from GMT."""
year, month, day, hour, minute, second = tuple[:6]
days = datetime.date(year, month, 1).toordinal() - _EPOCH_ORD + day - 1
hours = days*24 + hour
minutes = hours*60 + minute
seconds = minutes*60 + second
return seconds
|
ar45/django | refs/heads/master | tests/migrations/migrations_test_apps/__init__.py | 12133432 | |
ammarkhann/FinalSeniorCode | refs/heads/master | lib/python2.7/site-packages/notebook/services/kernels/__init__.py | 12133432 | |
digideskio/merchant | refs/heads/master | billing/models/paylane_models.py | 7 | # -*- coding: utf-8 -*-
# vim:tabstop=4:expandtab:sw=4:softtabstop=4
from django.db import models
class PaylaneTransaction(models.Model):
transaction_date = models.DateTimeField(auto_now_add=True)
amount = models.FloatField()
customer_name = models.CharField(max_length=200)
customer_email = models.CharField(max_length=200)
product = models.CharField(max_length=200)
success = models.BooleanField(default=False)
error_code = models.IntegerField(default=0)
error_description = models.CharField(max_length=300, blank=True)
acquirer_error = models.CharField(max_length=40, blank=True)
acquirer_description = models.CharField(max_length=300, blank=True)
def __unicode__(self):
return u'Transaction for %s (%s)' % (self.customer_name, self.customer_email)
class Meta:
app_label = __name__.split(".")[0]
class PaylaneAuthorization(models.Model):
sale_authorization_id = models.BigIntegerField(db_index=True)
first_authorization = models.BooleanField(default=False)
transaction = models.OneToOneField(PaylaneTransaction)
def __unicode__(self):
return u'Authorization: %s' % (self.sale_authorization_id)
class Meta:
app_label = __name__.split(".")[0]
|
minhoryang/locust | refs/heads/master | examples/basic.py | 39 | from locust import HttpLocust, TaskSet, task
def index(l):
l.client.get("/")
def stats(l):
l.client.get("/stats/requests")
class UserTasks(TaskSet):
# one can specify tasks like this
tasks = [index, stats]
# but it might be convenient to use the @task decorator
@task
def page404(self):
self.client.get("/does_not_exist")
class WebsiteUser(HttpLocust):
"""
Locust user class that does requests to the locust web server running on localhost
"""
host = "http://127.0.0.1:8089"
min_wait = 2000
max_wait = 5000
task_set = UserTasks
|
adrian-ionescu/apache-spark | refs/heads/master | examples/src/main/python/ml/word2vec_example.py | 122 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import Word2Vec
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("Word2VecExample")\
.getOrCreate()
# $example on$
# Input data: Each row is a bag of words from a sentence or document.
documentDF = spark.createDataFrame([
("Hi I heard about Spark".split(" "), ),
("I wish Java could use case classes".split(" "), ),
("Logistic regression models are neat".split(" "), )
], ["text"])
# Learn a mapping from words to Vectors.
word2Vec = Word2Vec(vectorSize=3, minCount=0, inputCol="text", outputCol="result")
model = word2Vec.fit(documentDF)
result = model.transform(documentDF)
for row in result.collect():
text, vector = row
print("Text: [%s] => \nVector: %s\n" % (", ".join(text), str(vector)))
# $example off$
spark.stop()
|
ilpianista/ansible | refs/heads/devel | lib/ansible/parsing/yaml/loader.py | 191 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
try:
from _yaml import CParser, CEmitter
HAVE_PYYAML_C = True
except ImportError:
HAVE_PYYAML_C = False
from yaml.resolver import Resolver
from ansible.parsing.yaml.constructor import AnsibleConstructor
if HAVE_PYYAML_C:
class AnsibleLoader(CParser, AnsibleConstructor, Resolver):
def __init__(self, stream, file_name=None, vault_secrets=None):
CParser.__init__(self, stream)
AnsibleConstructor.__init__(self, file_name=file_name, vault_secrets=vault_secrets)
Resolver.__init__(self)
else:
from yaml.composer import Composer
from yaml.reader import Reader
from yaml.scanner import Scanner
from yaml.parser import Parser
class AnsibleLoader(Reader, Scanner, Parser, Composer, AnsibleConstructor, Resolver):
def __init__(self, stream, file_name=None, vault_secrets=None):
Reader.__init__(self, stream)
Scanner.__init__(self)
Parser.__init__(self)
Composer.__init__(self)
AnsibleConstructor.__init__(self, file_name=file_name, vault_secrets=vault_secrets)
Resolver.__init__(self)
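# Illustrative usage sketch (added for clarity; not part of the original
# module). Both branches expose the same interface, so a caller can do e.g.
#
#     data = AnsibleLoader(stream, file_name='play.yml').get_single_data()
#
# where 'play.yml' is a hypothetical file name; get_single_data() comes from
# the underlying PyYAML constructor machinery and returns the parsed document
# with Ansible's custom types applied.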
|
bitmazk/django-dynamic-content | refs/heads/master | dynamic_content/migrations/0004_auto_20170805_0152.py | 1 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2017-08-05 01:52
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('dynamic_content', '0003_auto_20150701_0755'),
]
operations = [
migrations.AlterModelOptions(
name='dynamiccontenttranslation',
options={'default_permissions': (), 'managed': True},
),
]
|
fayf/pyload | refs/heads/stable | module/plugins/crypter/XupPl.py | 15 | # -*- coding: utf-8 -*-
from module.plugins.internal.Crypter import Crypter
class XupPl(Crypter):
__name__ = "XupPl"
__type__ = "crypter"
__version__ = "0.12"
__status__ = "testing"
__pattern__ = r'https?://(?:[^/]*\.)?xup\.pl/.+'
__config__ = [("use_subfolder" , "bool", "Save package to subfolder" , True),
("subfolder_per_pack", "bool", "Create a subfolder for each package", True)]
__description__ = """Xup.pl decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("z00nx", "[email protected]")]
def decrypt(self, pyfile):
header = self.load(pyfile.url, just_header=True)
if 'location' in header:
self.urls = [header['location']]
else:
self.fail(_("Unable to find link"))
|
cnsoft/kbengine-cocos2dx | refs/heads/cocos2dx-cnsoft | kbe/res/scripts/common/Lib/xml/sax/saxutils.py | 4 | """\
A library of useful helper classes to the SAX classes, for the
convenience of application and driver writers.
"""
import os, urllib.parse, urllib.request
import io
import codecs
from . import handler
from . import xmlreader
def __dict_replace(s, d):
"""Replace substrings of a string using a dictionary."""
for key, value in d.items():
s = s.replace(key, value)
return s
def escape(data, entities={}):
"""Escape &, <, and > in a string of data.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
# must do ampersand first
data = data.replace("&", "&")
data = data.replace(">", ">")
data = data.replace("<", "<")
if entities:
data = __dict_replace(data, entities)
return data
def unescape(data, entities={}):
"""Unescape &, <, and > in a string of data.
You can unescape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
data = data.replace("<", "<")
data = data.replace(">", ">")
if entities:
data = __dict_replace(data, entities)
# must do ampersand last
return data.replace("&", "&")
def quoteattr(data, entities={}):
"""Escape and quote an attribute value.
Escape &, <, and > in a string of data, then quote it for use as
an attribute value. The \" character will be escaped as well, if
necessary.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
entities = entities.copy()
entities.update({'\n': ' ', '\r': ' ', '\t':'	'})
data = escape(data, entities)
if '"' in data:
if "'" in data:
data = '"%s"' % data.replace('"', """)
else:
data = "'%s'" % data
else:
data = '"%s"' % data
return data
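# Illustrative examples for the escaping helpers above (added for clarity;
# not part of the original module):
#   escape('<a & b>')             -> '&lt;a &amp; b&gt;'
#   unescape('&lt;a &amp; b&gt;') -> '<a & b>'
#   quoteattr('He said "hi"')     -> the single-quoted string 'He said "hi"'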
def _gettextwriter(out, encoding):
if out is None:
import sys
return sys.stdout
if isinstance(out, io.TextIOBase):
# use a text writer as is
return out
if isinstance(out, (codecs.StreamWriter, codecs.StreamReaderWriter)):
# use a codecs stream writer as is
return out
# wrap a binary writer with TextIOWrapper
if isinstance(out, io.RawIOBase):
# Keep the original file open when the TextIOWrapper is
# destroyed
class _wrapper:
__class__ = out.__class__
def __getattr__(self, name):
return getattr(out, name)
buffer = _wrapper()
buffer.close = lambda: None
else:
# This is to handle passed objects that aren't in the
# IOBase hierarchy, but just have a write method
buffer = io.BufferedIOBase()
buffer.writable = lambda: True
buffer.write = out.write
try:
# TextIOWrapper uses this methods to determine
# if BOM (for UTF-16, etc) should be added
buffer.seekable = out.seekable
buffer.tell = out.tell
except AttributeError:
pass
return io.TextIOWrapper(buffer, encoding=encoding,
errors='xmlcharrefreplace',
newline='\n',
write_through=True)
class XMLGenerator(handler.ContentHandler):
def __init__(self, out=None, encoding="iso-8859-1", short_empty_elements=False):
handler.ContentHandler.__init__(self)
out = _gettextwriter(out, encoding)
self._write = out.write
self._flush = out.flush
self._ns_contexts = [{}] # contains uri -> prefix dicts
self._current_context = self._ns_contexts[-1]
self._undeclared_ns_maps = []
self._encoding = encoding
self._short_empty_elements = short_empty_elements
self._pending_start_element = False
def _qname(self, name):
"""Builds a qualified name from a (ns_url, localname) pair"""
if name[0]:
# Per http://www.w3.org/XML/1998/namespace, The 'xml' prefix is
# bound by definition to http://www.w3.org/XML/1998/namespace. It
# does not need to be declared and will not usually be found in
# self._current_context.
if 'http://www.w3.org/XML/1998/namespace' == name[0]:
return 'xml:' + name[1]
# The name is in a non-empty namespace
prefix = self._current_context[name[0]]
if prefix:
# If it is not the default namespace, prepend the prefix
return prefix + ":" + name[1]
# Return the unqualified name
return name[1]
def _finish_pending_start_element(self,endElement=False):
if self._pending_start_element:
self._write('>')
self._pending_start_element = False
# ContentHandler methods
def startDocument(self):
self._write('<?xml version="1.0" encoding="%s"?>\n' %
self._encoding)
def endDocument(self):
self._flush()
def startPrefixMapping(self, prefix, uri):
self._ns_contexts.append(self._current_context.copy())
self._current_context[uri] = prefix
self._undeclared_ns_maps.append((prefix, uri))
def endPrefixMapping(self, prefix):
self._current_context = self._ns_contexts[-1]
del self._ns_contexts[-1]
def startElement(self, name, attrs):
self._finish_pending_start_element()
self._write('<' + name)
for (name, value) in attrs.items():
self._write(' %s=%s' % (name, quoteattr(value)))
if self._short_empty_elements:
self._pending_start_element = True
else:
self._write(">")
def endElement(self, name):
if self._pending_start_element:
self._write('/>')
self._pending_start_element = False
else:
self._write('</%s>' % name)
def startElementNS(self, name, qname, attrs):
self._finish_pending_start_element()
self._write('<' + self._qname(name))
for prefix, uri in self._undeclared_ns_maps:
if prefix:
self._write(' xmlns:%s="%s"' % (prefix, uri))
else:
self._write(' xmlns="%s"' % uri)
self._undeclared_ns_maps = []
for (name, value) in attrs.items():
self._write(' %s=%s' % (self._qname(name), quoteattr(value)))
if self._short_empty_elements:
self._pending_start_element = True
else:
self._write(">")
def endElementNS(self, name, qname):
if self._pending_start_element:
self._write('/>')
self._pending_start_element = False
else:
self._write('</%s>' % self._qname(name))
def characters(self, content):
if content:
self._finish_pending_start_element()
self._write(escape(content))
def ignorableWhitespace(self, content):
if content:
self._finish_pending_start_element()
self._write(content)
def processingInstruction(self, target, data):
self._finish_pending_start_element()
self._write('<?%s %s?>' % (target, data))
class XMLFilterBase(xmlreader.XMLReader):
"""This class is designed to sit between an XMLReader and the
client application's event handlers. By default, it does nothing
but pass requests up to the reader and events on to the handlers
unmodified, but subclasses can override specific methods to modify
the event stream or the configuration requests as they pass
through."""
def __init__(self, parent = None):
xmlreader.XMLReader.__init__(self)
self._parent = parent
# ErrorHandler methods
def error(self, exception):
self._err_handler.error(exception)
def fatalError(self, exception):
self._err_handler.fatalError(exception)
def warning(self, exception):
self._err_handler.warning(exception)
# ContentHandler methods
def setDocumentLocator(self, locator):
self._cont_handler.setDocumentLocator(locator)
def startDocument(self):
self._cont_handler.startDocument()
def endDocument(self):
self._cont_handler.endDocument()
def startPrefixMapping(self, prefix, uri):
self._cont_handler.startPrefixMapping(prefix, uri)
def endPrefixMapping(self, prefix):
self._cont_handler.endPrefixMapping(prefix)
def startElement(self, name, attrs):
self._cont_handler.startElement(name, attrs)
def endElement(self, name):
self._cont_handler.endElement(name)
def startElementNS(self, name, qname, attrs):
self._cont_handler.startElementNS(name, qname, attrs)
def endElementNS(self, name, qname):
self._cont_handler.endElementNS(name, qname)
def characters(self, content):
self._cont_handler.characters(content)
def ignorableWhitespace(self, chars):
self._cont_handler.ignorableWhitespace(chars)
def processingInstruction(self, target, data):
self._cont_handler.processingInstruction(target, data)
def skippedEntity(self, name):
self._cont_handler.skippedEntity(name)
# DTDHandler methods
def notationDecl(self, name, publicId, systemId):
self._dtd_handler.notationDecl(name, publicId, systemId)
def unparsedEntityDecl(self, name, publicId, systemId, ndata):
self._dtd_handler.unparsedEntityDecl(name, publicId, systemId, ndata)
# EntityResolver methods
def resolveEntity(self, publicId, systemId):
return self._ent_handler.resolveEntity(publicId, systemId)
# XMLReader methods
def parse(self, source):
self._parent.setContentHandler(self)
self._parent.setErrorHandler(self)
self._parent.setEntityResolver(self)
self._parent.setDTDHandler(self)
self._parent.parse(source)
def setLocale(self, locale):
self._parent.setLocale(locale)
def getFeature(self, name):
return self._parent.getFeature(name)
def setFeature(self, name, state):
self._parent.setFeature(name, state)
def getProperty(self, name):
return self._parent.getProperty(name)
def setProperty(self, name, value):
self._parent.setProperty(name, value)
# XMLFilter methods
def getParent(self):
return self._parent
def setParent(self, parent):
self._parent = parent
# --- Utility functions
def prepare_input_source(source, base=""):
"""This function takes an InputSource and an optional base URL and
returns a fully resolved InputSource object ready for reading."""
if isinstance(source, str):
source = xmlreader.InputSource(source)
elif hasattr(source, "read"):
f = source
source = xmlreader.InputSource()
source.setByteStream(f)
if hasattr(f, "name"):
source.setSystemId(f.name)
if source.getByteStream() is None:
sysid = source.getSystemId()
basehead = os.path.dirname(os.path.normpath(base))
sysidfilename = os.path.join(basehead, sysid)
if os.path.isfile(sysidfilename):
source.setSystemId(sysidfilename)
f = open(sysidfilename, "rb")
else:
source.setSystemId(urllib.parse.urljoin(base, sysid))
f = urllib.request.urlopen(source.getSystemId())
source.setByteStream(f)
return source
|
sbusso/rethinkdb | refs/heads/next | test/memcached_workloads/multi_serial_mix.py | 29 | #!/usr/bin/env python
# Copyright 2010-2012 RethinkDB, all rights reserved.
from __future__ import print_function
import sys, os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import multiprocessing, time, pickle
import memcached_workload_common, serial_mix
from vcoptparse import *
def child(opts, log_path, load, save):
# This is run in a separate process
import sys
# TODO: this overwrites existing log files
sys.stdout = sys.stderr = file(log_path, "w")
if load is None:
clone, deleted = {}, set()
else:
print("Loading from %r..." % load)
with open(load) as f:
clone, deleted = pickle.load(f)
print("Starting test against server at %s..." % opts["address"])
with memcached_workload_common.make_memcache_connection(opts) as mc:
serial_mix.test(opts, mc, clone, deleted)
if save is not None:
print("Saving to %r..." % save)
with open(save, "w") as f:
pickle.dump((clone, deleted), f)
print("Done with test.")
op = serial_mix.option_parser_for_serial_mix()
op["num_testers"] = IntFlag("--num-testers", 16)
op["load"] = StringFlag("--load", None)
op["save"] = StringFlag("--save", None)
opts = op.parse(sys.argv)
shutdown_grace_period = 15
tester_log_dir = "multi_serial_mix_out"
if not os.path.isdir(tester_log_dir): os.mkdir(tester_log_dir)
processes = []
try:
print("Starting %d child processes..." % opts["num_testers"])
print("Writing output from child processes to %r" % tester_log_dir)
for id in xrange(opts["num_testers"]):
log_path = os.path.join(tester_log_dir, "%d.txt" % id)
load_path = opts["load"] + "_%d" % id if opts["load"] is not None else None
save_path = opts["save"] + "_%d" % id if opts["save"] is not None else None
opts2 = dict(opts)
opts2["keysuffix"] = "_%d" % id # Prevent collisions between tests
process = multiprocessing.Process(target=child, args=(opts2, log_path, load_path, save_path))
process.start()
processes.append((process, id))
print("Waiting for child processes...")
start_time = time.time()
def time_remaining():
time_elapsed = time.time() - start_time
# Give subprocesses lots of extra time
return opts["duration"] * 2 - time_elapsed + 1
for process, id in processes:
tr = time_remaining()
if tr <= 0: tr = shutdown_grace_period
process.join(tr)
stuck = sorted(id for (process, id) in processes if process.is_alive())
failed = sorted(id for (process, id) in processes if not process.is_alive() and process.exitcode != 0)
if stuck or failed:
for id in stuck + failed:
with file(os.path.join(tester_log_dir, str(id) + ".txt")) as f:
for line in f:
sys.stdout.write(line)
if len(stuck) == opts["num_testers"]:
raise ValueError("All %d processes did not finish in time." % opts["num_testers"])
elif len(failed) == opts["num_testers"]:
raise ValueError("All %d processes failed." % opts["num_testers"])
else:
raise ValueError(
"Of processes [0 ... %d], the following did not finish in time: "
"%s and the following failed: %s" % (opts["num_testers"] - 1, stuck, failed)
)
finally:
for (process, id) in processes:
if process.is_alive():
process.terminate()
print("Done.")
|
joyider/op_mon | refs/heads/master | tests/factories.py | 1 | # -*- coding: utf-8 -*-
"""Factories to help in tests."""
from factory import PostGenerationMethodCall, Sequence
from factory.alchemy import SQLAlchemyModelFactory
from op_mon.database import db
from op_mon.user.models import User
class BaseFactory(SQLAlchemyModelFactory):
"""Base factory."""
class Meta:
"""Factory configuration."""
abstract = True
sqlalchemy_session = db.session
class UserFactory(BaseFactory):
"""User factory."""
username = Sequence(lambda n: 'user{0}'.format(n))
email = Sequence(lambda n: 'user{0}@example.com'.format(n))
password = PostGenerationMethodCall('set_password', 'example')
active = True
class Meta:
"""Factory configuration."""
model = User
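# Illustrative usage sketch (added for clarity; not part of the original
# module). Inside a test with an active db.session, a hypothetical call is:
#
#     user = UserFactory()   # userN / [email protected], password 'example'
#     db.session.commit()
#
# Sequence hands each instance a fresh counter value, and
# PostGenerationMethodCall hashes the plain-text password via set_password().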
|
MalloyPower/parsing-python | refs/heads/master | front-end/testsuite-python-lib/Python-2.5/Lib/hmac.py | 1 | """HMAC (Keyed-Hashing for Message Authentication) Python module.
Implements the HMAC algorithm as described by RFC 2104.
"""
def _strxor(s1, s2):
"""Utility method. XOR the two strings s1 and s2 (must have same length).
"""
return "".join(map(lambda x, y: chr(ord(x) ^ ord(y)), s1, s2))
# The size of the digests returned by HMAC depends on the underlying
# hashing module used.
digest_size = None
# A unique object passed by HMAC.copy() to the HMAC constructor, in order
# that the latter return very quickly. HMAC("") in contrast is quite
# expensive.
_secret_backdoor_key = []
class HMAC:
"""RFC2104 HMAC class.
This supports the API for Cryptographic Hash Functions (PEP 247).
"""
def __init__(self, key, msg = None, digestmod = None):
"""Create a new HMAC object.
key: key for the keyed hash object.
msg: Initial input for the hash, if provided.
digestmod: A module supporting PEP 247. *OR*
A hashlib constructor returning a new hash object.
Defaults to hashlib.md5.
"""
if key is _secret_backdoor_key: # cheap
return
if digestmod is None:
import hashlib
digestmod = hashlib.md5
if callable(digestmod):
self.digest_cons = digestmod
else:
self.digest_cons = lambda d='': digestmod.new(d)
self.outer = self.digest_cons()
self.inner = self.digest_cons()
self.digest_size = self.inner.digest_size
blocksize = 64
ipad = "\x36" * blocksize
opad = "\x5C" * blocksize
if len(key) > blocksize:
key = self.digest_cons(key).digest()
key = key + chr(0) * (blocksize - len(key))
self.outer.update(_strxor(key, opad))
self.inner.update(_strxor(key, ipad))
if msg is not None:
self.update(msg)
## def clear(self):
## raise NotImplementedError, "clear() method not available in HMAC."
def update(self, msg):
"""Update this hashing object with the string msg.
"""
self.inner.update(msg)
def copy(self):
"""Return a separate copy of this hashing object.
An update to this copy won't affect the original object.
"""
other = HMAC(_secret_backdoor_key)
other.digest_cons = self.digest_cons
other.digest_size = self.digest_size
other.inner = self.inner.copy()
other.outer = self.outer.copy()
return other
def digest(self):
"""Return the hash value of this hashing object.
This returns a string containing 8-bit data. The object is
not altered in any way by this function; you can continue
updating the object after calling this function.
"""
h = self.outer.copy()
h.update(self.inner.digest())
return h.digest()
def hexdigest(self):
"""Like digest(), but returns a string of hexadecimal digits instead.
"""
return "".join([hex(ord(x))[2:].zfill(2)
for x in tuple(self.digest())])
def new(key, msg = None, digestmod = None):
"""Create a new hashing object and return it.
key: The starting key for the hash.
msg: if available, will immediately be hashed into the object's starting
state.
You can now feed arbitrary strings into the object using its update()
method, and can ask for the hash value at any time by calling its digest()
method.
"""
return HMAC(key, msg, digestmod)
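# --- Illustrative usage sketch (added for clarity; not part of the original
# module): compute an HMAC-MD5 tag over a short message.
if __name__ == '__main__':
    h = new('secret-key', 'the quick brown fox')
    print h.hexdigest()   # 32 hex digits; update() and digest() also work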
|
virajs/lxc | refs/heads/master | config/yum/lxc-patch.py | 64 | # Yum plugin to re-patch container rootfs after a yum update is done
#
# Copyright (C) 2012 Oracle
#
# Authors:
# Dwight Engen <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
#
import os
from fnmatch import fnmatch
from yum.plugins import TYPE_INTERACTIVE
from yum.plugins import PluginYumExit
requires_api_version = '2.0'
plugin_type = (TYPE_INTERACTIVE,)
def posttrans_hook(conduit):
pkgs = []
patch_required = False
# If we aren't root, we can't have updated anything
if os.geteuid():
return
# See what packages have files that were patched
confpkgs = conduit.confString('main', 'packages')
if not confpkgs:
return
tmp = confpkgs.split(",")
for confpkg in tmp:
pkgs.append(confpkg.strip())
conduit.info(2, "lxc-patch: checking if updated pkgs need patching...")
ts = conduit.getTsInfo()
for tsmem in ts.getMembers():
for pkg in pkgs:
if fnmatch(pkg, tsmem.po.name):
patch_required = True
if patch_required:
conduit.info(2, "lxc-patch: patching container...")
os.spawnlp(os.P_WAIT, "lxc-patch", "lxc-patch", "--patch", "/")
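# Illustrative configuration sketch (added for clarity; not part of the
# original plugin). Assuming the usual yum plugin config location, a file
# such as /etc/yum/pluginconf.d/lxc-patch.conf might contain:
#
#   [main]
#   enabled=1
#   packages=dhclient, initscripts, rpm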
|
bxshi/gem5 | refs/heads/master | src/sim/BaseTLB.py | 29 | # Copyright (c) 2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from m5.SimObject import SimObject
class BaseTLB(SimObject):
type = 'BaseTLB'
abstract = True
cxx_header = "sim/tlb.hh"
|
elventear/ansible | refs/heads/devel | test/integration/targets/module_utils/module_utils/sub/bar/bar.py | 298 | #!/usr/bin/env python
bar = "BAR FROM sub/bar/bar.py"
|
nin042/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/QueueStatusServer/loggers/recordbotevent.py | 122 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from config.logging import queue_log_duration
from model.queuelog import QueueLog
class RecordBotEvent(object):
@classmethod
def record_activity(cls, queue_name, bot_id):
queue_log = QueueLog.get_current(queue_name, queue_log_duration)
if queue_log and bot_id not in queue_log.bot_ids_seen:
queue_log.bot_ids_seen.append(bot_id)
queue_log.put()
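# A minimal usage sketch (editor's addition, not part of the original file):
# a request handler would record that a bot polled its queue, e.g.
#
#     RecordBotEvent.record_activity('commit-queue', 'cq-bot-01')
#
# Because record_activity() checks bot_ids_seen first, repeated polls within
# the same QueueLog window do not grow the list.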
|
hectorEU/GroupCast | refs/heads/master | examples/Remote/s1_clientrbb.py | 1 | '''
Basic remote example sending tell messages. CLIENT
@author: Daniel Barcelona Pons
'''
from pyactor.context import \
set_context, create_host, setRabbitCredentials, shutdown
if __name__ == "__main__":
setRabbitCredentials('daniel', 'passs')
set_context()
host = create_host('amqp://127.0.0.1:1679')
e1 = host.lookup_url('amqp://127.0.0.1:1277/echo1', 'Echo', 's1_server')
e1.echo('Hi there!') # TELL message
e1.echo('See ya!')
shutdown()
|
drericstrong/pypbe | refs/heads/master | tests/tests_core.py | 1 | # -*- coding: utf-8 -*-
"""
pypbe test_core
~~~~~~~~~~~~~~~~~~
A set of unit tests for pypbe.
:copyright: (c) 2017 Eric Strong.
:license: Refer to LICENSE.txt for more information.
"""
import unittest
from pypbe.core import PBE
# These tests are for the user-supplied inputs during PBE object construction
class TestPBEInput(unittest.TestCase):
def test_numdice_string_input(self):
with self.assertRaises(ValueError):
PBE("wrong", 6)
def test_uppercase_PBEmap(self):
pbe = PBE(3, 6, pbe_map="PF")
correct = {3: -16, 4: -12, 5: -9, 6: -6, 7: -4, 8: -2, 9: -1, 10: 0,
11: 1, 12: 2, 13: 3, 14: 5, 15: 7, 16: 10, 17: 13, 18: 17}
self.assertEqual(pbe.pbe_map, correct)
def test_pbemap_int_input(self):
with self.assertRaises(ValueError):
PBE(3, 6, pbe_map=4)
def test_custompbemap_int_input(self):
with self.assertRaises(TypeError):
PBE(3, 6, custom_pbe_map=4)
def test_keepdice_greater_numdice(self):
with self.assertRaises(ValueError):
PBE(3, 6, keep_dice=4)
def test_keepatt_greater_numatt(self):
with self.assertRaises(ValueError):
PBE(3, 6, num_attribute=6, keep_attribute=7)
def test_rerolls_greater_dicetype(self):
with self.assertRaises(ValueError):
PBE(3, 6, reroll=6)
def test_lowval_default(self):
with self.assertRaises(ValueError):
PBE(3, 6, add_val=-1)
with self.assertRaises(ValueError):
PBE(2, 6)
with self.assertRaises(ValueError):
PBE(5, 6, keep_dice=2)
with self.assertRaises(ValueError):
PBE(5, 6, keep_dice=3, add_val=-1)
def test_highval_default(self):
with self.assertRaises(ValueError):
PBE(3, 6, add_val=1)
with self.assertRaises(ValueError):
PBE(4, 6)
with self.assertRaises(ValueError):
PBE(5, 6, keep_dice=4)
with self.assertRaises(ValueError):
PBE(5, 6, keep_dice=3, add_val=1)
def test_lowval_highrolllimit(self):
with self.assertRaises(ValueError):
PBE(3, 6, roll_high_limit = 2)
with self.assertRaises(ValueError):
PBE(2, 6, add_val=2, roll_high_limit=3)
def test_highval_lowrolllimit(self):
with self.assertRaises(ValueError):
PBE(3, 6, roll_low_limit = 19)
with self.assertRaises(ValueError):
PBE(2, 6, add_val=1, roll_low_limit=14)
# These tests are for static methods in the main class, PBE
# More work needs to be done. I did some manual verification of the guts
# of _roll_array and _roll_arrays, but it might be worth separating these
# functions further into sub_functions for better testing.
class TestStaticPBE(unittest.TestCase):
def test_findPBEMapping_default(self):
test = PBE._find_pb_mapping('pf')
correct = {3: -16, 4: -12, 5: -9, 6: -6, 7: -4, 8: -2, 9: -1, 10: 0,
11: 1, 12: 2, 13: 3, 14: 5, 15: 7, 16: 10, 17: 13, 18: 17}
self.assertEqual(test, correct)
def test_findPBEMapping_known(self):
test1 = PBE._find_pb_mapping('pf')
correct1 = {3: -16, 4: -12, 5: -9, 6: -6, 7: -4, 8: -2, 9: -1, 10: 0,
11: 1, 12: 2, 13: 3, 14: 5, 15: 7, 16: 10, 17: 13, 18: 17}
self.assertEqual(test1, correct1)
test2 = PBE._find_pb_mapping('3e')
correct2 = {3: -7, 4: -5, 5: -3, 6: -2, 7: -1, 8: 0, 9: 1, 10: 2,
11: 3, 12: 4, 13: 5, 14: 6, 15: 8, 16: 10, 17: 13, 18: 16}
self.assertEqual(test2, correct2)
test3 = PBE._find_pb_mapping('3.5e')
self.assertEqual(test3, correct2)
test4 = PBE._find_pb_mapping('5e')
self.assertEqual(test4, correct2)
test5 = PBE._find_pb_mapping('4e')
correct5 = {3: -12, 4: -9, 5: -7, 6: -5, 7: -3, 8: -2, 9: -1, 10: 0,
11: 1, 12: 2, 13: 3, 14: 5, 15: 7, 16: 9, 17: 12, 18: 16}
self.assertEqual(test5, correct5)
# num_hist, num_dice, dice_type, add_val, num_ability, best_dice, reroll
def test_rollArray_numHist_length(self):
test = PBE._roll_array(100, 4, 6, 2, 6, 3, 0, None, None)
test_size = len(test)
correct = 100
self.assertEqual(test_size, correct)
# num_dice, dice_type, add_val, num_ability, best_ability, best_dice,
# reroll, num_arrays)
def test_constructTitle_normal(self):
test = PBE._construct_title(3, 6, 0, 6, 6, 3, 0, 1)
correct = "Sum 3d6, 6 Attrs"
self.assertEqual(test, correct)
def test_constructTitle_bestDice(self):
test = PBE._construct_title(4, 6, 0, 6, 6, 3, 0, 1)
correct = "Sum 4d6k3, 6 Attrs"
self.assertEqual(test, correct)
def test_constructTitle_add(self):
test = PBE._construct_title(4, 6, 1, 6, 6, 3, 0, 1)
correct = "Sum 4d6+1k3, 6 Attrs"
self.assertEqual(test, correct)
def test_constructTitle_extraAbility(self):
test = PBE._construct_title(4, 6, 1, 7, 7, 3, 0, 1)
correct = "Sum 4d6+1k3, 7 Attrs"
self.assertEqual(test, correct)
def test_constructTitle_bestAbility(self):
test = PBE._construct_title(4, 6, 1, 7, 6, 3, 0, 1)
correct = "Sum 4d6+1k3, 7k6 Attrs"
self.assertEqual(test, correct)
def test_constructTitle_reroll1s(self):
test = PBE._construct_title(4, 6, 1, 7, 6, 3, 1, 1)
correct = "Sum 4d6+1k3, 7k6 Attrs, Reroll 1s"
self.assertEqual(test, correct)
def test_constructTitle_reroll1s2s(self):
test = PBE._construct_title(4, 6, 1, 7, 6, 3, 2, 1)
correct = "Sum 4d6+1k3, 7k6 Attrs, Reroll 1s/2s"
self.assertEqual(test, correct)
def test_constructTitle_arrays(self):
test = PBE._construct_title(4, 6, 1, 7, 6, 3, 2, 4)
correct = "Sum 4d6+1k3, 7k6 Attrs, 4 Arrays, Reroll 1s/2s"
self.assertEqual(test, correct)
# These test methods actually instantiate a PBE class for various test cases.
# Unfortunately, since Monte Carlo simulation is stochastic, the same exact
# result isn't returned every time, which is awful for unit testing. This
# means that these tests have a (small) possibility of failing, even though
# I'm only checking the approximate output. These are also slow.
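# A possible mitigation (editor's note, assuming pypbe draws from numpy's
# global RNG): seeding before each roll would make the sampled arrays, and
# hence these assertions, reproducible, e.g.
#
#     import numpy as np
#     np.random.seed(12345)
#     pbe = PBE(3, 6)
#     pbe.roll_mc(int(10**6))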
class TestCasesPBE(unittest.TestCase):
def test_3d6(self):
pbe = PBE(3, 6)
pbe.roll_mc(int(10**6))
ar = pbe.arr_res
pb = pbe.pbe_res
correct_means_raw = [6.8, 8.5, 9.9, 11.1, 12.5, 14.2]
for ii, jj in zip(correct_means_raw, ar["means"]):
self.assertAlmostEqual(ii, jj, places=1)
correct_mean_pbe = 3.0
self.assertAlmostEqual(correct_mean_pbe, pb["means"], places=1)
def test_3d6_rolllowlimit(self):
pbe = PBE(3, 6, roll_low_limit=7)
pbe.roll_mc(int(10**6))
ar = pbe.arr_res
pb = pbe.pbe_res
correct_means_raw = [8.1, 9.2, 10.4, 11.5, 12.7, 14.4]
for ii, jj in zip(correct_means_raw, ar["means"]):
self.assertAlmostEqual(ii, jj, places=1)
correct_mean_pbe = 8.4
self.assertAlmostEqual(correct_mean_pbe, pb["means"], places=1)
def test_3d6_rollhighlimit(self):
pbe = PBE(3, 6, roll_high_limit=14)
pbe.roll_mc(int(10**6))
ar = pbe.arr_res
pb = pbe.pbe_res
correct_means_raw = [6.6, 8.3, 9.5, 10.7, 11.8, 12.9]
for ii, jj in zip(correct_means_raw, ar["means"]):
self.assertAlmostEqual(ii, jj, places=1)
correct_mean_pbe = -2.4
self.assertAlmostEqual(correct_mean_pbe, pb["means"], places=1)
def test_3d6_pbelowlimit(self):
pbe = PBE(3, 6, pbe_low_limit=5)
pbe.roll_mc(int(10**6))
ar = pbe.arr_res
pb = pbe.pbe_res
correct_means_raw = [7.8, 9.6, 10.9, 12.1, 13.5, 15.3]
for ii, jj in zip(correct_means_raw, ar["means"]):
self.assertAlmostEqual(ii, jj, places=1)
correct_mean_pbe = 12.7
self.assertAlmostEqual(correct_mean_pbe, pb["means"], places=1)
def test_3d6_pbehighlimit(self):
pbe = PBE(3, 6, pbe_high_limit=10)
pbe.roll_mc(int(10**6))
ar = pbe.arr_res
pb = pbe.pbe_res
correct_means_raw = [6.3, 8.1, 9.4, 10.6, 11.9, 13.7]
for ii, jj in zip(correct_means_raw, ar["means"]):
self.assertAlmostEqual(ii, jj, places=1)
correct_mean_pbe = -1.5
self.assertAlmostEqual(correct_mean_pbe, pb["means"], places=1)
def test_3d6_best3arrays(self):
pbe = PBE(3, 6, num_arrays=3)
pbe.roll_mc(int(10**6))
ar = pbe.arr_res
pb = pbe.pbe_res
correct_means_raw = [7.8, 9.5, 10.85, 12.1, 13.5, 15.3]
for ii, jj in zip(correct_means_raw, ar["means"]):
self.assertAlmostEqual(ii, jj, places=1)
correct_mean_pbe = 12.3
self.assertAlmostEqual(correct_mean_pbe, pb["means"], places=1)
def test_3d6_best3arrays_reroll1s(self):
pbe = PBE(3, 6, num_arrays=3, reroll=1)
pbe.roll_mc(int(10**6))
ar = pbe.arr_res
pb = pbe.pbe_res
correct_means_raw = [9.6, 11.1, 12.3, 13.4, 14.5, 16.0]
for ii, jj in zip(correct_means_raw, ar["means"]):
self.assertAlmostEqual(ii, jj, places=1)
correct_mean_pbe = 23.8
self.assertAlmostEqual(correct_mean_pbe, pb["means"], places=1)
def test_3d6_best6of7rolls(self):
pbe = PBE(3, 6, num_attribute=7, keep_attribute=6)
pbe.roll_mc(int(10**6))
ar = pbe.arr_res
pb = pbe.pbe_res
correct_means_raw = [8.2, 9.4, 10.5, 11.6, 12.8, 14.5]
for ii, jj in zip(correct_means_raw, ar["means"]):
self.assertAlmostEqual(ii, jj, places=1)
correct_mean_pbe = 9.1
self.assertAlmostEqual(correct_mean_pbe, pb["means"], places=1)
def test_4d6k3(self):
pbe = PBE(4, 6, keep_dice=3)
pbe.roll_mc(int(10**6))
ar = pbe.arr_res
pb = pbe.pbe_res
correct_means_raw = [8.5, 10.4, 11.8, 13.0, 14.2, 15.7]
for ii, jj in zip(correct_means_raw, ar["means"]):
self.assertAlmostEqual(ii, jj, places=1)
correct_mean_pbe = 18.85
self.assertAlmostEqual(correct_mean_pbe, pb["means"], places=1)
def test_4d6k3_best6of7rolls(self):
pbe = PBE(4, 6, keep_dice=3, num_attribute=7, keep_attribute=6)
pbe.roll_mc(int(10**6))
ar = pbe.arr_res
pb = pbe.pbe_res
correct_means_raw = [10.1, 11.3, 12.4, 13.4, 14.5, 15.9]
for ii, jj in zip(correct_means_raw, ar["means"]):
self.assertAlmostEqual(ii, jj, places=1)
correct_mean_pbe = 24.45
self.assertAlmostEqual(correct_mean_pbe, pb["means"], places=1)
def test_2d6p6(self):
pbe = PBE(2, 6, add_val=6)
pbe.roll_mc(int(10**6))
ar = pbe.arr_res
pb = pbe.pbe_res
correct_means_raw = [10.0, 11.4, 12.5, 13.5, 14.6, 16.0]
for ii, jj in zip(correct_means_raw, ar["means"]):
self.assertAlmostEqual(ii, jj, places=1)
correct_mean_pbe = 25.7
self.assertAlmostEqual(correct_mean_pbe, pb["means"], places=1)
def test_4d4p6k3_best6of7rolls(self):
pbe = PBE(4, 4, add_val=6, keep_dice=3,
num_attribute=7, keep_attribute=6)
pbe.roll_mc(int(10**6))
ar = pbe.arr_res
pb = pbe.pbe_res
correct_means_raw = [13.2, 14.0, 14.7, 15.4, 16.1, 17.0]
for ii, jj in zip(correct_means_raw, ar["means"]):
self.assertAlmostEqual(ii, jj, places=1)
correct_mean_pbe = 47.4
self.assertAlmostEqual(correct_mean_pbe, pb["means"], places=1)
if __name__ == '__main__':
unittest.main()
|
Allow2CEO/browser-ios | refs/heads/master | brave/node_modules/ad-block/vendor/depot_tools/third_party/pylint/checkers/newstyle.py | 66 | # Copyright (c) 2005-2014 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:[email protected]
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""check for new / old style related problems
"""
import sys
import astroid
from pylint.interfaces import IAstroidChecker, INFERENCE, INFERENCE_FAILURE, HIGH
from pylint.checkers import BaseChecker
from pylint.checkers.utils import (
check_messages,
has_known_bases,
node_frame_class,
)
MSGS = {
'E1001': ('Use of __slots__ on an old style class',
'slots-on-old-class',
'Used when an old style class uses the __slots__ attribute.',
{'maxversion': (3, 0)}),
'E1002': ('Use of super on an old style class',
'super-on-old-class',
'Used when an old style class uses the super builtin.',
{'maxversion': (3, 0)}),
    'E1003': ('Bad first argument %r given to super()',
              'bad-super-call',
              'Used when an argument other than the current class is given as \
              first argument of the super builtin.'),
'E1004': ('Missing argument to super()',
'missing-super-argument',
'Used when the super builtin didn\'t receive an \
argument.',
{'maxversion': (3, 0)}),
    'W1001': ('Use of "property" on an old style class',
              'property-on-old-class',
              'Used when Pylint detects the use of the builtin "property" \
              on an old style class while this relies on new style \
              class features.',
              {'maxversion': (3, 0)}),
    'C1001': ('Old-style class defined.',
              'old-style-class',
              'Used when a class is defined that does not inherit from another '
              'class and does not inherit explicitly from "object".',
              {'maxversion': (3, 0)})
}
class NewStyleConflictChecker(BaseChecker):
"""checks for usage of new style capabilities on old style classes and
    other new/old style conflict problems
* use of property, __slots__, super
* "super" usage
"""
__implements__ = (IAstroidChecker,)
# configuration section name
name = 'newstyle'
# messages
msgs = MSGS
priority = -2
# configuration options
options = ()
@check_messages('slots-on-old-class', 'old-style-class')
def visit_class(self, node):
""" Check __slots__ in old style classes and old
style class definition.
"""
if '__slots__' in node and not node.newstyle:
confidence = (INFERENCE if has_known_bases(node)
else INFERENCE_FAILURE)
self.add_message('slots-on-old-class', node=node,
confidence=confidence)
# The node type could be class, exception, metaclass, or
# interface. Presumably, the non-class-type nodes would always
# have an explicit base class anyway.
if not node.bases and node.type == 'class' and not node.metaclass():
# We use confidence HIGH here because this message should only ever
            # be emitted for classes at the root of the inheritance hierarchy.
self.add_message('old-style-class', node=node, confidence=HIGH)
@check_messages('property-on-old-class')
def visit_callfunc(self, node):
"""check property usage"""
parent = node.parent.frame()
if (isinstance(parent, astroid.Class) and
not parent.newstyle and
isinstance(node.func, astroid.Name)):
confidence = (INFERENCE if has_known_bases(parent)
else INFERENCE_FAILURE)
name = node.func.name
if name == 'property':
self.add_message('property-on-old-class', node=node,
confidence=confidence)
@check_messages('super-on-old-class', 'bad-super-call', 'missing-super-argument')
def visit_function(self, node):
"""check use of super"""
# ignore actual functions or method within a new style class
if not node.is_method():
return
klass = node.parent.frame()
for stmt in node.nodes_of_class(astroid.CallFunc):
if node_frame_class(stmt) != node_frame_class(node):
# Don't look down in other scopes.
continue
expr = stmt.func
if not isinstance(expr, astroid.Getattr):
continue
call = expr.expr
# skip the test if using super
if isinstance(call, astroid.CallFunc) and \
isinstance(call.func, astroid.Name) and \
call.func.name == 'super':
confidence = (INFERENCE if has_known_bases(klass)
else INFERENCE_FAILURE)
if not klass.newstyle:
# super should not be used on an old style class
self.add_message('super-on-old-class', node=node,
confidence=confidence)
else:
# super first arg should be the class
if not call.args and sys.version_info[0] == 3:
# unless Python 3
continue
try:
supcls = (call.args and next(call.args[0].infer())
or None)
except astroid.InferenceError:
continue
if supcls is None:
self.add_message('missing-super-argument', node=call,
confidence=confidence)
continue
if klass is not supcls:
name = None
                        # if supcls is not YES, then supcls was inferred
# and use its name. Otherwise, try to look
# for call.args[0].name
if supcls is not astroid.YES:
name = supcls.name
else:
if hasattr(call.args[0], 'name'):
name = call.args[0].name
if name is not None:
self.add_message('bad-super-call',
node=call,
args=(name, ),
confidence=confidence)
def register(linter):
"""required method to auto register this checker """
linter.register_checker(NewStyleConflictChecker(linter))
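# A minimal illustration (editor's sketch, not part of pylint) of Python 2
# code that triggers each message:
#
#     class Legacy:                          # C1001: old-style-class
#         __slots__ = ('x',)                 # E1001: slots-on-old-class
#         attr = property(lambda self: 1)    # W1001: property-on-old-class
#
#         def clone(self):
#             # E1002: super-on-old-class
#             return super(Legacy, self).clone()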
|
52ai/django-ccsds | refs/heads/master | django/contrib/gis/db/backends/mysql/schema.py | 448 | import logging
from django.contrib.gis.db.models.fields import GeometryField
from django.db.backends.mysql.schema import DatabaseSchemaEditor
from django.db.utils import OperationalError
logger = logging.getLogger('django.contrib.gis')
class MySQLGISSchemaEditor(DatabaseSchemaEditor):
sql_add_spatial_index = 'CREATE SPATIAL INDEX %(index)s ON %(table)s(%(column)s)'
sql_drop_spatial_index = 'DROP INDEX %(index)s ON %(table)s'
def __init__(self, *args, **kwargs):
super(MySQLGISSchemaEditor, self).__init__(*args, **kwargs)
self.geometry_sql = []
def skip_default(self, field):
return (
super(MySQLGISSchemaEditor, self).skip_default(field) or
# Geometry fields are stored as BLOB/TEXT and can't have defaults.
isinstance(field, GeometryField)
)
def column_sql(self, model, field, include_default=False):
column_sql = super(MySQLGISSchemaEditor, self).column_sql(model, field, include_default)
# MySQL doesn't support spatial indexes on NULL columns
if isinstance(field, GeometryField) and field.spatial_index and not field.null:
qn = self.connection.ops.quote_name
db_table = model._meta.db_table
self.geometry_sql.append(
self.sql_add_spatial_index % {
'index': qn(self._create_spatial_index_name(model, field)),
'table': qn(db_table),
'column': qn(field.column),
}
)
return column_sql
def create_model(self, model):
super(MySQLGISSchemaEditor, self).create_model(model)
self.create_spatial_indexes()
def add_field(self, model, field):
super(MySQLGISSchemaEditor, self).add_field(model, field)
self.create_spatial_indexes()
def remove_field(self, model, field):
if isinstance(field, GeometryField) and field.spatial_index:
qn = self.connection.ops.quote_name
sql = self.sql_drop_spatial_index % {
'index': qn(self._create_spatial_index_name(model, field)),
'table': qn(model._meta.db_table),
}
try:
self.execute(sql)
except OperationalError:
logger.error(
"Couldn't remove spatial index: %s (may be expected "
"if your storage engine doesn't support them)." % sql
)
super(MySQLGISSchemaEditor, self).remove_field(model, field)
def _create_spatial_index_name(self, model, field):
return '%s_%s_id' % (model._meta.db_table, field.column)
def create_spatial_indexes(self):
for sql in self.geometry_sql:
try:
self.execute(sql)
except OperationalError:
logger.error(
"Cannot create SPATIAL INDEX %s. Only MyISAM and (as of "
"MySQL 5.7.5) InnoDB support them." % sql
)
self.geometry_sql = []
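# A minimal illustration (editor's sketch; app and model names are invented):
# given a GIS model such as
#
#     class City(models.Model):
#         point = models.PointField(spatial_index=True)   # null=False
#
# create_model() also executes, roughly:
#
#     CREATE SPATIAL INDEX `myapp_city_point_id` ON `myapp_city`(`point`)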
|
elijah513/scikit-learn | refs/heads/master | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
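# A sketch of the omitted search (editor's addition; the grid below is
# illustrative, not the exact one used to pick the values above). It is left
# commented out to keep the example fast:
#
# from sklearn.grid_search import GridSearchCV
# param_grid = {'rbm__learning_rate': [0.01, 0.06, 0.1],
#               'rbm__n_components': [50, 100, 200],
#               'logistic__C': [1000.0, 6000.0, 10000.0]}
# search = GridSearchCV(classifier, param_grid, verbose=1)
# search.fit(X_train, Y_train)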
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
|
googleads/google-ads-python | refs/heads/master | google/ads/googleads/v6/services/services/paid_organic_search_term_view_service/transports/base.py | 1 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.ads.googleads.v6.resources.types import (
paid_organic_search_term_view,
)
from google.ads.googleads.v6.services.types import (
paid_organic_search_term_view_service,
)
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class PaidOrganicSearchTermViewServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for PaidOrganicSearchTermViewService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_paid_organic_search_term_view: gapic_v1.method.wrap_method(
self.get_paid_organic_search_term_view,
default_timeout=None,
client_info=client_info,
),
}
@property
def get_paid_organic_search_term_view(
self,
) -> typing.Callable[
[
paid_organic_search_term_view_service.GetPaidOrganicSearchTermViewRequest
],
paid_organic_search_term_view.PaidOrganicSearchTermView,
]:
raise NotImplementedError
__all__ = ("PaidOrganicSearchTermViewServiceTransport",)
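# A minimal sketch (editor's addition, not shipped with the library) of how a
# concrete transport would satisfy the abstract property: it must expose a
# callable mapping the request message to the resource message.
#
#     class InMemoryTransport(PaidOrganicSearchTermViewServiceTransport):
#         @property
#         def get_paid_organic_search_term_view(self):
#             def _call(request, retry=None, timeout=None, metadata=()):
#                 raise NotImplementedError("wire up a gRPC channel here")
#             return _call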
|
jrumball/PyKoans | refs/heads/master | python 3/runner/runner_tests/test_helper.py | 32 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from runner import helper
class TestHelper(unittest.TestCase):
def test_that_get_class_name_works_with_a_string_instance(self):
self.assertEqual("str", helper.cls_name(str()))
def test_that_get_class_name_works_with_a_4(self):
self.assertEquals("int", helper.cls_name(4))
def test_that_get_class_name_works_with_a_tuple(self):
self.assertEquals("tuple", helper.cls_name((3,"pie", [])))
|
sgallagher/anaconda | refs/heads/master | pyanaconda/modules/storage/bootloader/image.py | 3 | #
# Copyright (C) 2019 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
__all__ = ["BootLoaderImage", "LinuxBootLoaderImage"]
class BootLoaderImage(object):
"""A base class for boot loader images.
Suitable for non-linux OS images.
"""
def __init__(self, device=None, label=None, short=None):
"""Initialize the image.
:param device: an instance of StorageDevice
:param label: a label string
:param short: a shorter label string
"""
self.label = label
self.short_label = short
self.device = device
class LinuxBootLoaderImage(BootLoaderImage):
"""Linux-OS image."""
def __init__(self, device=None, label=None, short=None, version=None):
"""Initialize the image.
:param device: an instance of StorageDevice
:param label: a label string
:param short: a shorter label string
:param version: a kernel version string
"""
super().__init__(device=device, label=label)
self.label = label
self.short_label = short
self.device = device
self.version = version
self._kernel = None
self._initrd = None
@property
def kernel(self):
"""Kernel filename.
:return: a filename string
"""
filename = self._kernel
if self.version and not filename:
filename = "vmlinuz-%s" % self.version
return filename
@property
def initrd(self):
"""Initrd filename.
:return: a filename string
"""
filename = self._initrd
if self.version and not filename:
filename = "initramfs-%s.img" % self.version
return filename
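# A minimal usage sketch (editor's addition): kernel and initrd filenames are
# derived from the version when not set explicitly. A real caller would pass a
# blivet StorageDevice as device; omitting it suffices for illustration.
if __name__ == "__main__":
    image = LinuxBootLoaderImage(label="Fedora", short="fedora", version="5.3.7")
    assert image.kernel == "vmlinuz-5.3.7"
    assert image.initrd == "initramfs-5.3.7.img"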
|
asnorkin/sentiment_analysis | refs/heads/master | site/lib/python2.7/site-packages/scipy/cluster/tests/test_vq.py | 14 |
from __future__ import division, print_function, absolute_import
import warnings
import sys
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
TestCase, run_module_suite, assert_raises, assert_allclose, assert_equal,
assert_)
from numpy.testing.decorators import skipif
from scipy.cluster.vq import (kmeans, kmeans2, py_vq, py_vq2, vq, whiten,
ClusterError, _krandinit)
from scipy.cluster import _vq
TESTDATA_2D = np.array([
-2.2, 1.17, -1.63, 1.69, -2.04, 4.38, -3.09, 0.95, -1.7, 4.79, -1.68, 0.68,
-2.26, 3.34, -2.29, 2.55, -1.72, -0.72, -1.99, 2.34, -2.75, 3.43, -2.45,
2.41, -4.26, 3.65, -1.57, 1.87, -1.96, 4.03, -3.01, 3.86, -2.53, 1.28,
-4.0, 3.95, -1.62, 1.25, -3.42, 3.17, -1.17, 0.12, -3.03, -0.27, -2.07,
-0.55, -1.17, 1.34, -2.82, 3.08, -2.44, 0.24, -1.71, 2.48, -5.23, 4.29,
-2.08, 3.69, -1.89, 3.62, -2.09, 0.26, -0.92, 1.07, -2.25, 0.88, -2.25,
2.02, -4.31, 3.86, -2.03, 3.42, -2.76, 0.3, -2.48, -0.29, -3.42, 3.21,
-2.3, 1.73, -2.84, 0.69, -1.81, 2.48, -5.24, 4.52, -2.8, 1.31, -1.67,
-2.34, -1.18, 2.17, -2.17, 2.82, -1.85, 2.25, -2.45, 1.86, -6.79, 3.94,
-2.33, 1.89, -1.55, 2.08, -1.36, 0.93, -2.51, 2.74, -2.39, 3.92, -3.33,
2.99, -2.06, -0.9, -2.83, 3.35, -2.59, 3.05, -2.36, 1.85, -1.69, 1.8,
-1.39, 0.66, -2.06, 0.38, -1.47, 0.44, -4.68, 3.77, -5.58, 3.44, -2.29,
2.24, -1.04, -0.38, -1.85, 4.23, -2.88, 0.73, -2.59, 1.39, -1.34, 1.75,
-1.95, 1.3, -2.45, 3.09, -1.99, 3.41, -5.55, 5.21, -1.73, 2.52, -2.17,
0.85, -2.06, 0.49, -2.54, 2.07, -2.03, 1.3, -3.23, 3.09, -1.55, 1.44,
-0.81, 1.1, -2.99, 2.92, -1.59, 2.18, -2.45, -0.73, -3.12, -1.3, -2.83,
0.2, -2.77, 3.24, -1.98, 1.6, -4.59, 3.39, -4.85, 3.75, -2.25, 1.71, -3.28,
3.38, -1.74, 0.88, -2.41, 1.92, -2.24, 1.19, -2.48, 1.06, -1.68, -0.62,
-1.3, 0.39, -1.78, 2.35, -3.54, 2.44, -1.32, 0.66, -2.38, 2.76, -2.35,
3.95, -1.86, 4.32, -2.01, -1.23, -1.79, 2.76, -2.13, -0.13, -5.25, 3.84,
-2.24, 1.59, -4.85, 2.96, -2.41, 0.01, -0.43, 0.13, -3.92, 2.91, -1.75,
-0.53, -1.69, 1.69, -1.09, 0.15, -2.11, 2.17, -1.53, 1.22, -2.1, -0.86,
-2.56, 2.28, -3.02, 3.33, -1.12, 3.86, -2.18, -1.19, -3.03, 0.79, -0.83,
0.97, -3.19, 1.45, -1.34, 1.28, -2.52, 4.22, -4.53, 3.22, -1.97, 1.75,
-2.36, 3.19, -0.83, 1.53, -1.59, 1.86, -2.17, 2.3, -1.63, 2.71, -2.03,
3.75, -2.57, -0.6, -1.47, 1.33, -1.95, 0.7, -1.65, 1.27, -1.42, 1.09, -3.0,
3.87, -2.51, 3.06, -2.6, 0.74, -1.08, -0.03, -2.44, 1.31, -2.65, 2.99,
-1.84, 1.65, -4.76, 3.75, -2.07, 3.98, -2.4, 2.67, -2.21, 1.49, -1.21,
1.22, -5.29, 2.38, -2.85, 2.28, -5.6, 3.78, -2.7, 0.8, -1.81, 3.5, -3.75,
4.17, -1.29, 2.99, -5.92, 3.43, -1.83, 1.23, -1.24, -1.04, -2.56, 2.37,
-3.26, 0.39, -4.63, 2.51, -4.52, 3.04, -1.7, 0.36, -1.41, 0.04, -2.1, 1.0,
-1.87, 3.78, -4.32, 3.59, -2.24, 1.38, -1.99, -0.22, -1.87, 1.95, -0.84,
2.17, -5.38, 3.56, -1.27, 2.9, -1.79, 3.31, -5.47, 3.85, -1.44, 3.69,
-2.02, 0.37, -1.29, 0.33, -2.34, 2.56, -1.74, -1.27, -1.97, 1.22, -2.51,
-0.16, -1.64, -0.96, -2.99, 1.4, -1.53, 3.31, -2.24, 0.45, -2.46, 1.71,
-2.88, 1.56, -1.63, 1.46, -1.41, 0.68, -1.96, 2.76, -1.61,
2.11]).reshape((200, 2))
# Global data
X = np.array([[3.0, 3], [4, 3], [4, 2],
[9, 2], [5, 1], [6, 2], [9, 4],
[5, 2], [5, 4], [7, 4], [6, 5]])
CODET1 = np.array([[3.0000, 3.0000],
[6.2000, 4.0000],
[5.8000, 1.8000]])
CODET2 = np.array([[11.0/3, 8.0/3],
[6.7500, 4.2500],
[6.2500, 1.7500]])
LABEL1 = np.array([0, 1, 2, 2, 2, 2, 1, 2, 1, 1, 1])
class TestWhiten(TestCase):
def test_whiten(self):
desired = np.array([[5.08738849, 2.97091878],
[3.19909255, 0.69660580],
[4.51041982, 0.02640918],
[4.38567074, 0.95120889],
[2.32191480, 1.63195503]])
for tp in np.array, np.matrix:
obs = tp([[0.98744510, 0.82766775],
[0.62093317, 0.19406729],
[0.87545741, 0.00735733],
[0.85124403, 0.26499712],
[0.45067590, 0.45464607]])
assert_allclose(whiten(obs), desired, rtol=1e-5)
def test_whiten_zero_std(self):
desired = np.array([[0., 1.0, 2.86666544],
[0., 1.0, 1.32460034],
[0., 1.0, 3.74382172]])
for tp in np.array, np.matrix:
obs = tp([[0., 1., 0.74109533],
[0., 1., 0.34243798],
[0., 1., 0.96785929]])
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_allclose(whiten(obs), desired, rtol=1e-5)
assert_equal(len(w), 1)
assert_(issubclass(w[-1].category, RuntimeWarning))
def test_whiten_not_finite(self):
for tp in np.array, np.matrix:
for bad_value in np.nan, np.inf, -np.inf:
obs = tp([[0.98744510, bad_value],
[0.62093317, 0.19406729],
[0.87545741, 0.00735733],
[0.85124403, 0.26499712],
[0.45067590, 0.45464607]])
assert_raises(ValueError, whiten, obs)
class TestVq(TestCase):
def test_py_vq(self):
initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
for tp in np.array, np.matrix:
label1 = py_vq(tp(X), tp(initc))[0]
assert_array_equal(label1, LABEL1)
def test_py_vq2(self):
initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
for tp in np.array, np.matrix:
label1 = py_vq2(tp(X), tp(initc))[0]
assert_array_equal(label1, LABEL1)
def test_vq(self):
initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
for tp in np.array, np.matrix:
label1, dist = _vq.vq(tp(X), tp(initc))
assert_array_equal(label1, LABEL1)
tlabel1, tdist = vq(tp(X), tp(initc))
# def test_py_vq_1d(self):
# """Test special rank 1 vq algo, python implementation."""
# data = X[:, 0]
# initc = data[:3]
# a, b = _py_vq_1d(data, initc)
# ta, tb = py_vq(data[:, np.newaxis], initc[:, np.newaxis])
# assert_array_equal(a, ta)
# assert_array_equal(b, tb)
def test_vq_1d(self):
# Test special rank 1 vq algo, python implementation.
data = X[:, 0]
initc = data[:3]
a, b = _vq.vq(data, initc)
ta, tb = py_vq(data[:, np.newaxis], initc[:, np.newaxis])
assert_array_equal(a, ta)
assert_array_equal(b, tb)
def test__vq_sametype(self):
a = np.array([1.0, 2.0], dtype=np.float64)
b = a.astype(np.float32)
assert_raises(TypeError, _vq.vq, a, b)
def test__vq_invalid_type(self):
a = np.array([1, 2], dtype=int)
assert_raises(TypeError, _vq.vq, a, a)
def test_vq_large_nfeat(self):
X = np.random.rand(20, 20)
code_book = np.random.rand(3, 20)
codes0, dis0 = _vq.vq(X, code_book)
codes1, dis1 = py_vq(X, code_book)
assert_allclose(dis0, dis1, 1e-5)
assert_array_equal(codes0, codes1)
X = X.astype(np.float32)
code_book = code_book.astype(np.float32)
codes0, dis0 = _vq.vq(X, code_book)
codes1, dis1 = py_vq(X, code_book)
assert_allclose(dis0, dis1, 1e-5)
assert_array_equal(codes0, codes1)
def test_vq_large_features(self):
X = np.random.rand(10, 5) * 1000000
code_book = np.random.rand(2, 5) * 1000000
codes0, dis0 = _vq.vq(X, code_book)
codes1, dis1 = py_vq(X, code_book)
assert_allclose(dis0, dis1, 1e-5)
assert_array_equal(codes0, codes1)
class TestKMean(TestCase):
def test_large_features(self):
        # Generate a data set with large values, and run kmeans on it
        # (regression test for gh-1077).
d = 300
n = 100
m1 = np.random.randn(d)
m2 = np.random.randn(d)
x = 10000 * np.random.randn(n, d) - 20000 * m1
y = 10000 * np.random.randn(n, d) + 20000 * m2
data = np.empty((x.shape[0] + y.shape[0], d), np.double)
data[:x.shape[0]] = x
data[x.shape[0]:] = y
kmeans(data, 2)
def test_kmeans_simple(self):
np.random.seed(54321)
initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
for tp in np.array, np.matrix:
code1 = kmeans(tp(X), tp(initc), iter=1)[0]
assert_array_almost_equal(code1, CODET2)
def test_kmeans_lost_cluster(self):
# This will cause kmeans to have a cluster with no points.
data = TESTDATA_2D
initk = np.array([[-1.8127404, -0.67128041],
[2.04621601, 0.07401111],
[-2.31149087,-0.05160469]])
kmeans(data, initk)
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
kmeans2(data, initk, missing='warn')
assert_raises(ClusterError, kmeans2, data, initk, missing='raise')
def test_kmeans2_simple(self):
np.random.seed(12345678)
initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
for tp in np.array, np.matrix:
code1 = kmeans2(tp(X), tp(initc), iter=1)[0]
code2 = kmeans2(tp(X), tp(initc), iter=2)[0]
assert_array_almost_equal(code1, CODET1)
assert_array_almost_equal(code2, CODET2)
def test_kmeans2_rank1(self):
data = TESTDATA_2D
data1 = data[:, 0]
initc = data1[:3]
code = initc.copy()
kmeans2(data1, code, iter=1)[0]
kmeans2(data1, code, iter=2)[0]
def test_kmeans2_rank1_2(self):
data = TESTDATA_2D
data1 = data[:, 0]
kmeans2(data1, 2, iter=1)
def test_kmeans2_high_dim(self):
# test kmeans2 when the number of dimensions exceeds the number
# of input points
data = TESTDATA_2D
data = data.reshape((20, 20))[:10]
kmeans2(data, 2)
def test_kmeans2_init(self):
data = TESTDATA_2D
kmeans2(data, 3, minit='points')
kmeans2(data[:, :1], 3, minit='points') # special case (1-D)
# minit='random' can give warnings, filter those
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
message="One of the clusters is empty. Re-run")
kmeans2(data, 3, minit='random')
kmeans2(data[:, :1], 3, minit='random') # special case (1-D)
@skipif(sys.platform == 'win32', 'Fails with MemoryError in Wine.')
def test_krandinit(self):
data = TESTDATA_2D
datas = [data.reshape((200, 2)), data.reshape((20, 20))[:10]]
k = int(1e6)
for data in datas:
np.random.seed(1234)
init = _krandinit(data, k)
orig_cov = np.cov(data, rowvar=0)
init_cov = np.cov(init, rowvar=0)
assert_allclose(orig_cov, init_cov, atol=1e-2)
def test_kmeans2_empty(self):
# Regression test for gh-1032.
assert_raises(ValueError, kmeans2, [], 2)
def test_kmeans_0k(self):
# Regression test for gh-1073: fail when k arg is 0.
assert_raises(ValueError, kmeans, X, 0)
assert_raises(ValueError, kmeans2, X, 0)
assert_raises(ValueError, kmeans2, X, np.array([]))
def test_kmeans_large_thres(self):
# Regression test for gh-1774
x = np.array([1,2,3,4,10], dtype=float)
res = kmeans(x, 1, thresh=1e16)
assert_allclose(res[0], np.array([4.]))
assert_allclose(res[1], 2.3999999999999999)
def test_kmeans_no_duplicates(self):
# Regression test for gh-4044
np.random.seed(23495)
features = np.linspace(1, 2, num=20).reshape(10, 2)
# randint(0, 10, 3) will give a duplicate with this seed ([7, 7, 5])
codebook, distortion = kmeans(features, k_or_guess=3)
expected = np.array([[1.15789474, 1.21052632],
[1.52631579, 1.57894737],
[1.84210526, 1.89473684]])
assert_allclose(codebook, expected)
assert_allclose(distortion, 0.11909166841036592)
if __name__ == "__main__":
run_module_suite()
|
68foxboris/enigma2-openpli-vuplus | refs/heads/master | lib/python/Components/BlinkingPixmap.py | 133 | from Pixmap import PixmapConditional
from ConditionalWidget import BlinkingWidgetConditional, BlinkingWidget
class BlinkingPixmap(BlinkingWidget):
def __init__(self):
		BlinkingWidget.__init__(self)
class BlinkingPixmapConditional(BlinkingWidgetConditional, PixmapConditional):
def __init__(self):
BlinkingWidgetConditional.__init__(self)
PixmapConditional.__init__(self)
|
ccnmtl/lettuce | refs/heads/master | tests/integration/lib/Django-1.3/django/contrib/messages/tests/user_messages.py | 241 | from django import http
from django.contrib.auth.models import User
from django.contrib.messages.storage.user_messages import UserMessagesStorage,\
LegacyFallbackStorage
from django.contrib.messages.tests.base import skipUnlessAuthIsInstalled
from django.contrib.messages.tests.cookie import set_cookie_data
from django.contrib.messages.tests.fallback import FallbackTest
from django.test import TestCase
class UserMessagesTest(TestCase):
def setUp(self):
self.user = User.objects.create(username='tester')
def test_add(self):
storage = UserMessagesStorage(http.HttpRequest())
self.assertRaises(NotImplementedError, storage.add, 'Test message 1')
def test_get_anonymous(self):
# Ensure that the storage still works if no user is attached to the
# request.
storage = UserMessagesStorage(http.HttpRequest())
self.assertEqual(len(storage), 0)
def test_get(self):
storage = UserMessagesStorage(http.HttpRequest())
storage.request.user = self.user
self.user.message_set.create(message='test message')
self.assertEqual(len(storage), 1)
self.assertEqual(list(storage)[0].message, 'test message')
UserMessagesTest = skipUnlessAuthIsInstalled(UserMessagesTest)
class LegacyFallbackTest(FallbackTest, TestCase):
storage_class = LegacyFallbackStorage
def setUp(self):
super(LegacyFallbackTest, self).setUp()
self.user = User.objects.create(username='tester')
def get_request(self, *args, **kwargs):
request = super(LegacyFallbackTest, self).get_request(*args, **kwargs)
request.user = self.user
return request
def test_get_legacy_only(self):
request = self.get_request()
storage = self.storage_class(request)
self.user.message_set.create(message='user message')
# Test that the message actually contains what we expect.
self.assertEqual(len(storage), 1)
self.assertEqual(list(storage)[0].message, 'user message')
def test_get_legacy(self):
request = self.get_request()
storage = self.storage_class(request)
cookie_storage = self.get_cookie_storage(storage)
self.user.message_set.create(message='user message')
set_cookie_data(cookie_storage, ['cookie'])
# Test that the message actually contains what we expect.
self.assertEqual(len(storage), 2)
self.assertEqual(list(storage)[0].message, 'user message')
self.assertEqual(list(storage)[1], 'cookie')
LegacyFallbackTest = skipUnlessAuthIsInstalled(LegacyFallbackTest)
|
pygeek/django | refs/heads/master | tests/regressiontests/i18n/patterns/urls/path_unused.py | 175 | from django.conf.urls import url
from django.conf.urls import patterns
from django.views.generic import TemplateView
view = TemplateView.as_view(template_name='dummy.html')
urlpatterns = patterns('',
url(r'^nl/foo/', view, name='not-translated'),
)
|
Sarah-Alsinan/muypicky | refs/heads/master | lib/python3.6/site-packages/django/utils/regex_helper.py | 45 | """
Functions for reversing a regular expression (used in reverse URL resolving).
Used internally by Django and not intended for external use.
This is not, and is not intended to be, a complete reg-exp decompiler. It
should be good enough for a large class of URLs, however.
"""
from __future__ import unicode_literals
import warnings
from django.utils import six
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.six.moves import zip
# Mapping of an escape character to a representative of that class. So, e.g.,
# "\w" is replaced by "x" in a reverse URL. A value of None means to ignore
# this sequence. Any missing key is mapped to itself.
ESCAPE_MAPPINGS = {
"A": None,
"b": None,
"B": None,
"d": "0",
"D": "x",
"s": " ",
"S": "x",
"w": "x",
"W": "!",
"Z": None,
}
class Choice(list):
"""
Used to represent multiple possibilities at this point in a pattern string.
We use a distinguished type, rather than a list, so that the usage in the
code is clear.
"""
class Group(list):
"""
Used to represent a capturing group in the pattern string.
"""
class NonCapture(list):
"""
Used to represent a non-capturing group in the pattern string.
"""
def normalize(pattern):
r"""
Given a reg-exp pattern, normalizes it to an iterable of forms that
suffice for reverse matching. This does the following:
(1) For any repeating sections, keeps the minimum number of occurrences
permitted (this means zero for optional groups).
(2) If an optional group includes parameters, include one occurrence of
that group (along with the zero occurrence case from step (1)).
(3) Select the first (essentially an arbitrary) element from any character
class. Select an arbitrary character for any unordered class (e.g. '.'
or '\w') in the pattern.
(4) Ignore look-ahead and look-behind assertions.
(5) Raise an error on any disjunctive ('|') constructs.
Django's URLs for forward resolving are either all positional arguments or
all keyword arguments. That is assumed here, as well. Although reverse
resolving can be done using positional args when keyword args are
specified, the two cannot be mixed in the same reverse() call.
"""
# Do a linear scan to work out the special features of this pattern. The
# idea is that we scan once here and collect all the information we need to
# make future decisions.
result = []
non_capturing_groups = []
consume_next = True
pattern_iter = next_char(iter(pattern))
num_args = 0
# A "while" loop is used here because later on we need to be able to peek
# at the next character and possibly go around without consuming another
# one at the top of the loop.
try:
ch, escaped = next(pattern_iter)
except StopIteration:
return [('', [])]
try:
while True:
if escaped:
result.append(ch)
elif ch == '.':
# Replace "any character" with an arbitrary representative.
result.append(".")
elif ch == '|':
                # FIXME: One day we should do this, but not in 1.0.
raise NotImplementedError('Awaiting Implementation')
elif ch == "^":
pass
elif ch == '$':
break
elif ch == ')':
# This can only be the end of a non-capturing group, since all
# other unescaped parentheses are handled by the grouping
# section later (and the full group is handled there).
#
                # We regroup everything inside the non-capturing group so that it
# can be quantified, if necessary.
start = non_capturing_groups.pop()
inner = NonCapture(result[start:])
result = result[:start] + [inner]
elif ch == '[':
# Replace ranges with the first character in the range.
ch, escaped = next(pattern_iter)
result.append(ch)
ch, escaped = next(pattern_iter)
while escaped or ch != ']':
ch, escaped = next(pattern_iter)
elif ch == '(':
# Some kind of group.
ch, escaped = next(pattern_iter)
if ch != '?' or escaped:
# A positional group
name = "_%d" % num_args
num_args += 1
result.append(Group((("%%(%s)s" % name), name)))
walk_to_end(ch, pattern_iter)
else:
ch, escaped = next(pattern_iter)
if ch in '!=<':
# All of these are ignorable. Walk to the end of the
# group.
walk_to_end(ch, pattern_iter)
elif ch in 'iLmsu#':
warnings.warn(
'Using (?%s) in url() patterns is deprecated.' % ch,
RemovedInDjango21Warning
)
walk_to_end(ch, pattern_iter)
elif ch == ':':
# Non-capturing group
non_capturing_groups.append(len(result))
elif ch != 'P':
# Anything else, other than a named group, is something
# we cannot reverse.
raise ValueError("Non-reversible reg-exp portion: '(?%s'" % ch)
else:
ch, escaped = next(pattern_iter)
if ch not in ('<', '='):
raise ValueError("Non-reversible reg-exp portion: '(?P%s'" % ch)
                        # We are in a named capturing group. Extract the name and
# then skip to the end.
if ch == '<':
terminal_char = '>'
# We are in a named backreference.
else:
terminal_char = ')'
name = []
ch, escaped = next(pattern_iter)
while ch != terminal_char:
name.append(ch)
ch, escaped = next(pattern_iter)
param = ''.join(name)
# Named backreferences have already consumed the
# parenthesis.
if terminal_char != ')':
result.append(Group((("%%(%s)s" % param), param)))
walk_to_end(ch, pattern_iter)
else:
result.append(Group((("%%(%s)s" % param), None)))
elif ch in "*?+{":
# Quantifiers affect the previous item in the result list.
count, ch = get_quantifier(ch, pattern_iter)
if ch:
                    # We had to look ahead, but it wasn't needed to compute the
# quantifier, so use this character next time around the
# main loop.
consume_next = False
if count == 0:
if contains(result[-1], Group):
# If we are quantifying a capturing group (or
# something containing such a group) and the minimum is
# zero, we must also handle the case of one occurrence
# being present. All the quantifiers (except {0,0},
# which we conveniently ignore) that have a 0 minimum
# also allow a single occurrence.
result[-1] = Choice([None, result[-1]])
else:
result.pop()
elif count > 1:
result.extend([result[-1]] * (count - 1))
else:
# Anything else is a literal.
result.append(ch)
if consume_next:
ch, escaped = next(pattern_iter)
else:
consume_next = True
except StopIteration:
pass
except NotImplementedError:
# A case of using the disjunctive form. No results for you!
return [('', [])]
return list(zip(*flatten_result(result)))
def next_char(input_iter):
r"""
    An iterator that yields the next character from "input_iter", respecting
escape sequences. An escaped character is replaced by a representative of
its class (e.g. \w -> "x"). If the escaped character is one that is
skipped, it is not returned (the next character is returned instead).
Yields the next character, along with a boolean indicating whether it is a
raw (unescaped) character or not.
"""
for ch in input_iter:
if ch != '\\':
yield ch, False
continue
ch = next(input_iter)
representative = ESCAPE_MAPPINGS.get(ch, ch)
if representative is None:
continue
yield representative, True
def walk_to_end(ch, input_iter):
"""
The iterator is currently inside a capturing group. We want to walk to the
close of this group, skipping over any nested groups and handling escaped
parentheses correctly.
"""
if ch == '(':
nesting = 1
else:
nesting = 0
for ch, escaped in input_iter:
if escaped:
continue
elif ch == '(':
nesting += 1
elif ch == ')':
if not nesting:
return
nesting -= 1
def get_quantifier(ch, input_iter):
"""
Parse a quantifier from the input, where "ch" is the first character in the
quantifier.
Returns the minimum number of occurrences permitted by the quantifier and
either None or the next character from the input_iter if the next character
is not part of the quantifier.
"""
if ch in '*?+':
try:
ch2, escaped = next(input_iter)
except StopIteration:
ch2 = None
if ch2 == '?':
ch2 = None
if ch == '+':
return 1, ch2
return 0, ch2
quant = []
while ch != '}':
ch, escaped = next(input_iter)
quant.append(ch)
quant = quant[:-1]
values = ''.join(quant).split(',')
# Consume the trailing '?', if necessary.
try:
ch, escaped = next(input_iter)
except StopIteration:
ch = None
if ch == '?':
ch = None
return int(values[0]), ch
def contains(source, inst):
"""
Returns True if the "source" contains an instance of "inst". False,
otherwise.
"""
if isinstance(source, inst):
return True
if isinstance(source, NonCapture):
for elt in source:
if contains(elt, inst):
return True
return False
def flatten_result(source):
"""
Turns the given source sequence into a list of reg-exp possibilities and
their arguments. Returns a list of strings and a list of argument lists.
Each of the two lists will be of the same length.
"""
if source is None:
return [''], [[]]
if isinstance(source, Group):
if source[1] is None:
params = []
else:
params = [source[1]]
return [source[0]], [params]
result = ['']
result_args = [[]]
pos = last = 0
for pos, elt in enumerate(source):
if isinstance(elt, six.string_types):
continue
piece = ''.join(source[last:pos])
if isinstance(elt, Group):
piece += elt[0]
param = elt[1]
else:
param = None
last = pos + 1
for i in range(len(result)):
result[i] += piece
if param:
result_args[i].append(param)
if isinstance(elt, (Choice, NonCapture)):
if isinstance(elt, NonCapture):
elt = [elt]
inner_result, inner_args = [], []
for item in elt:
res, args = flatten_result(item)
inner_result.extend(res)
inner_args.extend(args)
new_result = []
new_args = []
for item, args in zip(result, result_args):
for i_item, i_args in zip(inner_result, inner_args):
new_result.append(item + i_item)
new_args.append(args[:] + i_args)
result = new_result
result_args = new_args
if pos >= last:
piece = ''.join(source[last:])
for i in range(len(result)):
result[i] += piece
return result, result_args
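# A minimal usage sketch (editor's addition, not part of Django): normalize()
# yields candidate template strings together with the argument names that
# reverse() must supply for each form.
if __name__ == '__main__':
    # A single named group produces one form with one keyword argument:
    print(normalize(r'^articles/(?P<year>\d{4})/$'))
    # -> [('articles/%(year)s/', ['year'])]
    # An optional non-capturing group produces both the zero- and
    # one-occurrence forms:
    print(normalize(r'^docs/(?:section/(?P<slug>\w+)/)?$'))
    # -> [('docs/', []), ('docs/section/%(slug)s/', ['slug'])]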
|
hogarthj/ansible | refs/heads/devel | lib/ansible/modules/cloud/centurylink/clc_server_snapshot.py | 56 | #!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: clc_server_snapshot
short_description: Create, Delete and Restore server snapshots in CenturyLink Cloud.
description:
- An Ansible module to Create, Delete and Restore server snapshots in CenturyLink Cloud.
version_added: "2.0"
options:
server_ids:
description:
- The list of CLC server Ids.
required: True
expiration_days:
description:
- The number of days to keep the server snapshot before it expires.
default: 7
required: False
state:
description:
            - The state to ensure that the provided resources are in.
default: 'present'
required: False
choices: ['present', 'absent', 'restore']
wait:
description:
- Whether to wait for the provisioning tasks to finish before returning.
default: True
required: False
choices: [True, False]
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
    - To use this module, it is required to set the below environment variables which enable access to the
      CenturyLink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
- name: Create server snapshot
clc_server_snapshot:
server_ids:
- UC1TEST-SVR01
- UC1TEST-SVR02
expiration_days: 10
wait: True
state: present
- name: Restore server snapshot
clc_server_snapshot:
server_ids:
- UC1TEST-SVR01
- UC1TEST-SVR02
wait: True
state: restore
- name: Delete server snapshot
clc_server_snapshot:
server_ids:
- UC1TEST-SVR01
- UC1TEST-SVR02
wait: True
state: absent
'''
RETURN = '''
server_ids:
description: The list of server ids that are changed
returned: success
type: list
sample:
[
"UC1TEST-SVR01",
"UC1TEST-SVR02"
]
'''
__version__ = '${version}'
import os
from distutils.version import LooseVersion
try:
import requests
except ImportError:
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
try:
import clc as clc_sdk
from clc import CLCException
except ImportError:
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule
class ClcSnapshot:
clc = clc_sdk
module = None
def __init__(self, module):
"""
Construct module
"""
self.module = module
if not CLC_FOUND:
self.module.fail_json(
msg='clc-python-sdk required for this module')
if not REQUESTS_FOUND:
self.module.fail_json(
msg='requests library is required for this module')
if requests.__version__ and LooseVersion(
requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
def process_request(self):
"""
Process the request - Main Code Path
:return: Returns with either an exit_json or fail_json
"""
p = self.module.params
server_ids = p['server_ids']
expiration_days = p['expiration_days']
state = p['state']
request_list = []
changed = False
changed_servers = []
self._set_clc_credentials_from_env()
if state == 'present':
changed, request_list, changed_servers = self.ensure_server_snapshot_present(
server_ids=server_ids,
expiration_days=expiration_days)
elif state == 'absent':
changed, request_list, changed_servers = self.ensure_server_snapshot_absent(
server_ids=server_ids)
elif state == 'restore':
changed, request_list, changed_servers = self.ensure_server_snapshot_restore(
server_ids=server_ids)
self._wait_for_requests_to_complete(request_list)
return self.module.exit_json(
changed=changed,
server_ids=changed_servers)
def ensure_server_snapshot_present(self, server_ids, expiration_days):
"""
Ensures the given set of server_ids have the snapshots created
:param server_ids: The list of server_ids to create the snapshot
:param expiration_days: The number of days to keep the snapshot
:return: (changed, request_list, changed_servers)
changed: A flag indicating whether any change was made
request_list: the list of clc request objects from CLC API call
                 changed_servers: The list of server ids that were modified
"""
request_list = []
changed = False
servers = self._get_servers_from_clc(
server_ids,
'Failed to obtain server list from the CLC API')
servers_to_change = [
server for server in servers if len(
server.GetSnapshots()) == 0]
for server in servers_to_change:
changed = True
if not self.module.check_mode:
request = self._create_server_snapshot(server, expiration_days)
request_list.append(request)
changed_servers = [
server.id for server in servers_to_change if server.id]
return changed, request_list, changed_servers
def _create_server_snapshot(self, server, expiration_days):
"""
Create the snapshot for the CLC server
:param server: the CLC server object
:param expiration_days: The number of days to keep the snapshot
:return: the create request object from CLC API Call
"""
result = None
try:
result = server.CreateSnapshot(
delete_existing=True,
expiration_days=expiration_days)
except CLCException as ex:
            self.module.fail_json(msg='Failed to create snapshot for server: {0}. {1}'.format(
server.id, ex.response_text
))
return result
def ensure_server_snapshot_absent(self, server_ids):
"""
Ensures the given set of server_ids have the snapshots removed
:param server_ids: The list of server_ids to delete the snapshot
:return: (changed, request_list, changed_servers)
changed: A flag indicating whether any change was made
request_list: the list of clc request objects from CLC API call
                 changed_servers: The list of server ids that were modified
"""
request_list = []
changed = False
servers = self._get_servers_from_clc(
server_ids,
'Failed to obtain server list from the CLC API')
servers_to_change = [
server for server in servers if len(
server.GetSnapshots()) > 0]
for server in servers_to_change:
changed = True
if not self.module.check_mode:
request = self._delete_server_snapshot(server)
request_list.append(request)
changed_servers = [
server.id for server in servers_to_change if server.id]
return changed, request_list, changed_servers
def _delete_server_snapshot(self, server):
"""
Delete snapshot for the CLC server
:param server: the CLC server object
:return: the delete snapshot request object from CLC API
"""
result = None
try:
result = server.DeleteSnapshot()
except CLCException as ex:
            self.module.fail_json(msg='Failed to delete snapshot for server: {0}. {1}'.format(
server.id, ex.response_text
))
return result
def ensure_server_snapshot_restore(self, server_ids):
"""
Ensures the given set of server_ids have the snapshots restored
        :param server_ids: The list of server_ids to restore the snapshot
:return: (changed, request_list, changed_servers)
changed: A flag indicating whether any change was made
request_list: the list of clc request objects from CLC API call
                 changed_servers: The list of server ids that were modified
"""
request_list = []
changed = False
servers = self._get_servers_from_clc(
server_ids,
'Failed to obtain server list from the CLC API')
servers_to_change = [
server for server in servers if len(
server.GetSnapshots()) > 0]
for server in servers_to_change:
changed = True
if not self.module.check_mode:
request = self._restore_server_snapshot(server)
request_list.append(request)
changed_servers = [
server.id for server in servers_to_change if server.id]
return changed, request_list, changed_servers
def _restore_server_snapshot(self, server):
"""
Restore snapshot for the CLC server
:param server: the CLC server object
:return: the restore snapshot request object from CLC API
"""
result = None
try:
result = server.RestoreSnapshot()
except CLCException as ex:
            self.module.fail_json(msg='Failed to restore snapshot for server: {0}. {1}'.format(
server.id, ex.response_text
))
return result
def _wait_for_requests_to_complete(self, requests_lst):
"""
Waits until the CLC requests are complete if the wait argument is True
:param requests_lst: The list of CLC request objects
:return: none
"""
if not self.module.params['wait']:
return
for request in requests_lst:
request.WaitUntilComplete()
for request_details in request.requests:
if request_details.Status() != 'succeeded':
self.module.fail_json(
msg='Unable to process server snapshot request')
@staticmethod
def define_argument_spec():
"""
        This function defines the dictionary object required for
        the snapshot module
        :return: the argument spec dictionary object
"""
argument_spec = dict(
server_ids=dict(type='list', required=True),
            expiration_days=dict(default=7, type='int'),
            wait=dict(default=True, type='bool'),
state=dict(
default='present',
choices=[
'present',
'absent',
'restore']),
)
return argument_spec
def _get_servers_from_clc(self, server_list, message):
"""
Internal function to fetch list of CLC server objects from a list of server ids
:param server_list: The list of server ids
:param message: The error message to throw in case of any error
:return the list of CLC server objects
"""
try:
return self.clc.v2.Servers(server_list).servers
except CLCException as ex:
return self.module.fail_json(msg=message + ': %s' % ex)
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
@staticmethod
def _set_user_agent(clc):
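        # Tag outgoing CLC SDK requests so that server-side logs can attribute
        # the traffic to this Ansible module and its version.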
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
"""
Main function
:return: None
"""
module = AnsibleModule(
argument_spec=ClcSnapshot.define_argument_spec(),
supports_check_mode=True
)
clc_snapshot = ClcSnapshot(module)
clc_snapshot.process_request()
if __name__ == '__main__':
main()
|
chiefspace/udemy-rest-api | refs/heads/master | udemy_rest_api_section5/code/env/lib/python3.4/site-packages/setuptools/site-patch.py | 720 | def __boot():
import sys
import os
PYTHONPATH = os.environ.get('PYTHONPATH')
if PYTHONPATH is None or (sys.platform=='win32' and not PYTHONPATH):
PYTHONPATH = []
else:
PYTHONPATH = PYTHONPATH.split(os.pathsep)
pic = getattr(sys,'path_importer_cache',{})
stdpath = sys.path[len(PYTHONPATH):]
mydir = os.path.dirname(__file__)
#print "searching",stdpath,sys.path
for item in stdpath:
if item==mydir or not item:
continue # skip if current dir. on Windows, or my own directory
importer = pic.get(item)
if importer is not None:
loader = importer.find_module('site')
if loader is not None:
# This should actually reload the current module
loader.load_module('site')
break
else:
try:
import imp # Avoid import loop in Python >= 3.3
stream, path, descr = imp.find_module('site',[item])
except ImportError:
continue
if stream is None:
continue
try:
# This should actually reload the current module
imp.load_module('site',stream,path,descr)
finally:
stream.close()
break
else:
raise ImportError("Couldn't find the real 'site' module")
#print "loaded", __file__
known_paths = dict([(makepath(item)[1],1) for item in sys.path]) # 2.2 comp
oldpos = getattr(sys,'__egginsert',0) # save old insertion position
sys.__egginsert = 0 # and reset the current one
for item in PYTHONPATH:
addsitedir(item)
sys.__egginsert += oldpos # restore effective old position
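    # Rebuild sys.path so that entries added above (those missing from the
    # known_paths snapshot) are grouped just before the first stdlib entry.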
d, nd = makepath(stdpath[0])
insert_at = None
new_path = []
for item in sys.path:
p, np = makepath(item)
if np==nd and insert_at is None:
# We've hit the first 'system' path entry, so added entries go here
insert_at = len(new_path)
if np in known_paths or insert_at is None:
new_path.append(item)
else:
# new path after the insert point, back-insert it
new_path.insert(insert_at, item)
insert_at += 1
sys.path[:] = new_path
if __name__=='site':
__boot()
del __boot
|
Aravinthu/odoo | refs/heads/master | doc/_extensions/autojsdoc/__main__.py | 2 | # -*- coding: utf-8 -*-
import cgitb
import fnmatch
import io
import logging
import click
import pyjsdoc
import pyjsparser
import sys
from .parser.parser import ModuleMatcher
from .parser.visitor import Visitor, SKIP
from . import jsdoc
class Printer(Visitor):
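    # Debug visitor that prints the pyjsparser AST as an indented outline,
    # showing identifiers, literal values and binary operators inline.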
def __init__(self, level=0):
super(Printer, self).__init__()
self.level = level
def _print(self, text):
print ' ' * self.level, text
def enter_generic(self, node):
self._print(node['type'])
self.level += 1
def exit_generic(self, node):
self.level -= 1
def enter_Identifier(self, node):
self._print(node['name'])
return SKIP
def enter_Literal(self, node):
self._print(node['value'])
return SKIP
def enter_BinaryExpression(self, node):
self._print(node['operator'])
self.level += 1
def visit_files(files, visitor, ctx):
for name in files:
with io.open(name) as f:
ctx.logger.info("%s", name)
try:
yield visitor().visit(pyjsparser.parse(f.read()))
except Exception as e:
if ctx.logger.isEnabledFor(logging.DEBUG):
ctx.logger.exception("while visiting %s", name)
else:
ctx.logger.error("%s while visiting %s", e, name)
# bunch of modules various bits depend on which are not statically defined
# (or are outside the scope of the system)
ABSTRACT_MODULES = [
jsdoc.ModuleDoc({
'module': 'web.web_client',
'dependency': {'web.AbstractWebClient'},
'exports': jsdoc.NSDoc({
'name': 'web_client',
'doc': 'instance of AbstractWebClient',
}),
}),
jsdoc.ModuleDoc({
'module': 'web.Tour',
'dependency': {'web_tour.TourManager'},
'exports': jsdoc.NSDoc({
'name': 'Tour',
'doc': 'maybe tourmanager instance?',
}),
}),
# OH FOR FUCK'S SAKE
jsdoc.ModuleDoc({
'module': 'summernote/summernote',
'exports': jsdoc.NSDoc({'doc': "totally real summernote"}),
})
]
@click.group(context_settings={'help_option_names': ['-h', '--help']})
@click.option('-v', '--verbose', count=True)
@click.option('-q', '--quiet', count=True)
@click.pass_context
def autojsdoc(ctx, verbose, quiet):
logging.basicConfig(
level=logging.INFO + (quiet - verbose) * 10,
format="[%(levelname)s %(created)f] %(message)s",
)
ctx.logger = logging.getLogger('autojsdoc')
ctx.visitor = None
ctx.files = []
ctx.kw = {}
@autojsdoc.command()
@click.argument('files', type=click.Path(exists=True), nargs=-1)
@click.pass_context
def ast(ctx, files):
""" Prints a structure tree of the provided files
"""
if not files:
print(ctx.get_help())
visit_files(files, lambda: Printer(level=1), ctx.parent)
@autojsdoc.command()
@click.option('-m', '--module', multiple=True, help="Only shows dependencies matching any of the patterns")
@click.argument('files', type=click.Path(exists=True), nargs=-1)
@click.pass_context
def dependencies(ctx, module, files):
""" Prints a dot file of all modules to stdout
"""
if not files:
print(ctx.get_help())
byname = {
mod.name: mod.dependencies
for mod in ABSTRACT_MODULES
}
for modules in visit_files(files, ModuleMatcher, ctx.parent):
for mod in modules:
byname[mod.name] = mod.dependencies
print('digraph dependencies {')
todo = set()
# if module filters, roots are only matching modules
if module:
for f in module:
todo.update(fnmatch.filter(byname.keys(), f))
for m in todo:
# set a different box for selected roots
print(' "%s" [color=orangered]' % m)
else:
# otherwise check all modules
todo.update(byname)
done = set()
while todo:
node = todo.pop()
if node in done:
continue
done.add(node)
deps = byname[node]
todo.update(deps - done)
for dep in deps:
print(' "%s" -> "%s";' % (node, dep))
print('}')
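    # Hypothetical usage: render the emitted graph with graphviz, e.g.
    #   python -m autojsdoc dependencies -m 'web.*' addons/web/**/*.js | dot -Tpng -o deps.png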
try:
autojsdoc.main(prog_name='autojsdoc')
except Exception:
print(cgitb.text(sys.exc_info()))
|
jhaux/tensorflow | refs/heads/master | tensorflow/contrib/distributions/python/kernel_tests/geometric_test.py | 23 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the Geometric distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib.distributions.python.ops import geometric
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
# In all tests that follow, we use scipy.stats.geom, which
# represents the "Shifted" Geometric distribution. Hence, loc=-1 is passed
# in to each scipy function for testing.
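# With loc=-1 the pmf becomes p * (1 - p)**k for k = 0, 1, ..., i.e. the
# distribution of the number of failures before the first success, which is the
# convention used by the Geometric distribution under test.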
class GeometricTest(test.TestCase):
def testGeometricShape(self):
with self.test_session():
probs = constant_op.constant([.1] * 5)
geom = geometric.Geometric(probs=probs)
self.assertEqual([5,], geom.batch_shape_tensor().eval())
self.assertAllEqual([], geom.event_shape_tensor().eval())
self.assertEqual(tensor_shape.TensorShape([5]), geom.batch_shape)
self.assertEqual(tensor_shape.TensorShape([]), geom.event_shape)
def testInvalidP(self):
invalid_ps = [-.01, -0.01, -2.]
with self.test_session():
with self.assertRaisesOpError("Condition x >= 0"):
geom = geometric.Geometric(probs=invalid_ps)
geom.probs.eval()
invalid_ps = [1.1, 3., 5.]
with self.test_session():
with self.assertRaisesOpError("Condition x <= y"):
geom = geometric.Geometric(probs=invalid_ps)
geom.probs.eval()
def testGeomLogPmf(self):
with self.test_session():
batch_size = 6
probs = constant_op.constant([.2] * batch_size)
probs_v = .2
x = np.array([2., 3., 4., 5., 6., 7.], dtype=np.float32)
geom = geometric.Geometric(probs=probs)
expected_log_prob = stats.geom.logpmf(x, probs_v, loc=-1)
log_prob = geom.log_prob(x)
self.assertEqual([6,], log_prob.get_shape())
self.assertAllClose(expected_log_prob, log_prob.eval())
pmf = geom.prob(x)
self.assertEqual([6,], pmf.get_shape())
self.assertAllClose(np.exp(expected_log_prob), pmf.eval())
def testGeometricLogPmf_validate_args(self):
with self.test_session():
batch_size = 6
probs = constant_op.constant([.9] * batch_size)
x = array_ops.placeholder(dtypes.float32, shape=[6])
feed_dict = {x: [2.5, 3.2, 4.3, 5.1, 6., 7.]}
geom = geometric.Geometric(probs=probs)
with self.assertRaisesOpError("Condition x == y"):
log_prob = geom.log_prob(x)
log_prob.eval(feed_dict=feed_dict)
with self.assertRaisesOpError("Condition x >= 0"):
log_prob = geom.log_prob(np.array([-1.], dtype=np.float32))
log_prob.eval()
geom = geometric.Geometric(probs=probs, validate_args=False)
log_prob = geom.log_prob(x)
self.assertEqual([6,], log_prob.get_shape())
pmf = geom.prob(x)
self.assertEqual([6,], pmf.get_shape())
def testGeometricLogPmfMultidimensional(self):
with self.test_session():
batch_size = 6
probs = constant_op.constant([[.2, .3, .5]] * batch_size)
probs_v = np.array([.2, .3, .5])
x = np.array([[2., 3., 4., 5., 6., 7.]], dtype=np.float32).T
geom = geometric.Geometric(probs=probs)
expected_log_prob = stats.geom.logpmf(x, probs_v, loc=-1)
log_prob = geom.log_prob(x)
log_prob_values = log_prob.eval()
self.assertEqual([6, 3], log_prob.get_shape())
self.assertAllClose(expected_log_prob, log_prob_values)
pmf = geom.prob(x)
pmf_values = pmf.eval()
self.assertEqual([6, 3], pmf.get_shape())
self.assertAllClose(np.exp(expected_log_prob), pmf_values)
def testGeometricCDF(self):
with self.test_session():
batch_size = 6
probs = constant_op.constant([[.2, .4, .5]] * batch_size)
probs_v = np.array([.2, .4, .5])
x = np.array([[2., 3., 4., 5.5, 6., 7.]], dtype=np.float32).T
geom = geometric.Geometric(probs=probs)
expected_cdf = stats.geom.cdf(x, probs_v, loc=-1)
cdf = geom.cdf(x)
self.assertEqual([6, 3], cdf.get_shape())
self.assertAllClose(expected_cdf, cdf.eval())
def testGeometricEntropy(self):
with self.test_session():
probs_v = np.array([.1, .3, .25], dtype=np.float32)
geom = geometric.Geometric(probs=probs_v)
expected_entropy = stats.geom.entropy(probs_v, loc=-1)
self.assertEqual([3], geom.entropy().get_shape())
self.assertAllClose(expected_entropy, geom.entropy().eval())
def testGeometricMean(self):
with self.test_session():
probs_v = np.array([.1, .3, .25])
geom = geometric.Geometric(probs=probs_v)
expected_means = stats.geom.mean(probs_v, loc=-1)
self.assertEqual([3], geom.mean().get_shape())
self.assertAllClose(expected_means, geom.mean().eval())
def testGeometricVariance(self):
with self.test_session():
probs_v = np.array([.1, .3, .25])
geom = geometric.Geometric(probs=probs_v)
expected_vars = stats.geom.var(probs_v, loc=-1)
self.assertEqual([3], geom.variance().get_shape())
self.assertAllClose(expected_vars, geom.variance().eval())
def testGeometricStddev(self):
with self.test_session():
probs_v = np.array([.1, .3, .25])
geom = geometric.Geometric(probs=probs_v)
expected_stddevs = stats.geom.std(probs_v, loc=-1)
self.assertEqual([3], geom.stddev().get_shape())
self.assertAllClose(geom.stddev().eval(), expected_stddevs)
def testGeometricMode(self):
with self.test_session():
probs_v = np.array([.1, .3, .25])
geom = geometric.Geometric(probs=probs_v)
self.assertEqual([3,], geom.mode().get_shape())
self.assertAllClose([0.] * 3, geom.mode().eval())
def testGeometricSample(self):
with self.test_session():
probs_v = [.3, .9]
probs = constant_op.constant(probs_v)
n = constant_op.constant(100000)
geom = geometric.Geometric(probs=probs)
samples = geom.sample(n, seed=12345)
self.assertEqual([100000, 2], samples.get_shape())
sample_values = samples.eval()
self.assertFalse(np.any(sample_values < 0.0))
for i in range(2):
self.assertAllClose(sample_values[:, i].mean(),
stats.geom.mean(probs_v[i], loc=-1),
rtol=.02)
self.assertAllClose(sample_values[:, i].var(),
stats.geom.var(probs_v[i], loc=-1),
rtol=.02)
def testGeometricSampleMultiDimensional(self):
with self.test_session():
batch_size = 2
probs_v = [.3, .9]
probs = constant_op.constant([probs_v] * batch_size)
geom = geometric.Geometric(probs=probs)
n = 400000
samples = geom.sample(n, seed=12345)
self.assertEqual([n, batch_size, 2], samples.get_shape())
sample_values = samples.eval()
self.assertFalse(np.any(sample_values < 0.0))
for i in range(2):
self.assertAllClose(sample_values[:, 0, i].mean(),
stats.geom.mean(probs_v[i], loc=-1),
rtol=.02)
self.assertAllClose(sample_values[:, 0, i].var(),
stats.geom.var(probs_v[i], loc=-1),
rtol=.02)
self.assertAllClose(sample_values[:, 1, i].mean(),
stats.geom.mean(probs_v[i], loc=-1),
rtol=.02)
self.assertAllClose(sample_values[:, 1, i].var(),
stats.geom.var(probs_v[i], loc=-1),
rtol=.02)
def testGeometricAtBoundary(self):
with self.test_session():
geom = geometric.Geometric(probs=1.)
x = np.array([0., 2., 3., 4., 5., 6., 7.], dtype=np.float32)
expected_log_prob = stats.geom.logpmf(x, [1.], loc=-1)
# Scipy incorrectly returns nan.
expected_log_prob[np.isnan(expected_log_prob)] = 0.
log_prob = geom.log_prob(x)
self.assertEqual([7,], log_prob.get_shape())
self.assertAllClose(expected_log_prob, log_prob.eval())
pmf = geom.prob(x)
self.assertEqual([7,], pmf.get_shape())
self.assertAllClose(np.exp(expected_log_prob), pmf.eval())
expected_log_cdf = stats.geom.logcdf(x, 1., loc=-1)
log_cdf = geom.log_cdf(x)
self.assertEqual([7,], log_cdf.get_shape())
self.assertAllClose(expected_log_cdf, log_cdf.eval())
cdf = geom.cdf(x)
self.assertEqual([7,], cdf.get_shape())
self.assertAllClose(np.exp(expected_log_cdf), cdf.eval())
expected_mean = stats.geom.mean(1., loc=-1)
self.assertEqual([], geom.mean().get_shape())
self.assertAllClose(expected_mean, geom.mean().eval())
expected_variance = stats.geom.var(1., loc=-1)
self.assertEqual([], geom.variance().get_shape())
self.assertAllClose(expected_variance, geom.variance().eval())
with self.assertRaisesOpError("Entropy is undefined"):
geom.entropy().eval()
if __name__ == "__main__":
test.main()
|
jrbadiabo/Coursera-Stanford-ML-Class | refs/heads/master | Python_Version/Ex7.K-Means_PCA_-_Clustering/runkMeans.py | 2 | from computeCentroids import computeCentroids
from plotProgresskMeans import plotProgresskMeans
from findClosestCentroids import findClosestCentroids
import matplotlib.pyplot as plt
import numpy as np
import itertools
def runkMeans(X, initial_centroids, max_iters, plot_progress=False):
"""runs the K-Means algorithm on data matrix X, where each
row of X is a single example. It uses initial_centroids used as the
initial centroids. max_iters specifies the total number of interactions
of K-Means to execute. plot_progress is a true/false flag that
indicates if the function should also plot its progress as the
learning happens. This is set to false by default. runkMeans returns
centroids, a Kxn matrix of the computed centroids and idx, a m x 1
vector of centroid assignments (i.e. each entry in range [1..K])
"""
# Plot the data if we are plotting progress
if plot_progress:
plt.figure()
# Initialize values
m, n = X.shape
K = len(initial_centroids)
centroids = initial_centroids
previous_centroids = centroids
idx = np.zeros(m)
c = itertools.cycle('012')
rgb = np.eye(3)
# Run K-Means
for i in range(max_iters):
# Output progress
        print 'K-Means iteration %d/%d...' % (i + 1, max_iters)
# For each example in X, assign it to the closest centroid
_, idx = findClosestCentroids(X, centroids)
# Optionally, plot progress here
if plot_progress:
color = rgb[int(next(c))]
plotProgresskMeans(X, np.array(centroids),
np.array(previous_centroids), idx, K, i, color)
previous_centroids = centroids
# raw_input("Press Enter to continue...")
# Given the memberships, compute new centroids
centroids = computeCentroids(X, idx, K)
    # MATLAB's "hold off" has no direct matplotlib equivalent here, so there is
    # nothing to do after plotting progress
return centroids, idx
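# Illustrative usage (made-up data, not from the course exercises):
# X = np.array([[1.0, 2.0], [1.5, 1.8], [8.0, 8.0], [9.0, 9.5]])
# initial = np.array([[1.0, 2.0], [8.0, 8.0]])
# centroids, idx = runkMeans(X, initial, max_iters=5)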
|
osborne6/luminotes | refs/heads/master | controller/Notebooks.py | 1 | import re
import cgi
import cherrypy
from datetime import datetime
from Expose import expose
from Validate import validate, Valid_string, Validation_error, Valid_bool, Valid_int
from Database import Valid_id, Valid_revision, end_transaction
from Users import grab_user_id, Access_error
from Expire import strongly_expire, weakly_expire
from Html_nuker import Html_nuker
from Html_differ import Html_differ
from Files import Upload_file
from model.Notebook import Notebook
from model.Note import Note
from model.Invite import Invite
from model.User import User
from model.User_revision import User_revision
from model.File import File
from model.Tag import Tag
from view.Main_page import Main_page
from view.Json import Json
from view.Note_tree_area import Note_tree_area
from view.Notebook_rss import Notebook_rss
from view.Updates_rss import Updates_rss
from view.Update_link_page import Update_link_page
class Import_error( Exception ):
def __init__( self, message = None ):
if message is None:
message = u"An error occurred when trying to import your file. Please try a different file, or contact support for help."
Exception.__init__( self, message )
self.__message = message
def to_dict( self ):
return dict(
error = self.__message
)
class Notebooks( object ):
  """
  Controller for dealing with notebooks and their notes, corresponding to the "/notebooks" URL.
  """
WHITESPACE_PATTERN = re.compile( u"\s+" )
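  # LINK_PATTERN captures, in order: the full attribute string, the href value,
  # an optional link target, an optional embedded <img> tag, and the link title
  # text (see load_note_links() below for how these groups are used).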
LINK_PATTERN = re.compile( u'<a\s+((?:[^>]+\s)?href="([^"]+)"(?:\s+target="([^"]*)")?[^>]*)>(<img [^>]+>)?([^<]*)</a>', re.IGNORECASE )
FILE_PATTERN = re.compile( u'/files/' )
NEW_FILE_PATTERN = re.compile( u'/files/new' )
EXPORT_FORMAT_PATTERN = re.compile( u"^[a-zA-Z0-9_]+$" )
"""
Controller for dealing with notebooks and their notes, corresponding to the "/notebooks" URL.
"""
def __init__( self, database, users, files, https_url ):
"""
Create a new Notebooks object.
@type database: controller.Database
@param database: database that notebooks are stored in
@type users: controller.Users
@param users: controller for all users, used here for updating storage utilization
@type files: controller.Files
@param files: controller for all uploaded files, used here for deleting files that are no longer
referenced within saved notes
@type https_url: unicode
@param https_url: base URL to use for SSL http requests, or an empty string
@return: newly constructed Notebooks
"""
self.__database = database
self.__users = users
self.__files = files
self.__https_url = https_url
@expose( view = Main_page, rss = Notebook_rss )
@strongly_expire
@end_transaction
@grab_user_id
@validate(
notebook_id = Valid_id(),
note_id = Valid_id(),
parent_id = Valid_id(),
revision = Valid_revision(),
previous_revision = Valid_revision( none_okay = True ),
rename = Valid_bool(),
deleted_id = Valid_id(),
preview = Valid_string(),
user_id = Valid_id( none_okay = True ),
)
def default( self, notebook_id, note_id = None, parent_id = None, revision = None,
previous_revision = None, rename = False, deleted_id = None, preview = None,
user_id = None ):
"""
Provide the information necessary to display the page for a particular notebook. If a
particular note id is given without a revision, then the most recent version of that note is
displayed.
@type notebook_id: unicode
@param notebook_id: id of the notebook to display
@type note_id: unicode or NoneType
@param note_id: id of single note in this notebook to display (optional)
@type parent_id: unicode or NoneType
@param parent_id: id of parent notebook to this notebook (optional)
@type revision: unicode or NoneType
@param revision: revision timestamp of the provided note (optional)
@type previous_revision: unicode or NoneType
@param previous_revision: older revision timestamp to diff with the given revision (optional)
@type rename: bool or NoneType
@param rename: whether this is a new notebook and should be renamed (optional, defaults to False)
@type deleted_id: unicode or NoneType
@param deleted_id: id of the notebook that was just deleted, if any (optional)
@type preview: unicode
@param preview: type of access with which to preview this notebook, either "collaborator",
"viewer", "owner", or "default" (optional, defaults to "default"). access must
be equal to or lower than user's own access level to this notebook
@type user_id: unicode or NoneType
@param user_id: id of current logged-in user (if any)
@rtype: unicode
@return: rendered HTML page
"""
result = self.__users.current( user_id )
if preview == u"collaborator":
read_write = True
owner = False
result[ u"notebooks" ] = [
notebook for notebook in result[ "notebooks" ] if notebook.object_id == notebook_id
]
if len( result[ u"notebooks" ] ) == 1:
result[ u"notebooks" ][ 0 ].owner = False
elif preview == u"viewer":
read_write = False
owner = False
result[ u"notebooks" ] = [
notebook for notebook in result[ "notebooks" ] if notebook.object_id == notebook_id
]
if len( result[ u"notebooks" ] ) == 1:
result[ u"notebooks" ][ 0 ].read_write = Notebook.READ_ONLY
result[ u"notebooks" ][ 0 ].owner = False
elif preview in ( u"owner", u"default", None ):
read_write = True
owner = True
else:
raise Access_error()
result.update( self.contents( notebook_id, note_id, revision, previous_revision, read_write, owner, user_id ) )
result[ "parent_id" ] = parent_id
if revision:
result[ "note_read_write" ] = False
notebook = result[ u"notebook" ]
# if this is a forum thread notebook, redirect to the forum thread page
forum_tags = [ tag for tag in notebook.tags if tag.name == u"forum" ]
if forum_tags:
forum_name = forum_tags[ 0 ].value
if forum_name == "blog":
redirect = u"/blog/%s" % notebook.friendly_id
else:
redirect = u"/forums/%s/%s" % ( forum_name, notebook_id )
if note_id:
redirect += u"?note_id=%s" % note_id
return dict(
redirect = redirect,
)
if notebook.name != u"Luminotes":
result[ "recent_notes" ] = self.__database.select_many( Note, notebook.sql_load_notes_in_update_order( start = 0, count = 10 ) )
# if the user doesn't have any storage bytes yet, they're a new user, so see what type of
# conversion this is (demo or signup)
if result[ "user" ].username != u"anonymous" and result[ "user" ].storage_bytes == 0:
if u"this is a demo" in [ note.title for note in result[ "startup_notes" ] ]:
result[ "conversion" ] = u"demo"
else:
result[ "conversion" ] = u"signup"
result[ "rename" ] = rename
result[ "deleted_id" ] = deleted_id
return result
def contents( self, notebook_id, note_id = None, revision = None, previous_revision = None,
read_write = True, owner = True, user_id = None ):
"""
Return information about the requested notebook, including its startup notes. Optionally include
a single requested note as well.
@type notebook_id: unicode
@param notebook_id: id of notebook to return
@type note_id: unicode or NoneType
@param note_id: id of single note in this notebook to return (optional)
@type revision: unicode or NoneType
@param revision: revision timestamp of the provided note (optional)
@type previous_revision: unicode or NoneType
@param previous_revision: older revision timestamp to diff with the given revision (optional)
@type read_write: bool or NoneType
@param read_write: whether the notebook should be returned as read-write (optional, defaults to True).
this can only lower access, not elevate it
@type owner: bool or NoneType
@param owner: whether the notebook should be returned as owner-level access (optional, defaults to True).
this can only lower access, not elevate it
@type user_id: unicode or NoneType
@param user_id: id of current logged-in user (if any)
@rtype: dict
@return: {
'notebook': notebook,
'startup_notes': notelist,
'total_notes_count': notecount,
'notes': notelist,
'invites': invitelist
}
@raise Access_error: the current user doesn't have access to the given notebook or note
@raise Validation_error: one of the arguments is invalid
"""
notebook = self.__users.load_notebook( user_id, notebook_id )
anonymous = self.__database.select_one( User, User.sql_load_by_username( u"anonymous" ), use_cache = True )
if notebook is None or anonymous is None:
raise Access_error()
if read_write is False:
notebook.read_write = Notebook.READ_ONLY
if owner is False:
notebook.owner = False
if note_id:
note = self.__database.load( Note, note_id, revision )
if note and note.notebook_id != notebook_id:
if note.notebook_id == notebook.trash_id:
note = None
else:
raise Access_error()
# if two revisions were provided, then make the returned note's contents into a diff
if note and revision and previous_revision:
previous_note = self.__database.load( Note, note_id, previous_revision )
if previous_note and previous_note.contents:
note.replace_contents( Html_differ().diff( previous_note.contents, note.contents ) )
else:
note = None
notebook.tags = \
self.__database.select_many( Tag, notebook.sql_load_tags( user_id ) ) + \
self.__database.select_many( Tag, notebook.sql_load_tags( anonymous.object_id ) )
startup_notes = self.__database.select_many( Note, notebook.sql_load_startup_notes() )
total_notes_count = self.__database.select_one( int, notebook.sql_count_notes(), use_cache = True )
if self.__users.load_notebook( user_id, notebook_id, owner = True ):
invites = self.__database.select_many( Invite, Invite.sql_load_notebook_invites( notebook_id ) )
else:
invites = []
return dict(
notebook = notebook,
startup_notes = startup_notes,
total_notes_count = total_notes_count,
notes = note and [ note ] or [],
invites = invites or [],
)
@expose( view = None, rss = Updates_rss )
@strongly_expire
@end_transaction
@validate(
notebook_id = Valid_id(),
notebook_name = Valid_string(),
)
def updates( self, notebook_id, notebook_name ):
"""
Provide the information necessary to display an updated notes RSS feed for the given notebook.
This method does not require any sort of login.
@type notebook_id: unicode
@param notebook_id: id of the notebook to provide updates for
@type notebook_name: unicode
@param notebook_name: name of the notebook to include in the RSS feed
@rtype: unicode
@return: rendered RSS feed
"""
notebook = self.__database.load( Notebook, notebook_id )
if not notebook:
return dict(
recent_notes = [],
notebook_id = notebook_id,
notebook_name = notebook_name,
https_url = self.__https_url,
)
recent_notes = self.__database.select_many( Note, notebook.sql_load_notes_in_update_order( start = 0, count = 10 ) )
return dict(
recent_notes = [ ( note.object_id, note.revision ) for note in recent_notes ],
notebook_id = notebook_id,
notebook_name = notebook_name,
https_url = self.__https_url,
)
@expose( view = Update_link_page )
@strongly_expire
@end_transaction
@validate(
notebook_id = Valid_id(),
notebook_name = Valid_string(),
note_id = Valid_id(),
revision = Valid_revision(),
)
def get_update_link( self, notebook_id, notebook_name, note_id, revision ):
"""
Provide the information necessary to display a link to an updated note. This method does not
require any sort of login.
@type notebook_id: unicode
@param notebook_id: id of the notebook the note is in
@type notebook_name: unicode
@param notebook_name: name of the notebook
@type note_id: unicode
@param note_id: id of the note to link to
@type revision: unicode
@param revision: ignored; present so RSS feed readers distinguish between different revisions
@rtype: unicode
@return: rendered HTML page
"""
return dict(
notebook_id = notebook_id,
notebook_name = notebook_name,
note_id = note_id,
https_url = self.__https_url,
)
@expose( view = Json )
@strongly_expire
@end_transaction
@grab_user_id
@validate(
notebook_id = Valid_id(),
note_id = Valid_id(),
revision = Valid_revision(),
previous_revision = Valid_revision( none_okay = True ),
summarize = Valid_bool(),
user_id = Valid_id( none_okay = True ),
)
def load_note( self, notebook_id, note_id, revision = None, previous_revision = None, summarize = False, user_id = None ):
"""
Return the information on a particular note by its id.
@type notebook_id: unicode
@param notebook_id: id of notebook the note is in
@type note_id: unicode
@param note_id: id of note to return
@type revision: unicode or NoneType
@param revision: revision timestamp of the note (optional)
@type previous_revision: unicode or NoneType
@param previous_revision: older revision timestamp to diff with the given revision (optional)
@type summarize: bool or NoneType
@param summarize: True to return a summary of the note's contents, False to return full text
(optional, defaults to False)
@type user_id: unicode or NoneType
@param user_id: id of current logged-in user (if any), determined by @grab_user_id
@rtype: json dict
@return: { 'note': notedict or None }
@raise Access_error: the current user doesn't have access to the given notebook or note
@raise Validation_error: one of the arguments is invalid
"""
notebook = self.__users.load_notebook( user_id, notebook_id )
if not notebook:
raise Access_error()
note = self.__database.load( Note, note_id, revision )
# if the note has no notebook, it has been deleted "forever"
if note and note.notebook_id is None:
return dict(
note = None,
)
if note and note.notebook_id != notebook_id:
if note.notebook_id == notebook.trash_id:
if revision:
return dict(
note = summarize and self.summarize_note( note ) or note,
)
return dict(
note = None,
note_id_in_trash = note.object_id,
)
raise Access_error()
if note and revision and previous_revision:
previous_note = self.__database.load( Note, note_id, previous_revision )
if previous_note and previous_note.contents:
note.replace_contents( Html_differ().diff( previous_note.contents, note.contents ) )
return dict(
note = summarize and self.summarize_note( note ) or note,
)
@expose( view = Json )
@strongly_expire
@end_transaction
@grab_user_id
@validate(
notebook_id = Valid_id(),
note_title = Valid_string( min = 1, max = 500 ),
summarize = Valid_bool(),
user_id = Valid_id( none_okay = True ),
)
def load_note_by_title( self, notebook_id, note_title, summarize = False, user_id = None ):
"""
Return the information on a particular note by its title. The lookup by title is performed
case-insensitively.
@type notebook_id: unicode
@param notebook_id: id of notebook the note is in
@type note_title: unicode
@param note_title: title of the note to return
@type summarize: bool or NoneType
@param summarize: True to return a summary of the note's contents, False to return full text
(optional, defaults to False)
@type user_id: unicode or NoneType
@param user_id: id of current logged-in user (if any), determined by @grab_user_id
@rtype: json dict
@return: { 'note': notedict or None }
@raise Access_error: the current user doesn't have access to the given notebook
@raise Validation_error: one of the arguments is invalid
"""
notebook = self.__users.load_notebook( user_id, notebook_id )
if not notebook:
raise Access_error()
note = self.__database.select_one( Note, notebook.sql_load_note_by_title( note_title ) )
return dict(
note = summarize and self.summarize_note( note ) or note,
)
def summarize_note( self, note, max_summary_length = None, word_count = None, highlight_text = None ):
"""
Create a truncated, HTML-free note summary for the given note, and then return the note with
its summary set.
@type note: model.Note or NoneType
@param note: note to summarize, or None
@type max_summary_length: int or NoneType
@param max_summary_length: the length to which the summary is truncated (optional, defaults
to a reasonable length)
@type word_count: int or NoneType
@param word_count: the number of words to which the summary is truncated (optional, defaults
to a reasonable number of words)
@type highlight_text: unicode or NoneType
@param highlight_text: text to emphasize within the summary (optional, defaults to no emphasis)
@rtype: model.Note or NoneType
@return: note with its summary member set, or None if no note was provided
"""
DEFAULT_MAX_SUMMARY_LENGTH = 40
DEFAULT_WORD_COUNT = 10
if not max_summary_length:
max_summary_length = DEFAULT_MAX_SUMMARY_LENGTH
if not word_count:
word_count = DEFAULT_WORD_COUNT
if note is None:
return None
if note.contents is None:
return note
# remove all HTML from the contents and also remove the title
summary = Html_nuker().nuke( note.contents )
if note.title and summary.startswith( note.title ):
summary = summary[ len( note.title ) : ]
# split the summary on whitespace
words = self.WHITESPACE_PATTERN.split( summary )
def first_words( words, word_count ):
return u" ".join( words[ : word_count ] )
# find a summary less than MAX_SUMMARY_LENGTH and, if possible, truncated on a word boundary
truncated = False
summary = first_words( words, word_count )
while len( summary ) > max_summary_length:
word_count -= 1
summary = first_words( words, word_count )
# if the first word is just ridiculously long, truncate it without finding a word boundary
if word_count == 1:
summary = summary[ : max_summary_length ]
truncated = True
break
if truncated or word_count < len( words ):
summary += " ..."
if highlight_text:
summary = summary.replace( highlight_text, "<b>%s</b>" % highlight_text )
note.summary = summary
return note
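  # For illustration (hypothetical note): contents u"<h3>Groceries</h3>milk eggs
  # bread ..." would summarize to plain text like u"milk eggs bread ...",
  # truncated on a word boundary and capped at roughly 40 characters by default.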
@expose( view = Json )
@strongly_expire
@end_transaction
@grab_user_id
@validate(
notebook_id = Valid_id(),
note_title = Valid_string( min = 1, max = 500 ),
user_id = Valid_id( none_okay = True ),
)
def lookup_note_id( self, notebook_id, note_title, user_id ):
"""
Return a note's id by looking up its title. The lookup by title is performed
case-insensitively.
@type notebook_id: unicode
@param notebook_id: id of notebook the note is in
@type note_title: unicode
@param note_title: title of the note id to return
@type user_id: unicode or NoneType
@param user_id: id of current logged-in user (if any), determined by @grab_user_id
@rtype: json dict
@return: { 'note_id': noteid or None }
@raise Access_error: the current user doesn't have access to the given notebook
@raise Validation_error: one of the arguments is invalid
"""
notebook = self.__users.load_notebook( user_id, notebook_id )
if not notebook:
raise Access_error()
note = self.__database.select_one( Note, notebook.sql_load_note_by_title( note_title ) )
return dict(
note_id = note and note.object_id or None,
)
@expose( view = Json )
@strongly_expire
@end_transaction
@grab_user_id
@validate(
notebook_id = Valid_id(),
note_id = Valid_id(),
user_id = Valid_id( none_okay = True ),
)
def load_note_revisions( self, notebook_id, note_id, user_id = None ):
"""
Return the full list of revision timestamps for this note in chronological order.
@type notebook_id: unicode
@param notebook_id: id of notebook the note is in
@type note_id: unicode
@param note_id: id of note in question
@type user_id: unicode or NoneType
@param user_id: id of current logged-in user (if any), determined by @grab_user_id
@rtype: json dict
@return: { 'revisions': userrevisionlist or None }
@raise Access_error: the current user doesn't have access to the given notebook or note
@raise Validation_error: one of the arguments is invalid
"""
notebook = self.__users.load_notebook( user_id, notebook_id )
if not notebook:
raise Access_error()
note = self.__database.load( Note, note_id )
if note:
if note and note.notebook_id is None:
return dict(
revisions = None,
)
if note.notebook_id != notebook_id:
if note.notebook_id == notebook.trash_id:
return dict(
revisions = None,
)
raise Access_error()
revisions = self.__database.select_many( User_revision, note.sql_load_revisions() )
else:
revisions = None
return dict(
revisions = revisions,
)
@expose( view = Json )
@strongly_expire
@end_transaction
@grab_user_id
@validate(
notebook_id = Valid_id(),
note_id = Valid_id(),
user_id = Valid_id( none_okay = True ),
)
def load_note_links( self, notebook_id, note_id, user_id = None ):
"""
Return a list of HTTP links found within the contents of the given note.
@type notebook_id: unicode
@param notebook_id: id of notebook the note is in
@type note_id: unicode
@param note_id: id of note in question
@type user_id: unicode or NoneType
@param user_id: id of current logged-in user (if any), determined by @grab_user_id
@rtype: json dict
@return: { 'tree_html': html_fragment }
@raise Access_error: the current user doesn't have access to the given notebook or note
@raise Validation_error: one of the arguments is invalid
"""
notebook = self.__users.load_notebook( user_id, notebook_id )
if not notebook:
raise Access_error()
note = self.__database.load( Note, note_id )
if note is None or note.notebook_id not in ( notebook_id, notebook.trash_id ):
raise Access_error()
items = []
for match in self.LINK_PATTERN.finditer( note.contents ):
( attributes, href, target, embedded_image, title ) = match.groups()
# if it has a link target, it's a link to an external web site
if target:
items.append( Note_tree_area.make_item( title, attributes, u"note_tree_external_link" ) )
continue
# if it has '/files/' in its path, it's an uploaded file link
if self.FILE_PATTERN.search( href ):
if not self.NEW_FILE_PATTERN.search( href ): # ignore files that haven't been uploaded yet
if embedded_image:
title = u"embedded image"
items.append( Note_tree_area.make_item( title, attributes, u"note_tree_file_link", target = u"_new" ) )
continue
# if it has a note_id, load that child note and see whether it has any children of its own
child_note_ids = cgi.parse_qs( href.split( '?' )[ -1 ] ).get( u"note_id" )
if child_note_ids:
child_note_id = child_note_ids[ 0 ]
child_note = self.__database.load( Note, child_note_id )
if child_note and child_note.contents and self.LINK_PATTERN.search( child_note.contents ):
items.append( Note_tree_area.make_item( title, attributes, u"note_tree_link", has_children = True ) )
continue
# otherwise, it's childless
items.append( Note_tree_area.make_item( title, attributes, u"note_tree_link", has_children = False ) )
return dict(
tree_html = unicode( Note_tree_area.make_tree( items ) ),
)
@expose( view = Json )
@end_transaction
@grab_user_id
@validate(
notebook_id = Valid_id(),
note_id = Valid_id(),
contents = Valid_string( min = 1, max = 50000, escape_html = False ),
startup = Valid_bool(),
previous_revision = Valid_revision( none_okay = True ),
position_after = Valid_id( none_okay = True ),
position_before = Valid_id( none_okay = True ),
user_id = Valid_id( none_okay = True ),
)
def save_note( self, notebook_id, note_id, contents, startup, previous_revision = None,
position_after = None, position_before = None, user_id = None ):
"""
Save a new revision of the given note. This function will work both for creating a new note and
for updating an existing note. If the note exists and the given contents are identical to the
existing contents for the given previous_revision, then no saving takes place and a new_revision
of None is returned. Otherwise this method returns the timestamp of the new revision.
@type notebook_id: unicode
@param notebook_id: id of notebook the note is in
@type note_id: unicode
@param note_id: id of note to save
@type contents: unicode
@param contents: new textual contents of the note, including its title
@type startup: bool
@param startup: whether the note should be displayed on startup
@type previous_revision: unicode or NoneType
@param previous_revision: previous known revision timestamp of the provided note, or None if
the note is new
@type position_after: unicode or NoneType
@param position_after: id of note to position the saved note after (optional)
@type position_before: unicode or NoneType
@param position_before: id of note to position the saved note before (optional)
@type user_id: unicode or NoneType
@param user_id: id of current logged-in user (if any), determined by @grab_user_id
@rtype: json dict
@return: {
'new_revision': User_revision of saved note, or None if nothing was saved
'previous_revision': User_revision immediately before new_revision, or None if the note is new
'storage_bytes': current storage usage by user
'rank': float rank of the saved note, or None
}
@raise Access_error: the current user doesn't have access to the given notebook
@raise Validation_error: one of the arguments is invalid
"""
notebook = self.__users.load_notebook( user_id, notebook_id, read_write = True, note_id = note_id )
user = self.__database.load( User, user_id )
if not user or not notebook:
      raise Access_error()
note = self.__database.load( Note, note_id )
# if the user has read-write access only to their own notes in this notebook, force the startup
# flag to be True for this note. also ignore note positioning parameters
if notebook.read_write == Notebook.READ_WRITE_FOR_OWN_NOTES:
startup = True
position_before = None
position_after = None
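    # Notes are ordered by a float "rank" column: positioning after a note uses
    # after_note.rank + 1.0, and if that collides with the following note's
    # rank, all subsequent ranks are incremented to make room.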
def update_rank( position_after, position_before ):
after_note = position_after and self.__database.load( Note, position_after ) or None
before_note = position_before and self.__database.load( Note, position_before ) or None
if after_note and before_note:
new_rank = float( after_note.rank ) + 1.0
# if necessary, increment the rank of all subsequent notes to make "room" for this note
if new_rank >= before_note.rank:
# clear the cache of before_note and all notes with subsequent rank
self.__database.uncache_many(
Note,
self.__database.select_many(
unicode,
notebook.sql_load_note_ids_starting_from_rank( before_note.rank )
)
)
self.__database.execute( notebook.sql_increment_rank( before_note.rank ), commit = False )
return new_rank
elif after_note:
return float( after_note.rank ) + 1.0
elif before_note:
return float( before_note.rank ) - 1.0
return 0.0
# check whether the provided note contents have been changed since the previous revision
def update_note( current_notebook, old_note, startup, user ):
# the note hasn't been changed, so bail without updating it
if not position_after and not position_before and startup == old_note.startup and \
contents.replace( u"\n", u"" ) == old_note.contents.replace( u"\n", "" ):
new_revision = None
# the note has changed, so update it
else:
note.contents = contents
note.startup = startup
if position_after or position_before:
note.rank = update_rank( position_after, position_before )
elif note.rank is None:
note.rank = self.__database.select_one( float, notebook.sql_highest_note_rank() ) + 1
note.user_id = user.object_id
new_revision = User_revision( note.revision, note.user_id, user.username )
self.__files.purge_unused( note )
return new_revision
# if the note is already in the given notebook, load it and update it
if note and note.notebook_id == notebook.object_id:
old_note = self.__database.load( Note, note_id, previous_revision )
previous_user = self.__database.load( User, note.user_id )
previous_revision = User_revision( note.revision, note.user_id, previous_user and previous_user.username or None )
new_revision = update_note( notebook, old_note, startup, user )
# the note is not already in the given notebook, so look for it in the trash
elif note and notebook.trash_id and note.notebook_id == notebook.trash_id:
old_note = self.__database.load( Note, note_id, previous_revision )
# undelete the note, putting it back in the given notebook
previous_user = self.__database.load( User, note.user_id )
previous_revision = User_revision( note.revision, note.user_id, previous_user and previous_user.username or None )
note.notebook_id = notebook.object_id
note.deleted_from_id = None
new_revision = update_note( notebook, old_note, startup, user )
# otherwise, create a new note
else:
if position_after or position_before:
rank = update_rank( position_after, position_before )
else:
rank = self.__database.select_one( float, notebook.sql_highest_note_rank() ) + 1
previous_revision = None
note = Note.create( note_id, contents, notebook_id = notebook.object_id, startup = startup, rank = rank, user_id = user_id )
new_revision = User_revision( note.revision, note.user_id, user.username )
if new_revision:
self.__database.save( note, commit = False )
user = self.__users.update_storage( user_id, commit = False )
self.__database.uncache_command( notebook.sql_count_notes() ) # cached note count is now invalid
self.__database.commit()
user.group_storage_bytes = self.__users.calculate_group_storage( user )
else:
user = None
if note.rank is None:
rank = None
else:
rank = float( note.rank )
return dict(
new_revision = new_revision,
previous_revision = previous_revision,
storage_bytes = user and user.storage_bytes or 0,
rank = rank,
)
@expose( view = Json )
@end_transaction
@grab_user_id
@validate(
notebook_id = Valid_id(),
note_id = Valid_id(),
revision = Valid_revision(),
user_id = Valid_id( none_okay = True ),
)
def revert_note( self, notebook_id, note_id, revision, user_id ):
"""
Revert the contents of a note to that of an earlier revision, thereby creating a new revision.
The timestamp of the new revision is returned.
@type notebook_id: unicode
@param notebook_id: id of notebook the note is in
@type note_id: unicode
@param note_id: id of note to revert
@type revision: unicode or NoneType
@param revision: revision timestamp to revert to for the provided note
@type user_id: unicode or NoneType
@param user_id: id of current logged-in user (if any), determined by @grab_user_id
@rtype: json dict
@return: {
'new_revision': User_revision of the reverted note
'previous_revision': User_revision immediately before new_revision
'storage_bytes': current storage usage by user,
}
@raise Access_error: the current user doesn't have access to the given notebook
@raise Validation_error: one of the arguments is invalid
"""
notebook = self.__users.load_notebook( user_id, notebook_id, read_write = True )
user = self.__database.load( User, user_id )
if not user or not notebook:
raise Access_error()
note = self.__database.load( Note, note_id )
if not note:
raise Access_error()
if not self.__users.load_notebook( user_id, note.notebook_id, read_write = True, note_id = note.object_id ):
raise Access_error()
# check whether the provided note contents have been changed since the previous revision
def update_note( current_notebook, old_note, user ):
# if the revision to revert to is already the newest revision, bail without updating the note
if old_note.revision == note.revision:
new_revision = None
# otherwise, revert the note's contents to that of the older revision
else:
note.contents = old_note.contents
note.user_id = user.object_id
new_revision = User_revision( note.revision, note.user_id, user.username )
self.__files.purge_unused( note )
return new_revision
previous_user = self.__database.load( User, note.user_id )
previous_revision = User_revision( note.revision, note.user_id, previous_user and previous_user.username or None )
# if the note is already in the given notebook, load it and revert it
if note and note.notebook_id == notebook.object_id:
old_note = self.__database.load( Note, note_id, revision )
new_revision = update_note( notebook, old_note, user )
# the note is not already in the given notebook, so look for it in the trash
elif note and notebook.trash_id and note.notebook_id == notebook.trash_id:
old_note = self.__database.load( Note, note_id, revision )
# undelete the note, putting it back in the given notebook
note.notebook_id = notebook.object_id
note.deleted_from_id = None
new_revision = update_note( notebook, old_note, user )
# otherwise, the note doesn't exist
else:
raise Access_error()
if new_revision:
self.__database.save( note, commit = False )
user = self.__users.update_storage( user_id, commit = False )
self.__database.commit()
user.group_storage_bytes = self.__users.calculate_group_storage( user )
else:
user = None
return dict(
new_revision = new_revision,
previous_revision = previous_revision,
storage_bytes = user and user.storage_bytes or 0,
contents = note.contents,
)
@expose( view = Json )
@end_transaction
@grab_user_id
@validate(
notebook_id = Valid_id(),
note_id = Valid_id(),
user_id = Valid_id( none_okay = True ),
)
def delete_note( self, notebook_id, note_id, user_id ):
"""
Delete the given note from its notebook and move it to the notebook's trash. The note is added
as a startup note within the trash. If the given notebook is the trash and the given note is
already there, then it is deleted from the trash forever.
@type notebook_id: unicode
@param notebook_id: id of notebook the note is in
@type note_id: unicode
@param note_id: id of note to delete
@type user_id: unicode or NoneType
@param user_id: id of current logged-in user (if any), determined by @grab_user_id
@rtype: json dict
@return: { 'storage_bytes': current storage usage by user }
@raise Access_error: the current user doesn't have access to the given notebook
@raise Validation_error: one of the arguments is invalid
"""
notebook = self.__users.load_notebook( user_id, notebook_id, read_write = True, note_id = note_id )
if not notebook:
raise Access_error()
note = self.__database.load( Note, note_id )
if note and note.notebook_id == notebook_id:
if notebook.trash_id:
note.deleted_from_id = notebook_id
note.notebook_id = notebook.trash_id
note.startup = True
else:
self.__files.purge_unused( note, purge_all_links = True )
note.notebook_id = None
note.user_id = user_id
self.__database.save( note, commit = False )
user = self.__users.update_storage( user_id, commit = False )
self.__database.uncache_command( notebook.sql_count_notes() ) # cached note count is now invalid
self.__database.commit()
user.group_storage_bytes = self.__users.calculate_group_storage( user )
return dict( storage_bytes = user.storage_bytes )
else:
return dict( storage_bytes = 0 )
@expose( view = Json )
@end_transaction
@grab_user_id
@validate(
notebook_id = Valid_id(),
note_id = Valid_id(),
user_id = Valid_id( none_okay = True ),
)
def undelete_note( self, notebook_id, note_id, user_id ):
"""
Undelete the given note from the trash, moving it back into its notebook. The note is added
as a startup note within its notebook.
@type notebook_id: unicode
@param notebook_id: id of notebook the note was in
@type note_id: unicode
@param note_id: id of note to undelete
@type user_id: unicode or NoneType
@param user_id: id of current logged-in user (if any), determined by @grab_user_id
@rtype: json dict
@return: { 'storage_bytes': current storage usage by user }
@raise Access_error: the current user doesn't have access to the given notebook
@raise Validation_error: one of the arguments is invalid
"""
notebook = self.__users.load_notebook( user_id, notebook_id, read_write = True, note_id = note_id )
if not notebook:
raise Access_error()
note = self.__database.load( Note, note_id )
if note and notebook.trash_id:
# if the note isn't deleted, and it's already in this notebook, just return
if note.deleted_from_id is None and note.notebook_id == notebook_id:
return dict( storage_bytes = 0 )
# if the note was deleted from a different notebook than the notebook given, raise
if note.deleted_from_id != notebook_id:
raise Access_error()
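    # move the note out of the trash and back into the notebook it was
    # deleted from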
note.notebook_id = note.deleted_from_id
note.deleted_from_id = None
note.startup = True
note.user_id = user_id
self.__database.save( note, commit = False )
user = self.__users.update_storage( user_id, commit = False )
self.__database.uncache_command( notebook.sql_count_notes() ) # cached note count is now invalid
self.__database.commit()
user.group_storage_bytes = self.__users.calculate_group_storage( user )
return dict( storage_bytes = user.storage_bytes )
else:
return dict( storage_bytes = 0 )
@expose( view = Json )
@end_transaction
@grab_user_id
@validate(
notebook_id = Valid_id(),
user_id = Valid_id( none_okay = True ),
)
def delete_all_notes( self, notebook_id, user_id ):
"""
Delete all notes from the given notebook and move them to the notebook's trash (if any). The
notes are added as startup notes within the trash. If the given notebook is the trash, then
all notes in the trash are deleted forever.
@type notebook_id: unicode
    @param notebook_id: id of notebook the notes are in
@type user_id: unicode or NoneType
@param user_id: id of current logged-in user (if any), determined by @grab_user_id
@rtype: json dict
@return: { 'storage_bytes': current storage usage by user }
@raise Access_error: the current user doesn't have access to the given notebook
@raise Validation_error: one of the arguments is invalid
"""
notebook = self.__users.load_notebook( user_id, notebook_id, read_write = True )
if not notebook or notebook.read_write == Notebook.READ_WRITE_FOR_OWN_NOTES:
raise Access_error()
notes = self.__database.select_many( Note, notebook.sql_load_notes_in_update_order() )
for note in notes:
if notebook.trash_id:
note.deleted_from_id = notebook_id
note.notebook_id = notebook.trash_id
note.startup = True
else:
self.__files.purge_unused( note, purge_all_links = True )
note.notebook_id = None
note.user_id = user_id
self.__database.save( note, commit = False )
user = self.__users.update_storage( user_id, commit = False )
self.__database.uncache_command( notebook.sql_count_notes() ) # cached note count is now invalid
self.__database.commit()
user.group_storage_bytes = self.__users.calculate_group_storage( user )
return dict(
storage_bytes = user.storage_bytes,
)
@expose( view = Json )
@strongly_expire
@end_transaction
@grab_user_id
@validate(
notebook_id = Valid_id(),
search_text = unicode,
user_id = Valid_id( none_okay = True ),
)
def search_titles( self, notebook_id, search_text, user_id ):
"""
Search the note titles within the given notebook for the given search text, and return matching
notes. The search is case-insensitive. The returned notes include title summaries with the
search term highlighted and are ordered by descending revision timestamp.
@type notebook_id: unicode
@param notebook_id: id of notebook to search
@type search_text: unicode
@param search_text: search term
@type user_id: unicode or NoneType
@param user_id: id of current logged-in user (if any), determined by @grab_user_id
@rtype: json dict
@return: { 'notes': [ matching notes ] }
@raise Access_error: the current user doesn't have access to the given notebook
@raise Validation_error: one of the arguments is invalid
@raise Search_error: the provided search_text is invalid
"""
notebook = self.__users.load_notebook( user_id, notebook_id )
if not notebook:
raise Access_error()
MAX_SEARCH_TEXT_LENGTH = 256
if len( search_text ) > MAX_SEARCH_TEXT_LENGTH:
raise Validation_error( u"search_text", None, unicode, message = u"is too long" )
if len( search_text ) == 0:
raise Validation_error( u"search_text", None, unicode, message = u"is missing" )
notes = self.__database.select_many( Note, Notebook.sql_search_titles( notebook_id, search_text ) )
    # do a case-insensitive replace to wrap the search term with bold; the
    # pattern only depends on the search text, so compile it once up front
    search_text_pattern = re.compile( u"(%s)" % re.escape( search_text ), re.I )
    for note in notes:
      note.summary = search_text_pattern.sub( r"<b>\1</b>", note.summary )
return dict(
notes = notes,
)
@expose( view = Json )
@strongly_expire
@end_transaction
@grab_user_id
@validate(
notebook_id = Valid_id(),
search_text = unicode,
user_id = Valid_id( none_okay = True ),
)
def search( self, notebook_id, search_text, user_id ):
"""
Search the notes within all notebooks that the user has access to for the given search text.
Note that the search is case-insensitive, and all HTML tags are ignored. Notes with title
matches are generally ranked higher than matches that are only in the note contents. The
returned notes include content summaries with the search terms highlighted.
@type notebook_id: unicode
@param notebook_id: id of notebook to show first in search results
@type search_text: unicode
@param search_text: search term
@type user_id: unicode or NoneType
@param user_id: id of current logged-in user (if any), determined by @grab_user_id
@rtype: json dict
@return: { 'notes': [ matching notes ] }
@raise Access_error: the current user doesn't have access to the given notebook
@raise Validation_error: one of the arguments is invalid
@raise Search_error: the provided search_text is invalid
"""
# if the anonymous user has access to the given notebook, then run the search as the anonymous
# user instead of the given user id
anonymous = self.__database.select_one( User, User.sql_load_by_username( u"anonymous" ), use_cache = True )
if not anonymous:
raise Access_error()
notebook = self.__users.load_notebook( anonymous.object_id, notebook_id )
if notebook:
user_id = anonymous.object_id
else:
notebook = self.__users.load_notebook( user_id, notebook_id )
if not notebook:
raise Access_error()
MAX_SEARCH_TEXT_LENGTH = 256
if len( search_text ) > MAX_SEARCH_TEXT_LENGTH:
raise Validation_error( u"search_text", None, unicode, message = u"is too long" )
if len( search_text ) == 0:
raise Validation_error( u"search_text", None, unicode, message = u"is missing" )
notes = self.__database.select_many( Note, Notebook.sql_search_notes( user_id, notebook_id, search_text, self.__database.backend ) )
# make a summary for each note that doesn't have one
notes = [
note.summary and note or
self.summarize_note( note, max_summary_length = 80, word_count = 30, highlight_text = search_text )
for note in notes
]
return dict(
notes = notes,
)
@expose()
@weakly_expire
@end_transaction
@grab_user_id
@validate(
notebook_id = Valid_id(),
format = Valid_string( min = 1, max = 100 ),
note_id = Valid_id( none_okay = True ),
user_id = Valid_id( none_okay = True ),
)
def export( self, notebook_id, format, note_id = None, user_id = None ):
"""
Download the entire contents of the given notebook as a stand-alone file.
@type notebook_id: unicode
@param notebook_id: id of notebook to export
@type format: unicode
@param format: string indicating the export plugin to use, currently one of: "html", "csv"
    @type note_id: unicode
@param note_id: id of single note within the notebook to export (optional)
@type user_id: unicode
@param user_id: id of current logged-in user (if any), determined by @grab_user_id
@rtype: unicode or generator (for streaming files)
@return: exported file with appropriate headers to trigger a download
@raise Access_error: the current user doesn't have access to the given notebook
@raise Validation_error: one of the arguments is invalid or the format is unknown
"""
if not self.EXPORT_FORMAT_PATTERN.search( format ):
raise Validation_error( u"format", format, Valid_string, message = u"is invalid" )
notebook = self.__users.load_notebook( user_id, notebook_id )
if not notebook:
raise Access_error()
if note_id:
note = self.__database.load( Note, note_id )
if not note:
raise Access_error()
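      # export only the given note; the export plugin receives notebook = None
      # so that just this one note is rendered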
notes = [ note ]
notebook = None
else:
startup_notes = self.__database.select_many( Note, notebook.sql_load_startup_notes() )
other_notes = self.__database.select_many( Note, notebook.sql_load_non_startup_notes() )
notes = startup_notes + other_notes
from plugins.Invoke import invoke
try:
return invoke(
plugin_type = u"export",
plugin_name = format,
database = self.__database,
notebook = notebook,
notes = notes,
response_headers = cherrypy.response.headerMap,
)
except ( ImportError, AttributeError ):
raise Validation_error( u"format", format, Valid_string, message = u"is unknown" )
@expose( view = Json )
@end_transaction
@grab_user_id
@validate(
user_id = Valid_id( none_okay = True ),
)
def create( self, user_id ):
"""
Create a new notebook and give it a default name.
@type user_id: unicode or NoneType
@param user_id: id of current logged-in user (if any)
@rtype dict
@return { 'redirect': new_notebook_url }
@raise Access_error: the current user doesn't have access to create a notebook
@raise Validation_error: one of the arguments is invalid
"""
if user_id is None:
raise Access_error()
user = self.__database.load( User, user_id )
notebook = self.__create_notebook( u"new notebook", user )
return dict(
redirect = u"/notebooks/%s?rename=true" % notebook.object_id,
)
def __create_notebook( self, name, user, commit = True ):
# create the notebook along with a trash
trash_id = self.__database.next_id( Notebook, commit = False )
trash = Notebook.create( trash_id, u"trash", user_id = user.object_id )
self.__database.save( trash, commit = False )
notebook_id = self.__database.next_id( Notebook, commit = False )
notebook = Notebook.create( notebook_id, name, trash_id, user_id = user.object_id )
self.__database.save( notebook, commit = False )
# record the fact that the user has access to their new notebook
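    # rank the new notebook after all of the user's existing notebooks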
rank = self.__database.select_one( float, user.sql_highest_notebook_rank() ) + 1
self.__database.execute( user.sql_save_notebook( notebook_id, read_write = True, owner = True, rank = rank ), commit = False )
self.__database.execute( user.sql_save_notebook( trash_id, read_write = True, owner = True ), commit = False )
if commit:
self.__database.commit()
return notebook
@expose( view = Json )
@end_transaction
@grab_user_id
@validate(
notebook_id = Valid_id(),
name = Valid_string( min = 1, max = 100 ),
user_id = Valid_id( none_okay = True ),
)
def rename( self, notebook_id, name, user_id ):
"""
Change the name of the given notebook.
@type notebook_id: unicode
@param notebook_id: id of notebook to rename
@type name: unicode
@param name: new name of the notebook
@type user_id: unicode or NoneType
@param user_id: id of current logged-in user (if any)
@rtype dict
@return {}
@raise Access_error: the current user doesn't have access to the given notebook
@raise Validation_error: one of the arguments is invalid
"""
notebook = self.__users.load_notebook( user_id, notebook_id, read_write = True, owner = True )
# special case to allow the creator of a READ_WRITE_FOR_OWN_NOTES notebook to rename it
if notebook is None:
notebook = self.__users.load_notebook( user_id, notebook_id, read_write = True )
if not notebook or not ( notebook.read_write == Notebook.READ_WRITE_FOR_OWN_NOTES and
notebook.user_id == user_id ):
raise Access_error()
user = self.__database.load( User, user_id )
if not user or not notebook:
raise Access_error()
# prevent renaming of the trash notebook to anything
if notebook.name == u"trash":
raise Access_error()
# prevent just anyone from making official Luminotes notebooks
if name.startswith( u"Luminotes" ) and not notebook.name.startswith( u"Luminotes" ):
raise Access_error()
# prevent renaming of another notebook to "trash"
if name == u"trash":
raise Access_error()
notebook.name = name
notebook.user_id = user_id
self.__database.save( notebook, commit = False )
self.__database.commit()
return dict()
@expose( view = Json )
@end_transaction
@grab_user_id
@validate(
notebook_id = Valid_id(),
user_id = Valid_id( none_okay = True ),
)
def delete( self, notebook_id, user_id ):
"""
Delete the given notebook and redirect to a remaining read-write notebook. If there is none,
create one.
@type notebook_id: unicode
@param notebook_id: id of notebook to delete
@type user_id: unicode or NoneType
@param user_id: id of current logged-in user (if any)
@rtype dict
@return { 'redirect': remaining_notebook_url }
@raise Access_error: the current user doesn't have access to the given notebook
@raise Validation_error: one of the arguments is invalid
"""
if user_id is None:
raise Access_error()
notebook = self.__users.load_notebook( user_id, notebook_id, read_write = True, owner = True )
user = self.__database.load( User, user_id )
if not user or not notebook:
raise Access_error()
# prevent deletion of a trash notebook directly
if notebook.name == u"trash":
raise Access_error()
notebook.deleted = True
notebook.user_id = user_id
self.__database.save( notebook, commit = False )
# redirect to a remaining undeleted read-write notebook, or if there isn't one, create an empty notebook
remaining_notebook = self.__database.select_one( Notebook, user.sql_load_notebooks(
parents_only = True, undeleted_only = True, read_write = True,
) )
if remaining_notebook is None:
remaining_notebook = self.__create_notebook( u"my notebook", user, commit = False )
self.__database.commit()
return dict(
redirect = u"/notebooks/%s?deleted_id=%s" % ( remaining_notebook.object_id, notebook.object_id ),
)
@expose( view = Json )
@end_transaction
@grab_user_id
@validate(
notebook_id = Valid_id(),
user_id = Valid_id( none_okay = True ),
)
def delete_forever( self, notebook_id, user_id ):
"""
Delete the given notebook permanently (by simply revoking the user's access to it).
@type notebook_id: unicode
@param notebook_id: id of notebook to delete
@type user_id: unicode or NoneType
@param user_id: id of current logged-in user (if any)
@rtype dict
@return: { 'storage_bytes': current storage usage by user }
@raise Access_error: the current user doesn't have access to the given notebook
@raise Validation_error: one of the arguments is invalid
"""
if user_id is None:
raise Access_error()
notebook = self.__users.load_notebook( user_id, notebook_id, read_write = True, owner = True )
user = self.__database.load( User, user_id )
if not user or not notebook:
raise Access_error()
# prevent deletion of a trash notebook directly
if notebook.name == u"trash":
raise Access_error()
self.__database.execute( user.sql_remove_notebook( notebook_id ), commit = False )
user = self.__users.update_storage( user_id, commit = False )
self.__database.commit()
user.group_storage_bytes = self.__users.calculate_group_storage( user )
return dict( storage_bytes = user.storage_bytes )
@expose( view = Json )
@end_transaction
@grab_user_id
@validate(
notebook_id = Valid_id(),
user_id = Valid_id( none_okay = True ),
)
def undelete( self, notebook_id, user_id ):
"""
Undelete the given notebook and redirect to it.
@type notebook_id: unicode
@param notebook_id: id of notebook to undelete
@type user_id: unicode or NoneType
@param user_id: id of current logged-in user (if any)
@rtype dict
@return { 'redirect': notebook_url }
@raise Access_error: the current user doesn't have access to the given notebook
@raise Validation_error: one of the arguments is invalid
"""
if user_id is None:
raise Access_error()
notebook = self.__users.load_notebook( user_id, notebook_id, read_write = True, owner = True )
if not notebook:
raise Access_error()
notebook.deleted = False
notebook.user_id = user_id
self.__database.save( notebook, commit = False )
self.__database.commit()
return dict(
redirect = u"/notebooks/%s" % notebook.object_id,
)
@expose( view = Json )
@end_transaction
@grab_user_id
@validate(
notebook_id = Valid_id(),
user_id = Valid_id( none_okay = True ),
)
def move_up( self, notebook_id, user_id ):
"""
Reorder the user's notebooks by moving the given notebook up by one. If the notebook is already
first, then wrap it around to be the last notebook.
@type notebook_id: unicode
@param notebook_id: id of notebook to move up
@type user_id: unicode or NoneType
@param user_id: id of current logged-in user (if any)
@rtype json dict
@return {}
@raise Access_error: the current user doesn't have access to the given notebook
@raise Validation_error: one of the arguments is invalid
"""
notebook = self.__users.load_notebook( user_id, notebook_id )
user = self.__database.load( User, user_id )
if not user or not notebook:
raise Access_error()
# load the notebooks to which this user has access
notebooks = self.__database.select_many(
Notebook,
user.sql_load_notebooks( parents_only = True, undeleted_only = True ),
)
if not notebooks:
raise Access_error()
# find the given notebook and the one previous to it
previous_notebook = None
current_notebook = None
for notebook in notebooks:
if notebook.object_id == notebook_id:
current_notebook = notebook
break
previous_notebook = notebook
if current_notebook is None:
raise Access_error()
# if there is no previous notebook, then the current notebook is first. so, move it after the
# last notebook
if previous_notebook is None:
last_notebook = notebooks[ -1 ]
self.__database.execute(
user.sql_update_notebook_rank( current_notebook.object_id, last_notebook.rank + 1 ),
commit = False,
)
# otherwise, save the current and previous notebooks back to the database with swapped ranks
else:
self.__database.execute(
user.sql_update_notebook_rank( current_notebook.object_id, previous_notebook.rank ),
commit = False,
)
self.__database.execute(
user.sql_update_notebook_rank( previous_notebook.object_id, current_notebook.rank ),
commit = False,
)
self.__database.commit()
return dict()
@expose( view = Json )
@end_transaction
@grab_user_id
@validate(
notebook_id = Valid_id(),
user_id = Valid_id( none_okay = True ),
)
def move_down( self, notebook_id, user_id ):
"""
Reorder the user's notebooks by moving the given notebook down by one. If the notebook is
already last, then wrap it around to be the first notebook.
@type notebook_id: unicode
@param notebook_id: id of notebook to move down
@type user_id: unicode or NoneType
@param user_id: id of current logged-in user (if any)
@rtype json dict
@return {}
@raise Access_error: the current user doesn't have access to the given notebook
@raise Validation_error: one of the arguments is invalid
"""
notebook = self.__users.load_notebook( user_id, notebook_id )
user = self.__database.load( User, user_id )
if not user or not notebook:
raise Access_error()
# load the notebooks to which this user has access
notebooks = self.__database.select_many(
Notebook,
user.sql_load_notebooks( parents_only = True, undeleted_only = True ),
)
if not notebooks:
raise Access_error()
# find the given notebook and the one after it
current_notebook = None
next_notebook = None
for notebook in notebooks:
if notebook.object_id == notebook_id:
current_notebook = notebook
elif current_notebook:
next_notebook = notebook
break
if current_notebook is None:
raise Access_error()
# if there is no next notebook, then the current notebook is last. so, move it before the
# first notebook
if next_notebook is None:
first_notebook = notebooks[ 0 ]
self.__database.execute(
user.sql_update_notebook_rank( current_notebook.object_id, first_notebook.rank - 1 ),
commit = False,
)
# otherwise, save the current and next notebooks back to the database with swapped ranks
else:
self.__database.execute(
user.sql_update_notebook_rank( current_notebook.object_id, next_notebook.rank ),
commit = False,
)
self.__database.execute(
user.sql_update_notebook_rank( next_notebook.object_id, current_notebook.rank ),
commit = False,
)
self.__database.commit()
return dict()
@expose( view = Json )
@strongly_expire
@end_transaction
@grab_user_id
@validate(
notebook_id = Valid_id(),
start = Valid_int( min = 0 ),
count = Valid_int( min = 1 ),
user_id = Valid_id( none_okay = True ),
)
def load_recent_updates( self, notebook_id, start, count, user_id = None ):
"""
Provide the information necessary to display a notebook's recent updated/created notes, in
reverse chronological order by update time.
@type notebook_id: unicode
@param notebook_id: id of the notebook containing the notes
    @type start: int
    @param start: index of recent note to start with (0 is the most recent note)
    @type count: int
    @param count: number of recent notes to return
@type user_id: unicode or NoneType
@param user_id: id of current logged-in user (if any)
@rtype: json dict
@return: { 'notes': recent_notes_list }
@raise Access_error: the current user doesn't have access to the given notebook or note
"""
notebook = self.__users.load_notebook( user_id, notebook_id )
if notebook is None:
raise Access_error()
recent_notes = self.__database.select_many( Note, notebook.sql_load_notes_in_update_order( start = start, count = count ) )
return dict(
notes = recent_notes,
)
def recent_notes( self, notebook_id, start = 0, count = 10, user_id = None ):
"""
Return the given notebook's recently created notes in reverse chronological order by creation
time.
@type notebook_id: unicode
@param notebook_id: id of the notebook containing the notes
@type start: unicode or NoneType
@param start: index of recent note to start with (defaults to 0, the most recent note)
@type count: int or NoneType
@param count: number of recent notes to return (defaults to 10 notes)
@type user_id: unicode or NoneType
@param user_id: id of current logged-in user (if any)
@rtype: dict
@return: data for Main_page() constructor
@raise Access_error: the current user doesn't have access to the given notebook or note
"""
notebook = self.__users.load_notebook( user_id, notebook_id )
if notebook is None:
raise Access_error()
notes = self.__database.select_many( Note, notebook.sql_load_notes_in_creation_order( start, count ) )
result = self.__users.current( user_id )
result.update( self.contents( notebook_id, user_id = user_id ) )
result[ "notes" ] = notes
result[ "start" ] = start
result[ "count" ] = count
return result
def old_notes( self, notebook_id, start = 0, count = 10, user_id = None ):
"""
Return the given notebook's oldest notes in chronological order by creation time.
@type notebook_id: unicode
@param notebook_id: id of the notebook containing the notes
@type start: unicode or NoneType
    @param start: index of note to start with (defaults to 0, the oldest note)
@type count: int or NoneType
@param count: number of notes to return (defaults to 10 notes)
@type user_id: unicode or NoneType
@param user_id: id of current logged-in user (if any)
@rtype: dict
@return: data for Main_page() constructor
@raise Access_error: the current user doesn't have access to the given notebook or note
"""
notebook = self.__users.load_notebook( user_id, notebook_id )
if notebook is None:
raise Access_error()
notes = self.__database.select_many( Note, notebook.sql_load_notes_in_creation_order( start, count, reverse = True ) )
result = self.__users.current( user_id )
result.update( self.contents( notebook_id, user_id = user_id ) )
result[ "notes" ] = notes
result[ "start" ] = start
result[ "count" ] = count
return result
WHITESPACE_PATTERN = re.compile( "\s+" )
NEWLINE_PATTERN = re.compile( "\r?\n" )
NOTE_LINK_PATTERN = re.compile( '(<a\s+(?:[^>]+\s+)?href=")[^"]*/notebooks/(\w+)\?note_id=(\w+)("[^>]*>)', re.IGNORECASE )
@expose( view = Json )
@strongly_expire
@end_transaction
@grab_user_id
@validate(
file_id = Valid_id(),
content_column = Valid_int( min = 0 ),
title_column = Valid_int( min = 0, none_okay = True ),
plaintext = Valid_bool(),
import_button = unicode,
user_id = Valid_id( none_okay = True ),
)
def import_csv( self, file_id, content_column, title_column, plaintext, import_button, user_id = None ):
"""
Import a previously uploaded CSV file of notes as a new notebook. Delete the file once the
import is complete.
Plaintext contents are left mostly untouched, just stripping HTML and converting newlines to
<br> tags. HTML contents are cleaned of any disallowed/harmful HTML tags, and target="_new"
attributes are added to all links without targets, except internal note links.
Internal note links are rewritten such that they point to the newly imported notes. This is
accomplished by looking for a "note_id" column and determining what note each link points to.
Then each internal note link is rewritten to point at the new notebook id and note id.
@type file_id: unicode
@param file_id: id of the previously uploaded CSV file to import
@type content_column: int
@param content_column: zero-based index of the column containing note contents
@type title_column: int or NoneType
@param title_column: zero-based index of the column containing note titles (None indicates
the lack of any such column, in which case titles are derived from the
first few words of each note's contents if no title is already present
in the note's contents)
@type plaintext: bool
@param plaintext: True if the note contents are plaintext, or False if they're HTML
@type import_button: unicode
@param import_button: ignored
@type user_id: unicode or NoneType
@param user_id: id of current logged-in user (if any)
@rtype: dict
@return: { 'redirect': new_notebook_url }
@raise Access_error: the current user doesn't have access to the given file
@raise Files.Parse_error: there was an error in parsing the given file
@raise Import_error: there was an error in importing the notes from the file
"""
TRUNCATED_TITLE_CHAR_LENGTH = 80
if user_id is None:
raise Access_error()
user = self.__database.load( User, user_id )
if user is None:
raise Access_error()
db_file = self.__database.load( File, file_id )
if db_file is None:
raise Access_error()
db_notebook = self.__users.load_notebook( user_id, db_file.notebook_id )
if db_notebook is None or db_notebook.read_write == Notebook.READ_WRITE_FOR_OWN_NOTES:
raise Access_error()
# if the file has a "note_id" header column, record its index
note_id_column = None
note_ids = {} # map of original CSV note id to imported note id
parser = self.__files.parse_csv( file_id, skip_header = False )
row = parser.next()
if row and u"note_id" in row:
note_id_column = row.index( u"note_id" )
parser = self.__files.parse_csv( file_id, skip_header = True )
# create a new notebook for the imported notes
notebook = self.__create_notebook( u"imported notebook", user, commit = False )
# import the notes into the new notebook
for row in parser:
row_length = len( row )
if content_column >= row_length:
raise Import_error()
if title_column is not None and title_column >= row_length:
raise Import_error()
title = None
# if there is a title column, use it. otherwise, if the note doesn't already contain a title,
# use the first line of the content column as the title
      if title_column is not None and title_column != content_column and len( row[ title_column ].strip() ) > 0:
title = Html_nuker( allow_refs = True ).nuke( Valid_string( escape_html = plaintext )( row[ title_column ].strip() ) )
elif plaintext or not Note.TITLE_PATTERN.search( row[ content_column ] ):
content_text = Html_nuker( allow_refs = True ).nuke( Valid_string( escape_html = plaintext )( row[ content_column ].strip() ) )
content_lines = [ line for line in self.NEWLINE_PATTERN.split( content_text ) if line.strip() ]
# skip notes with empty contents
if len( content_lines ) == 0:
continue
title = content_lines[ 0 ]
# truncate the makeshift title to a reasonable length, but truncate on a word boundary
if len( title ) > TRUNCATED_TITLE_CHAR_LENGTH:
title_words = self.WHITESPACE_PATTERN.split( title )
for i in range( 1, len( title_words ) ):
title_candidate = u" ".join( title_words[ : i ] )
if len( title_candidate ) <= TRUNCATED_TITLE_CHAR_LENGTH:
title = title_candidate
else:
break
contents = Valid_string( max = 50000, escape_html = plaintext, require_link_target = True )( row[ content_column ] )
if plaintext:
contents = contents.replace( u"\n", u"<br />" )
note_id = self.__database.next_id( Note, commit = False )
note = Note.create( note_id, contents, notebook_id = notebook.object_id, startup = False, rank = None, user_id = user_id )
# if the note doesn't have a title yet, then tack the given title onto the start of the contents
if title and note.title is None:
note.contents = u"<h3>%s</h3>%s" % ( title, note.contents )
# if there is a note id column, then map the original CSV note id to its new imported note id
      if note_id_column is not None:
try:
original_note_id = Valid_id( none_okay = True )( row[ note_id_column ].strip() )
except ValueError:
original_note_id = None
if original_note_id:
note_ids[ original_note_id ] = note_id
self.__database.save( note, commit = False )
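    # helper for the NOTE_LINK_PATTERN.subn() pass below: rewrite one matched
    # internal note link so that it points at the new notebook and note ids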
def rewrite_link( match ):
( link_start, original_notebook_id, original_note_id, link_end ) = match.groups()
note_id = note_ids.get( original_note_id )
if note_id:
return "%s/notebooks/%s?note_id=%s%s" % ( link_start, notebook.object_id, note_id, link_end )
# if we don't know how to rewrite the link (for lack of the new note id), then don't rewrite
# it and leave the link as it is
return "%s/notebooks/%s?note_id=%s%s" % ( link_start, original_notebook_id, original_note_id, link_end )
# do a pass over all the imported notes to rewrite internal note links so that they point to
# the newly imported note ids in the new notebook
for ( original_note_id, note_id ) in note_ids.items():
note = self.__database.load( Note, note_id )
if note:
( rewritten_contents, rewritten_count ) = self.NOTE_LINK_PATTERN.subn( rewrite_link, note.contents )
if rewritten_count > 0:
note.contents = rewritten_contents
self.__database.save( note, commit = False )
# delete the CSV file now that it's been imported
self.__database.execute( db_file.sql_delete(), commit = False )
self.__database.uncache( db_file )
self.__database.commit()
Upload_file.delete_file( file_id )
return dict(
redirect = u"/notebooks/%s?rename=true" % notebook.object_id,
)
|
marcosmodesto/django-testapp | refs/heads/master | djangotoolbox/djangotoolbox/tests.py | 35 | from __future__ import with_statement
from decimal import Decimal, InvalidOperation
import time
from django.core import serializers
from django.db import models
from django.db.models import Q
from django.db.models.signals import post_save
from django.db.utils import DatabaseError
from django.dispatch.dispatcher import receiver
from django.test import TestCase
from django.utils.unittest import expectedFailure, skip
from .fields import ListField, SetField, DictField, EmbeddedModelField
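# Decorator used by the ordering tests below: it counts invocations of the
# wrapped callable via a "calls" attribute on the wrapper.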
def count_calls(func):
def wrapper(*args, **kwargs):
wrapper.calls += 1
return func(*args, **kwargs)
wrapper.calls = 0
return wrapper
class Target(models.Model):
index = models.IntegerField()
class Source(models.Model):
target = models.ForeignKey(Target)
index = models.IntegerField()
class DecimalModel(models.Model):
decimal = models.DecimalField(max_digits=9, decimal_places=2)
class DecimalKey(models.Model):
decimal = models.DecimalField(max_digits=9, decimal_places=2, primary_key=True)
class DecimalParent(models.Model):
child = models.ForeignKey(DecimalKey)
class DecimalsList(models.Model):
decimals = ListField(models.ForeignKey(DecimalKey))
class ListModel(models.Model):
integer = models.IntegerField(primary_key=True)
floating_point = models.FloatField()
names = ListField(models.CharField)
names_with_default = ListField(models.CharField(max_length=500),
default=[])
names_nullable = ListField(models.CharField(max_length=500), null=True)
class OrderedListModel(models.Model):
ordered_ints = ListField(models.IntegerField(max_length=500), default=[],
ordering=count_calls(lambda x: x), null=True)
ordered_nullable = ListField(ordering=lambda x: x, null=True)
class SetModel(models.Model):
setfield = SetField(models.IntegerField())
class DictModel(models.Model):
dictfield = DictField(models.IntegerField)
dictfield_nullable = DictField(null=True)
auto_now = DictField(models.DateTimeField(auto_now=True))
class EmbeddedModelFieldModel(models.Model):
simple = EmbeddedModelField('EmbeddedModel', null=True)
simple_untyped = EmbeddedModelField(null=True)
decimal_parent = EmbeddedModelField(DecimalParent, null=True)
typed_list = ListField(EmbeddedModelField('SetModel'))
typed_list2 = ListField(EmbeddedModelField('EmbeddedModel'))
untyped_list = ListField(EmbeddedModelField())
untyped_dict = DictField(EmbeddedModelField())
ordered_list = ListField(EmbeddedModelField(),
ordering=lambda obj: obj.index)
class EmbeddedModel(models.Model):
some_relation = models.ForeignKey(DictModel, null=True)
someint = models.IntegerField(db_column='custom')
auto_now = models.DateTimeField(auto_now=True)
auto_now_add = models.DateTimeField(auto_now_add=True)
class IterableFieldsTest(TestCase):
floats = [5.3, 2.6, 9.1, 1.58]
names = [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura']
unordered_ints = [4, 2, 6, 1]
def setUp(self):
        for i, floating_point in zip(range(1, 5), IterableFieldsTest.floats):
            ListModel(integer=i, floating_point=floating_point,
                      names=IterableFieldsTest.names[:i]).save()
def test_startswith(self):
self.assertEquals(
dict([(entity.pk, entity.names) for entity in
ListModel.objects.filter(names__startswith='Sa')]),
dict([(3, ['Kakashi', 'Naruto', 'Sasuke']),
(4, ['Kakashi', 'Naruto', 'Sasuke', 'Sakura']), ]))
def test_options(self):
self.assertEqual([entity.names_with_default for entity in
ListModel.objects.filter(names__startswith='Sa')],
[[], []])
self.assertEqual([entity.names_nullable for entity in
ListModel.objects.filter(names__startswith='Sa')],
[None, None])
def test_default_value(self):
# Make sure default value is copied.
ListModel().names_with_default.append(2)
self.assertEqual(ListModel().names_with_default, [])
def test_ordering(self):
f = OrderedListModel._meta.fields[1]
f.ordering.calls = 0
# Ensure no ordering happens on assignment.
obj = OrderedListModel()
obj.ordered_ints = self.unordered_ints
self.assertEqual(f.ordering.calls, 0)
obj.save()
self.assertEqual(OrderedListModel.objects.get().ordered_ints,
sorted(self.unordered_ints))
# Ordering should happen only once, i.e. the order function may
# be called N times at most (N being the number of items in the
# list).
self.assertLessEqual(f.ordering.calls, len(self.unordered_ints))
def test_gt(self):
self.assertEquals(
dict([(entity.pk, entity.names) for entity in
ListModel.objects.filter(names__gt='Kakashi')]),
dict([(2, [u'Kakashi', u'Naruto']),
(3, [u'Kakashi', u'Naruto', u'Sasuke']),
(4, [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura']), ]))
def test_lt(self):
self.assertEquals(
dict([(entity.pk, entity.names) for entity in
ListModel.objects.filter(names__lt='Naruto')]),
dict([(1, [u'Kakashi']),
(2, [u'Kakashi', u'Naruto']),
(3, [u'Kakashi', u'Naruto', u'Sasuke']),
(4, [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura']), ]))
def test_gte(self):
self.assertEquals(
dict([(entity.pk, entity.names) for entity in
ListModel.objects.filter(names__gte='Sakura')]),
dict([(3, [u'Kakashi', u'Naruto', u'Sasuke']),
(4, [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura']), ]))
def test_lte(self):
self.assertEquals(
dict([(entity.pk, entity.names) for entity in
ListModel.objects.filter(names__lte='Kakashi')]),
dict([(1, [u'Kakashi']),
(2, [u'Kakashi', u'Naruto']),
(3, [u'Kakashi', u'Naruto', u'Sasuke']),
(4, [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura']), ]))
def test_equals(self):
self.assertEquals([entity.names for entity in
ListModel.objects.filter(names='Sakura')],
[[u'Kakashi', u'Naruto', u'Sasuke', u'Sakura']])
        # Test with additional pk filter (for DBs that have special pk
# queries).
query = ListModel.objects.filter(names='Sakura')
self.assertEquals(query.get(pk=query[0].pk).names,
[u'Kakashi', u'Naruto', u'Sasuke', u'Sakura'])
def test_is_null(self):
self.assertEquals(ListModel.objects.filter(
names__isnull=True).count(), 0)
def test_exclude(self):
self.assertEquals(
dict([(entity.pk, entity.names) for entity in
ListModel.objects.all().exclude(names__lt='Sakura')]),
dict([(3, [u'Kakashi', u'Naruto', u'Sasuke']),
(4, [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura']), ]))
def test_chained_filter(self):
self.assertEquals(
[entity.names for entity in ListModel.objects
.filter(names='Sasuke').filter(names='Sakura')],
[['Kakashi', 'Naruto', 'Sasuke', 'Sakura'], ])
self.assertEquals(
[entity.names for entity in ListModel.objects
.filter(names__startswith='Sa').filter(names='Sakura')],
[['Kakashi', 'Naruto', 'Sasuke', 'Sakura']])
# Test across multiple columns. On app engine only one filter
# is allowed to be an inequality filter.
self.assertEquals(
[entity.names for entity in ListModel.objects
.filter(floating_point=9.1).filter(names__startswith='Sa')],
[['Kakashi', 'Naruto', 'Sasuke'], ])
def test_setfield(self):
setdata = [1, 2, 3, 2, 1]
# At the same time test value conversion.
SetModel(setfield=map(str, setdata)).save()
item = SetModel.objects.filter(setfield=3)[0]
self.assertEqual(item.setfield, set(setdata))
        # This shouldn't raise an error because the default value is
        # an empty set.
SetModel().save()
def test_dictfield(self):
DictModel(dictfield=dict(a=1, b='55', foo=3.14),
auto_now={'a': None}).save()
item = DictModel.objects.get()
self.assertEqual(item.dictfield, {u'a': 1, u'b': 55, u'foo': 3})
dt = item.auto_now['a']
self.assertNotEqual(dt, None)
item.save()
time.sleep(0.5) # Sleep to avoid false positive failure on the assertion below
self.assertGreater(DictModel.objects.get().auto_now['a'], dt)
item.delete()
# Saving empty dicts shouldn't throw errors.
DictModel().save()
# Regression tests for djangoappengine issue #39.
DictModel.add_to_class('new_dict_field', DictField())
DictModel.objects.get()
@skip("GAE specific?")
def test_Q_objects(self):
self.assertEquals(
[entity.names for entity in ListModel.objects
.exclude(Q(names__lt='Sakura') | Q(names__gte='Sasuke'))],
[['Kakashi', 'Naruto', 'Sasuke', 'Sakura']])
def test_list_with_foreignkeys(self):
class ReferenceList(models.Model):
keys = ListField(models.ForeignKey('Model'))
class Model(models.Model):
pass
model1 = Model.objects.create()
model2 = Model.objects.create()
ReferenceList.objects.create(keys=[model1.pk, model2.pk])
self.assertEqual(ReferenceList.objects.get().keys[0], model1.pk)
self.assertEqual(ReferenceList.objects.filter(keys=model1.pk).count(), 1)
def test_list_with_foreign_conversion(self):
decimal = DecimalKey.objects.create(decimal=Decimal('1.5'))
DecimalsList.objects.create(decimals=[decimal.pk])
@expectedFailure
def test_nested_list(self):
"""
Some back-ends expect lists to be strongly typed or not contain
other lists (e.g. GAE), this limits how the ListField can be
used (unless the back-end were to serialize all lists).
"""
class UntypedListModel(models.Model):
untyped_list = ListField()
UntypedListModel.objects.create(untyped_list=[1, [2, 3]])
class Child(models.Model):
pass
class Parent(models.Model):
id = models.IntegerField(primary_key=True)
integer_list = ListField(models.IntegerField)
integer_dict = DictField(models.IntegerField)
embedded_list = ListField(EmbeddedModelField(Child))
embedded_dict = DictField(EmbeddedModelField(Child))
class EmbeddedModelFieldTest(TestCase):
def assertEqualDatetime(self, d1, d2):
"""Compares d1 and d2, ignoring microseconds."""
self.assertEqual(d1.replace(microsecond=0),
d2.replace(microsecond=0))
def assertNotEqualDatetime(self, d1, d2):
self.assertNotEqual(d1.replace(microsecond=0),
d2.replace(microsecond=0))
def _simple_instance(self):
EmbeddedModelFieldModel.objects.create(
simple=EmbeddedModel(someint='5'))
return EmbeddedModelFieldModel.objects.get()
def test_simple(self):
instance = self._simple_instance()
self.assertIsInstance(instance.simple, EmbeddedModel)
# Make sure get_prep_value is called.
self.assertEqual(instance.simple.someint, 5)
# Primary keys should not be populated...
self.assertEqual(instance.simple.id, None)
# ... unless set explicitly.
instance.simple.id = instance.id
instance.save()
instance = EmbeddedModelFieldModel.objects.get()
self.assertEqual(instance.simple.id, instance.id)
def _test_pre_save(self, instance, get_field):
# Make sure field.pre_save is called for embedded objects.
from time import sleep
instance.save()
auto_now = get_field(instance).auto_now
auto_now_add = get_field(instance).auto_now_add
self.assertNotEqual(auto_now, None)
self.assertNotEqual(auto_now_add, None)
sleep(1) # FIXME
instance.save()
self.assertNotEqualDatetime(get_field(instance).auto_now,
get_field(instance).auto_now_add)
instance = EmbeddedModelFieldModel.objects.get()
instance.save()
# auto_now_add shouldn't have changed now, but auto_now should.
self.assertEqualDatetime(get_field(instance).auto_now_add,
auto_now_add)
self.assertGreater(get_field(instance).auto_now, auto_now)
def test_pre_save(self):
obj = EmbeddedModelFieldModel(simple=EmbeddedModel())
self._test_pre_save(obj, lambda instance: instance.simple)
def test_pre_save_untyped(self):
obj = EmbeddedModelFieldModel(simple_untyped=EmbeddedModel())
self._test_pre_save(obj, lambda instance: instance.simple_untyped)
def test_pre_save_in_list(self):
obj = EmbeddedModelFieldModel(untyped_list=[EmbeddedModel()])
self._test_pre_save(obj, lambda instance: instance.untyped_list[0])
def test_pre_save_in_dict(self):
obj = EmbeddedModelFieldModel(untyped_dict={'a': EmbeddedModel()})
self._test_pre_save(obj, lambda instance: instance.untyped_dict['a'])
def test_pre_save_list(self):
# Also make sure auto_now{,add} works for embedded object *lists*.
EmbeddedModelFieldModel.objects.create(typed_list2=[EmbeddedModel()])
instance = EmbeddedModelFieldModel.objects.get()
auto_now = instance.typed_list2[0].auto_now
auto_now_add = instance.typed_list2[0].auto_now_add
self.assertNotEqual(auto_now, None)
self.assertNotEqual(auto_now_add, None)
instance.typed_list2.append(EmbeddedModel())
instance.save()
instance = EmbeddedModelFieldModel.objects.get()
self.assertEqualDatetime(instance.typed_list2[0].auto_now_add,
auto_now_add)
self.assertGreater(instance.typed_list2[0].auto_now, auto_now)
self.assertNotEqual(instance.typed_list2[1].auto_now, None)
self.assertNotEqual(instance.typed_list2[1].auto_now_add, None)
def test_error_messages(self):
for kwargs, expected in (
({'simple': 42}, EmbeddedModel),
({'simple_untyped': 42}, models.Model),
({'typed_list': [EmbeddedModel()]}, SetModel)):
self.assertRaisesRegexp(
TypeError, "Expected instance of type %r." % expected,
EmbeddedModelFieldModel(**kwargs).save)
def test_typed_listfield(self):
EmbeddedModelFieldModel.objects.create(
typed_list=[SetModel(setfield=range(3)),
SetModel(setfield=range(9))],
ordered_list=[Target(index=i) for i in xrange(5, 0, -1)])
obj = EmbeddedModelFieldModel.objects.get()
self.assertIn(5, obj.typed_list[1].setfield)
self.assertEqual([target.index for target in obj.ordered_list],
range(1, 6))
def test_untyped_listfield(self):
EmbeddedModelFieldModel.objects.create(untyped_list=[
EmbeddedModel(someint=7),
OrderedListModel(ordered_ints=range(5, 0, -1)),
SetModel(setfield=[1, 2, 2, 3])])
instances = EmbeddedModelFieldModel.objects.get().untyped_list
for instance, cls in zip(instances,
[EmbeddedModel, OrderedListModel, SetModel]):
self.assertIsInstance(instance, cls)
self.assertNotEqual(instances[0].auto_now, None)
self.assertEqual(instances[1].ordered_ints, range(1, 6))
def test_untyped_dict(self):
EmbeddedModelFieldModel.objects.create(untyped_dict={
'a': SetModel(setfield=range(3)),
'b': DictModel(dictfield={'a': 1, 'b': 2}),
'c': DictModel(dictfield={}, auto_now={'y': 1})})
data = EmbeddedModelFieldModel.objects.get().untyped_dict
self.assertIsInstance(data['a'], SetModel)
self.assertNotEqual(data['c'].auto_now['y'], None)
def test_foreignkey_in_embedded_object(self):
simple = EmbeddedModel(some_relation=DictModel.objects.create())
obj = EmbeddedModelFieldModel.objects.create(simple=simple)
simple = EmbeddedModelFieldModel.objects.get().simple
self.assertNotIn('some_relation', simple.__dict__)
self.assertIsInstance(simple.__dict__['some_relation_id'],
type(obj.id))
self.assertIsInstance(simple.some_relation, DictModel)
def test_embedded_field_with_foreign_conversion(self):
decimal = DecimalKey.objects.create(decimal=Decimal('1.5'))
decimal_parent = DecimalParent.objects.create(child=decimal)
EmbeddedModelFieldModel.objects.create(decimal_parent=decimal_parent)
def test_update(self):
"""
        Test that update can be used on a subset of objects
containing collections of embedded instances; see issue #13.
Also ensure that updated values are coerced according to
collection field.
"""
child1 = Child.objects.create()
child2 = Child.objects.create()
parent = Parent.objects.create(pk=1,
integer_list=[1], integer_dict={'a': 2},
embedded_list=[child1], embedded_dict={'a': child2})
Parent.objects.filter(pk=1).update(
integer_list=['3'], integer_dict={'b': '3'},
embedded_list=[child2], embedded_dict={'b': child1})
parent = Parent.objects.get()
self.assertEqual(parent.integer_list, [3])
self.assertEqual(parent.integer_dict, {'b': 3})
self.assertEqual(parent.embedded_list, [child2])
self.assertEqual(parent.embedded_dict, {'b': child1})
class BaseModel(models.Model):
pass
class ExtendedModel(BaseModel):
name = models.CharField(max_length=20)
class BaseModelProxy(BaseModel):
class Meta:
proxy = True
class ExtendedModelProxy(ExtendedModel):
class Meta:
proxy = True
class ProxyTest(TestCase):
def test_proxy(self):
list(BaseModelProxy.objects.all())
def test_proxy_with_inheritance(self):
self.assertRaises(DatabaseError,
lambda: list(ExtendedModelProxy.objects.all()))
class SignalTest(TestCase):
def test_post_save(self):
created = []
@receiver(post_save, sender=SetModel)
def handle(**kwargs):
created.append(kwargs['created'])
SetModel().save()
self.assertEqual(created, [True])
SetModel.objects.get().save()
self.assertEqual(created, [True, False])
qs = SetModel.objects.all()
list(qs)[0].save()
self.assertEqual(created, [True, False, False])
list(qs)[0].save()
self.assertEqual(created, [True, False, False, False])
list(qs.select_related())[0].save()
self.assertEqual(created, [True, False, False, False, False])
class SelectRelatedTest(TestCase):
def test_select_related(self):
target = Target(index=5)
target.save()
Source(target=target, index=8).save()
source = Source.objects.all().select_related()[0]
self.assertEqual(source.target.pk, target.pk)
self.assertEqual(source.target.index, target.index)
source = Source.objects.all().select_related('target')[0]
self.assertEqual(source.target.pk, target.pk)
self.assertEqual(source.target.index, target.index)
class DBColumn(models.Model):
a = models.IntegerField(db_column='b')
class OrderByTest(TestCase):
def test_foreign_keys(self):
target1 = Target.objects.create(index=1)
target2 = Target.objects.create(index=2)
source1 = Source.objects.create(target=target1, index=3)
source2 = Source.objects.create(target=target2, index=4)
self.assertEqual(list(Source.objects.all().order_by('target')),
[source1, source2])
self.assertEqual(list(Source.objects.all().order_by('-target')),
[source2, source1])
def test_db_column(self):
model1 = DBColumn.objects.create(a=1)
model2 = DBColumn.objects.create(a=2)
self.assertEqual(list(DBColumn.objects.all().order_by('a')),
[model1, model2])
self.assertEqual(list(DBColumn.objects.all().order_by('-a')),
[model2, model1])
def test_reverse(self):
model1 = DBColumn.objects.create(a=1)
model2 = DBColumn.objects.create(a=2)
self.assertEqual(list(DBColumn.objects.all().order_by('a').reverse()),
[model2, model1])
self.assertEqual(list(DBColumn.objects.all().order_by('-a').reverse()),
[model1, model2])
def test_chain(self):
model1 = Target.objects.create(index=1)
model2 = Target.objects.create(index=2)
self.assertEqual(
list(Target.objects.all().order_by('index').order_by('-index')),
[model2, model1])
class SerializableSetModel(models.Model):
setfield = SetField(models.IntegerField())
setcharfield = SetField(models.CharField(), null=True)
class SerializationTest(TestCase):
"""
JSON doesn't support sets, so they need to be converted to lists
for serialization; see issue #12.
TODO: Check if the fix works with embedded models / nested sets.
"""
names = ['foo', 'bar', 'baz', 'monkey']
def test_json_listfield(self):
for i in range(1, 5):
ListModel(integer=i, floating_point=0,
names=SerializationTest.names[:i]).save()
objects = ListModel.objects.all()
serialized = serializers.serialize('json', objects)
deserialized = serializers.deserialize('json', serialized)
for m in deserialized:
integer = m.object.integer
names = m.object.names
self.assertEqual(names, SerializationTest.names[:integer])
def test_json_setfield(self):
for i in range(1, 5):
SerializableSetModel(
setfield=set([i - 1]),
setcharfield=set(SerializationTest.names[:i])).save()
objects = SerializableSetModel.objects.all()
serialized = serializers.serialize('json', objects)
deserialized = serializers.deserialize('json', serialized)
for m in deserialized:
integer = m.object.setfield.pop()
names = m.object.setcharfield
self.assertEqual(names, set(SerializationTest.names[:integer + 1]))
class String(models.Model):
s = models.CharField(max_length=20)
class LazyObjectsTest(TestCase):
def test_translation(self):
"""
Using a lazy translation call should work just the same as
a non-lazy one (or a plain string).
"""
from django.utils.translation import ugettext_lazy
a = String.objects.create(s='a')
b = String.objects.create(s=ugettext_lazy('b'))
self.assertEqual(String.objects.get(s='a'), a)
self.assertEqual(list(String.objects.filter(s='a')), [a])
self.assertEqual(list(String.objects.filter(s__lte='a')), [a])
self.assertEqual(String.objects.get(s=ugettext_lazy('a')), a)
self.assertEqual(
list(String.objects.filter(s__lte=ugettext_lazy('a'))), [a])
self.assertEqual(String.objects.get(s='b'), b)
self.assertEqual(list(String.objects.filter(s='b')), [b])
self.assertEqual(list(String.objects.filter(s__gte='b')), [b])
self.assertEqual(String.objects.get(s=ugettext_lazy('b')), b)
self.assertEqual(
list(String.objects.filter(s__gte=ugettext_lazy('b'))), [b])
def test_marked_strings(self):
"""
Check that strings marked as safe or needing escaping do not
confuse the back-end.
"""
from django.utils.safestring import mark_safe, mark_for_escaping
a = String.objects.create(s='a')
b = String.objects.create(s=mark_safe('b'))
c = String.objects.create(s=mark_for_escaping('c'))
self.assertEqual(String.objects.get(s='a'), a)
self.assertEqual(list(String.objects.filter(s__startswith='a')), [a])
self.assertEqual(String.objects.get(s=mark_safe('a')), a)
self.assertEqual(
list(String.objects.filter(s__startswith=mark_safe('a'))), [a])
self.assertEqual(String.objects.get(s=mark_for_escaping('a')), a)
self.assertEqual(
list(String.objects.filter(s__startswith=mark_for_escaping('a'))),
[a])
self.assertEqual(String.objects.get(s='b'), b)
self.assertEqual(list(String.objects.filter(s__startswith='b')), [b])
self.assertEqual(String.objects.get(s=mark_safe('b')), b)
self.assertEqual(
list(String.objects.filter(s__startswith=mark_safe('b'))), [b])
self.assertEqual(String.objects.get(s=mark_for_escaping('b')), b)
self.assertEqual(
list(String.objects.filter(s__startswith=mark_for_escaping('b'))),
[b])
self.assertEqual(String.objects.get(s='c'), c)
self.assertEqual(list(String.objects.filter(s__startswith='c')), [c])
self.assertEqual(String.objects.get(s=mark_safe('c')), c)
self.assertEqual(
list(String.objects.filter(s__startswith=mark_safe('c'))), [c])
self.assertEqual(String.objects.get(s=mark_for_escaping('c')), c)
self.assertEqual(
list(String.objects.filter(s__startswith=mark_for_escaping('c'))),
[c])
class FeaturesTest(TestCase):
"""
Some things are unlikely to cause problems for SQL back-ends, but
require special handling in nonrel.
"""
def test_subqueries(self):
"""
Django includes SQL statements as WHERE tree values when
filtering using a QuerySet -- this won't "just work" with
nonrel back-ends.
TODO: Subqueries handling may require a bit of Django
changing, but should be easy to support.
"""
target = Target.objects.create(index=1)
source = Source.objects.create(index=2, target=target)
targets = Target.objects.all()
with self.assertRaises(DatabaseError):
Source.objects.get(target__in=targets)
self.assertEqual(
Source.objects.get(target__in=list(targets)),
source)
class DecimalFieldTest(TestCase):
"""
Some NoSQL databases can't handle Decimals, so respective back-ends
convert them to strings or floats. This can cause some precision
and sorting problems.
"""
def setUp(self):
for d in (Decimal('12345.6789'), Decimal('5'), Decimal('345.67'),
Decimal('45.6'), Decimal('2345.678'),):
DecimalModel(decimal=d).save()
def test_filter(self):
d = DecimalModel.objects.get(decimal=Decimal('5.0'))
self.assertTrue(isinstance(d.decimal, Decimal))
self.assertEquals(str(d.decimal), '5.00')
d = DecimalModel.objects.get(decimal=Decimal('45.60'))
self.assertEquals(str(d.decimal), '45.60')
        # Filter argument should be converted to Decimal with 2 decimal
        # places.
d = DecimalModel.objects.get(decimal='0000345.67333333333333333')
self.assertEquals(str(d.decimal), '345.67')
def test_order(self):
"""
Standard Django decimal-to-string conversion isn't monotonic
(see `django.db.backends.util.format_number`).
"""
rows = DecimalModel.objects.all().order_by('decimal')
values = list(d.decimal for d in rows)
self.assertEquals(values, sorted(values))
def test_sign_extend(self):
DecimalModel(decimal=Decimal('-0.0')).save()
try:
# If we've written a valid string we should be able to
# retrieve the DecimalModel object without error.
DecimalModel.objects.filter(decimal__lt=1)[0]
except InvalidOperation:
            self.fail("InvalidOperation raised while reading back a stored decimal")
|
hermanliang/gitinspector | refs/heads/master | gitinspector/filtering.py | 49 | # coding: utf-8
#
# Copyright © 2012-2014 Ejwa Software. All rights reserved.
#
# This file is part of gitinspector.
#
# gitinspector is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gitinspector is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from __future__ import unicode_literals
from localization import N_
from outputable import Outputable
import re
import terminal
import textwrap
__filters__ = {"file": [[], set()], "author": [[], set()], "email": [[], set()], "revision": [[], set()]}
class InvalidRegExpError(ValueError):
def __init__(self, msg):
super(InvalidRegExpError, self).__init__(msg)
self.msg = msg
def get():
return __filters__
def __add_one__(string):
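	# a rule looks like "<filter type>:<regexp>"; anything without a recognized
	# type prefix is treated as a file filter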
for i in __filters__:
if (i + ":").lower() == string[0:len(i) + 1].lower():
__filters__[i][0].append(string[len(i) + 1:])
return
__filters__["file"][0].append(string)
def add(string):
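	# rules may be comma-separated, e.g. add("author:John,file:\.py$") -- a
	# sketch of the syntax with hypothetical patterns -- and each rule is
	# routed to its bucket by __add_one__()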
rules = string.split(",")
for rule in rules:
__add_one__(rule)
def clear():
for i in __filters__:
__filters__[i][0] = []
def get_filered(filter_type="file"):
return __filters__[filter_type][1]
def has_filtered():
for i in __filters__:
if __filters__[i][1]:
return True
return False
def set_filtered(string, filter_type="file"):
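	# record the string in the filtered set for the given type when any of the
	# configured regexps match; returns True if the string was filtered out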
string = string.strip()
if len(string) > 0:
for i in __filters__[filter_type][0]:
try:
if re.search(i, string) != None:
__filters__[filter_type][1].add(string)
return True
			except re.error:
				raise InvalidRegExpError(_("invalid regular expression specified"))
return False
FILTERING_INFO_TEXT = N_("The following files were excluded from the statistics due to the specified exclusion patterns")
FILTERING_AUTHOR_INFO_TEXT = N_("The following authors were excluded from the statistics due to the specified exclusion patterns")
FILTERING_EMAIL_INFO_TEXT = N_("The authors with the following emails were excluded from the statistics due to the specified " \
"exclusion patterns")
FILTERING_EMAIL_INFO_TEXT = N_("The following commit revisions were excluded from the statistics due to the specified " \
"exclusion patterns")
class Filtering(Outputable):
@staticmethod
def __output_html_section__(info_string, filtered):
filtering_xml = ""
if filtered:
filtering_xml += "<p>" + info_string + "."+ "</p>"
for i in filtered:
filtering_xml += "<p>" + i + "</p>"
return filtering_xml
def output_html(self):
if has_filtered():
filtering_xml = "<div><div class=\"box\">"
			filtering_xml += Filtering.__output_html_section__(_(FILTERING_INFO_TEXT), __filters__["file"][1])
			filtering_xml += Filtering.__output_html_section__(_(FILTERING_AUTHOR_INFO_TEXT), __filters__["author"][1])
			filtering_xml += Filtering.__output_html_section__(_(FILTERING_EMAIL_INFO_TEXT), __filters__["email"][1])
			filtering_xml += Filtering.__output_html_section__(_(FILTERING_REVISION_INFO_TEXT), __filters__["revision"][1])
filtering_xml += "</div></div>"
print(filtering_xml)
@staticmethod
def __output_text_section__(info_string, filtered):
if filtered:
print("\n" + textwrap.fill(info_string + ":", width=terminal.get_size()[0]))
for i in filtered:
(width, _unused) = terminal.get_size()
print("...%s" % i[-width+3:] if len(i) > width else i)
def output_text(self):
Filtering.__output_text_section__(_(FILTERING_INFO_TEXT), __filters__["file"][1])
Filtering.__output_text_section__(_(FILTERING_AUTHOR_INFO_TEXT), __filters__["author"][1])
Filtering.__output_text_section__(_(FILTERING_EMAIL_INFO_TEXT), __filters__["email"][1])
		Filtering.__output_text_section__(_(FILTERING_REVISION_INFO_TEXT), __filters__["revision"][1])
@staticmethod
def __output_xml_section__(info_string, filtered, container_tagname):
if filtered:
message_xml = "\t\t\t<message>" +info_string + "</message>\n"
filtering_xml = ""
for i in filtered:
filtering_xml += "\t\t\t\t<entry>".format(container_tagname) + i + "</entry>\n".format(container_tagname)
print("\t\t<{0}>".format(container_tagname))
print(message_xml + "\t\t\t<entries>\n" + filtering_xml + "\t\t\t</entries>\n")
print("\t\t</{0}>".format(container_tagname))
def output_xml(self):
if has_filtered():
print("\t<filtering>")
Filtering.__output_xml_section__(_(FILTERING_INFO_TEXT), __filters__["file"][1], "files")
Filtering.__output_xml_section__(_(FILTERING_AUTHOR_INFO_TEXT), __filters__["author"][1], "authors")
Filtering.__output_xml_section__(_(FILTERING_EMAIL_INFO_TEXT), __filters__["email"][1], "emails")
			Filtering.__output_xml_section__(_(FILTERING_REVISION_INFO_TEXT), __filters__["revision"][1], "revisions")
print("\t</filtering>")
|
Peddle/hue | refs/heads/master | apps/hbase/src/hbase/management/commands/hbase_setup.py | 32 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from datetime import datetime, timedelta
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from django.utils.translation import ugettext as _
from desktop.lib.paths import get_apps_root
from useradmin.models import install_sample_user
from hbased.ttypes import AlreadyExists
from hbase.api import HbaseApi
LOG = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Create and fill some demo tables in the first configured cluster.'
args = '<username>'
def handle(self, *args, **options):
if args:
user = args[0]
else:
user = install_sample_user()
api = HbaseApi(user=user)
cluster_name = api.getClusters()[0]['name'] # Currently pick first configured cluster
# Check connectivity
api.connectCluster(cluster_name)
self.create_analytics_table(api, cluster_name)
self.load_analytics_table(api, cluster_name)
self.create_binary_table(api, cluster_name)
self.load_binary_table(api, cluster_name)
def create_analytics_table(self, api, cluster_name):
try:
api.createTable(cluster_name, 'analytics_demo', [{'properties': {'name': 'hour'}}, {'properties': {'name': 'day'}}, {'properties': {'name': 'total'}}])
except AlreadyExists:
pass
def load_analytics_table(self, api, cluster_name):
table_data = os.path.join(get_apps_root(), 'hbase', 'example', 'analytics', 'hbase-analytics.tsv')
api.bulkUpload(cluster_name, 'analytics_demo', open(table_data))
def create_binary_table(self, api, cluster_name):
try:
api.createTable(cluster_name, 'document_demo', [{'properties': {'name': 'doc'}}])
except AlreadyExists:
pass
def load_binary_table(self, api, cluster_name):
today = datetime.now().strftime('%Y%m%d')
tomorrow = (datetime.now() + timedelta(days=1)).strftime('%Y%m%d')
api.putRow(cluster_name, 'document_demo', today, {'doc:txt': 'Hue is awesome!'})
api.putRow(cluster_name, 'document_demo', today, {'doc:json': '{"user": "hue", "coolness": "extra"}'})
api.putRow(cluster_name, 'document_demo', tomorrow, {'doc:version': '<xml>I like HBase</xml>'})
api.putRow(cluster_name, 'document_demo', tomorrow, {'doc:version': '<xml>I LOVE HBase</xml>'})
root = os.path.join(get_apps_root(), 'hbase', 'example', 'documents')
api.putRow(cluster_name, 'document_demo', today, {'doc:img': open(root + '/hue-logo.png', "rb").read()})
api.putRow(cluster_name, 'document_demo', today, {'doc:html': open(root + '/gethue.com.html', "rb").read()})
api.putRow(cluster_name, 'document_demo', today, {'doc:pdf': open(root + '/gethue.pdf', "rb").read()})
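# Illustrative invocation sketch (assumption: run through Hue's management
# entry point; the username argument is optional and falls back to the sample
# user created by install_sample_user()):
#
#   build/env/bin/hue hbase_setup [<username>]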
|
likaiguo/flask-admin | refs/heads/master | flask_admin/_compat.py | 24 | # -*- coding: utf-8 -*-
"""
flask_admin._compat
~~~~~~~~~~~~~~~~~~~~~~~
Some py2/py3 compatibility support based on a stripped down
version of six so we don't have to depend on a specific version
of it.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
PY2 = sys.version_info[0] == 2
VER = sys.version_info
if not PY2:
text_type = str
string_types = (str,)
integer_types = (int, )
iterkeys = lambda d: iter(d.keys())
itervalues = lambda d: iter(d.values())
iteritems = lambda d: iter(d.items())
filter_list = lambda f, l: list(filter(f, l))
def as_unicode(s):
if isinstance(s, bytes):
return s.decode('utf-8')
return str(s)
def csv_encode(s):
''' Returns unicode string expected by Python 3's csv module '''
return as_unicode(s)
# Various tools
from functools import reduce
from urllib.parse import urljoin, urlparse
else:
text_type = unicode
string_types = (str, unicode)
integer_types = (int, long)
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
filter_list = filter
def as_unicode(s):
if isinstance(s, str):
return s.decode('utf-8')
return unicode(s)
def csv_encode(s):
''' Returns byte string expected by Python 2's csv module '''
return as_unicode(s).encode('utf-8')
# Helpers
reduce = __builtins__['reduce'] if isinstance(__builtins__, dict) else __builtins__.reduce
from urlparse import urljoin, urlparse
def with_metaclass(meta, *bases):
# This requires a bit of explanation: the basic idea is to make a
# dummy metaclass for one level of class instantiation that replaces
# itself with the actual metaclass. Because of internal type checks
# we also need to make sure that we downgrade the custom metaclass
# for one level to something closer to type (that's why __call__ and
# __init__ comes back from type etc.).
#
# This has the advantage over six.with_metaclass in that it does not
# introduce dummy classes into the final MRO.
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
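# Illustrative sketch (hypothetical names): declaring a class with a custom
# metaclass so the same code runs on Python 2 and 3.
#
#   class ModelMeta(type):
#       pass
#
#   class Model(with_metaclass(ModelMeta, object)):
#       pass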
try:
from collections import OrderedDict
except ImportError:
# Bare-bones OrderedDict implementation for Python2.6 compatibility
class OrderedDict(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self.ordered_keys = []
        def __setitem__(self, key, value):
            # Only remember the key the first time it is set, otherwise
            # re-assigning an existing key would duplicate it in iteration.
            if key not in self:
                self.ordered_keys.append(key)
            dict.__setitem__(self, key, value)
def __iter__(self):
return (k for k in self.ordered_keys)
def iteritems(self):
return ((k, self[k]) for k in self.ordered_keys)
def items(self):
return list(self.iteritems())
|
tschneidereit/servo | refs/heads/master | tests/wpt/web-platform-tests/webdriver/network.py | 212 | # this comes from this stack overflow post:
# http://stackoverflow.com/a/1947766/725944
# module for getting the lan ip address of the computer
import os
import socket
if os.name != "nt":
import fcntl
import struct
def get_interface_ip(ifname):
sckt = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
sckt.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
def get_lan_ip():
ip = socket.gethostbyname(socket.gethostname())
if ip.startswith("127.") and os.name != "nt":
interfaces = ["eth0","eth1","eth2","wlan0","wlan1","wifi0","ath0","ath1","ppp0"]
for ifname in interfaces:
try:
ip = get_interface_ip(ifname)
break
except IOError:
pass
return ip
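# Illustrative usage sketch (assumes a POSIX host; on Windows, or when no
# listed interface responds, the plain gethostbyname() result is returned):
#
#   print(get_lan_ip())   # e.g. "192.168.1.42" (address is made up)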
|
ecosoft-odoo/odoo | refs/heads/8.0 | addons/website_mail/controllers/__init__.py | 390 | import email_designer
import main
|
jgravois/ArcREST | refs/heads/master | src/arcrest/ags/_geoprocessing.py | 1 | import json
from _gpobjects import *
from .._abstract.abstract import BaseAGSServer, BaseGPObject
from ..common.spatial import featureclass_to_json, recordset_to_json
from ..common.general import local_time_to_online
from ..security import security
import urllib
import time
import datetime
########################################################################
class GPService(BaseAGSServer):
"""
Geoprocessing is a fundamental part of enterprise GIS operations.
Geoprocessing provides GIS users with data analysis, data management,
and data conversion tools.
A geoprocessing service represents a collection of published tools that
perform tasks necessary for manipulating and analyzing geographic
information across a wide range of disciplines. Each tool performs one
or more operations, such as projecting a data set from one map
projection to another, adding fields to a table, or creating buffer
zones around features. A tool accepts input (such as feature sets,
tables, and property values), executes operations using the input data,
and generates output for presentation in a map or further processing by
the client. Tools can be executed synchronously (in sequence) or
asynchronously. When used with the REST API, a geoprocessing service
should always be published as a pooled service.
Use a geoprocessing service to do the following:
List available tools and their input/output properties
Execute a task synchronously
Submit a job to a task asynchronously
Get job details, including job status
Display results using a map service
Retrieve results for further processing by the client
Many GIS tasks involve the repetition of work, and this creates the
need for a framework to provide automation of workflows. Geoprocessing
services answer this need by using a model to combine a series of
operations in a sequence, then exposing the model as a tool.
The REST API GP Service resource provides basic information associated
with the service, such as the service description, the tasks provided,
the execution type, and the result's map server name.
The GP Service resource has operations that return results after a task
is successfully completed. The supported operations are as follows:
Execute task-Used when the execution type is synchronous. When a task
is executed synchronously, a user must wait for the results.
Submit job-Used when the execution type is asynchronous. When a job is
submitted asynchronously, a user can do other things while awaiting
notice that the task is completed.
"""
_resultMapServerName = None
_tasks = None
_executionType = None
_currentVersion = None
_maximumRecords = None
_serviceDescription = None
_securityHandler = None
_json = None
_json_dict = None
_proxy_url = None
_proxy_port = None
#----------------------------------------------------------------------
def __init__(self,
url,
securityHandler=None,
proxy_url=None,
proxy_port=None,
initialize=False):
"""Constructor"""
self._url = url
if securityHandler is not None:
self._securityHandler = securityHandler
self._referer_url = securityHandler.referer_url
self._proxy_url = proxy_url
self._proxy_port = proxy_port
if initialize:
self.__init()
#----------------------------------------------------------------------
def __init(self):
""" initializes the GP tools """
params = {
"f" : "json"
}
json_dict = self._do_get(url=self._url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
self._json_dict = json_dict
self._json = json.dumps(json_dict)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.iteritems():
if k in attributes:
if k == "tasks":
self._tasks = []
for t in v:
self._tasks.append(
GPTask(url=self._url + "/%s" % t,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initialize=False)
)
else:
setattr(self, "_"+ k, json_dict[k])
else:
print k, " - attribute not implemented for gp service."
#----------------------------------------------------------------------
def __str__(self):
"""returns the object as a string"""
if self._json is None:
self.__init()
return self._json
#----------------------------------------------------------------------
@property
def currentVersion(self):
if self._currentVersion is None:
self.__init()
return self._currentVersion
#----------------------------------------------------------------------
@property
def resultMapServerName(self):
""" returns the result mapserver name """
if self._resultMapServerName is None:
self.__init()
return self._resultMapServerName
#----------------------------------------------------------------------
@property
def tasks(self):
""" returns the tasks in the GP service """
if self._tasks is None:
self.__init()
return self._tasks
#----------------------------------------------------------------------
@property
def executionType(self):
""" returns the execution type """
if self._executionType is None:
self.__init()
return self._executionType
#----------------------------------------------------------------------
@property
def maximumRecords(self):
""" the maximum number of rows returned from service """
if self._maximumRecords is None:
self.__init()
return self._maximumRecords
#----------------------------------------------------------------------
@property
def serviceDescription(self):
""" returns the service description """
if self._serviceDescription is None:
self.__init()
return self._serviceDescription
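# Illustrative usage sketch (hypothetical URL): list the tasks published by a
# GP service before deciding whether to execute synchronously or submit a job.
#
#   svc = GPService(url="http://example.com/arcgis/rest/services/Demo/GPServer")
#   for task in svc.tasks:
#       print task.name, task.executionType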
########################################################################
class GPTask(BaseAGSServer):
""" This is the GP task that performs the operation """
_securityHandler = None
_proxy_port = None
_proxy_url = None
_url = None
_category = None
_displayName = None
_name = None
_parameters = None
_executionType = None
_helpUrl = None
_description = None
#----------------------------------------------------------------------
def __init__(self, url, securityHandler=None,
proxy_url=None, proxy_port=None,
initialize=False):
"""Constructor"""
self._url = url
self._securityHandler = securityHandler
if not securityHandler is None:
self._referer_url = securityHandler.referer_url
self._proxy_port = proxy_port
self._proxy_url = proxy_url
if initialize:
self.__init()
#----------------------------------------------------------------------
def __init(self):
""" initializes all the properties """
params = {
"f" : "json"
}
json_dict = self._do_get(url=self._url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.iteritems():
if k in attributes:
setattr(self, "_"+ k, json_dict[k])
else:
print k, " - attribute not implemented in GPTask."
#----------------------------------------------------------------------
@property
def category(self):
""" returns the category """
if self._category is None:
self.__init()
return self._category
#----------------------------------------------------------------------
@property
def displayName(self):
""" returns the tools display name """
if self._displayName is None:
self.__init()
return self._displayName
#----------------------------------------------------------------------
@property
def name(self):
""" returns the name of the service """
if self._name is None:
self.__init()
return self._name
#----------------------------------------------------------------------
@property
def parameters(self):
""" returns the default parameters """
if self._parameters is None:
self.__init()
for param in self._parameters:
if not isinstance(param['defaultValue'], BaseGPObject):
if param['dataType'] == "GPFeatureRecordSetLayer":
param['defaultValue'] = GPFeatureRecordSetLayer.fromJSON(json.dumps(param))
elif param['dataType'] == "GPString":
param['defaultValue'] = GPString.fromJSON(json.dumps(param))
elif param['dataType'] == "GPLong":
param['defaultValue'] = GPLong.fromJSON(json.dumps(param))
elif param['dataType'] == "GPDouble":
param['defaultValue'] = GPDouble.fromJSON(json.dumps(param))
elif param['dataType'] == "GPDate":
param['defaultValue'] = GPDate.fromJSON(json.dumps(param))
elif param['dataType'] == "GPBoolean":
param['defaultValue'] = GPBoolean.fromJSON(json.dumps(param))
elif param['dataType'] == "GPDataFile":
param['defaultValue'] = GPDataFile.fromJSON(json.dumps(param))
elif param['dataType'] == "GPLinearUnit":
param['defaultValue'] = GPLinearUnit.fromJSON(json.dumps(param))
elif param['dataType'] == "GPMultiValue":
param['defaultValue'] = GPMultiValue.fromJSON(json.dumps(param))
elif param['dataType'] == "GPRasterData":
param['defaultValue'] = GPRasterData.fromJSON(json.dumps(param))
elif param['dataType'] == "GPRasterDataLayer":
param['defaultValue'] = GPRasterDataLayer.fromJSON(json.dumps(param))
elif param['dataType'] == "GPRecordSet":
param['defaultValue'] = GPRecordSet.fromJSON(json.dumps(param))
return self._parameters
#----------------------------------------------------------------------
@property
def executionType(self):
""" returns the execution type """
if self._executionType is None:
self.__init()
return self._executionType
#----------------------------------------------------------------------
@property
def helpUrl(self):
""" returns the help url """
if self._helpUrl is None:
self.__init()
return self._helpUrl
#----------------------------------------------------------------------
@property
def description(self):
""" returns the description of the service """
if self._description is None:
self.__init()
return self._description
#----------------------------------------------------------------------
def getJob(self, jobID):
""" returns the results or status of a job """
url = self._url + "/jobs/%s" % (jobID)
return GPJob(url=url,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def submitJob(self, inputs, method="POST",
outSR=None, processSR=None,
returnZ=False, returnM=False):
"""
submits a job to the current task, and returns a job ID
Inputs:
inputs - list of GP object values
method - string - either GET or POST. The way the service is
submitted.
outSR - spatial reference of output geometries
processSR - spatial reference that the model will use to
perform geometry operations
returnZ - Z values will be included in the result if true
returnM - M values will be included in the results if true
        Output:
JOB ID as a string
"""
url = self._url + "/submitJob"
params = { "f" : "json" }
        if outSR is not None:
            params['env:outSR'] = outSR
        if processSR is not None:
            params['env:processSR'] = processSR
params['returnZ'] = returnZ
params['returnM'] = returnM
if not inputs is None:
for p in inputs:
if isinstance(p, BaseGPObject):
params[p.paramName] = p.value
if method.lower() == "get":
res = self._do_get(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
jobUrl = self._url + "/jobs/%s" % res['jobId']
return GPJob(url=jobUrl,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initialize=True)
elif method.lower() == "post":
res = self._do_post(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
jobUrl = self._url + "/jobs/%s" % res['jobId']
return GPJob(url=jobUrl,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initialize=True)
else:
raise AttributeError("Invalid input: %s. Must be GET or POST" \
% method)
#----------------------------------------------------------------------
def executeTask(self,
inputs,
outSR=None,
processSR=None,
returnZ=False,
returnM=False,
f="json",
method="POST"
):
"""
performs the execute task method
"""
        params = {
            "f" : f
        }
        url = self._url + "/execute"
        if outSR is not None:
            params['env:outSR'] = outSR
        if processSR is not None:
            params['env:processSR'] = processSR
params['returnZ'] = returnZ
params['returnM'] = returnM
for p in inputs:
if isinstance(p, BaseGPObject):
params[p.paramName] = p.value
del p
if method.lower() == "post":
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
else:
return self._do_get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
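# Illustrative usage sketch (hypothetical URL; the GP input objects depend on
# the published task and are built from the classes in _gpobjects):
#
#   task = GPTask(url="http://example.com/arcgis/rest/services/Demo/GPServer/Task")
#   job = task.submitJob(inputs=[...], method="POST")
#   while job.jobStatus == "esriJobExecuting":
#       time.sleep(1)
#   print job.results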
########################################################################
class GPJob(BaseAGSServer):
"""
Represents an ArcGIS GeoProcessing Job
"""
_proxy_url = None
_proxy_port = None
_jobId = None
_messages = None
_results = None
_jobStatus = None
_inputs = None
_json = None
_securityHandler = None
#----------------------------------------------------------------------
def __init__(self, url, securityHandler=None,
proxy_url=None, proxy_port=None,
initialize=False):
"""Constructor"""
self._url = url
if securityHandler is not None:
self._securityHandler = securityHandler
self._referer_url = securityHandler.referer_url
self._proxy_url = proxy_url
self._proxy_port = proxy_port
if initialize:
self.__init()
#----------------------------------------------------------------------
def __str__(self):
"""returns object as a string"""
self.__init()
return self._json
#----------------------------------------------------------------------
def __init(self):
""" initializes all the properties """
params = {"f" : "json"}
json_dict = self._do_get(url=self._url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
self._json = json.dumps(json_dict)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.iteritems():
if k in attributes:
setattr(self, "_"+ k, json_dict[k])
else:
print k, " - attribute not implemented for GPJob."
#----------------------------------------------------------------------
def cancelJob(self):
""" cancels the job """
params = {
"f" : "json"
}
return self._do_get(url=self._url + "/cancel",
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def messages(self):
""" returns the messages """
self.__init()
return self._messages
#----------------------------------------------------------------------
def _get_json(self, urlpart):
"""
gets the result object dictionary
"""
url = self._url + "/%s" % urlpart
params = {
"f" : "json",
}
return self._do_get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
                            proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def results(self):
""" returns the results """
self.__init()
_tempRes = []
for k,v in self._results.iteritems():
param = self._get_json(v['paramUrl'])
if param['dataType'] == "GPFeatureRecordSetLayer":
self._results[k] = GPFeatureRecordSetLayer.fromJSON(json.dumps(param))
elif param['dataType'].lower().find('gpmultivalue') > -1:
self._results[k] = GPMultiValue.fromJSON(json.dumps(param))
elif param['dataType'] == "GPString":
self._results[k] = GPString.fromJSON(json.dumps(param))
elif param['dataType'] == "GPLong":
self._results[k] = GPLong.fromJSON(json.dumps(param))
elif param['dataType'] == "GPDouble":
self._results[k] = GPDouble.fromJSON(json.dumps(param))
elif param['dataType'] == "GPDate":
self._results[k] = GPDate.fromJSON(json.dumps(param))
elif param['dataType'] == "GPBoolean":
self._results[k] = GPBoolean.fromJSON(json.dumps(param))
elif param['dataType'] == "GPDataFile":
self._results[k] = GPDataFile.fromJSON(json.dumps(param))
elif param['dataType'] == "GPLinearUnit":
self._results[k] = GPLinearUnit.fromJSON(json.dumps(param))
elif param['dataType'] == "GPMultiValue":
self._results[k] = GPMultiValue.fromJSON(json.dumps(param))
elif param['dataType'] == "GPRasterData":
self._results[k] = GPRasterData.fromJSON(json.dumps(param))
elif param['dataType'] == "GPRasterDataLayer":
self._results[k] = GPRasterDataLayer.fromJSON(json.dumps(param))
elif param['dataType'] == "GPRecordSet":
self._results[k] = GPRecordSet.fromJSON(json.dumps(param))
return self._results
#----------------------------------------------------------------------
@property
def jobStatus(self):
""" returns the job status """
self.__init()
return self._jobStatus
#----------------------------------------------------------------------
@property
def jobId(self):
""" returns the job ID """
if self._jobId is None:
self.__init()
return self._jobId
#----------------------------------------------------------------------
@property
def inputs(self):
""" returns the inputs of a service """
self.__init()
return self._inputs
#----------------------------------------------------------------------
def getParameterValue(self, parameterName):
""" gets a parameter value """
if self._results is None:
self.__init()
parameter = self._results[parameterName]
        return parameter
|
bcornwellmott/erpnext | refs/heads/develop | erpnext/hr/doctype/daily_work_summary_settings/__init__.py | 12133432 | |
illicitonion/givabit | refs/heads/master | lib/sdks/google_appengine_1.7.1/google_appengine/lib/django_1_3/tests/modeltests/custom_columns/__init__.py | 12133432 | |
OpenCanada/website | refs/heads/master | newsletter/__init__.py | 12133432 | |
wevoice/wesub | refs/heads/staging | apps/jsdemo/__init__.py | 20 | # Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
|
aifil/odoo | refs/heads/8.0 | addons/l10n_pl/__init__.py | 381 | # -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
|
bingo235/TeamTalk | refs/heads/master | win-client/3rdParty/src/json/test/generate_expected.py | 257 | import glob
import os.path
for path in glob.glob( '*.json' ):
text = file(path,'rt').read()
target = os.path.splitext(path)[0] + '.expected'
if os.path.exists( target ):
print 'skipping:', target
else:
print 'creating:', target
file(target,'wt').write(text)
|
chaffra/sympy | refs/heads/master | bin/coverage_doctest.py | 83 | #!/usr/bin/env python
"""
Program to test that all methods/functions have at least one example
doctest. Also checks if docstrings are imported into Sphinx. For this to
work, the Sphinx docs need to be built first. Use "cd doc; make html" to
build the Sphinx docs.
Usage:
./bin/coverage_doctest.py sympy/core
or
./bin/coverage_doctest.py sympy/core/basic.py
If no arguments are given, all files in sympy/ are checked.
"""
from __future__ import print_function
import os
import sys
import inspect
from argparse import ArgumentParser, RawDescriptionHelpFormatter
try:
from HTMLParser import HTMLParser
except ImportError:
# It's html.parser in Python 3
from html.parser import HTMLParser
# Load color templates, used from sympy/utilities/runtests.py
color_templates = (
("Black", "0;30"),
("Red", "0;31"),
("Green", "0;32"),
("Brown", "0;33"),
("Blue", "0;34"),
("Purple", "0;35"),
("Cyan", "0;36"),
("LightGray", "0;37"),
("DarkGray", "1;30"),
("LightRed", "1;31"),
("LightGreen", "1;32"),
("Yellow", "1;33"),
("LightBlue", "1;34"),
("LightPurple", "1;35"),
("LightCyan", "1;36"),
("White", "1;37"),
)
colors = {}
for name, value in color_templates:
colors[name] = value
c_normal = '\033[0m'
c_color = '\033[%sm'
def print_header(name, underline=None, color=None):
print()
if color:
print("%s%s%s" % (c_color % colors[color], name, c_normal))
else:
print(name)
if underline and not color:
print(underline*len(name))
def print_coverage(module_path, c, c_md, c_mdt, c_idt, c_sph, f, f_md, f_mdt,
f_idt, f_sph, score, total_doctests, total_members,
sphinx_score, total_sphinx, verbose=False, no_color=False,
sphinx=True):
""" Prints details (depending on verbose) of a module """
doctest_color = "Brown"
sphinx_color = "DarkGray"
less_100_color = "Red"
less_50_color = "LightRed"
equal_100_color = "Green"
big_header_color = "LightPurple"
small_header_color = "Purple"
if no_color:
score_string = "Doctests: %s%% (%s of %s)" % (score, total_doctests,
total_members)
elif score < 100:
if score < 50:
score_string = "%sDoctests:%s %s%s%% (%s of %s)%s" % \
(c_color % colors[doctest_color], c_normal, c_color % colors[less_50_color], score, total_doctests, total_members, c_normal)
else:
score_string = "%sDoctests:%s %s%s%% (%s of %s)%s" % \
(c_color % colors[doctest_color], c_normal, c_color % colors[less_100_color], score, total_doctests, total_members, c_normal)
else:
score_string = "%sDoctests:%s %s%s%% (%s of %s)%s" % \
(c_color % colors[doctest_color], c_normal, c_color % colors[equal_100_color], score, total_doctests, total_members, c_normal)
if sphinx:
if no_color:
sphinx_score_string = "Sphinx: %s%% (%s of %s)" % (sphinx_score,
total_members - total_sphinx, total_members)
elif sphinx_score < 100:
if sphinx_score < 50:
sphinx_score_string = "%sSphinx:%s %s%s%% (%s of %s)%s" % \
(c_color % colors[sphinx_color], c_normal, c_color %
colors[less_50_color], sphinx_score, total_members - total_sphinx,
total_members, c_normal)
else:
sphinx_score_string = "%sSphinx:%s %s%s%% (%s of %s)%s" % \
(c_color % colors[sphinx_color], c_normal, c_color %
colors[less_100_color], sphinx_score, total_members -
total_sphinx, total_members, c_normal)
else:
sphinx_score_string = "%sSphinx:%s %s%s%% (%s of %s)%s" % \
(c_color % colors[sphinx_color], c_normal, c_color %
colors[equal_100_color], sphinx_score, total_members -
total_sphinx, total_members, c_normal)
if verbose:
print('\n' + '-'*70)
print(module_path)
print('-'*70)
else:
if sphinx:
print("%s: %s %s" % (module_path, score_string, sphinx_score_string))
else:
print("%s: %s" % (module_path, score_string))
if verbose:
print_header('CLASSES', '*', not no_color and big_header_color)
if not c:
print_header('No classes found!')
else:
if c_md:
print_header('Missing docstrings', '-', not no_color and small_header_color)
for md in c_md:
print(' * ' + md)
if c_mdt:
print_header('Missing doctests', '-', not no_color and small_header_color)
for md in c_mdt:
print(' * ' + md)
if c_idt:
# Use "# indirect doctest" in the docstring to
# supress this warning.
print_header('Indirect doctests', '-', not no_color and small_header_color)
for md in c_idt:
print(' * ' + md)
                print('\n    Use \"# indirect doctest\" in the docstring to suppress this warning')
if c_sph:
print_header('Not imported into Sphinx', '-', not no_color and small_header_color)
for md in c_sph:
print(' * ' + md)
print_header('FUNCTIONS', '*', not no_color and big_header_color)
if not f:
print_header('No functions found!')
else:
if f_md:
print_header('Missing docstrings', '-', not no_color and small_header_color)
for md in f_md:
print(' * ' + md)
if f_mdt:
print_header('Missing doctests', '-', not no_color and small_header_color)
for md in f_mdt:
print(' * ' + md)
if f_idt:
print_header('Indirect doctests', '-', not no_color and small_header_color)
for md in f_idt:
print(' * ' + md)
                print('\n    Use \"# indirect doctest\" in the docstring to suppress this warning')
if f_sph:
print_header('Not imported into Sphinx', '-', not no_color and small_header_color)
for md in f_sph:
print(' * ' + md)
if verbose:
print('\n' + '-'*70)
print(score_string)
if sphinx:
print(sphinx_score_string)
print('-'*70)
def _is_indirect(member, doc):
""" Given string repr of doc and member checks if the member
contains indirect documentation """
d = member in doc
e = 'indirect doctest' in doc
    return not d and not e
def _get_arg_list(name, fobj):
""" Given a function object, constructs a list of arguments
and their defaults. Takes care of varargs and kwargs """
trunc = 20 # Sometimes argument length can be huge
argspec = inspect.getargspec(fobj)
arg_list = []
if argspec.args:
for arg in argspec.args:
arg_list.append(str(arg))
arg_list.reverse()
# Now add the defaults
if argspec.defaults:
for i in range(len(argspec.defaults)):
            arg_list[i] = str(arg_list[i]) + '=' + str(argspec.defaults[-(i + 1)])
# Get the list in right order
arg_list.reverse()
# Add var args
if argspec.varargs:
arg_list.append(argspec.varargs)
if argspec.keywords:
arg_list.append(argspec.keywords)
# Truncate long arguments
arg_list = [x[:trunc] for x in arg_list]
# Construct the parameter string (enclosed in brackets)
str_param = "%s(%s)" % (name, ', '.join(arg_list))
return str_param
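# Illustrative example (hypothetical function): given
#     def f(x, y=2, *args, **kwargs): pass
# _get_arg_list("f", f) returns "f(x, y=2, args, kwargs)".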
def get_mod_name(path, base):
""" Gets a module name, given the path of file/dir and base
dir of sympy """
rel_path = os.path.relpath(path, base)
# Remove the file extension
rel_path, ign = os.path.splitext(rel_path)
# Replace separators by . for module path
file_module = ""
h, t = os.path.split(rel_path)
while h or t:
if t:
file_module = t + '.' + file_module
h, t = os.path.split(h)
return file_module[:-1]
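# Illustrative example (made-up paths): get_mod_name("/repo/sympy/core/basic.py",
# "/repo") returns "sympy.core.basic".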
class FindInSphinx(HTMLParser):
    def __init__(self):
        HTMLParser.__init__(self)
        # Instance attribute, so results do not leak between parsers when
        # several module pages are scanned.
        self.is_imported = []
    def handle_starttag(self, tag, attr):
        a = dict(attr)
        if tag == "div" and a.get('class', None) == "viewcode-block":
            self.is_imported.append(a['id'])
def find_sphinx(name, mod_path, found={}):
if mod_path in found: # Cache results
return name in found[mod_path]
doc_path = mod_path.split('.')
doc_path[-1] += '.html'
sphinx_path = os.path.join(sympy_top, 'doc', '_build', 'html', '_modules', *doc_path)
if not os.path.exists(sphinx_path):
return False
with open(sphinx_path) as f:
html_txt = f.read()
p = FindInSphinx()
p.feed(html_txt)
found[mod_path] = p.is_imported
return name in p.is_imported
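# Illustrative example (hypothetical names): find_sphinx("Basic",
# "sympy.core.basic") is True only when the built page
# doc/_build/html/_modules/sympy/core/basic.html wraps "Basic" in a
# viewcode-block div, i.e. the docstring was imported into Sphinx.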
def process_function(name, c_name, b_obj, mod_path, f_sk, f_md, f_mdt, f_idt,
f_has_doctest, sk_list, sph, sphinx=True):
"""
Processes a function to get information regarding documentation.
    It is assumed that the function calling this subroutine has already
verified that it is a valid module function.
"""
if name in sk_list:
return False, False
# We add in the end, as inspect.getsourcelines is slow
add_md = False
add_mdt = False
add_idt = False
in_sphinx = True
f_doctest = False
function = False
if inspect.isclass(b_obj):
obj = getattr(b_obj, name)
obj_name = c_name + '.' + name
else:
obj = b_obj
obj_name = name
full_name = _get_arg_list(name, obj)
if name.startswith('_'):
f_sk.append(full_name)
else:
if not obj.__doc__:
add_md = True
elif not '>>>' in obj.__doc__:
add_mdt = True
elif _is_indirect(name, obj.__doc__):
add_idt = True
else:
f_doctest = True
function = True
if sphinx:
in_sphinx = find_sphinx(obj_name, mod_path)
if add_md or add_mdt or add_idt or not in_sphinx:
try:
line_no = inspect.getsourcelines(obj)[1]
except IOError:
# Raised when source does not exist
# which means the function is not there.
return False, False
full_name = "LINE %d: %s" % (line_no, full_name)
if add_md:
f_md.append(full_name)
elif add_mdt:
f_mdt.append(full_name)
elif add_idt:
f_idt.append(full_name)
if not in_sphinx:
sph.append(full_name)
return f_doctest, function
def process_class(c_name, obj, c_sk, c_md, c_mdt, c_idt, c_has_doctest,
mod_path, sph, sphinx=True):
"""
Extracts information about the class regarding documentation.
It is assumed that the function calling this subroutine has already
checked that the class is valid.
"""
# Skip class case
if c_name.startswith('_'):
c_sk.append(c_name)
return False, False, None
c = False
c_dt = False
# Get the line number of class
try:
source, line_no = inspect.getsourcelines(obj)
except IOError:
# Raised when source does not exist
# which means the class is not there.
return False, False, None
c = True
full_name = "LINE %d: %s" % (line_no, c_name)
if not obj.__doc__:
c_md.append(full_name)
elif not '>>>' in obj.__doc__:
c_mdt.append(full_name)
elif _is_indirect(c_name, obj.__doc__):
c_idt.append(full_name)
else:
c_dt = True
c_has_doctest.append(full_name)
in_sphinx = False
if sphinx:
in_sphinx = find_sphinx(c_name, mod_path)
if not in_sphinx:
sph.append(full_name)
return c_dt, c, source
def coverage(module_path, verbose=False, no_color=False, sphinx=True):
""" Given a module path, builds an index of all classes and functions
contained. It then goes through each of the classes/functions to get
the docstring and doctest coverage of the module. """
# Import the package and find members
m = None
try:
__import__(module_path)
m = sys.modules[module_path]
except Exception as a:
# Most likely cause, absence of __init__
print("%s could not be loaded due to %s." % (module_path, repr(a)))
return 0, 0, 0
c_skipped = []
c_md = []
c_mdt = []
c_has_doctest = []
c_idt = []
classes = 0
c_doctests = 0
c_sph = []
f_skipped = []
f_md = []
f_mdt = []
f_has_doctest = []
f_idt = []
functions = 0
f_doctests = 0
f_sph = []
skip_members = ['__abstractmethods__']
# Get the list of members
m_members = dir(m)
for member in m_members:
# Check for skipped functions first, they throw nasty errors
# when combined with getattr
if member in skip_members:
continue
# Identify if the member (class/def) a part of this module
obj = getattr(m, member)
obj_mod = inspect.getmodule(obj)
# Function not a part of this module
if not obj_mod or not obj_mod.__name__ == module_path:
continue
# If it's a function
if inspect.isfunction(obj) or inspect.ismethod(obj):
f_dt, f = process_function(member, '', obj, module_path,
f_skipped, f_md, f_mdt, f_idt, f_has_doctest, skip_members,
f_sph, sphinx=sphinx)
if f:
functions += 1
if f_dt:
f_doctests += 1
# If it's a class, look at it's methods too
elif inspect.isclass(obj):
# Process the class first
c_dt, c, source = process_class(member, obj, c_skipped, c_md,
c_mdt, c_idt, c_has_doctest, module_path, c_sph, sphinx=sphinx)
if not c:
continue
else:
classes += 1
if c_dt:
c_doctests += 1
# Iterate through it's members
for f_name in obj.__dict__:
if f_name in skip_members or f_name.startswith('_'):
continue
# Check if def funcname appears in source
if not ("def " + f_name) in ' '.join(source):
continue
# Identify the module of the current class member
f_obj = getattr(obj, f_name)
obj_mod = inspect.getmodule(f_obj)
# Function not a part of this module
if not obj_mod or not obj_mod.__name__ == module_path:
continue
# If it's a function
if inspect.isfunction(f_obj) or inspect.ismethod(f_obj):
f_dt, f = process_function(f_name, member, obj,
module_path, f_skipped, f_md, f_mdt, f_idt, f_has_doctest,
skip_members, f_sph, sphinx=sphinx)
if f:
functions += 1
if f_dt:
f_doctests += 1
# Evaluate the percent coverage
total_doctests = c_doctests + f_doctests
total_members = classes + functions
if total_members:
score = 100 * float(total_doctests) / (total_members)
else:
score = 100
score = int(score)
if sphinx:
total_sphinx = len(c_sph) + len(f_sph)
if total_members:
sphinx_score = 100 - 100 * float(total_sphinx) / total_members
else:
sphinx_score = 100
sphinx_score = int(sphinx_score)
else:
total_sphinx = 0
sphinx_score = 0
# Sort functions/classes by line number
c_md = sorted(c_md, key=lambda x: int(x.split()[1][:-1]))
c_mdt = sorted(c_mdt, key=lambda x: int(x.split()[1][:-1]))
c_idt = sorted(c_idt, key=lambda x: int(x.split()[1][:-1]))
f_md = sorted(f_md, key=lambda x: int(x.split()[1][:-1]))
f_mdt = sorted(f_mdt, key=lambda x: int(x.split()[1][:-1]))
f_idt = sorted(f_idt, key=lambda x: int(x.split()[1][:-1]))
print_coverage(module_path, classes, c_md, c_mdt, c_idt, c_sph, functions, f_md,
f_mdt, f_idt, f_sph, score, total_doctests, total_members,
sphinx_score, total_sphinx, verbose=verbose,
no_color=no_color, sphinx=sphinx)
return total_doctests, total_sphinx, total_members
# Files and directories to skip entirely (populated as needed).
skip_paths = []
def go(sympy_top, file, verbose=False, no_color=False, exact=True, sphinx=True):
if os.path.isdir(file):
doctests, total_sphinx, num_functions = 0, 0, 0
for F in os.listdir(file):
_doctests, _total_sphinx, _num_functions = go(sympy_top, '%s/%s' % (file, F),
verbose=verbose, no_color=no_color, exact=False, sphinx=sphinx)
doctests += _doctests
total_sphinx += _total_sphinx
num_functions += _num_functions
return doctests, total_sphinx, num_functions
if (not (file.endswith('.py') or file.endswith('.pyx')) or
file.endswith('__init__.py') or
not exact and ('test_' in file or 'bench_' in file or
any(name in file for name in skip_paths))):
return 0, 0, 0
if not os.path.exists(file):
print("File(%s does not exist." % file)
sys.exit(1)
# Relpath for constructing the module name
return coverage(get_mod_name(file, sympy_top), verbose=verbose,
no_color=no_color, sphinx=sphinx)
if __name__ == "__main__":
bintest_dir = os.path.abspath(os.path.dirname(__file__)) # bin/cover...
sympy_top = os.path.split(bintest_dir)[0] # ../
sympy_dir = os.path.join(sympy_top, 'sympy') # ../sympy/
if os.path.isdir(sympy_dir):
sys.path.insert(0, sympy_top)
usage = "usage: ./bin/doctest_coverage.py PATHS"
parser = ArgumentParser(
description=__doc__,
usage=usage,
formatter_class=RawDescriptionHelpFormatter,
)
parser.add_argument("path", nargs='*', default=[os.path.join(sympy_top, 'sympy')])
parser.add_argument("-v", "--verbose", action="store_true", dest="verbose",
default=False)
parser.add_argument("--no-colors", action="store_true", dest="no_color",
help="use no colors", default=False)
parser.add_argument("--no-sphinx", action="store_false", dest="sphinx",
help="don't report Sphinx coverage", default=True)
args = parser.parse_args()
if args.sphinx and not os.path.exists(os.path.join(sympy_top, 'doc', '_build', 'html')):
print("""
Cannot check Sphinx coverage without a documentation build. To build the
docs, run "cd doc; make html". To skip checking Sphinx coverage, pass --no-sphinx.
""")
sys.exit(1)
full_coverage = True
for file in args.path:
file = os.path.normpath(file)
print('DOCTEST COVERAGE for %s' % (file))
print('='*70)
print()
doctests, total_sphinx, num_functions = go(sympy_top, file, verbose=args.verbose,
no_color=args.no_color, sphinx=args.sphinx)
if num_functions == 0:
score = 100
sphinx_score = 100
else:
score = 100 * float(doctests) / num_functions
score = int(score)
if doctests < num_functions:
full_coverage = False
if args.sphinx:
sphinx_score = 100 - 100 * float(total_sphinx) / num_functions
sphinx_score = int(sphinx_score)
if total_sphinx > 0:
full_coverage = False
print()
print('='*70)
if args.no_color:
print("TOTAL DOCTEST SCORE for %s: %s%% (%s of %s)" % \
(get_mod_name(file, sympy_top), score, doctests, num_functions))
elif score < 100:
print("TOTAL DOCTEST SCORE for %s: %s%s%% (%s of %s)%s" % \
(get_mod_name(file, sympy_top), c_color % (colors["Red"]),
score, doctests, num_functions, c_normal))
else:
print("TOTAL DOCTEST SCORE for %s: %s%s%% (%s of %s)%s" % \
(get_mod_name(file, sympy_top), c_color % (colors["Green"]),
score, doctests, num_functions, c_normal))
if args.sphinx:
if args.no_color:
print("TOTAL SPHINX SCORE for %s: %s%% (%s of %s)" % \
(get_mod_name(file, sympy_top), sphinx_score,
num_functions - total_sphinx, num_functions))
elif sphinx_score < 100:
print("TOTAL SPHINX SCORE for %s: %s%s%% (%s of %s)%s" % \
(get_mod_name(file, sympy_top), c_color % (colors["Red"]),
sphinx_score, num_functions - total_sphinx, num_functions, c_normal))
else:
print("TOTAL SPHINX SCORE for %s: %s%s%% (%s of %s)%s" % \
(get_mod_name(file, sympy_top), c_color % (colors["Green"]),
sphinx_score, num_functions - total_sphinx, num_functions, c_normal))
print()
sys.exit(not full_coverage)
|
codepython/restcommander | refs/heads/master | play-1.2.4/python/Lib/compiler/__init__.py | 58 | """Package for parsing and compiling Python source code
There are several functions defined at the top level that are imported
from modules contained in the package.
parse(buf, mode="exec") -> AST
Converts a string containing Python source code to an abstract
syntax tree (AST). The AST is defined in compiler.ast.
parseFile(path) -> AST
The same as parse(open(path))
walk(ast, visitor, verbose=None)
Does a pre-order walk over the ast using the visitor instance.
See compiler.visitor for details.
compile(source, filename, mode, flags=None, dont_inherit=None)
Returns a code object. A replacement for the builtin compile() function.
compileFile(filename)
Generates a .pyc file by compiling filename.
"""
from warnings import warnpy3k
warnpy3k("the compiler package has been removed in Python 3.0", stacklevel=2)
del warnpy3k
from compiler.transformer import parse, parseFile
from compiler.visitor import walk
from compiler.pycodegen import compile, compileFile
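# Illustrative usage sketch (Python 2 only; the source string is made up):
#
#   tree = parse("x = 1 + 2")                        # AST from compiler.ast
#   code = compile("x = 1 + 2", "<string>", "exec")  # replacement for builtin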
|
getsentry/disqus-postgres | refs/heads/master | src/dsq_postgres/exceptions.py | 2 | """
dsq_postgres.exceptions
~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
import psycopg2
import traceback
class CompositeTraceback(object):
    def __init__(self, tb_list):
        assert isinstance(tb_list, (list, tuple))
        self.__tb_list = tb_list
        self.__iterator = iter(self)
        # Prime the iterator so __curframe points at the first frame.
        self.__iterator.next()
    def __iter__(self):
        for tb in self.__tb_list:
            while tb:
                self.__curframe = tb
                tb = tb.tb_next
                yield tb
    # traceback.format_exception walks tb_frame/tb_lasti/tb_lineno/tb_next as
    # attributes, so these are exposed as properties rather than plain methods.
    @property
    def tb_frame(self):
        return self.__curframe.tb_frame
    @property
    def tb_lasti(self):
        return self.__curframe.tb_lasti
    @property
    def tb_lineno(self):
        return self.__curframe.tb_lineno
    @property
    def tb_next(self):
        try:
            self.__iterator.next()
        except StopIteration:
            # End of the combined traceback chain.
            return None
        return self
class TransactionAborted(psycopg2.DatabaseError):
def __init__(self, exc_info, cur_exc_info):
self.exc_info = exc_info
self.cur_exc_info = cur_exc_info
def __repr__(self):
return '\n'.join(traceback.format_exception(self.__class__, self, self.get_traceback()))
def __str__(self):
return str(unicode(self))
def __unicode__(self):
return u'(%s) %s' % (self.cur_exc_info[0].__name__, self.cur_exc_info[1])
def get_traceback(self):
return CompositeTraceback([self.exc_info[2], self.cur_exc_info[2]])
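# Illustrative usage sketch (hypothetical call site): combine the exc_info
# captured when the transaction first aborted with the current exc_info, so
# the re-raised error carries both tracebacks.
#
#   raise TransactionAborted(saved_exc_info, sys.exc_info())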
|
botswana-harvard/microbiome | refs/heads/develop | microbiome/apps/mb_maternal/forms/maternal_visit_form.py | 1 | from django import forms
from django.contrib.admin.widgets import AdminRadioSelect, AdminRadioFieldRenderer
from edc_base.form.forms import BaseModelForm
from microbiome.apps.mb.choices import VISIT_REASON, VISIT_INFO_SOURCE, MATERNAL_VISIT_STUDY_STATUS, INFO_PROVIDER
from ..models import MaternalVisit, MaternalConsent
from edc_constants.constants import ON_STUDY, MISSED_VISIT
from edc_visit_tracking.forms import VisitFormMixin
class MaternalVisitForm(VisitFormMixin, BaseModelForm):
participant_label = 'mother'
study_status = forms.ChoiceField(
label='What is the mother\'s current study status',
choices=MATERNAL_VISIT_STUDY_STATUS,
initial=ON_STUDY,
help_text="",
widget=AdminRadioSelect(renderer=AdminRadioFieldRenderer))
reason = forms.ChoiceField(
label='Reason for visit',
choices=[choice for choice in VISIT_REASON],
help_text="",
widget=AdminRadioSelect(renderer=AdminRadioFieldRenderer))
info_source = forms.ChoiceField(
label='Source of information',
required=False,
choices=[choice for choice in VISIT_INFO_SOURCE],
widget=AdminRadioSelect(renderer=AdminRadioFieldRenderer))
def clean(self):
cleaned_data = super(MaternalVisitForm, self).clean()
        if self.instance.id:
instance = self.instance
else:
instance = MaternalVisit(**self.cleaned_data)
instance.subject_failed_eligibility(forms.ValidationError)
return cleaned_data
class Meta:
model = MaternalVisit
fields = '__all__'
|
2014c2g2/teamwork | refs/heads/master | wsgi/static/reeborg/src/libraries/brython/Lib/unittest/test/test_break.py | 785 | import gc
import io
import os
import sys
import signal
import weakref
import unittest
@unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill")
@unittest.skipIf(sys.platform =="win32", "Test cannot run on Windows")
@unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 "
"if threads have been used")
class TestBreak(unittest.TestCase):
def setUp(self):
self._default_handler = signal.getsignal(signal.SIGINT)
def tearDown(self):
signal.signal(signal.SIGINT, self._default_handler)
unittest.signals._results = weakref.WeakKeyDictionary()
unittest.signals._interrupt_handler = None
def testInstallHandler(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(unittest.signals._interrupt_handler.called)
def testRegisterResult(self):
result = unittest.TestResult()
unittest.registerResult(result)
for ref in unittest.signals._results:
if ref is result:
break
elif ref is not result:
self.fail("odd object in result set")
else:
self.fail("result not found")
def testInterruptCaught(self):
default_handler = signal.getsignal(signal.SIGINT)
result = unittest.TestResult()
unittest.installHandler()
unittest.registerResult(result)
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
result.breakCaught = True
self.assertTrue(result.shouldStop)
try:
test(result)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(result.breakCaught)
def testSecondInterrupt(self):
result = unittest.TestResult()
unittest.installHandler()
unittest.registerResult(result)
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
result.breakCaught = True
self.assertTrue(result.shouldStop)
os.kill(pid, signal.SIGINT)
self.fail("Second KeyboardInterrupt not raised")
try:
test(result)
except KeyboardInterrupt:
pass
else:
self.fail("Second KeyboardInterrupt not raised")
self.assertTrue(result.breakCaught)
def testTwoResults(self):
unittest.installHandler()
result = unittest.TestResult()
unittest.registerResult(result)
new_handler = signal.getsignal(signal.SIGINT)
result2 = unittest.TestResult()
unittest.registerResult(result2)
self.assertEqual(signal.getsignal(signal.SIGINT), new_handler)
result3 = unittest.TestResult()
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
try:
test(result)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(result.shouldStop)
self.assertTrue(result2.shouldStop)
self.assertFalse(result3.shouldStop)
def testHandlerReplacedButCalled(self):
# If our handler has been replaced (is no longer installed) but is
# called by the *new* handler, then it isn't safe to delay the
# SIGINT and we should immediately delegate to the default handler
unittest.installHandler()
handler = signal.getsignal(signal.SIGINT)
        def new_handler(signum, frame):
            handler(signum, frame)
        signal.signal(signal.SIGINT, new_handler)
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
pass
else:
self.fail("replaced but delegated handler doesn't raise interrupt")
def testRunner(self):
# Creating a TextTestRunner with the appropriate argument should
# register the TextTestResult it creates
runner = unittest.TextTestRunner(stream=io.StringIO())
result = runner.run(unittest.TestSuite())
self.assertIn(result, unittest.signals._results)
def testWeakReferences(self):
# Calling registerResult on a result should not keep it alive
result = unittest.TestResult()
unittest.registerResult(result)
ref = weakref.ref(result)
del result
# For non-reference counting implementations
        gc.collect()
        gc.collect()
self.assertIsNone(ref())
def testRemoveResult(self):
result = unittest.TestResult()
unittest.registerResult(result)
unittest.installHandler()
self.assertTrue(unittest.removeResult(result))
# Should this raise an error instead?
self.assertFalse(unittest.removeResult(unittest.TestResult()))
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
pass
self.assertFalse(result.shouldStop)
def testMainInstallsHandler(self):
failfast = object()
test = object()
verbosity = object()
result = object()
default_handler = signal.getsignal(signal.SIGINT)
class FakeRunner(object):
initArgs = []
runArgs = []
def __init__(self, *args, **kwargs):
self.initArgs.append((args, kwargs))
def run(self, test):
self.runArgs.append(test)
return result
class Program(unittest.TestProgram):
def __init__(self, catchbreak):
self.exit = False
self.verbosity = verbosity
self.failfast = failfast
self.catchbreak = catchbreak
self.testRunner = FakeRunner
self.test = test
self.result = None
p = Program(False)
p.runTests()
self.assertEqual(FakeRunner.initArgs, [((), {'buffer': None,
'verbosity': verbosity,
'failfast': failfast,
'warnings': None})])
self.assertEqual(FakeRunner.runArgs, [test])
self.assertEqual(p.result, result)
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
FakeRunner.initArgs = []
FakeRunner.runArgs = []
p = Program(True)
p.runTests()
self.assertEqual(FakeRunner.initArgs, [((), {'buffer': None,
'verbosity': verbosity,
'failfast': failfast,
'warnings': None})])
self.assertEqual(FakeRunner.runArgs, [test])
self.assertEqual(p.result, result)
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
def testRemoveHandler(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
unittest.removeHandler()
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
# check that calling removeHandler multiple times has no ill-effect
unittest.removeHandler()
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
def testRemoveHandlerAsDecorator(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
@unittest.removeHandler
def test():
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
test()
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
|
Buggaboo/gimp-plugin-export-layers | refs/heads/master | export_layers/pygimplib/tests/__init__.py | 1 | #-------------------------------------------------------------------------------
#
# This file is part of pygimplib.
#
# Copyright (C) 2014, 2015 khalim19 <[email protected]>
#
# pygimplib is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pygimplib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pygimplib. If not, see <http://www.gnu.org/licenses/>.
#
#-------------------------------------------------------------------------------
# empty
|
jart/tensorflow | refs/heads/master | tensorflow/python/kernel_tests/softmax_op_test.py | 12 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SoftmaxOp and LogSoftmaxOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class SoftmaxTest(test.TestCase):
def _npSoftmax(self, features, dim=-1, log=False):
    if dim == -1:
dim = len(features.shape) - 1
one_only_on_dim = list(features.shape)
one_only_on_dim[dim] = 1
is_fp16 = features.dtype == np.float16
if is_fp16:
      # Do the compute in fp32 and cast the result back to fp16.
features = features.astype(np.float32)
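    # Subtracting the per-axis max before exponentiating keeps exp() from
    # overflowing; softmax is shift-invariant along `dim`, so the result is
    # unchanged.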
e = np.exp(features - np.reshape(
np.amax(
features, axis=dim), one_only_on_dim))
softmax = e / np.reshape(np.sum(e, axis=dim), one_only_on_dim)
if log:
res = np.log(softmax)
else:
res = softmax
if is_fp16:
res = res.astype(np.float16)
return res
def _testSoftmax(self, np_features, dim=-1, log=False, use_gpu=False):
# A previous version of the code checked the op name rather than the op type
# to distinguish between log and non-log. Use an arbitrary name to catch
# this bug in future.
name = "arbitrary"
np_softmax = self._npSoftmax(np_features, dim=dim, log=log)
with self.test_session(use_gpu=use_gpu):
if log:
tf_softmax = nn_ops.log_softmax(np_features, axis=dim, name=name)
else:
tf_softmax = nn_ops.softmax(np_features, axis=dim, name=name)
out = tf_softmax.eval()
self.assertAllCloseAccordingToType(np_softmax, out)
self.assertShapeEqual(np_softmax, tf_softmax)
if not log:
# Bonus check: the softmaxes should add to one in dimension dim.
sum_along_dim = np.sum(out, axis=dim)
self.assertAllCloseAccordingToType(
np.ones(sum_along_dim.shape), sum_along_dim)
def _testAll(self, features):
self._testSoftmax(features, use_gpu=True)
self._testSoftmax(features, log=True, use_gpu=True)
self._testOverflow(use_gpu=True)
def testNpSoftmax(self):
features = [[1., 1., 1., 1.], [1., 2., 3., 4.]]
# Batch 0: All exps are 1. The expected result is
# Softmaxes = [0.25, 0.25, 0.25, 0.25]
# LogSoftmaxes = [-1.386294, -1.386294, -1.386294, -1.386294]
#
# Batch 1:
# exps = [1., 2.718, 7.389, 20.085]
# sum = 31.192
# Softmaxes = exps / sum = [0.0320586, 0.08714432, 0.23688282, 0.64391426]
# LogSoftmaxes = [-3.44019 , -2.44019 , -1.44019 , -0.44019]
np_sm = self._npSoftmax(np.array(features))
self.assertAllClose(
np.array([[0.25, 0.25, 0.25, 0.25],
[0.0320586, 0.08714432, 0.23688282, 0.64391426]]),
np_sm,
rtol=1.e-5,
atol=1.e-5)
np_lsm = self._npSoftmax(np.array(features), log=True)
self.assertAllClose(
np.array([[-1.386294, -1.386294, -1.386294, -1.386294],
[-3.4401897, -2.4401897, -1.4401897, -0.4401897]]),
np_lsm,
rtol=1.e-5,
atol=1.e-5)
def _testOverflow(self, use_gpu=False):
if use_gpu:
type = np.float32 # pylint: disable=redefined-builtin
else:
type = np.float64 # pylint: disable=redefined-builtin
max = np.finfo(type).max # pylint: disable=redefined-builtin
features = np.array([[1., 1., 1., 1.], [max, 1., 2., 3.]]).astype(type)
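    # A max-float logit would overflow a naive exp(); with the max-subtraction
    # trick, log-softmax maps it to 0 and the remaining entries to -max.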
with self.test_session(use_gpu=use_gpu):
tf_log_softmax = nn_ops.log_softmax(features)
out = tf_log_softmax.eval()
self.assertAllClose(
np.array([[-1.386294, -1.386294, -1.386294, -1.386294],
[0, -max, -max, -max]]),
out,
rtol=1.e-5,
atol=1.e-5)
def testFloat(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32))
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testFloatGPU(self):
if test.is_gpu_available(cuda_only=True):
rows = [2**x + np.random.randint(0, 16) for x in range(1, 4)]
cols = [2**x + np.random.randint(0, 16) for x in range(1, 4)]
for row, col in zip(rows, cols):
logging.info("Testing softmax float dtype in shape [%d, %d]", row, col)
data = np.random.rand(row, col)
self._testAll(data.astype(np.float32))
def testHalf(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16))
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testHalfGPU(self):
if test.is_gpu_available(cuda_only=True):
rows = [2**x + np.random.randint(0, 16) for x in range(1, 4)]
cols = [2**x + np.random.randint(0, 16) for x in range(1, 4)]
for row, col in zip(rows, cols):
logging.info("Testing softmax half dtype in shape [%d, %d]", row, col)
data = np.random.rand(row, col)
self._testAll(data.astype(np.float16))
def testDouble(self):
self._testSoftmax(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64))
self._testOverflow()
  def test1DTensorAsInput(self):
self._testSoftmax(
np.array([3., 2., 3., 9.]).astype(np.float64), use_gpu=False)
self._testOverflow(use_gpu=False)
def test3DTensorAsInput(self):
self._testSoftmax(
np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32),
use_gpu=False)
self._testOverflow(use_gpu=False)
def testAlongFirstDimension(self):
self._testSoftmax(
np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32),
dim=0,
use_gpu=False)
self._testOverflow(use_gpu=False)
def testAlongSecondDimension(self):
self._testSoftmax(
np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32),
dim=1,
use_gpu=False)
self._testOverflow(use_gpu=False)
def testShapeInference(self):
op = nn_ops.softmax([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]])
self.assertEqual([3, 2, 4], op.get_shape())
def testEmptyInput(self):
with self.test_session():
x = array_ops.placeholder(dtypes.float32, shape=[0, 3])
self.assertEqual(0, array_ops.size(x).eval())
# reshape would raise if logits is empty
with self.assertRaises(errors_impl.InvalidArgumentError):
nn_ops.softmax(x, axis=0).eval()
def testDimTooLarge(self):
with self.test_session():
# Use placeholder to make sure we get runtime error instead of shape
# inference error.
dim = array_ops.placeholder_with_default(100, shape=[])
with self.assertRaises(errors_impl.InvalidArgumentError):
nn_ops.softmax([1., 2., 3., 4.], axis=dim).eval()
def testLargeDims(self):
# Make sure that we properly handle large inputs. See
# https://github.com/tensorflow/tensorflow/issues/4425 for details
for dims in [129, 256]:
ones = np.random.rand(dims, dims).astype(np.float32)
np_softmax = self._npSoftmax(ones)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
x = array_ops.placeholder(dtypes.float32)
y = nn_ops.softmax(x)
tf_softmax = sess.run(y, feed_dict={x: ones})
self.assertAllClose(tf_softmax, np_softmax)
if __name__ == "__main__":
test.main()
|
vikigenius/ns3-mmwave | refs/heads/master | src/internet/bindings/modulegen__gcc_LP64.py | 30 | null |
DirtyPiece/dancestudio | refs/heads/master | Build/Tools/Python27/Lib/test/test_traceback.py | 26 | """Test cases for traceback module"""
from StringIO import StringIO
import sys
import unittest
from imp import reload
from test.test_support import run_unittest, is_jython, Error, cpython_only
import traceback
class TracebackCases(unittest.TestCase):
# For now, a very minimal set of tests. I want to be sure that
# formatting of SyntaxErrors works based on changes for 2.1.
def get_exception_format(self, func, exc):
try:
func()
except exc, value:
return traceback.format_exception_only(exc, value)
else:
raise ValueError, "call did not raise exception"
def syntax_error_with_caret(self):
compile("def fact(x):\n\treturn x!\n", "?", "exec")
def syntax_error_with_caret_2(self):
compile("1 +\n", "?", "exec")
def syntax_error_without_caret(self):
# XXX why doesn't compile raise the same traceback?
import test.badsyntax_nocaret
def syntax_error_bad_indentation(self):
compile("def spam():\n print 1\n print 2", "?", "exec")
def syntax_error_bad_indentation2(self):
compile(" print(2)", "?", "exec")
def test_caret(self):
err = self.get_exception_format(self.syntax_error_with_caret,
SyntaxError)
self.assertTrue(len(err) == 4)
self.assertTrue(err[1].strip() == "return x!")
self.assertIn("^", err[2]) # third line has caret
self.assertTrue(err[1].find("!") == err[2].find("^")) # in the right place
err = self.get_exception_format(self.syntax_error_with_caret_2,
SyntaxError)
self.assertIn("^", err[2]) # third line has caret
self.assertTrue(err[2].count('\n') == 1) # and no additional newline
self.assertTrue(err[1].find("+") == err[2].find("^")) # in the right place
def test_nocaret(self):
if is_jython:
# jython adds a caret in this case (why shouldn't it?)
return
err = self.get_exception_format(self.syntax_error_without_caret,
SyntaxError)
self.assertTrue(len(err) == 3)
self.assertTrue(err[1].strip() == "[x for x in x] = x")
def test_bad_indentation(self):
err = self.get_exception_format(self.syntax_error_bad_indentation,
IndentationError)
self.assertTrue(len(err) == 4)
self.assertTrue(err[1].strip() == "print 2")
self.assertIn("^", err[2])
self.assertTrue(err[1].find("2") == err[2].find("^"))
def test_bug737473(self):
import os, tempfile, time
savedpath = sys.path[:]
testdir = tempfile.mkdtemp()
try:
sys.path.insert(0, testdir)
testfile = os.path.join(testdir, 'test_bug737473.py')
print >> open(testfile, 'w'), """
def test():
raise ValueError"""
if 'test_bug737473' in sys.modules:
del sys.modules['test_bug737473']
import test_bug737473
try:
test_bug737473.test()
except ValueError:
# this loads source code to linecache
traceback.extract_tb(sys.exc_traceback)
# If this test runs too quickly, test_bug737473.py's mtime
# attribute will remain unchanged even if the file is rewritten.
# Consequently, the file would not reload. So, added a sleep()
# delay to assure that a new, distinct timestamp is written.
# Since WinME with FAT32 has multisecond resolution, more than
# three seconds are needed for this test to pass reliably :-(
time.sleep(4)
print >> open(testfile, 'w'), """
def test():
raise NotImplementedError"""
reload(test_bug737473)
try:
test_bug737473.test()
except NotImplementedError:
src = traceback.extract_tb(sys.exc_traceback)[-1][-1]
self.assertEqual(src, 'raise NotImplementedError')
finally:
sys.path[:] = savedpath
for f in os.listdir(testdir):
os.unlink(os.path.join(testdir, f))
os.rmdir(testdir)
    def test_bad_indentation2(self):
        err = self.get_exception_format(self.syntax_error_bad_indentation2,
                                        IndentationError)
self.assertEqual(len(err), 4)
self.assertEqual(err[1].strip(), "print(2)")
self.assertIn("^", err[2])
self.assertEqual(err[1].find("p"), err[2].find("^"))
def test_base_exception(self):
# Test that exceptions derived from BaseException are formatted right
e = KeyboardInterrupt()
lst = traceback.format_exception_only(e.__class__, e)
self.assertEqual(lst, ['KeyboardInterrupt\n'])
# String exceptions are deprecated, but legal. The quirky form with
# separate "type" and "value" tends to break things, because
# not isinstance(value, type)
# and a string cannot be the first argument to issubclass.
#
# Note that sys.last_type and sys.last_value do not get set if an
# exception is caught, so we sort of cheat and just emulate them.
#
# test_string_exception1 is equivalent to
#
# >>> raise "String Exception"
#
# test_string_exception2 is equivalent to
#
# >>> raise "String Exception", "String Value"
#
def test_string_exception1(self):
str_type = "String Exception"
err = traceback.format_exception_only(str_type, None)
self.assertEqual(len(err), 1)
self.assertEqual(err[0], str_type + '\n')
def test_string_exception2(self):
str_type = "String Exception"
str_value = "String Value"
err = traceback.format_exception_only(str_type, str_value)
self.assertEqual(len(err), 1)
self.assertEqual(err[0], str_type + ': ' + str_value + '\n')
def test_format_exception_only_bad__str__(self):
class X(Exception):
def __str__(self):
1 // 0
err = traceback.format_exception_only(X, X())
self.assertEqual(len(err), 1)
str_value = '<unprintable %s object>' % X.__name__
self.assertEqual(err[0], X.__name__ + ': ' + str_value + '\n')
def test_without_exception(self):
err = traceback.format_exception_only(None, None)
self.assertEqual(err, ['None\n'])
def test_unicode(self):
err = AssertionError('\xff')
lines = traceback.format_exception_only(type(err), err)
self.assertEqual(lines, ['AssertionError: \xff\n'])
err = AssertionError(u'\xe9')
lines = traceback.format_exception_only(type(err), err)
self.assertEqual(lines, ['AssertionError: \\xe9\n'])
class TracebackFormatTests(unittest.TestCase):
@cpython_only
def test_traceback_format(self):
from _testcapi import traceback_print
try:
raise KeyError('blah')
except KeyError:
type_, value, tb = sys.exc_info()
traceback_fmt = 'Traceback (most recent call last):\n' + \
''.join(traceback.format_tb(tb))
file_ = StringIO()
traceback_print(tb, file_)
python_fmt = file_.getvalue()
else:
raise Error("unable to create test traceback string")
# Make sure that Python and the traceback module format the same thing
self.assertEqual(traceback_fmt, python_fmt)
# Make sure that the traceback is properly indented.
tb_lines = python_fmt.splitlines()
self.assertEqual(len(tb_lines), 3)
banner, location, source_line = tb_lines
self.assertTrue(banner.startswith('Traceback'))
self.assertTrue(location.startswith(' File'))
self.assertTrue(source_line.startswith(' raise'))
def test_main():
run_unittest(TracebackCases, TracebackFormatTests)
if __name__ == "__main__":
test_main()
|
xuleiboy1234/autoTitle | refs/heads/master | tensorflow/tensorflow/python/grappler/cost_analyzer_test.py | 40 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the cost analyzer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.grappler import cost_analyzer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
class PyWrapOptimizeGraphTest(test.TestCase):
def testBasic(self):
"""Make sure arguments can be passed correctly."""
a = constant_op.constant(10, name="a")
b = constant_op.constant(20, name="b")
c = math_ops.add_n([a, b], name="c")
d = math_ops.add_n([b, c], name="d")
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(d)
mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
report = cost_analyzer.GenerateCostReport(mg)
# Check the report headers
self.assertTrue(b"Total time measured in ns (serialized):" in report)
self.assertTrue(b"Total time measured in ns (actual):" in report)
self.assertTrue(b"Total time analytical in ns (upper bound):" in report)
self.assertTrue(b"Total time analytical in ns (lower bound):" in report)
self.assertTrue(b"Overall efficiency (analytical upper/actual):" in report)
self.assertTrue(b"Overall efficiency (analytical lower/actual):" in report)
# Also print the report to make it easier to debug
print("{}".format(report))
def testSmallNetwork(self):
image = array_ops.placeholder(dtypes.float32, shape=[1, 28, 28, 1])
label = array_ops.placeholder(dtypes.float32, shape=[1, 10])
w = variables.Variable(
random_ops.truncated_normal([5, 5, 1, 32], stddev=0.1))
b = variables.Variable(random_ops.truncated_normal([32], stddev=0.1))
conv = nn_ops.conv2d(image, w, strides=[1, 1, 1, 1], padding="SAME")
h_conv = nn_ops.relu(conv + b)
h_conv_flat = array_ops.reshape(h_conv, [1, -1])
w_fc = variables.Variable(
random_ops.truncated_normal([25088, 10], stddev=0.1))
b_fc = variables.Variable(random_ops.truncated_normal([10], stddev=0.1))
y_conv = nn_ops.softmax(math_ops.matmul(h_conv_flat, w_fc) + b_fc)
cross_entropy = math_ops.reduce_mean(-math_ops.reduce_sum(
label * math_ops.log(y_conv), reduction_indices=[1]))
_ = adam.AdamOptimizer(1e-4).minimize(cross_entropy)
mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
report = cost_analyzer.GenerateCostReport(mg)
# Print the report to make it easier to debug
print("{}".format(report))
self.assertTrue(b"MatMul" in report)
self.assertTrue(b"ApplyAdam" in report)
self.assertTrue(b"Conv2D" in report)
self.assertTrue(b"Conv2DBackpropInput" in report)
self.assertTrue(b"Conv2DBackpropFilter" in report)
self.assertTrue(b"Softmax" in report)
for op_type in [
b"MatMul", b"Conv2D", b"Conv2DBackpropInput", b"Conv2DBackpropFilter"
]:
matcher = re.compile(
br"\s+" + op_type + br",\s*(\d+),\s*(\d+),\s*([\d\.eE+-]+)%,\s*" +
br"([\d\.eE+-]+)%,\s*(-?\d+),\s*(\d+),", re.MULTILINE)
m = matcher.search(report)
op_count = int(m.group(1))
# upper = int(m.group(5))
lower = int(m.group(6))
if op_type is b"MatMul":
self.assertEqual(3, op_count)
else:
self.assertEqual(1, op_count)
self.assertTrue(0 <= lower)
# self.assertTrue(0 < upper)
# self.assertTrue(lower <= upper)
if __name__ == "__main__":
test.main()
|
simonwydooghe/ansible | refs/heads/devel | lib/ansible/module_utils/facts/system/local.py | 73 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import glob
import json
import os
import stat
from ansible.module_utils.six.moves import configparser
from ansible.module_utils.six.moves import StringIO
from ansible.module_utils.facts.utils import get_file_content
from ansible.module_utils.facts.collector import BaseFactCollector
class LocalFactCollector(BaseFactCollector):
name = 'local'
_fact_ids = set()
def collect(self, module=None, collected_facts=None):
local_facts = {}
local_facts['local'] = {}
if not module:
return local_facts
fact_path = module.params.get('fact_path', None)
if not fact_path or not os.path.exists(fact_path):
return local_facts
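        # Each *.fact file under fact_path (conventionally
        # /etc/ansible/facts.d) surfaces under the ansible_local fact; e.g.
        # an INI file preferences.fact with [general] foo=1 becomes
        # ansible_local.preferences.general.foo (illustrative example).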
local = {}
for fn in sorted(glob.glob(fact_path + '/*.fact')):
# where it will sit under local facts
fact_base = os.path.basename(fn).replace('.fact', '')
if stat.S_IXUSR & os.stat(fn)[stat.ST_MODE]:
# run it
# try to read it as json first
# if that fails read it with ConfigParser
# if that fails, skip it
try:
rc, out, err = module.run_command(fn)
except UnicodeError:
fact = 'error loading fact - output of running %s was not utf-8' % fn
local[fact_base] = fact
local_facts['local'] = local
module.warn(fact)
return local_facts
else:
out = get_file_content(fn, default='')
# load raw json
fact = 'loading %s' % fact_base
try:
fact = json.loads(out)
except ValueError:
# load raw ini
cp = configparser.ConfigParser()
try:
cp.readfp(StringIO(out))
except configparser.Error:
fact = "error loading fact - please check content"
module.warn(fact)
else:
fact = {}
for sect in cp.sections():
if sect not in fact:
fact[sect] = {}
for opt in cp.options(sect):
val = cp.get(sect, opt)
fact[sect][opt] = val
local[fact_base] = fact
local_facts['local'] = local
return local_facts
|
gminds/rapidnewsng | refs/heads/master | django/contrib/gis/geos/geometry.py | 103 | """
This module contains the 'base' GEOSGeometry object -- all GEOS Geometries
inherit from this object.
"""
from __future__ import unicode_literals
# Python, ctypes and types dependencies.
from ctypes import addressof, byref, c_double
from django.contrib.gis import memoryview
# super-class for mutable list behavior
from django.contrib.gis.geos.mutable_list import ListMixin
# GEOS-related dependencies.
from django.contrib.gis.geos.base import GEOSBase, gdal
from django.contrib.gis.geos.coordseq import GEOSCoordSeq
from django.contrib.gis.geos.error import GEOSException, GEOSIndexError
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOS_PREPARE
# All other functions in this module come from the ctypes
# prototypes module -- which handles all interaction with
# the underlying GEOS library.
from django.contrib.gis.geos import prototypes as capi
# These functions provide access to a thread-local instance
# of their corresponding GEOS I/O class.
from django.contrib.gis.geos.prototypes.io import wkt_r, wkt_w, wkb_r, wkb_w, ewkb_w
# For recognizing geometry input.
from django.contrib.gis.geometry.regex import hex_regex, wkt_regex, json_regex
from django.utils import six
from django.utils.encoding import force_bytes, force_text
class GEOSGeometry(GEOSBase, ListMixin):
"A class that, generally, encapsulates a GEOS geometry."
# Raise GEOSIndexError instead of plain IndexError
# (see ticket #4740 and GEOSIndexError docstring)
_IndexError = GEOSIndexError
ptr_type = GEOM_PTR
#### Python 'magic' routines ####
def __init__(self, geo_input, srid=None):
"""
The base constructor for GEOS geometry objects, and may take the
following inputs:
* strings:
- WKT
- HEXEWKB (a PostGIS-specific canonical form)
- GeoJSON (requires GDAL)
* buffer:
- WKB
The `srid` keyword is used to specify the Source Reference Identifier
(SRID) number for this Geometry. If not set, the SRID will be None.
"""
if isinstance(geo_input, bytes):
geo_input = force_text(geo_input)
if isinstance(geo_input, six.string_types):
wkt_m = wkt_regex.match(geo_input)
if wkt_m:
# Handling WKT input.
if wkt_m.group('srid'): srid = int(wkt_m.group('srid'))
g = wkt_r().read(force_bytes(wkt_m.group('wkt')))
elif hex_regex.match(geo_input):
# Handling HEXEWKB input.
g = wkb_r().read(force_bytes(geo_input))
elif gdal.HAS_GDAL and json_regex.match(geo_input):
# Handling GeoJSON input.
g = wkb_r().read(gdal.OGRGeometry(geo_input).wkb)
else:
                raise ValueError('String or unicode input unrecognized as WKT, EWKT, and HEXEWKB.')
elif isinstance(geo_input, GEOM_PTR):
            # When the input is a pointer to a geometry (GEOM_PTR).
g = geo_input
elif isinstance(geo_input, memoryview):
# When the input is a buffer (WKB).
g = wkb_r().read(geo_input)
elif isinstance(geo_input, GEOSGeometry):
g = capi.geom_clone(geo_input.ptr)
else:
# Invalid geometry type.
raise TypeError('Improper geometry input type: %s' % str(type(geo_input)))
if bool(g):
# Setting the pointer object with a valid pointer.
self.ptr = g
else:
raise GEOSException('Could not initialize GEOS Geometry with given input.')
# Post-initialization setup.
self._post_init(srid)
def _post_init(self, srid):
"Helper routine for performing post-initialization setup."
# Setting the SRID, if given.
if srid and isinstance(srid, int): self.srid = srid
# Setting the class type (e.g., Point, Polygon, etc.)
self.__class__ = GEOS_CLASSES[self.geom_typeid]
# Setting the coordinate sequence for the geometry (will be None on
# geometries that do not have coordinate sequences)
self._set_cs()
def __del__(self):
"""
Destroys this Geometry; in other words, frees the memory used by the
GEOS C++ object.
"""
if self._ptr: capi.destroy_geom(self._ptr)
def __copy__(self):
"""
Returns a clone because the copy of a GEOSGeometry may contain an
invalid pointer location if the original is garbage collected.
"""
return self.clone()
def __deepcopy__(self, memodict):
"""
The `deepcopy` routine is used by the `Node` class of django.utils.tree;
thus, the protocol routine needs to be implemented to return correct
copies (clones) of these GEOS objects, which use C pointers.
"""
return self.clone()
def __str__(self):
"WKT is used for the string representation."
return self.wkt
def __repr__(self):
"Short-hand representation because WKT may be very large."
return '<%s object at %s>' % (self.geom_type, hex(addressof(self.ptr)))
# Pickling support
def __getstate__(self):
# The pickled state is simply a tuple of the WKB (in string form)
# and the SRID.
return bytes(self.wkb), self.srid
def __setstate__(self, state):
# Instantiating from the tuple state that was pickled.
wkb, srid = state
ptr = wkb_r().read(memoryview(wkb))
if not ptr: raise GEOSException('Invalid Geometry loaded from pickled state.')
self.ptr = ptr
self._post_init(srid)
# Comparison operators
def __eq__(self, other):
"""
Equivalence testing, a Geometry may be compared with another Geometry
or a WKT representation.
"""
if isinstance(other, six.string_types):
return self.wkt == other
elif isinstance(other, GEOSGeometry):
return self.equals_exact(other)
else:
return False
def __ne__(self, other):
"The not equals operator."
return not (self == other)
### Geometry set-like operations ###
# Thanks to Sean Gillies for inspiration:
# http://lists.gispython.org/pipermail/community/2007-July/001034.html
# g = g1 | g2
def __or__(self, other):
"Returns the union of this Geometry and the other."
return self.union(other)
# g = g1 & g2
def __and__(self, other):
"Returns the intersection of this Geometry and the other."
return self.intersection(other)
# g = g1 - g2
def __sub__(self, other):
"Return the difference this Geometry and the other."
return self.difference(other)
# g = g1 ^ g2
def __xor__(self, other):
"Return the symmetric difference of this Geometry and the other."
return self.sym_difference(other)
#### Coordinate Sequence Routines ####
@property
def has_cs(self):
"Returns True if this Geometry has a coordinate sequence, False if not."
# Only these geometries are allowed to have coordinate sequences.
if isinstance(self, (Point, LineString, LinearRing)):
return True
else:
return False
def _set_cs(self):
"Sets the coordinate sequence for this Geometry."
if self.has_cs:
self._cs = GEOSCoordSeq(capi.get_cs(self.ptr), self.hasz)
else:
self._cs = None
@property
def coord_seq(self):
"Returns a clone of the coordinate sequence for this Geometry."
if self.has_cs:
return self._cs.clone()
#### Geometry Info ####
@property
def geom_type(self):
"Returns a string representing the Geometry type, e.g. 'Polygon'"
return capi.geos_type(self.ptr).decode()
@property
def geom_typeid(self):
"Returns an integer representing the Geometry type."
return capi.geos_typeid(self.ptr)
@property
def num_geom(self):
"Returns the number of geometries in the Geometry."
return capi.get_num_geoms(self.ptr)
@property
def num_coords(self):
"Returns the number of coordinates in the Geometry."
return capi.get_num_coords(self.ptr)
@property
def num_points(self):
"Returns the number points, or coordinates, in the Geometry."
return self.num_coords
@property
def dims(self):
"Returns the dimension of this Geometry (0=point, 1=line, 2=surface)."
return capi.get_dims(self.ptr)
def normalize(self):
"Converts this Geometry to normal form (or canonical form)."
return capi.geos_normalize(self.ptr)
#### Unary predicates ####
@property
def empty(self):
"""
Returns a boolean indicating whether the set of points in this Geometry
are empty.
"""
return capi.geos_isempty(self.ptr)
@property
def hasz(self):
"Returns whether the geometry has a 3D dimension."
return capi.geos_hasz(self.ptr)
@property
def ring(self):
"Returns whether or not the geometry is a ring."
return capi.geos_isring(self.ptr)
@property
def simple(self):
"Returns false if the Geometry not simple."
return capi.geos_issimple(self.ptr)
@property
def valid(self):
"This property tests the validity of this Geometry."
return capi.geos_isvalid(self.ptr)
@property
def valid_reason(self):
"""
Returns a string containing the reason for any invalidity.
"""
if not GEOS_PREPARE:
raise GEOSException('Upgrade GEOS to 3.1 to get validity reason.')
return capi.geos_isvalidreason(self.ptr).decode()
#### Binary predicates. ####
def contains(self, other):
"Returns true if other.within(this) returns true."
return capi.geos_contains(self.ptr, other.ptr)
def crosses(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
        is T*T****** (for a point and a curve, a point and an area or a line
        and an area) or 0******** (for two curves).
"""
return capi.geos_crosses(self.ptr, other.ptr)
def disjoint(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is FF*FF****.
"""
return capi.geos_disjoint(self.ptr, other.ptr)
def equals(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*F**FFF*.
"""
return capi.geos_equals(self.ptr, other.ptr)
def equals_exact(self, other, tolerance=0):
"""
Returns true if the two Geometries are exactly equal, up to a
specified tolerance.
"""
return capi.geos_equalsexact(self.ptr, other.ptr, float(tolerance))
def intersects(self, other):
"Returns true if disjoint returns false."
return capi.geos_intersects(self.ptr, other.ptr)
def overlaps(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
        is T*T***T** (for two points or two surfaces) or 1*T***T** (for two curves).
"""
return capi.geos_overlaps(self.ptr, other.ptr)
def relate_pattern(self, other, pattern):
"""
Returns true if the elements in the DE-9IM intersection matrix for the
two Geometries match the elements in pattern.
"""
if not isinstance(pattern, six.string_types) or len(pattern) > 9:
raise GEOSException('invalid intersection matrix pattern')
return capi.geos_relatepattern(self.ptr, other.ptr, force_bytes(pattern))
def touches(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is FT*******, F**T***** or F***T****.
"""
return capi.geos_touches(self.ptr, other.ptr)
def within(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*F**F***.
"""
return capi.geos_within(self.ptr, other.ptr)
#### SRID Routines ####
def get_srid(self):
"Gets the SRID for the geometry, returns None if no SRID is set."
s = capi.geos_get_srid(self.ptr)
if s == 0: return None
else: return s
def set_srid(self, srid):
"Sets the SRID for the geometry."
capi.geos_set_srid(self.ptr, srid)
srid = property(get_srid, set_srid)
#### Output Routines ####
@property
def ewkt(self):
"""
Returns the EWKT (WKT + SRID) of the Geometry. Note that Z values
are *not* included in this representation because GEOS does not yet
support serializing them.
"""
if self.get_srid(): return 'SRID=%s;%s' % (self.srid, self.wkt)
else: return self.wkt
@property
def wkt(self):
"Returns the WKT (Well-Known Text) representation of this Geometry."
return wkt_w().write(self).decode()
@property
def hex(self):
"""
Returns the WKB of this Geometry in hexadecimal form. Please note
that the SRID is not included in this representation because it is not
a part of the OGC specification (use the `hexewkb` property instead).
"""
# A possible faster, all-python, implementation:
# str(self.wkb).encode('hex')
return wkb_w(self.hasz and 3 or 2).write_hex(self)
@property
def hexewkb(self):
"""
Returns the EWKB of this Geometry in hexadecimal form. This is an
extension of the WKB specification that includes SRID value that are
a part of this geometry.
"""
if self.hasz and not GEOS_PREPARE:
# See: http://trac.osgeo.org/geos/ticket/216
raise GEOSException('Upgrade GEOS to 3.1 to get valid 3D HEXEWKB.')
return ewkb_w(self.hasz and 3 or 2).write_hex(self)
@property
def json(self):
"""
Returns GeoJSON representation of this Geometry if GDAL is installed.
"""
if gdal.HAS_GDAL:
return self.ogr.json
else:
raise GEOSException('GeoJSON output only supported when GDAL is installed.')
geojson = json
@property
def wkb(self):
"""
Returns the WKB (Well-Known Binary) representation of this Geometry
as a Python buffer. SRID and Z values are not included, use the
`ewkb` property instead.
"""
return wkb_w(self.hasz and 3 or 2).write(self)
@property
def ewkb(self):
"""
Return the EWKB representation of this Geometry as a Python buffer.
This is an extension of the WKB specification that includes any SRID
value that are a part of this geometry.
"""
if self.hasz and not GEOS_PREPARE:
# See: http://trac.osgeo.org/geos/ticket/216
raise GEOSException('Upgrade GEOS to 3.1 to get valid 3D EWKB.')
return ewkb_w(self.hasz and 3 or 2).write(self)
@property
def kml(self):
"Returns the KML representation of this Geometry."
gtype = self.geom_type
return '<%s>%s</%s>' % (gtype, self.coord_seq.kml, gtype)
@property
def prepared(self):
"""
Returns a PreparedGeometry corresponding to this geometry -- it is
optimized for the contains, intersects, and covers operations.
"""
if GEOS_PREPARE:
return PreparedGeometry(self)
else:
raise GEOSException('GEOS 3.1+ required for prepared geometry support.')
#### GDAL-specific output routines ####
@property
def ogr(self):
"Returns the OGR Geometry for this Geometry."
if gdal.HAS_GDAL:
if self.srid:
return gdal.OGRGeometry(self.wkb, self.srid)
else:
return gdal.OGRGeometry(self.wkb)
else:
raise GEOSException('GDAL required to convert to an OGRGeometry.')
@property
def srs(self):
"Returns the OSR SpatialReference for SRID of this Geometry."
if gdal.HAS_GDAL:
if self.srid:
return gdal.SpatialReference(self.srid)
else:
return None
else:
raise GEOSException('GDAL required to return a SpatialReference object.')
@property
def crs(self):
"Alias for `srs` property."
return self.srs
def transform(self, ct, clone=False):
"""
Requires GDAL. Transforms the geometry according to the given
transformation object, which may be an integer SRID, and WKT or
PROJ.4 string. By default, the geometry is transformed in-place and
nothing is returned. However if the `clone` keyword is set, then this
geometry will not be modified and a transformed clone will be returned
instead.
"""
srid = self.srid
if ct == srid:
# short-circuit where source & dest SRIDs match
if clone:
return self.clone()
else:
return
if (srid is None) or (srid < 0):
raise GEOSException("Calling transform() with no SRID set is not supported")
if not gdal.HAS_GDAL:
raise GEOSException("GDAL library is not available to transform() geometry.")
# Creating an OGR Geometry, which is then transformed.
g = self.ogr
g.transform(ct)
# Getting a new GEOS pointer
ptr = wkb_r().read(g.wkb)
if clone:
# User wants a cloned transformed geometry returned.
return GEOSGeometry(ptr, srid=g.srid)
if ptr:
# Reassigning pointer, and performing post-initialization setup
# again due to the reassignment.
capi.destroy_geom(self.ptr)
self.ptr = ptr
self._post_init(g.srid)
else:
raise GEOSException('Transformed WKB was invalid.')
#### Topology Routines ####
def _topology(self, gptr):
"Helper routine to return Geometry from the given pointer."
return GEOSGeometry(gptr, srid=self.srid)
@property
def boundary(self):
"Returns the boundary as a newly allocated Geometry object."
return self._topology(capi.geos_boundary(self.ptr))
def buffer(self, width, quadsegs=8):
"""
Returns a geometry that represents all points whose distance from this
Geometry is less than or equal to distance. Calculations are in the
Spatial Reference System of this Geometry. The optional third parameter sets
the number of segment used to approximate a quarter circle (defaults to 8).
(Text from PostGIS documentation at ch. 6.1.3)
"""
return self._topology(capi.geos_buffer(self.ptr, width, quadsegs))
@property
def centroid(self):
"""
The centroid is equal to the centroid of the set of component Geometries
of highest dimension (since the lower-dimension geometries contribute zero
"weight" to the centroid).
"""
return self._topology(capi.geos_centroid(self.ptr))
@property
def convex_hull(self):
"""
Returns the smallest convex Polygon that contains all the points
in the Geometry.
"""
return self._topology(capi.geos_convexhull(self.ptr))
def difference(self, other):
"""
Returns a Geometry representing the points making up this Geometry
that do not make up other.
"""
return self._topology(capi.geos_difference(self.ptr, other.ptr))
@property
def envelope(self):
"Return the envelope for this geometry (a polygon)."
return self._topology(capi.geos_envelope(self.ptr))
def interpolate(self, distance):
if not isinstance(self, (LineString, MultiLineString)):
raise TypeError('interpolate only works on LineString and MultiLineString geometries')
if not hasattr(capi, 'geos_interpolate'):
raise NotImplementedError('interpolate requires GEOS 3.2+')
return self._topology(capi.geos_interpolate(self.ptr, distance))
def interpolate_normalized(self, distance):
if not isinstance(self, (LineString, MultiLineString)):
            raise TypeError('interpolate_normalized only works on LineString and MultiLineString geometries')
if not hasattr(capi, 'geos_interpolate_normalized'):
raise NotImplementedError('interpolate_normalized requires GEOS 3.2+')
return self._topology(capi.geos_interpolate_normalized(self.ptr, distance))
def intersection(self, other):
"Returns a Geometry representing the points shared by this Geometry and other."
return self._topology(capi.geos_intersection(self.ptr, other.ptr))
@property
def point_on_surface(self):
"Computes an interior point of this Geometry."
return self._topology(capi.geos_pointonsurface(self.ptr))
def project(self, point):
if not isinstance(point, Point):
            raise TypeError('project argument must be a Point')
if not isinstance(self, (LineString, MultiLineString)):
            raise TypeError('project only works on LineString and MultiLineString geometries')
if not hasattr(capi, 'geos_project'):
raise NotImplementedError('geos_project requires GEOS 3.2+')
return capi.geos_project(self.ptr, point.ptr)
def project_normalized(self, point):
if not isinstance(point, Point):
            raise TypeError('project_normalized argument must be a Point')
if not isinstance(self, (LineString, MultiLineString)):
            raise TypeError('project_normalized only works on LineString and MultiLineString geometries')
if not hasattr(capi, 'geos_project_normalized'):
raise NotImplementedError('project_normalized requires GEOS 3.2+')
return capi.geos_project_normalized(self.ptr, point.ptr)
def relate(self, other):
"Returns the DE-9IM intersection matrix for this Geometry and the other."
return capi.geos_relate(self.ptr, other.ptr).decode()
def simplify(self, tolerance=0.0, preserve_topology=False):
"""
Returns the Geometry, simplified using the Douglas-Peucker algorithm
to the specified tolerance (higher tolerance => less points). If no
tolerance provided, defaults to 0.
        By default, this function does not preserve topology - e.g. polygons can
        be split, collapse to lines or disappear. Holes can be created or
        disappear, and lines can cross. By specifying preserve_topology=True,
the result will have the same dimension and number of components as the
input. This is significantly slower.
"""
if preserve_topology:
return self._topology(capi.geos_preservesimplify(self.ptr, tolerance))
else:
return self._topology(capi.geos_simplify(self.ptr, tolerance))
def sym_difference(self, other):
"""
Returns a set combining the points in this Geometry not in other,
and the points in other not in this Geometry.
"""
return self._topology(capi.geos_symdifference(self.ptr, other.ptr))
def union(self, other):
"Returns a Geometry representing all the points in this Geometry and other."
return self._topology(capi.geos_union(self.ptr, other.ptr))
#### Other Routines ####
@property
def area(self):
"Returns the area of the Geometry."
return capi.geos_area(self.ptr, byref(c_double()))
def distance(self, other):
"""
Returns the distance between the closest points on this Geometry
and the other. Units will be in those of the coordinate system of
the Geometry.
"""
if not isinstance(other, GEOSGeometry):
raise TypeError('distance() works only on other GEOS Geometries.')
return capi.geos_distance(self.ptr, other.ptr, byref(c_double()))
@property
def extent(self):
"""
Returns the extent of this geometry as a 4-tuple, consisting of
(xmin, ymin, xmax, ymax).
"""
env = self.envelope
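        # A Point's envelope degenerates to the point itself, so both corners
        # coincide.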
if isinstance(env, Point):
xmin, ymin = env.tuple
xmax, ymax = xmin, ymin
else:
xmin, ymin = env[0][0]
xmax, ymax = env[0][2]
return (xmin, ymin, xmax, ymax)
@property
def length(self):
"""
Returns the length of this Geometry (e.g., 0 for point, or the
        circumference of a Polygon).
"""
return capi.geos_length(self.ptr, byref(c_double()))
def clone(self):
"Clones this Geometry."
return GEOSGeometry(capi.geom_clone(self.ptr), srid=self.srid)
# Class mapping dictionary. Has to be at the end to avoid import
# conflicts with GEOSGeometry.
from django.contrib.gis.geos.linestring import LineString, LinearRing
from django.contrib.gis.geos.point import Point
from django.contrib.gis.geos.polygon import Polygon
from django.contrib.gis.geos.collections import GeometryCollection, MultiPoint, MultiLineString, MultiPolygon
GEOS_CLASSES = {0 : Point,
1 : LineString,
2 : LinearRing,
3 : Polygon,
4 : MultiPoint,
5 : MultiLineString,
6 : MultiPolygon,
7 : GeometryCollection,
}
# If supported, import the PreparedGeometry class.
if GEOS_PREPARE:
from django.contrib.gis.geos.prepared import PreparedGeometry
|
quokkaproject/quokka-cart | refs/heads/master | models.py | 1 | # coding: utf-8
import datetime
import logging
import sys
from werkzeug.utils import import_string
from flask import session, current_app
from quokka.utils.translation import _l
from quokka.utils import get_current_user, lazy_str_setting
from quokka.core.templates import render_template
from quokka.core.db import db
from quokka.core.models.signature import (
Publishable, Ordered, Dated
)
from quokka.core.models.content import Content
from quokka.modules.media.models import Image
if sys.version_info.major == 3:
from functools import reduce
logger = logging.getLogger()
class BaseProductReference(object):
def get_title(self):
return getattr(self, 'title', None)
def get_description(self):
return getattr(self, 'description', None)
def get_unity_value(self):
return getattr(self, 'unity_value', None)
def get_weight(self):
return getattr(self, 'weight', None)
def get_dimensions(self):
return getattr(self, 'dimensions', None)
def get_summary(self):
summary = getattr(self, 'summary', None)
if not summary:
try:
return self.get_description()[:255]
except:
pass
return summary
def get_extra_value(self):
return getattr(self, 'extra_value', None)
def get_uid(self):
return str(self.id)
def set_status(self, *args, **kwargs):
pass
def remove_item(self, *args, **kwargs):
pass
class BaseProduct(BaseProductReference, Content):
description = db.StringField(required=True)
unity_value = db.FloatField()
weight = db.FloatField()
dimensions = db.StringField()
extra_value = db.FloatField()
meta = {
'allow_inheritance': True
}
class Item(Ordered, Dated, db.EmbeddedDocument):
product = db.ReferenceField(Content)
reference = db.GenericReferenceField() # customized product
"""
    Must implement all the BaseProduct methods; it is optional.
    If None, "product" will be considered.
"""
uid = db.StringField()
title = db.StringField(required=True, max_length=255)
description = db.StringField(required=True)
link = db.StringField()
quantity = db.FloatField(default=1)
unity_value = db.FloatField(required=True)
total_value = db.FloatField()
weight = db.FloatField()
dimensions = db.StringField()
extra_value = db.FloatField()
allowed_to_set = db.ListField(db.StringField(), default=['quantity'])
pipeline = db.ListField(db.StringField(), default=[])
def set_status(self, status, *args, **kwargs):
kwargs['item'] = self
if self.reference and hasattr(self.reference, 'set_status'):
self.reference.set_status(status, *args, **kwargs)
if self.product and hasattr(self.product, 'set_status'):
self.product.set_status(status, *args, **kwargs)
def get_main_image_url(self, thumb=False, default=None):
try:
return self.product.get_main_image_url(thumb, default)
except:
return None
@classmethod
def normalize(cls, kwargs):
new = {}
for k, v in kwargs.items():
field = cls._fields.get(k)
if not field:
continue
new[k] = field.to_python(v)
return new
def __unicode__(self):
return u"{i.title} - {i.total_value}".format(i=self)
def get_uid(self):
try:
return self.product.get_uid()
except:
return self.uid
@property
def unity_plus_extra(self):
return float(self.unity_value or 0) + float(self.extra_value or 0)
@property
def total(self):
self.clean()
self.total_value = self.unity_plus_extra * float(self.quantity or 1)
return self.total_value
def clean(self):
mapping = [
('title', 'get_title'),
('description', 'get_description'),
('link', 'get_absolute_url'),
('unity_value', 'get_unity_value'),
('weight', 'get_weight'),
('dimensions', 'get_dimensions'),
('extra_value', 'get_extra_value'),
('uid', 'get_uid'),
]
references = [self.reference, self.product]
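        # The customized reference wins: once an attribute is filled from
        # self.reference, the later self.product pass leaves it untouched.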
for ref in references:
if not ref:
continue
for attr, method in mapping:
current = getattr(self, attr, None)
if current is not None:
continue
setattr(self, attr, getattr(ref, method, lambda: None)())
class Payment(db.EmbeddedDocument):
uid = db.StringField()
payment_system = db.StringField()
method = db.StringField()
value = db.FloatField()
extra_value = db.FloatField()
date = db.DateTimeField()
confirmed_at = db.DateTimeField()
status = db.StringField()
class Processor(Publishable, db.DynamicDocument):
identifier = db.StringField(max_length=100, unique=True)
module = db.StringField(max_length=255)
requires = db.ListField(db.StringField(max_length=255))
description = db.StringField()
title = db.StringField()
image = db.ReferenceField(Image, reverse_delete_rule=db.NULLIFY)
link = db.StringField(max_length=255)
config = db.DictField(default=lambda: {})
pipeline = db.ListField(db.StringField(max_length=255), default=[])
def import_processor(self):
return import_string(self.module)
def get_instance(self, *args, **kwargs):
if 'config' not in kwargs:
kwargs['config'] = self.config
kwargs['_record'] = self
return self.import_processor()(*args, **kwargs)
def clean(self, *args, **kwargs):
for item in (self.requires or []):
import_string(item)
super(Processor, self).clean(*args, **kwargs)
def __unicode__(self):
return self.identifier
@classmethod
def get_instance_by_identifier(cls, identifier, cart=None):
processor = cls.objects.get(identifier=identifier)
return processor.get_instance(cart=cart)
@classmethod
def get_default_processor(cls):
default = lazy_str_setting(
'CART_DEFAULT_PROCESSOR',
default={
'module': 'quokka.modules.cart.processors.Dummy',
'identifier': 'dummy',
'published': True,
'title': "Test"
}
)
try:
return cls.objects.get(identifier=default['identifier'])
except:
return cls.objects.create(**default)
def save(self, *args, **kwargs):
self.import_processor()
super(Processor, self).save(*args, **kwargs)
class Cart(Publishable, db.DynamicDocument):
STATUS = (
("pending", _l("Pending")), # not checked out
("checked_out", _l("Checked out")), # not confirmed (payment)
("analysing", _l("Analysing")), # Analysing payment
("confirmed", _l("Confirmed")), # Payment confirmed
("completed", _l("Completed")), # Payment completed (money released)
("refunding", _l("Refunding")), # Buyer asks refund
("refunded", _l("Refunded")), # Money refunded to buyer
("cancelled", _l("Cancelled")), # Cancelled without processing
("abandoned", _l("Abandoned")), # Long time no update
)
reference = db.GenericReferenceField()
"""reference must implement set_status(**kwargs) method
arguments: status(str), value(float), date, uid(str), msg(str)
and extra(dict).
Also reference must implement get_uid() which will return
the unique identifier for this transaction"""
belongs_to = db.ReferenceField('User',
# default=get_current_user,
reverse_delete_rule=db.NULLIFY)
items = db.ListField(db.EmbeddedDocumentField(Item))
payment = db.ListField(db.EmbeddedDocumentField(Payment))
status = db.StringField(choices=STATUS, default='pending')
total = db.FloatField(default=0)
extra_costs = db.DictField(default=lambda: {})
sender_data = db.DictField(default=lambda: {})
shipping_data = db.DictField(default=lambda: {})
shipping_cost = db.FloatField(default=0)
tax = db.FloatField(default=0)
processor = db.ReferenceField(Processor,
default=Processor.get_default_processor,
reverse_delete_rule=db.NULLIFY)
reference_code = db.StringField() # Reference code for filtering
checkout_code = db.StringField() # The UID for transaction checkout
transaction_code = db.StringField() # The UID for transaction
requires_login = db.BooleanField(default=True)
continue_shopping_url = db.StringField(
default=lambda: current_app.config.get(
'CART_CONTINUE_SHOPPING_URL', '/'
)
)
pipeline = db.ListField(db.StringField(), default=[])
log = db.ListField(db.StringField(), default=[])
config = db.DictField(default=lambda: {})
search_helper = db.StringField()
meta = {
'ordering': ['-created_at']
}
def send_response(self, response, identifier):
if self.reference and hasattr(self.reference, 'get_response'):
self.reference.get_response(response, identifier)
for item in self.items:
if hasattr(item, 'get_response'):
item.get_response(response, identifier)
def set_tax(self, tax, save=False):
"""
set tax and send to references
"""
try:
tax = float(tax)
self.tax = tax
self.set_reference_tax(tax)
except Exception as e:
self.addlog("impossible to set tax: %s" % str(e))
def set_status(self, status, save=False):
"""
THis method will be called by the processor
which will pass a valid status as in STATUS
so, this method will dispatch the STATUS to
all the items and also the 'reference' if set
"""
if self.status != status:
self.status = status
self.set_reference_statuses(status)
if save:
self.save()
def set_reference_statuses(self, status):
if self.reference and hasattr(self.reference, 'set_status'):
self.reference.set_status(status, cart=self)
for item in self.items:
item.set_status(status, cart=self)
def set_reference_tax(self, tax):
if self.reference and hasattr(self.reference, 'set_tax'):
self.reference.set_tax(tax)
for item in self.items:
if hasattr(item, 'set_tax'):
item.set_tax(tax)
def addlog(self, msg, save=True):
try:
self.log.append(u"{0},{1}".format(datetime.datetime.now(), msg))
logger.debug(msg)
save and self.save()
except UnicodeDecodeError as e:
logger.info(msg)
logger.error(str(e))
@property
def uid(self):
return self.get_uid()
def get_uid(self):
try:
return self.reference.get_uid() or str(self.id)
except Exception:
self.addlog("Using self.id as reference", save=False)
return str(self.id)
def __unicode__(self):
return u"{o.uid} - {o.processor.identifier}".format(o=self)
def get_extra_costs(self):
if self.extra_costs:
return sum(self.extra_costs.values())
@classmethod
def get_cart(cls, no_dereference=False, save=True):
"""
get or create a new cart related to the session
if there is a current logged in user it will be set
else it will be set during the checkout.
"""
session.permanent = current_app.config.get(
"CART_PERMANENT_SESSION", True)
try:
cart = cls.objects(id=session.get('cart_id'), status='pending')
if not cart:
                raise cls.DoesNotExist('A pending cart was not found')
if no_dereference:
cart = cart.no_dereference()
cart = cart.first()
save and cart.save()
except (cls.DoesNotExist, db.ValidationError):
cart = cls(status="pending")
cart.save()
session['cart_id'] = str(cart.id)
session.pop('cart_pipeline_index', None)
session.pop('cart_pipeline_args', None)
return cart
def assign(self):
self.belongs_to = self.belongs_to or get_current_user()
def save(self, *args, **kwargs):
self.total = sum([item.total for item in self.items])
self.assign()
self.reference_code = self.get_uid()
self.search_helper = self.get_search_helper()
if not self.id:
self.published = True
super(Cart, self).save(*args, **kwargs)
self.set_reference_statuses(self.status)
def get_search_helper(self):
if not self.belongs_to:
return ""
user = self.belongs_to
return " ".join([
user.name or "",
user.email or ""
])
def get_item(self, uid):
# MongoEngine/mongoengine#503
return self.items.get(uid=uid)
def set_item(self, **kwargs):
if 'product' in kwargs:
if not isinstance(kwargs['product'], Content):
try:
kwargs['product'] = Content.objects.get(
id=kwargs['product'])
except Content.DoesNotExist:
kwargs['product'] = None
uid = kwargs.get(
'uid',
kwargs['product'].get_uid() if kwargs.get('product') else None
)
if not uid:
self.addlog("Cannot add item without an uid %s" % kwargs)
return
item = self.get_item(uid)
kwargs = Item.normalize(kwargs)
if not item:
# items should only be added if there is a product (for safety)
if not kwargs.get('product'):
self.addlog("there is no product to add item")
return
allowed = ['product', 'quantity']
item = self.items.create(
**{k: v for k, v in kwargs.items() if k in allowed}
)
self.addlog("New item created %s" % item, save=False)
else:
# update only allowed attributes
item = self.items.update(
{k: v for k, v in kwargs.items() if k in item.allowed_to_set},
uid=item.uid
)
self.addlog("Item updated %s" % item, save=False)
if int(kwargs.get('quantity', "1")) == 0:
self.addlog("quantity is 0, removed %s" % kwargs, save=False)
self.remove_item(**kwargs)
self.save()
self.reload()
return item
def remove_item(self, **kwargs):
deleted = self.items.delete(**kwargs)
if self.reference and hasattr(self.reference, 'remove_item'):
self.reference.remove_item(**kwargs)
return deleted
def checkout(self, processor=None, *args, **kwargs):
self.set_processor(processor)
processor_instance = self.processor.get_instance(self, *args, **kwargs)
if processor_instance.validate():
response = processor_instance.process()
self.status = 'checked_out'
self.save()
session.pop('cart_id', None)
return response
else:
self.addlog("Cart did not validate")
raise Exception("Cart did not validate") # todo: specialize this
def get_items_pipeline(self):
if not self.items:
return []
return reduce(
lambda x, y: x + y, [item.pipeline for item in self.items]
)
def build_pipeline(self):
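        # Assembly order: the global start pipeline, then the CART_PIPELINE
        # app config, then per-item pipelines, then cart-level ones, then the
        # processor's own pipeline.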
items = ['quokka.modules.cart.pipelines:StartPipeline']
items.extend(current_app.config.get('CART_PIPELINE', []))
items.extend(self.get_items_pipeline())
items.extend(self.pipeline)
items.extend(self.processor and self.processor.pipeline or [])
return items
def process_pipeline(self):
if not self.items:
return render_template('cart/empty_cart.html',
url=self.continue_shopping_url)
pipelines = self.build_pipeline()
index = session.get('cart_pipeline_index', 0)
pipeline = import_string(pipelines[index])
return pipeline(self, pipelines, index)._preprocess()
def set_processor(self, processor=None):
if not self.processor:
self.processor = Processor.get_default_processor()
self.save()
if not processor:
return
if isinstance(processor, Processor):
self.processor = processor
self.save()
return
try:
self.processor = Processor.objects.get(id=processor)
except:
self.processor = Processor.objects.get(identifier=processor)
self.save()
def get_available_processors(self):
return Processor.objects(published=True)
|
EngExp/Testovii1 | refs/heads/master | mint.py | 2 | # -*- coding: utf-8 -*-
'''
mint - small, fast and simple template engine.
'''
import os
import re
import ast
import mmap
import time
import fnmatch
import logging
import weakref
import itertools
import htmlentitydefs
from ast import Load, Store, Param
from StringIO import StringIO
from functools import partial
from collections import deque
from xml.etree.ElementTree import TreeBuilder as _TreeBuilder, Element
############# LEXER
class BaseToken(object):
pass
class TokenWrapper(BaseToken):
'''
Objects of this class reprezents tokens
'''
def __init__(self, token, value=None, regex_str=None):
assert value or regex_str, 'Provide token text value or regex'
self.token = intern(token)
if regex_str is not None:
self.regex = re.compile(regex_str, re.U)
else:
self.regex = re.compile(r'%s' % re.escape(value), re.U)
def __str__(self):
return self.token
__repr__ = __str__
class TextToken(BaseToken):
'Special token for text'
def __str__(self):
return 'text'
__repr__ = __str__
class TokenIndent(BaseToken):
def __str__(self):
return 'indent'
__repr__ = __str__
class TokenUnindent(BaseToken):
def __str__(self):
return 'unindent'
__repr__ = __str__
class EOF(BaseToken):
'Special token'
def __str__(self):
return 'eof'
__repr__ = __str__
# constants
TAG_CHAR = '@'
STMT_CHAR = '#'
COMMENT_CHAR = '--'
# Tokens
TOKEN_TAG_START = TokenWrapper('tag_start', value=TAG_CHAR)
TOKEN_TAG_ATTR_SET = TokenWrapper('tag_attr_set', value='%s.' % TAG_CHAR)
TOKEN_TAG_ATTR_APPEND = TokenWrapper('tag_attr_append', value='%s+' % TAG_CHAR)
TOKEN_BASE_TEMPLATE = TokenWrapper('base_template', value='%sbase: ' % STMT_CHAR)
TOKEN_STATEMENT_IF = TokenWrapper('statement_if', value='%sif ' % STMT_CHAR)
TOKEN_STATEMENT_ELIF = TokenWrapper('statement_elif', regex_str=r'(%selif |%selse if )' % (
re.escape(STMT_CHAR), re.escape(STMT_CHAR)))
TOKEN_STATEMENT_ELSE = TokenWrapper('statement_else', value='%selse:' % STMT_CHAR)
TOKEN_STATEMENT_FOR = TokenWrapper('statement_for', value='%sfor ' % STMT_CHAR)
TOKEN_SLOT_DEF = TokenWrapper('slot_def', regex_str=r'(%sdef |%sfunction )' % (re.escape(STMT_CHAR),
re.escape(STMT_CHAR)))
TOKEN_STMT_CHAR = TokenWrapper('hash', value=STMT_CHAR)
TOKEN_COMMENT = TokenWrapper('comment', value=COMMENT_CHAR)
TOKEN_BACKSLASH = TokenWrapper('backslash', value='\\')
TOKEN_DOT = TokenWrapper('dot', value='.')
TOKEN_PLUS = TokenWrapper('plus', value='+')
TOKEN_MINUS = TokenWrapper('minus', value='-')
TOKEN_COLON = TokenWrapper('colon', value=':')
TOKEN_PARENTHESES_OPEN = TokenWrapper('parentheses_open', value='(')
TOKEN_PARENTHESES_CLOSE = TokenWrapper('parentheses_close', value=')')
TOKEN_EXPRESSION_START = TokenWrapper('expression_start', value='{{')
TOKEN_EXPRESSION_END = TokenWrapper('expression_end', value='}}')
TOKEN_WHITESPACE = TokenWrapper('whitespace', regex_str=r'\s+')
TOKEN_NEWLINE = TokenWrapper('newline', regex_str=r'(\r\n|\r|\n)')
TOKEN_EOF = EOF()
TOKEN_TEXT = TextToken()
TOKEN_INDENT = TokenIndent()
TOKEN_UNINDENT = TokenUnindent()
tokens = (
TOKEN_TAG_ATTR_SET,
TOKEN_TAG_ATTR_APPEND,
TOKEN_TAG_START,
TOKEN_BASE_TEMPLATE,
TOKEN_STATEMENT_IF,
TOKEN_STATEMENT_ELIF,
TOKEN_STATEMENT_ELSE,
TOKEN_STATEMENT_FOR,
TOKEN_SLOT_DEF,
TOKEN_STMT_CHAR,
TOKEN_COMMENT,
TOKEN_BACKSLASH,
TOKEN_DOT,
TOKEN_PLUS,
TOKEN_MINUS,
TOKEN_PARENTHESES_OPEN,
TOKEN_PARENTHESES_CLOSE,
TOKEN_EXPRESSION_START,
TOKEN_EXPRESSION_END,
TOKEN_COLON,
TOKEN_WHITESPACE,
TOKEN_NEWLINE,
)
all_tokens = list(tokens) + [TOKEN_EOF, TOKEN_TEXT, TOKEN_INDENT, TOKEN_UNINDENT]
all_except = lambda *t: filter(lambda x: x not in t, all_tokens)
re_comment = re.compile(r'\s*//')
def base_tokenizer(fp):
'Tokenizer. Generates a token stream from the input text'
if isinstance(fp, StringIO):
template_file = fp
size = template_file.len
else:
#empty file check
if os.fstat(fp.fileno()).st_size == 0:
yield TOKEN_EOF, 'EOF', 0, 0
return
template_file = mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ)
size = template_file.size()
lineno = 0
while 1:
lineno += 1
pos = 1
# end of file
if template_file.tell() == size:
yield TOKEN_EOF, 'EOF', lineno, 0
break
# now we tokenize line by line
line = template_file.readline().decode('utf-8')
line = line.replace('\r\n', '')
line = line.replace('\n', '')
# ignoring non XML comments
if re_comment.match(line):
continue
last_text = deque()
while line:
line_len = len(line)
for token in tokens:
m = token.regex.match(line)
if m:
if last_text:
yield TOKEN_TEXT, ''.join(last_text), lineno, pos
pos += len(last_text)
last_text.clear()
offset, value = m.end(), m.group()
line = line[offset:]
yield token, value, lineno, pos
pos += offset
break
# no token matched at this position, so the next char is plain text
if line_len == len(line):
last_text.append(line[0])
line = line[1:]
if last_text:
yield TOKEN_TEXT, ''.join(last_text), lineno, pos
pos += len(last_text)
last_text.clear()
yield TOKEN_NEWLINE, '\n', lineno, pos
# all work is done
template_file.close()
def indent_tokenizer(tokens_stream):
current_indent = 0
indent = 0
for tok in tokens_stream:
token, value, lineno, pos = tok
# backslashed line transfer
if token is TOKEN_BACKSLASH:
next_tok = tokens_stream.next()
next_token, next_value, next_lineno, next_pos = next_tok
if next_token is TOKEN_NEWLINE:
next_tok = tokens_stream.next()
while next_tok[0] in (TOKEN_WHITESPACE, TOKEN_NEWLINE):
next_tok = tokens_stream.next()
# first not newline or whitespace token
yield next_tok
continue
yield tok
tok = next_tok
token, value, lineno, pos = next_tok
# indenting and unindenting
if token is TOKEN_NEWLINE or (token is TOKEN_WHITESPACE and (lineno, pos) == (1, 1)):
if token is TOKEN_NEWLINE:
yield tok
next_tok = tokens_stream.next()
while next_tok[0] is TOKEN_NEWLINE:
next_tok = tokens_stream.next()
else:
next_tok = tok
next_token, next_value, next_lineno, next_pos = next_tok
if next_token is TOKEN_WHITESPACE:
ws_count = len(next_value)
if indent == 0:
indent = ws_count
if ws_count >= indent:
times = ws_count/indent
rest = ws_count % indent
range_ = times - current_indent
if range_ > 0:
# indenting
tmp_curr_indent = current_indent
for i in range(range_):
yield TOKEN_INDENT, ' '*indent, next_lineno, (i+tmp_curr_indent)*indent+1
current_indent += 1
elif range_ < 0:
# unindenting
for i in range(abs(range_)):
yield TOKEN_UNINDENT, ' '*indent, next_lineno, next_pos
current_indent -= 1
if rest:
yield TOKEN_WHITESPACE, ' '*rest, next_lineno, times*indent+1
continue
# next token is the whitespace lighter than indent or any other
# token, so unindenting to zero level
for i in range(current_indent):
yield TOKEN_UNINDENT, ' '*indent, lineno, pos
current_indent = 0
yield next_tok
# we do not yield newline tokens here
continue
yield tok
def tokenizer(fileobj):
return indent_tokenizer(base_tokenizer(fileobj))
############# LEXER END
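# Illustrative sketch (not part of the original module): dump the token
# stream of a small in-memory template; the template text is a made-up sample.
def _demo_tokenizer():
    source = StringIO(u'@p Hello {{ name }}\n')
    # each item is a (token, value, lineno, pos) tuple
    for token, value, lineno, pos in tokenizer(source):
        print token, repr(value), lineno, pos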
UNSAFE_CHARS = '&<>"'
CHARS_ENTITIES = dict([(v, '&%s;' % k) for k, v in htmlentitydefs.entitydefs.items()])
UNSAFE_CHARS_ENTITIES = [(k, CHARS_ENTITIES[k]) for k in UNSAFE_CHARS]
UNSAFE_CHARS_ENTITIES_IN_ATTR = [(k, CHARS_ENTITIES[k]) for k in '<>"']
UNSAFE_CHARS_ENTITIES.append(("'", '&#39;'))
UNSAFE_CHARS_ENTITIES_IN_ATTR.append(("'", '&#39;'))
UNSAFE_CHARS_ENTITIES_REVERSED = [(v,k) for k,v in UNSAFE_CHARS_ENTITIES]
def escape(obj, ctx='tag'):
if hasattr(obj, '__html__'):
safe_markup = obj.__html__()
if ctx == 'tag':
return safe_markup
else:
for k, v in UNSAFE_CHARS_ENTITIES_IN_ATTR:
safe_markup = safe_markup.replace(k, v)
return safe_markup
obj = unicode(obj)
for k, v in UNSAFE_CHARS_ENTITIES:
obj = obj.replace(k, v)
return obj
def unescape(obj):
text = unicode(obj)
for k, v in UNSAFE_CHARS_ENTITIES_REVERSED:
text = text.replace(k, v)
return text
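# Quick behavior sketch (illustrative, not an exhaustive spec):
#     escape(u'<b>&"</b>')  ->  u'&lt;b&gt;&amp;&quot;&lt;/b&gt;'
#     unescape(escape(u"<'>")) round-trips back to u"<'>"
# Objects exposing __html__ are passed through unescaped in 'tag' context.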
class TemplateError(Exception): pass
class WrongToken(Exception): pass
# variable names (we do not want to override user variables and vice versa)
TREE_BUILDER = '__MINT_TREE_BUILDER__'
TREE_FACTORY = '__MINT_TREE_FACTORY__'
MAIN_FUNCTION = '__MINT_MAIN__'
TAG_START = '__MINT_TAG_START__'
TAG_END = '__MINT_TAG_END__'
DATA = '__MINT_DATA__'
ESCAPE_HELLPER = '__MINT_ESCAPE__'
CURRENT_NODE = '__MINT_CURRENT_NODE__'
##### MINT NODES
class Node(ast.AST):
def __repr__(self):
return '%s' % self.__class__.__name__
class MintTemplate(Node):
def __init__(self, body=None):
self.body = body or []
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.body==other.body
return False
def __repr__(self):
return '%s(body=%r)' % (self.__class__.__name__, self.body)
class BaseTemplate(Node):
def __init__(self, name):
self.name = name
def to_ast(self):
return self
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.name == other.name
return False
class TextNode(Node):
def __init__(self, text, lineno=None, col_offset=None):
self.text = text
self.lineno = lineno
self.col_offset = col_offset
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.text==other.text and self.lineno==other.lineno \
and self.col_offset==other.col_offset
return False
def __repr__(self):
return '%s(%r, lineno=%d, col_offset=%d)' % (self.__class__.__name__, self.text,
self.lineno, self.col_offset)
class ExpressionNode(Node):
def __init__(self, text, lineno=None, col_offset=None):
self.text = text.strip()
self.lineno = lineno
self.col_offset = col_offset
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.text==other.text and self.lineno==other.lineno \
and self.col_offset==other.col_offset
return False
def __repr__(self):
return '%s(%r, lineno=%d, col_offset=%d)' % (self.__class__.__name__, self.text,
self.lineno, self.col_offset)
class TagAttrNode(Node):
def __init__(self, name, value=None, lineno=None, col_offset=None):
self.name = escape(name, ctx='attr')
self.value = value or []
self.lineno = lineno
self.col_offset = col_offset
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.name==other.name and self.value==other.value and self.lineno==other.lineno \
and self.col_offset==other.col_offset
return False
def __repr__(self):
return '%s(%r, value=%r, lineno=%d, col_offset=%d)' % (self.__class__.__name__, self.name,
self.value, self.lineno, self.col_offset)
class SetAttrNode(Node):
def __init__(self, attr_node):
self.attr = attr_node
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.attr==other.attr
return False
class AppendAttrNode(Node):
def __init__(self, attr_node):
self.attr = attr_node
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.attr==other.attr
return False
class TagNode(Node):
def __init__(self, name, attrs=None, body=None, lineno=None, col_offset=None):
self.name = name
self.attrs = attrs or []
self.body = body or []
self.lineno = lineno
self.col_offset = col_offset
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.name==other.name and self.body==other.body and self.attrs==other.attrs\
and self.lineno==other.lineno and self.col_offset==other.col_offset
return False
def __repr__(self):
return '%s(%r, attrs=%r, body=%r, lineno=%d, col_offset=%d)' % (self.__class__.__name__, self.name,
self.attrs, self.body, self.lineno, self.col_offset)
class ForStmtNode(Node):
def __init__(self, text, body=None, lineno=None, col_offset=None):
self.text = text.strip()
self.body = body or []
self.lineno = lineno
self.col_offset = col_offset
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.text==other.text and self.body==other.body and self.lineno==other.lineno \
and self.col_offset==other.col_offset
return False
def __repr__(self):
return '%s(%r, body=%r, lineno=%d, col_offset=%d)' % (self.__class__.__name__, self.text,
self.body, self.lineno, self.col_offset)
class IfStmtNode(Node):
def __init__(self, text, body=None, orelse=None, lineno=None, col_offset=None):
self.text = text
self.body = body or []
self.orelse = orelse or []
self.lineno = lineno
self.col_offset = col_offset
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.text==other.text and self.body==other.body and self.orelse==other.orelse\
and self.lineno==other.lineno and self.col_offset==other.col_offset
return False
def __repr__(self):
return '%s(%r, body=%r, orelse=%r, lineno=%d, col_offset=%d)' % (self.__class__.__name__,
self.text, self.body,
self.orelse, self.lineno, self.col_offset)
class ElseStmtNode(Node):
def __init__(self, body=None, lineno=None, col_offset=None):
self.body = body or []
self.lineno = lineno
self.col_offset = col_offset
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.body==other.body and self.lineno==other.lineno \
and self.col_offset==other.col_offset
return False
def __repr__(self):
return '%s(body=%r, lineno=%d, col_offset=%d)' % (self.__class__.__name__, self.body,
self.lineno, self.col_offset)
class SlotDefNode(Node):
def __init__(self, text, body=None, lineno=None, col_offset=None):
self.text = text.strip()
self.body = body or []
self.lineno = lineno
self.col_offset = col_offset
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.text==other.text and self.body==other.body and self.lineno==other.lineno \
and self.col_offset==other.col_offset
return False
def __repr__(self):
return '%s(%r, body=%r, lineno=%d, col_offset=%d)' % (self.__class__.__name__, self.text,
self.body, self.lineno, self.col_offset)
class SlotCallNode(Node):
def __init__(self, text, lineno=None, col_offset=None):
self.text = text.strip()
self.lineno = lineno
self.col_offset = col_offset
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.text==other.text and self.lineno==other.lineno \
and self.col_offset==other.col_offset
return False
def __repr__(self):
return '%s(%r, lineno=%d, col_offset=%d)' % (self.__class__.__name__, self.text,
self.lineno, self.col_offset)
##### NODES END
##### PARSER
class RecursiveStack(object):
'Stack of stacks'
def __init__(self):
self.stacks = [[]]
@property
def stack(self):
return self.stacks[-1]
@property
def current(self):
return self.stack and self.stack[-1] or None
def push(self, item):
self.stack.append(item)
return True
def pop(self):
    return self.stack.pop()
def push_stack(self, new_stack):
self.stacks.append(new_stack)
def pop_stack(self):
return self.stacks.pop()
def __nonzero__(self):
return len(self.stacks)
def __repr__(self):
return repr(self.stacks)
def __iter__(self):
return reversed(self.stack[:])
class Parser(object):
def __init__(self, states):
self.states = dict(states)
def parse(self, tokens_stream, stack):
current_state = 'start'
variantes = self.states[current_state]
for tok in tokens_stream:
token, tok_value, lineno, pos = tok
# accept new token
new_state = None
for item in variantes:
variante, state, callback = item
# tokens sequence
if isinstance(variante, basestring):
variante = globals().get(variante)
if isinstance(variante, (list, tuple)):
if token in variante:
new_state = state
break
elif variante is token:
new_state = state
break
elif isinstance(variante, Parser):
variante.parse(itertools.chain([tok], tokens_stream), stack)
new_state = state
#NOTE: tok still points to first token
if new_state is None:
raise WrongToken('[%s] Unexpected token "%s(%r)" at line %d, pos %d' \
% (current_state, token, tok_value, lineno, pos))
# process of new_state
elif new_state != current_state:
if new_state == 'end':
#print current_state, '%s(%r)' % (token, tok_value), new_state
callback(tok, stack)
#_print_stack(stack)
break
current_state = new_state
variantes = self.states[current_state]
# state callback
#print current_state, '%s(%r)' % (token, tok_value), new_state
callback(tok, stack)
#_print_stack(stack)
def _print_stack(s):
print '[stack]'
for i in s:
print ' '*4, i
print '[end of stack]\n'
# utils functions
def get_tokens(s):
my_tokens = []
while s.current and isinstance(s.current, (list, tuple)):
my_tokens.append(s.pop())
my_tokens.reverse()
return my_tokens
#NOTE: Callbacks are functions that take a token and the stack
skip = lambda t, s: None
push = lambda t, s: s.push(t)
pop_stack = lambda t, s: s.pop_stack()
def push_stack(t, s):
if isinstance(s.current, ElseStmtNode):
stmt = s.pop()
s.push_stack(stmt.body)
elif isinstance(s.current, IfStmtNode) and s.current.orelse:
s.push_stack(s.current.orelse[-1].body)
else:
if not hasattr(s.current, 'body'):
raise SyntaxError('Unexpected indent at line %d' % t[2])
s.push_stack(s.current.body)
# text data and inline python expressions
def py_expr(t, s):
my_tokens = get_tokens(s)
lineno, col_offset = my_tokens[0][2], my_tokens[0][3] - 2
s.push(ExpressionNode(u''.join([t[1] for t in my_tokens]),
lineno=lineno, col_offset=col_offset))
def text_value(t, s):
my_tokens = get_tokens(s)
if my_tokens:
lineno, col_offset = my_tokens[0][2], my_tokens[0][3]
s.push(TextNode(u''.join([t[1] for t in my_tokens]),
lineno=lineno, col_offset=col_offset))
def text_value_with_last(t, s):
s.push(t)
text_value(t, s)
# parser of attribute value
attr_data_parser = Parser((
# state name
('start', (
# variantes (token, new_state, callback)
# ((token, token,...), new_state, callback)
# (other_parser, new_state, callback)
# ('other_parser', new_state, callback)
(TOKEN_EXPRESSION_START, 'expr', text_value),
(TOKEN_PARENTHESES_CLOSE, 'end', text_value),
(all_except(TOKEN_NEWLINE), 'start', push),
)),
('expr', (
(TOKEN_EXPRESSION_END, 'start', py_expr),
(all_tokens, 'expr', push),
)),
))
# parser of text data and inline python expressions
data_parser = Parser((
('start', (
(TOKEN_EXPRESSION_START, 'expr', text_value),
(TOKEN_NEWLINE, 'end', text_value_with_last),
(all_except(TOKEN_INDENT), 'start', push),
)),
('expr', (
(TOKEN_EXPRESSION_END, 'start', py_expr),
(all_tokens, 'expr', push),
)),
))
# tag and tag attributes callbacks
def tag_name(t, s):
#if isinstance(s.current, (list, tuple)):
my_tokens = get_tokens(s)
if my_tokens:
lineno, col_offset = my_tokens[0][2], my_tokens[0][3] - 1
s.push(TagNode(u''.join([t[1] for t in my_tokens]),
lineno=lineno, col_offset=col_offset))
def tag_attr_name(t, s):
my_tokens = get_tokens(s)
lineno, col_offset = my_tokens[0][2], my_tokens[0][3]
s.push(TagAttrNode(u''.join([t[1] for t in my_tokens]),
lineno=lineno, col_offset=col_offset))
def tag_attr_value(t, s):
nodes = []
while not isinstance(s.current, TagAttrNode):
nodes.append(s.pop())
attr = s.current
nodes.reverse()
attr.value = nodes
def set_attr(t, s):
nodes = []
while not isinstance(s.current, TagAttrNode):
nodes.append(s.pop())
attr = s.pop()
nodes.reverse()
attr.value = nodes
s.push(SetAttrNode(attr))
def append_attr(t, s):
nodes = []
while not isinstance(s.current, TagAttrNode):
nodes.append(s.pop())
attr = s.pop()
nodes.reverse()
attr.value = nodes
s.push(AppendAttrNode(attr))
def tag_node(t, s):
attrs = []
while isinstance(s.current, TagAttrNode):
attrs.append(s.pop())
tag = s.pop()
# if there were no attrs
if isinstance(tag, (list, tuple)):
my_tokens = get_tokens(s)
my_tokens.append(tag)
lineno, col_offset = my_tokens[0][2], my_tokens[0][3] - 1
tag = TagNode(u''.join([t[1] for t in my_tokens]),
lineno=lineno, col_offset=col_offset)
if attrs:
tag.attrs = attrs
s.push(tag)
def tag_node_with_data(t, s):
tag_node(t, s)
push_stack(t, s)
# tag parser
tag_parser = Parser((
('start', (
(TOKEN_TEXT, 'start', push),
(TOKEN_MINUS, 'start', push),
(TOKEN_COLON, 'start', push),
(TOKEN_DOT, 'attr', tag_name),
(TOKEN_WHITESPACE, 'continue', tag_node_with_data),
(TOKEN_NEWLINE, 'end', tag_node),
)),
('attr', (
(TOKEN_TEXT, 'attr', push),
(TOKEN_MINUS, 'attr', push),
(TOKEN_COLON, 'attr', push),
(TOKEN_PARENTHESES_OPEN, 'attr_value', tag_attr_name),
)),
('attr_value', (
(attr_data_parser, 'start', tag_attr_value),
)),
('continue', (
(TOKEN_TAG_START, 'nested_tag', skip),
(TOKEN_NEWLINE, 'end', pop_stack),
(data_parser, 'end', pop_stack),
)),
('nested_tag', (
('nested_tag_parser', 'end', pop_stack),
)),
))
# this is modified tag parser, supports inline tags with data
nested_tag_parser = Parser(dict(tag_parser.states, start=(
(TOKEN_TEXT, 'start', push),
(TOKEN_MINUS, 'start', push),
(TOKEN_COLON, 'start', push),
(TOKEN_DOT, 'attr', tag_name),
(TOKEN_WHITESPACE, 'continue', tag_node_with_data),
(TOKEN_NEWLINE, 'end', tag_node),
)
).iteritems())
# base parser callbacks
def base_template(t, s):
my_tokens = get_tokens(s)
lineno, col_offset = my_tokens[0][2], my_tokens[0][3]
s.push(BaseTemplate(u''.join([t[1] for t in my_tokens])))
def html_comment(t, s):
my_tokens = get_tokens(s)
lineno, col_offset = my_tokens[0][2], my_tokens[0][3]
s.push(TextNode(Markup(u'<!-- %s -->' % (u''.join([t[1] for t in my_tokens])).strip()),
lineno=lineno, col_offset=col_offset))
def for_stmt(t, s):
my_tokens = get_tokens(s)
lineno, col_offset = my_tokens[0][2], my_tokens[0][3]
s.push(ForStmtNode(u''.join([t[1] for t in my_tokens]),
lineno=lineno, col_offset=col_offset))
def if_stmt(t, s):
my_tokens = get_tokens(s)
lineno, col_offset = my_tokens[0][2], my_tokens[0][3]
s.push(IfStmtNode(u''.join([t[1] for t in my_tokens]),
lineno=lineno, col_offset=col_offset))
def elif_stmt(t, s):
if not isinstance(s.current, IfStmtNode):
pass
#XXX: raise TemplateError
my_tokens = get_tokens(s)
lineno, col_offset = my_tokens[0][2], my_tokens[0][3]
stmt = IfStmtNode(u''.join([t[1] for t in my_tokens]),
lineno=lineno, col_offset=col_offset)
s.current.orelse.append(stmt)
def else_stmt(t, s):
lineno, col_offset = t[2], t[3] - 6
if not isinstance(s.current, IfStmtNode):
pass
#XXX: raise TemplateError
stmt = ElseStmtNode(lineno=lineno, col_offset=col_offset)
# elif
if s.current.orelse:
s.current.orelse[-1].orelse.append(stmt)
# just else
else:
s.current.orelse.append(stmt)
s.push(stmt)
def slot_def(t, s):
my_tokens = get_tokens(s)
lineno, col_offset = my_tokens[0][2], my_tokens[0][3]
s.push(SlotDefNode(u''.join([t[1] for t in my_tokens]),
lineno=lineno, col_offset=col_offset))
def slot_call(t, s):
my_tokens = get_tokens(s)
lineno, col_offset = my_tokens[0][2], my_tokens[0][3]
s.push(SlotCallNode(u''.join([t[1] for t in my_tokens]),
lineno=lineno, col_offset=col_offset))
# base parser (MAIN PARSER)
block_parser = Parser((
# start is always the start of a new line
('start', (
(TOKEN_TEXT, 'text', push),
(TOKEN_EXPRESSION_START, 'expr', skip),
(TOKEN_TAG_ATTR_SET, 'set_attr', skip),
(TOKEN_TAG_ATTR_APPEND, 'append_attr', skip),
(TOKEN_TAG_START, 'tag', skip),
(TOKEN_STATEMENT_FOR, 'for_stmt', push),
(TOKEN_STATEMENT_IF, 'if_stmt', push),
(TOKEN_STATEMENT_ELIF, 'elif_stmt', push),
(TOKEN_STATEMENT_ELSE, 'else_stmt', skip),
(TOKEN_SLOT_DEF, 'slot_def', push),
(TOKEN_BASE_TEMPLATE, 'base', skip),
(TOKEN_STMT_CHAR, 'slot_call', skip),
(TOKEN_COMMENT, 'comment', skip),
(TOKEN_BACKSLASH, 'escaped_text', skip),
(TOKEN_INDENT, 'indent', push_stack),
(TOKEN_UNINDENT, 'start', pop_stack),
(TOKEN_NEWLINE, 'start', skip),
(TOKEN_EOF, 'end', skip),
(all_tokens, 'text', push),
)),
# to prevent multiple indents in a row
('indent', (
(TOKEN_TEXT, 'text', push),
(TOKEN_EXPRESSION_START, 'expr', skip),
(TOKEN_TAG_ATTR_APPEND, 'append_attr', skip),
(TOKEN_TAG_ATTR_SET, 'set_attr', skip),
(TOKEN_TAG_START, 'tag', skip),
(TOKEN_STATEMENT_FOR, 'for_stmt', push),
(TOKEN_STATEMENT_IF, 'if_stmt', push),
(TOKEN_STATEMENT_ELIF, 'elif_stmt', push),
(TOKEN_STATEMENT_ELSE, 'else_stmt', skip),
(TOKEN_SLOT_DEF, 'slot_def', push),
(TOKEN_STMT_CHAR, 'slot_call', skip),
(TOKEN_COMMENT, 'comment', skip),
(TOKEN_BACKSLASH, 'escaped_text', skip),
(TOKEN_NEWLINE, 'start', skip),
(TOKEN_UNINDENT, 'start', pop_stack),
)),
('base', (
(TOKEN_NEWLINE, 'start', base_template),
(all_tokens, 'base', push),
)),
('text', (
(TOKEN_EXPRESSION_START, 'expr', text_value),
(TOKEN_NEWLINE, 'start', text_value_with_last),
(all_except(TOKEN_INDENT), 'text', push),
)),
('expr', (
(TOKEN_EXPRESSION_END, 'text', py_expr),
(all_tokens, 'expr', push),
)),
('escaped_text', (
(TOKEN_NEWLINE, 'start', text_value_with_last),
(all_except(TOKEN_INDENT), 'escaped_text', push),
)),
('tag', (
(tag_parser, 'start', skip),
)),
('comment', (
(TOKEN_NEWLINE, 'start', html_comment),
(all_tokens, 'comment', push),
)),
('set_attr', (
(TOKEN_TEXT, 'set_attr', push),
(TOKEN_MINUS, 'set_attr', push),
(TOKEN_COLON, 'set_attr', push),
(TOKEN_PARENTHESES_OPEN, 'set_attr_value', tag_attr_name),
)),
('set_attr_value', (
(attr_data_parser, 'start', set_attr),
)),
('append_attr', (
(TOKEN_TEXT, 'append_attr', push),
(TOKEN_MINUS, 'append_attr', push),
(TOKEN_COLON, 'append_attr', push),
(TOKEN_PARENTHESES_OPEN, 'append_attr_value', tag_attr_name),
)),
('append_attr_value', (
(attr_data_parser, 'start', append_attr),
)),
('for_stmt', (
(TOKEN_NEWLINE, 'start', for_stmt),
(all_tokens, 'for_stmt', push),
)),
('if_stmt', (
(TOKEN_NEWLINE, 'start', if_stmt),
(all_tokens, 'if_stmt', push),
)),
('elif_stmt', (
(TOKEN_NEWLINE, 'start', elif_stmt),
(all_tokens, 'elif_stmt', push),
)),
('else_stmt', (
(TOKEN_NEWLINE, 'start', else_stmt),
#(all_tokens, 'else_stmt', push),
)),
('slot_def', (
(TOKEN_NEWLINE, 'start', slot_def),
(all_tokens, 'slot_def', push),
)),
('slot_call', (
(TOKEN_NEWLINE, 'start', slot_call),
(all_tokens, 'slot_call', push),
)),
))
############# PARSER END
class AstWrapper(object):
def __init__(self, lineno, col_offset):
assert lineno is not None and col_offset is not None
self.lineno = lineno
self.col_offset = col_offset
def __getattr__(self, name):
attr = getattr(ast, name)
return partial(attr, lineno=self.lineno, col_offset=self.col_offset, ctx=Load())
class MintToPythonTransformer(ast.NodeTransformer):
def visit_MintTemplate(self, node):
ast_ = AstWrapper(1,1)
module = ast_.Module(body=[
ast_.FunctionDef(name=MAIN_FUNCTION,
body=[],
args=ast_.arguments(args=[], vararg=None, kwargs=None, defaults=[]),
decorator_list=[])])
body = module.body[0].body
for n in node.body:
result = self.visit(n)
if isinstance(result, (list, tuple)):
for i in result:
body.append(i)
else:
body.append(result)
return module
def visit_TextNode(self, node):
ast_ = AstWrapper(node.lineno, node.col_offset)
return ast_.Expr(value=ast_.Call(func=ast_.Name(id=DATA),
args=[self.get_value(node, ast_)],
keywords=[], starargs=None, kwargs=None))
def visit_ExpressionNode(self, node):
ast_ = AstWrapper(node.lineno, node.col_offset)
return ast_.Expr(value=ast_.Call(func=ast_.Name(id=DATA),
args=[self.get_value(node, ast_)],
keywords=[], starargs=None, kwargs=None))
def visit_SetAttrNode(self, node):
ast_ = AstWrapper(node.attr.lineno, node.attr.col_offset)
key, value = self.get_value(node.attr, ast_)
return ast_.Expr(value=ast_.Call(func=ast_.Attribute(value=ast_.Name(id=CURRENT_NODE),
attr='set'),
args=[key, value],
keywords=[],
starargs=None, kwargs=None))
def visit_AppendAttrNode(self, node):
ast_ = AstWrapper(node.attr.lineno, node.attr.col_offset)
key, value = self.get_value(node.attr, ast_)
value = ast_.BinOp(
left=ast_.BoolOp(
values=[ast_.Call(
func=ast_.Attribute(value=ast_.Name(id=CURRENT_NODE),
attr='get'),
args=[key],
keywords=[],
starargs=None, kwargs=None), ast_.Str(s=u'')],
op=ast.Or()),
op=ast.Add(),
right=value)
return ast_.Expr(value=ast_.Call(func=ast_.Attribute(value=ast_.Name(id=CURRENT_NODE),
attr='set'),
args=[key, value],
keywords=[],
starargs=None, kwargs=None))
def visit_TagNode(self, node):
ast_ = AstWrapper(node.lineno, node.col_offset)
name = CURRENT_NODE
attrs = ast_.Dict(keys=[], values=[])
for a in node.attrs:
k, v = self.get_value(a, ast_)
attrs.keys.append(k)
attrs.values.append(v)
nodes = []
# tag start
node_start = ast_.Assign(targets=[ast_.Name(id=name, ctx=Store())],
value=ast_.Call(func=ast_.Name(id=TAG_START),
args=[ast_.Str(s=escape(node.name)), attrs],
keywords=[], starargs=None, kwargs=None))
nodes.append(node_start)
for n in node.body:
result = self.visit(n)
if isinstance(result, (list, tuple)):
for i in result:
nodes.append(i)
else:
nodes.append(result)
# tag end
node_end = ast_.Expr(value=ast_.Call(func=ast_.Name(id=TAG_END),
args=[ast_.Str(s=escape(node.name))],
keywords=[], starargs=None, kwargs=None))
nodes.append(node_end)
return nodes
def visit_ForStmtNode(self, node):
ast_ = AstWrapper(node.lineno, node.col_offset)
result = []
expr = node.text[1:]
if not expr.endswith(':'):
expr += ':'
expr += 'pass'
value = ast.parse(expr).body[0]
for n in node.body:
result = self.visit(n)
if isinstance(result, (list, tuple)):
for i in result:
value.body.append(i)
else:
value.body.append(result)
value.lineno = ast_.lineno
value.col_offset = ast_.col_offset
return value
def visit_IfStmtNode(self, node):
ast_ = AstWrapper(node.lineno, node.col_offset)
result = []
expr = node.text[1:]
if not expr.endswith(':'):
expr += ':'
expr += 'pass'
if expr.startswith('el'):
expr = expr[2:]
value = ast.parse(expr).body[0]
value.body = []
value.lineno = ast_.lineno
value.col_offset = ast_.col_offset
#XXX: if nodes is empty list raise TemplateError
for n in node.body:
result = self.visit(n)
if isinstance(result, (list, tuple)):
for i in result:
value.body.append(i)
else:
value.body.append(result)
for n in node.orelse:
result = self.visit(n)
if isinstance(result, (list, tuple)):
for i in result:
value.orelse.append(i)
else:
value.orelse.append(result)
return value
def visit_ElseStmtNode(self, node):
value = []
for n in node.body:
result = self.visit(n)
if isinstance(result, (list, tuple)):
for i in result:
value.append(i)
else:
value.append(result)
return value
def visit_SlotDefNode(self, node):
ast_ = AstWrapper(node.lineno, node.col_offset)
result = []
expr = node.text[1:]
if not expr.endswith(':'):
expr += ':'
expr += 'pass'
value = ast.parse(expr).body[0]
value.lineno = ast_.lineno
value.col_offset = ast_.col_offset
#XXX: if self.nodes is empty list raise TemplateError
for n in node.body:
result = self.visit(n)
if isinstance(result, (list, tuple)):
for i in result:
value.body.append(i)
else:
value.body.append(result)
return value
def visit_SlotCallNode(self, node):
ast_ = AstWrapper(node.lineno, node.col_offset)
expr = node.text
value = ast.parse(expr).body[0].value
value.lineno = ast_.lineno
value.col_offset = ast_.col_offset
return ast_.Expr(value=ast_.Call(func=ast_.Name(id=DATA),
                                 args=[value], keywords=[],
                                 starargs=None, kwargs=None))
def get_value(self, node, ast_, ctx='tag'):
if isinstance(node, TextNode):
return ast_.Str(s=escape(node.text, ctx=ctx))
elif isinstance(node, ExpressionNode):
expr = ast.parse(node.text).body[0].value
return ast_.Call(func=ast_.Name(id=ESCAPE_HELLPER),
args=[expr],
keywords=[ast.keyword(arg='ctx', value=ast_.Str(s=ctx))],
starargs=None, kwargs=None)
elif isinstance(node, TagAttrNode):
key = ast_.Str(s=node.name)
value = ast_.Str(s=u'')
nodes = list(node.value)
if nodes:
value = ast_.Call(func=ast_.Attribute(value=ast_.Str(s=u''),
attr='join'),
args=[ast_.Tuple(elts=[self.get_value(n, ast_, ctx='attr') for n in nodes])],
keywords=[], starargs=None, kwargs=None)
return key, value
class SlotsGetter(ast.NodeTransformer):
'Node transformer, collects slots'
def __init__(self):
self.slots = {}
self.base = None
def visit_FunctionDef(self, node):
ast_ = AstWrapper(node.lineno, node.col_offset)
new_tree_call = ast_.Assign(targets=[ast_.Tuple(elts=[
ast_.Name(id=TREE_BUILDER, ctx=Store()),
ast_.Name(id=TAG_START, ctx=Store()),
ast_.Name(id=TAG_END, ctx=Store()),
ast_.Name(id=DATA, ctx=Store())],
ctx=Store())],
value=ast_.Call(func=ast_.Name(id=TREE_FACTORY),
args=[],
keywords=[], starargs=None, kwargs=None))
tree_to_unicode_call = ast_.Return(value=ast_.Call(func=ast_.Attribute(
    value=ast_.Name(id=TREE_BUILDER),
    attr='to_unicode'),
    args=[],
    keywords=[], starargs=None, kwargs=None))
node.body.insert(0, new_tree_call)
node.body.append(tree_to_unicode_call)
if node.name == MAIN_FUNCTION:
_nones = []
for n in node.body:
v = self.visit(n)
if v is None:
_nones.append(n)
for n in _nones:
node.body.remove(n)
return node
self.slots[node.name] = node
node.name = 'slot_' + os.urandom(5).encode('hex')
def visit_BaseTemplate(self, node):
self.base = node.name
def _correct_inheritance(new_slots, old_slots):
slots = {}
for k, value in new_slots.items():
if k in old_slots:
name = '__base__'
old_value = old_slots[k]
ast_ = AstWrapper(old_value.lineno + 1, old_value.col_offset)
value.body.insert(0, ast_.Assign(targets=[ast_.Name(id=name, ctx=Store())],
value=ast_.Name(id=old_value.name)))
del old_slots[k]
# this slot is overridden in the child template
old_slots[k+'__overrided'] = old_value
slots[k] = value
slots.update(old_slots)
return slots
def get_mint_tree(tokens_stream):
'''
This function is a wrapper around the individual parsers (tag_parser,
block_parser, etc.). Returns a mint tree.
'''
smart_stack = RecursiveStack()
block_parser.parse(tokens_stream, smart_stack)
return MintTemplate(body=smart_stack.stack)
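# Illustrative sketch (not part of the original API): parse an in-memory
# template string into a mint tree.
def _demo_mint_tree():
    source = StringIO(u'@p {{ greeting }}\n')
    print get_mint_tree(tokenizer(source))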
############# API
class TemplateNotFound(Exception):
pass
class TreeBuilder(_TreeBuilder):
'Tree with root element already set'
def __init__(self, *args, **kwargs):
_TreeBuilder.__init__(self, *args, **kwargs)
self.start('root', {})
def to_unicode(self):
class dummy: pass
data = []
out = dummy()
out.write = data.append
# out - fast writable object
self.end('root')
root = self.close()
if root.text:
out.write(root.text)
for node in root:
self._node_to_unicode(out, node)
if root.tail:
out.write(root.tail)
return Markup(u''.join(data))
def _node_to_unicode(self, out, node):
#NOTE: all data must be escaped during tree building
tag = node.tag
items = node.items()
selfclosed = ['link', 'input', 'br', 'hr', 'img', 'meta']
out.write(u'<' + tag)
if items:
items.sort() # lexical order
for k, v in items:
out.write(u' %s="%s"' % (k, v))
if tag in selfclosed:
out.write(u' />')
else:
out.write(u'>')
if node.text or len(node):
if node.text:
out.write(node.text)
for n in node:
self._node_to_unicode(out, n)
out.write(u'</' + tag + '>')
if node.tail:
out.write(node.tail)
class PprintTreeBuilder(_TreeBuilder):
'Tree with root element already set'
def __init__(self, *args, **kwargs):
_TreeBuilder.__init__(self, *args, **kwargs)
self.start('root', {})
self._level = -1
@property
def indention(self):
return self._level > 0 and ' '*self._level or ''
def to_unicode(self):
class dummy: pass
data = []
out = dummy()
out.write = data.append
# out - fast writable object
self.end('root')
root = self.close()
if root.text:
out.write(self.indent_text(root.text))
out.write('\n')
for node in root:
self._node_to_unicode(out, node)
if root.tail:
out.write(self.indent_text(root.tail))
return Markup(u''.join(data))
def _node_to_unicode(self, out, node):
#NOTE: all data must be escaped during tree building
self.indent()
tag = node.tag
items = node.items()
selfclosed = ['link', 'input', 'br', 'hr', 'img', 'meta']
children = list(node)
text = node.text
tail = node.tail
out.write(self.indention)
out.write(u'<' + tag)
if items:
items.sort() # lexical order
for k, v in items:
out.write(u' %s="%s"' % (k, v))
if tag in selfclosed:
out.write(u' />')
else:
out.write(u'>')
if text:
if text.endswith('\n'):
text = text[:-1]
self.indent()
out.write('\n')
out.write(self.indent_text(text))
out.write('\n')
self.unindent()
if children:
out.write('\n')
for n in children:
self._node_to_unicode(out, n)
if children or text:
out.write(self.indention)
out.write(u'</' + tag + '>')
if node.tail:
out.write('\n')
tail = node.tail
if tail.endswith('\n'):
tail = tail[:-1]
out.write(self.indent_text(tail))
out.write('\n')
self.unindent()
def indent_text(self, text):
return '\n'.join([self.indention+t for t in text.split('\n')])
def indent(self):
self._level += 1
def unindent(self):
self._level -= 1
def new_tree(pprint):
def wrapper():
tree = pprint and PprintTreeBuilder() or TreeBuilder()
return tree, tree.start, tree.end, tree.data
return wrapper
class Template(object):
def __init__(self, source, filename=None, loader=None, globals=None, pprint=False):
assert source or filename, 'Please provide source code or filename'
self.source = source
self.filename = filename if filename else '<string>'
self._loader = loader
self.compiled_code = compile(self.tree(), self.filename, 'exec')
self.globals = globals or {}
self.pprint = pprint
def tree(self, slots=None):
slots = slots or {}
source = StringIO(self.source) if self.source else open(self.filename, 'r')
mint_tree = get_mint_tree(tokenizer(source))
tree = MintToPythonTransformer().visit(mint_tree)
slots_getter = SlotsGetter()
slots_getter.visit(tree.body[0])
_slots, base_template_name = slots_getter.slots, slots_getter.base
# we do not want to clobber slot names, so overridden parent slots
# are kept under a '__overrided' suffix (see _correct_inheritance)
slots = _correct_inheritance(slots, _slots)
if base_template_name:
base_template = self._loader.get_template(base_template_name)
tree = base_template.tree(slots=slots)
elif slots is not None:
# insert implementation of slots
# def slot_bb13e100d5(): ...
# and insert assings of slots
# real_slot_name = slot_bb13e100d5
for k,v in slots.items():
if not k.endswith('__overrided'):
ast_ = AstWrapper(v.lineno, v.col_offset)
tree.body.insert(0, ast_.Assign(targets=[ast_.Name(id=k, ctx=Store())],
value=ast_.Name(id=v.name)))
tree.body.insert(0, v)
# tree already has slots definitions and ready to be compiled
return tree
def render(self, **kwargs):
ns = {
'utils':utils,
ESCAPE_HELLPER:escape,
TREE_FACTORY:new_tree(self.pprint),
}
ns.update(self.globals)
ns.update(kwargs)
exec self.compiled_code in ns
# execute template main function
return ns[MAIN_FUNCTION]()
def slot(self, name, **kwargs):
ns = {
'utils':utils,
ESCAPE_HELLPER:escape,
TREE_FACTORY:new_tree(self.pprint),
}
ns.update(self.globals)
ns.update(kwargs)
exec self.compiled_code in ns
return ns[name]
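# Illustrative usage sketch (not part of the original module): compile and
# render an in-memory template; the expected output is indicative only.
def _demo_template():
    t = Template(u'@p Hello, {{ name }}!\n')
    print t.render(name=u'world')   # roughly: <p>Hello, world!</p>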
class Loader(object):
def __init__(self, *dirs, **kwargs):
self.dirs = []
# dirs - list of directories. Order matters
for d in dirs:
self.dirs.append(os.path.abspath(d))
self.cache = kwargs.get('cache', False)
self._templates_cache = {}
self.globals = kwargs.get('globals', {})
self.pprint = kwargs.get('pprint', 0)
def get_template(self, template):
if template in self._templates_cache:
return self._templates_cache[template]
for dir in self.dirs:
location = os.path.join(dir, template)
if os.path.exists(location) and os.path.isfile(location):
with open(location, 'r') as f:
tmpl = Template(source=f.read(), filename=f.name,
loader=self, globals=self.globals, pprint=self.pprint)
if self.cache:
self._templates_cache[template] = tmpl
return tmpl
raise TemplateNotFound(template)
def __add__(self, other):
dirs = self.dirs + other.dirs
return self.__class__(*dirs, cache=self.cache, globals=self.globals, pprint=self.pprint)
#NOTE: Taken from jinja2
class Markup(unicode):
def __new__(cls, obj=u'', **kwargs):
if hasattr(obj, '__html__'):
obj = obj.__html__()
return super(Markup, cls).__new__(cls, obj, **kwargs)
def __html__(self):
return self
def __add__(self, other):
if hasattr(other, '__html__') or isinstance(other, basestring):
return self.__class__(unicode(self) + unicode(escape(other)))
return NotImplemented
def __radd__(self, other):
if hasattr(other, '__html__') or isinstance(other, basestring):
return self.__class__(unicode(escape(other)) + unicode(self))
return NotImplemented
def __mul__(self, num):
if isinstance(num, (int, long)):
return self.__class__(unicode.__mul__(self, num))
return NotImplemented
__rmul__ = __mul__
def join(self, seq):
return self.__class__(unicode.join(self, itertools.imap(escape, seq)))
join.__doc__ = unicode.join.__doc__
def split(self, *args, **kwargs):
return map(self.__class__, unicode.split(self, *args, **kwargs))
split.__doc__ = unicode.split.__doc__
def rsplit(self, *args, **kwargs):
return map(self.__class__, unicode.rsplit(self, *args, **kwargs))
rsplit.__doc__ = unicode.rsplit.__doc__
def splitlines(self, *args, **kwargs):
return map(self.__class__, unicode.splitlines(self, *args, **kwargs))
splitlines.__doc__ = unicode.splitlines.__doc__
def __repr__(self):
return 'Markup(%s)' % super(Markup, self).__repr__()
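# Behavior sketch (illustrative): concatenation escapes the plain operand,
# while Markup-to-Markup operations keep both sides unescaped.
#     Markup(u'<b>safe</b>') + u'<i>'  ->  Markup(u'<b>safe</b>&lt;i&gt;')
#     Markup(u', ').join([u'<a>'])     ->  Markup(u'&lt;a&gt;')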
class utils(object):
class doctype:
html_strict = Markup('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" '
'"http://www.w3.org/TR/html4/strict.dtd">')
html_transitional = Markup('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 '
'Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">')
xhtml_strict = Markup('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" '
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">')
xhtml_transitional = Markup('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 '
'Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">')
html5 = Markup('<!DOCTYPE html>')
markup = Markup
@staticmethod
def loop(iterable):
return Looper(iterable)
@staticmethod
def entity(char):
return Markup(CHARS_ENTITIES.get(char, char))
@staticmethod
def script(src=None, data=None, type='text/javascript'):
if src:
return Markup('<script type="%s" src="%s"></script>' % (type, src))
elif data:
return Markup('<script type="%s">%s</script>' % (type, data))
return ''
@staticmethod
def scripts(*args, **kwargs):
result = []
for name in args:
result.append(utils.script(name, **kwargs))
return ''.join(result)
@staticmethod
def link(href, rel='stylesheet', type='text/css'):
return Markup('<link rel="%s" type="%s" href="%s" />' % (rel, type, href))
class Looper:
'Cool class taken from PPA project'
class _Item:
def __init__(self, index, has_next):
self.index = index
self.has_next = has_next
self.last = not has_next
self.first = not index
@property
def odd(self):
return self.index % 2
@property
def even(self):
return not self.index % 2
def cycle(self, *args):
'Magic method (adopted ;)'
return args[self.index % len(args)]
def __init__(self, iterable):
self._iterator = iter(iterable)
def _shift(self):
try:
self._next = self._iterator.next()
except StopIteration:
self._has_next = False
else:
self._has_next = True
def __iter__(self):
self._shift()
index = 0
while self._has_next:
value = self._next
self._shift()
yield value, self._Item(index, self._has_next)
index += 1
############# API END
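# Illustrative sketch (not part of the original API): iterate with loop
# metadata; `info` exposes index/first/last/odd/even and cycle().
def _demo_loop():
    for value, info in utils.loop(['a', 'b', 'c']):
        print value, info.index, info.first, info.last, info.cycle('x', 'y')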
class Printer(ast.NodeVisitor):
'AST printer'
def __init__(self):
self._indent = 0
self._indent_tab = ' '
self.src = StringIO()
self.write = self.src.write
self._in_args = False
self._in_if = False
def make_tab(self):
self.src.write(self._indent*self._indent_tab)
def visit_FunctionDef(self, node):
self.make_tab()
self.src.write('def %s(' % node.name)
self._in_args = True
total_args = len(node.args.args)
default_args_len = len(node.args.defaults) if node.args.defaults else 0
for i, arg in enumerate(node.args.args):
if i != 0:
self.src.write(', ')
self.visit(arg)
if default_args_len > 0 and i >= (total_args - default_args_len):
self.src.write('=')
y = i - (total_args - default_args_len)  # offset into defaults for this arg
self.visit(node.args.defaults[y])
self._in_args = False
self.src.write('):\n')
self._indent += 1
for n in node.body:
self.visit(n)
self._indent -= 1
def visit_Return(self, node):
self.make_tab()
self.src.write('return ')
self.visit(node.value)
def visit_Name(self, node):
self.src.write(node.id)
def visit_Str(self, node):
self.src.write('%r' % node.s)
def visit_Num(self, node):
self.src.write('%d' % node.n)
def visit_Pass(self, node):
self.make_tab()
self.src.write('pass\n')
def visit_If(self, node):
self.make_tab()
if self._in_if:
self.src.write('elif ')
else:
self.src.write('if ')
self.visit(node.test)
self.src.write(':\n')
self._indent += 1
for n in node.body:
self.visit(n)
self._indent -= 1
if node.orelse:
self._in_if = True
if not isinstance(node.orelse[0], ast.If):
self.make_tab()
self.src.write('else:\n')
self._indent += 1
for orelse in node.orelse:
self.visit(orelse)
if not isinstance(node.orelse[0], ast.If):
self._indent -= 1
self._in_if = False
def visit_Compare(self, node):
self.visit(node.left)
self.src.write(' ')
for op in node.ops:
self.visit(op)
self.src.write(' ')
for comp in node.comparators:
self.visit(comp)
def visit_For(self, node):
self.make_tab()
self.write('for ')
self.visit(node.target)
self.write(' in ')
self._in_args = True
self.visit(node.iter)
self._in_args = False
self.write(':\n')
self._indent += 1
for n in node.body:
self.visit(n)
self._indent -= 1
def visit_Tuple(self, node):
self.src.write('(')
for i,el in enumerate(node.elts):
if i != 0:
self.src.write(', ')
self.visit(el)
self.src.write(')')
def visit_List(self, node):
self.src.write('[')
for i,el in enumerate(node.elts):
if i != 0:
self.src.write(', ')
self.visit(el)
self.src.write(']')
def visit_Dict(self, node):
self.src.write('{')
total_keys = len(node.keys)
for i in range(total_keys):
if i != 0:
self.src.write(', ')
self.visit(node.keys[i])
self.src.write(': ')
self.visit(node.values[i])
self.src.write('}')
def visit_Assign(self, node):
self.make_tab()
for i, target in enumerate(node.targets):
if i != 0:
self.src.write(', ')
self.visit(target)
self.src.write(' = ')
self._in_args = True
self.visit(node.value)
self._in_args = False
self.src.write('\n')
def visit_Call(self, node):
if self._in_args:
self.visit(node.func)
self.src.write('(')
for i, arg in enumerate(node.args):
if i != 0:
self.src.write(', ')
self.visit(arg)
self.src.write(')')
else:
self.make_tab()
self.visit(node.func)
self.src.write('(')
self._in_args = True
for i, arg in enumerate(node.args):
if i != 0:
self.src.write(', ')
self.visit(arg)
self.src.write(')')
self._in_args = False
self.src.write('\n')
def visit_Attribute(self, node):
self.visit(node.value)
self.src.write('.')
self.src.write(node.attr)
def visit_BinOp(self, node):
self.visit(node.left)
self.src.write(' ')
self.visit(node.op)
self.src.write(' ')
self.visit(node.right)
def visit_BoolOp(self, node):
for i, n in enumerate(node.values):
    if i:
        self.src.write(' ')
        self.visit(node.op)
        self.src.write(' ')
    self.visit(n)
# Operators
def visit_Add(self, node):
self.src.write('+')
def visit_Mod(self, node):
self.src.write('%')
def visit_Eq(self, node):
self.src.write('==')
def visit_NotEq(self, node):
self.src.write('!=')
def visit_Lt(self, node):
    self.src.write('<')
def visit_Gt(self, node):
    self.src.write('>')
def visit_Or(self, node):
self.src.write('or')
def all_files_by_mask(mask):
for root, dirs, files in os.walk('.'):
for basename in files:
if fnmatch.fnmatch(basename, mask):
filename = os.path.join(root, basename)
yield filename
def render_templates(*templates, **kw):
loader = kw['loader']
for template_name in templates:
result = loader.get_template(template_name).render()
if result:
open(template_name[:-4]+'html', 'w').write(result)
def iter_changed(interval=1):
mtimes = {}
while 1:
for filename in all_files_by_mask('*.mint'):
try:
mtime = os.stat(filename).st_mtime
except OSError:
continue
old_time = mtimes.get(filename)
if old_time is None:
mtimes[filename] = mtime
continue
elif mtime > old_time:
mtimes[filename] = mtime
yield filename
time.sleep(interval)
if __name__ == '__main__':
import datetime
from optparse import OptionParser
parser = OptionParser()
parser.add_option('-c', '--code', dest='code', action='store_true',
default=False,
help='Show only python code of compiled template.')
parser.add_option('-t', '--tokenize', dest='tokenize', action='store_true',
default=False,
help='Show tokens stream of template.')
parser.add_option('-r', '--repeat', dest='repeat',
default=0, metavar='N', type='int',
help='Try to render template N times and display average time result.')
parser.add_option('-p', '--pprint', dest='pprint', action='store_true',
default=False,
help='Turn pretty print on.')
parser.add_option('-m', '--monitor', dest='monitor', action='store_true',
default=False,
help='Monitor current directory and subdirectories for changes in mint files. '
'And render corresponding html files.')
(options, args) = parser.parse_args()
loader = Loader('.', pprint=options.pprint)
if len(args) > 0:
template_name = args[0]
template = loader.get_template(template_name)
if options.code:
printer = Printer()
printer.visit(template.tree())
print printer.src.getvalue()
elif options.tokenize:
for t in tokenizer(StringIO(template.source)):
print t
else:
print template.render()
if options.repeat > 0:
now = datetime.datetime.now
results = []
for i in range(options.repeat):
start = now()
template.render()
results.append(now() - start)
print 'Total time (%d repeats): ' % options.repeat, reduce(lambda a,b: a+b, results)
print 'Average: ', reduce(lambda a,b: a+b, results)/len(results)
elif options.monitor:
curdir = os.path.abspath(os.getcwd())
try:
render_templates(*all_files_by_mask('*.mint'), loader=loader)
print 'Monitoring for file changes...'
for changed_file in iter_changed():
print 'Changes in file: ', changed_file, datetime.datetime.now().strftime('%H:%M:%S')
render_templates(changed_file, loader=loader)
except KeyboardInterrupt:
pass
else:
print 'Try --help'
|
shadmanj/college-code | refs/heads/master | PHYS172-Modern-Mechanics/Lab10.py | 1 | from __future__ import division
from visual import *
from visual.graph import *
#control visualizer window
scene.width = 600
scene.height = 760
#objects
ceiling = box(pos=vector(0,0,0), size = (0.2,0.01,0.2))
block = sphere(radius=.025, color=color.yellow)
spring=helix(pos=ceiling.pos, color=color.orange, radius=.01)
block.m = 9.1 #mass of holder weights
L0 = 0 #meters
ks = 18.9 #spring constant
block.trail = curve(color=block.color)
#graphs
#gdisplay(xtitle='Seconds', ytitle='Meters', x=600, y=0, width=600, height=300)
#ygraph = gcurve(color=color.yellow)
gdisplay(xtitle='Seconds', ytitle='Joules', x=500, y=0, width=800, height=500)
Kgraph = gcurve(color=color.magenta)
Ugraph = gcurve(color=color.blue)
Etotal = gcurve(color=color.red)
#physical constants
g = 0 #m/s^2
b = 1.8 ##units of N*s/m
#simulation parameters
delta_t = .001
simtime = 2 #seconds
t = 0 #simulation time is 0
amp = .09 #cm amplitude of oscillation (unused below)
yeq = -L0 - block.m*g/ks
#initial values
block.pos = vector(0,.2,0) #displace block from equilibrium position
block.v = vector(0,0,0) #start at rest
block.p = block.m*block.v #momentum of block
spring.axis = block.pos - ceiling.pos
#improve display
scene.autoscale = 0 #dont zoom camera
scene.center = vector(0,yeq,0)
#calculation loop
while t < simtime:
rate(10000)
#calculate L vector
L = block.pos - spring.pos
Lmag = mag(L)
Lhat = -L/Lmag
#force of spring on block
stretch = Lmag - L0
Fmag_bspring = ks * stretch
Fhat_bspring = Lhat
F_bspring = Fmag_bspring * Fhat_bspring
#Force on block from liquid
F_ballliquid = -b*(block.p/block.m)
#other force on block
Fmag_b = block.m * g
Fhat_b = vector(0,-1,0)
F_b = Fmag_b * Fhat_b
#net force on block
Fnet_b = F_b + F_bspring + F_ballliquid
#apply momentum principle
deltap_block = Fnet_b * delta_t
block.p = block.p + deltap_block
#update position
deltar_block = (block.p/block.m)*delta_t
block.pos = block.pos + deltar_block
#Calculate energies
pmag = mag(block.p) #magnitude of ball momentum
K = pmag**2 / (2*block.m)
U = .5 * ks * stretch**2
E = K + U
if K == 0:
print(E)
break
#add points to block trail
block.trail.append(pos=block.pos)
#add data points to ygraph
#ygraph.plot(pos=(t, block.pos.y))
#update axis of spring
spring.axis = block.pos
#Update plot
Kgraph.plot(pos=(t,K))
Ugraph.plot(pos=(t,U))
Etotal.plot(pos=(t,E))
#update time
t = t + delta_t
print(E)
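#sanity-check sketch (illustrative): for light damping the mean energy of a
#damped oscillator decays roughly as E(t) ~ E0*exp(-b*t/m); with b=1.8 and
#m=9.1 the time constant is m/b ~ 5.1 s, so about a third of the initial
#energy should dissipate over the 2 s run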
|
SpamScope/spamscope | refs/heads/develop | fabfile.py | 1 | def pre_submit(topology_name, env_name, env_config, options):
"""Override this function to perform custom actions prior to topology
submission. No SSH tunnels will be active when this function is called."""
pass
def post_submit(topo_name, env_name, env_config, options):
"""Override this function to perform custom actions after topology
submission. Note that the SSH tunnel to Nimbus will still be active
when this function is called."""
pass
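# Illustrative override sketch (hook names come from this file; the print
# statement is a placeholder, not a project requirement):
#
# def pre_submit(topology_name, env_name, env_config, options):
#     print("about to submit %s to environment %s" % (topology_name, env_name))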
|
jmerkow/VTK | refs/heads/master | ThirdParty/Twisted/twisted/conch/client/agent.py | 69 | # -*- test-case-name: twisted.conch.test.test_default -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Accesses the key agent for user authentication.
Maintainer: Paul Swartz
"""
import os
from twisted.conch.ssh import agent, channel, keys
from twisted.internet import protocol, reactor
from twisted.python import log
class SSHAgentClient(agent.SSHAgentClient):
def __init__(self):
agent.SSHAgentClient.__init__(self)
self.blobs = []
def getPublicKeys(self):
return self.requestIdentities().addCallback(self._cbPublicKeys)
def _cbPublicKeys(self, blobcomm):
log.msg('got %i public keys' % len(blobcomm))
self.blobs = [x[0] for x in blobcomm]
def getPublicKey(self):
"""
Return a L{Key} from the first blob in C{self.blobs}, if any, or
return C{None}.
"""
if self.blobs:
return keys.Key.fromString(self.blobs.pop(0))
return None
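# Illustrative sketch (not part of the original class): with a connected
# SSHAgentClient `client`, fetch the identities once and then pop keys:
#
# d = client.getPublicKeys()
# d.addCallback(lambda _: client.getPublicKey())  # first Key, or None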
class SSHAgentForwardingChannel(channel.SSHChannel):
def channelOpen(self, specificData):
cc = protocol.ClientCreator(reactor, SSHAgentForwardingLocal)
d = cc.connectUNIX(os.environ['SSH_AUTH_SOCK'])
d.addCallback(self._cbGotLocal)
d.addErrback(lambda x:self.loseConnection())
self.buf = ''
def _cbGotLocal(self, local):
self.local = local
self.dataReceived = self.local.transport.write
self.local.dataReceived = self.write
def dataReceived(self, data):
self.buf += data
def closed(self):
if self.local:
self.local.loseConnection()
self.local = None
class SSHAgentForwardingLocal(protocol.Protocol):
pass
|
andrewnsk/dorokhin.moscow | refs/heads/master | realtydb/widgets/__init__.py | 12133432 | |
aleksandra-tarkowska/django | refs/heads/master | tests/backends/__init__.py | 12133432 | |
Sciprios/EvolutionaryPartyProblemSimulator | refs/heads/master | PartyProblemSimulator/Solvers/GeneticAlgorithm.py | 2 | class GeneticAlgorithm(object):
""" A genetic algorithm evolves a solution to a problem. """
def __init__(self, max_generations):
""" Initializes the instance level variables. """
self._eval_count = 0
self._repopulate([]) # Empty population
self._set_generation(0)
self._set_best_genome(None)
self._set_max_generation(max_generations)
self._set_mutation_rate(0.5)
self._history_evaluations = []
self._history_fitness = []
self._set_finished_flag(False)
def run(self, equation, no_vars): # pragma: no cover
""" Runs the genetic algorithm on the given equation. """
self._history_evaluations = []
self._history_fitness = []
self._set_generation(0) # Reset algorithm attributes
self._set_finished_flag(False)
self._reset_eval_count()
self._initialise(no_vars) # Initialize a population
while (self.get_generation() <= self.get_max_generation()) and ((self.get_best_genome() is None) or (self.get_best_genome().evaluate(equation) != 1)) and not self.is_finished():
self._set_generation(self.get_generation() + 1)
self._evolve(equation)
self._history_fitness.append(self.get_best_genome().evaluate(equation))
self._history_evaluations.append(self.get_num_evaluations())
print("Generation: {} - Best Fitness: {}".format(self.get_generation(), self.get_best_genome().evaluate(equation)))
self._set_finished_flag(True)
print("Algorithm finished with {} evaluations.".format(self.get_num_evaluations()))
def _initialise(self, no_vars): # pragma: no cover
""" Initializes the population of organisms. """
self._population.clear()
def _evolve(self, equation): # pragma: no cover
""" Evolves the instance's population through a single generation. """
new_population = []
parents = self._selection(equation)
new_population.extend(self._reproduction(parents))
self._mutation(new_population)
self._repopulate(new_population)
def _selection(self, equation): # pragma: no cover
""" Selection of parents from the population and identifies the best genome. """
raise NotImplementedError("The _selection method has not been inherited by the base class {}".format(type(self)))
def _reproduction(self, parents): # pragma: no cover
""" Reproduces children based on the selected parents. """
raise NotImplementedError("The _reproduction method has not been inherited by the base class {}".format(type(self)))
def _mutation(self, new_population): # pragma: no cover
""" Mutates the new population. """
raise NotImplementedError("The _mutation method has not been inherited by the base class {}".format(type(self)))
def _repopulate(self, new_population): # pragma: no cover
""" Repopulates the population of this genetic algorithm. """
self._population = new_population
def _set_generation(self, gen): # pragma: no cover
""" Sets the generation. """
if gen >= 0 :
self._generation = gen
else:
self._generation = 0
def _set_best_genome(self, best): # pragma: no cover
""" Safely sets the best genome. """
self._best_genome = best
def _set_mutation_rate(self, mut_rate): # pragma: no cover
""" Sets the mutation rate of this method. """
if mut_rate < 0:
self._mutation_rate = 0
elif mut_rate > 1:
self._mutation_rate = 1
else:
self._mutation_rate = mut_rate
def _increment_eval_count(self): # pragma: no cover
""" Increments the number of evaluations. """
self._eval_count = self._eval_count + 1
def _set_max_generation(self, max): # pragma: no cover
""" Sets the max generation of the algorithm. """
if max > 0:
self._max_generation = max
else:
self._max_generation = 1
def _set_finished_flag(self, flag):
""" Sets the finished flag. """
self._finished = flag
def _reset_eval_count(self): # pragma: no cover
""" Resets the evaluation count to 0. """
self._eval_count = 0
def _add_organism(self, new_org): # pragma: no cover
""" Adds an organism to the population. """
self._population.append(new_org)
def get_best_genome(self): # pragma: no cover
""" Retrieves the best genome from the population. """
return self._best_genome
def get_max_generation(self): # pragma: no cover
""" Retrieves the maximum generation. """
return self._max_generation
def get_mutation_rate(self): # pragma: no cover
""" Retrieves the mutation rate. """
return self._mutation_rate
def get_population(self): # pragma: no cover
""" Retrieves the population. """
return self._population
def get_generation(self): # pragma: no cover
""" Retrieves the current generation this algorithm is on. """
return self._generation
def get_num_evaluations(self): # pragma: no cover
""" Retrieves the number of evaluations this method has used. """
return self._eval_count
def is_finished(self): # pragma: no cover
""" Determines if this GA is finished. """
return self._finished
def get_evaluation_history(self): # pragma: no cover
""" Retrieves the history of evaluation count. """
return self._history_evaluations
def get_fitness_history(self): # pragma: no cover
""" Retrieves the history of best fitness. """
return self._history_fitness |
neustar/wpm_api_client | refs/heads/master | src/file.py | 2 | # Copyright 2000 - 2015 NeuStar, Inc.All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from mimetypes import MimeTypes
from ntpath import basename
class File:
def __init__(self, connection, id):
self.connection = connection
self.id = id
self.service = "/script/1.0/file"
def list(self):
"""List all existing files in account."""
return self.connection.get(self.service)
def retrieve(self):
"""Retrieve an existing file info by id."""
if self.id is None:
raise Exception("Missing id: This API requires a monitor ID be supplied.")
return self.connection.get(self.service + "/" + self.id)
def delete(self):
"""Delete an existing file by id."""
if self.id is None:
raise Exception("Missing id: This API requires a monitor ID be supplied.")
return self.connection.delete(self.service + "/" + self.id)
def upload(self, file_path, mime_type=None):
"""Upload a new data file.
Arguments:
file_path -- Path to the file on the system making the request.
Keyword Arguments:
        mime_type -- The MIME type of the file. If not specified, the client
            will attempt to use the mimetypes library to guess.
"""
        if mime_type is None:
mime = MimeTypes()
mime_type = mime.guess_type(file_path)[0]
file_name = basename(file_path)
file = {'file': (file_name, open(file_path, 'rb'), mime_type)}
params = {'qqfile': file_name}
return self.connection.post_multi_part(self.service, file, params=params)
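# A minimal usage sketch (not part of the original client); it assumes a
# `connection` object implementing get/delete/post_multi_part as used above,
# and the file id and path below are illustrative only.
def _example_file_usage(connection):
    files = File(connection, None).list()          # list all files
    File(connection, None).upload('/tmp/data.csv', mime_type='text/csv')
    File(connection, 'abc123').delete()            # delete by id
    return files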
|
root-mirror/root | refs/heads/master | main/src/hadd-argparse.py | 23 | import argparse
import sys
def get_argparse():
DESCRIPTION = """This program will add histograms from a list of root files and write them to a target root file.\n
The target file is newly created and must not exist, or if -f (\"force\") is given, must not be one of the source files.\n
"""
EPILOGUE = """
If the target and source files have different compression settings a slower method is used.
For options that take a size as argument, a decimal number of bytes is expected.
If the number ends with a ``k'', ``m'', ``g'', etc., the number is multiplied by 1000 (1k), 1000000 (1M), 1000000000 (1G), etc.
If this prefix is followed by i, the number is multiplied by the traditional 1024 (1KiB), 1048576 (1MiB), 1073741824 (1GiB), etc.
The prefix can be optionally followed by B whose casing is ignored, e.g. 1k, 1K, 1Kb and 1KB are the same.
"""
parser = argparse.ArgumentParser(add_help=False, prog='hadd',
description = DESCRIPTION, epilog = EPILOGUE)
parser.add_argument("-a", help="Append to the output")
parser.add_argument("-k", help="Skip corrupt or non-existent files, do not exit")
parser.add_argument("-T", help="Do not merge Trees")
parser.add_argument("-O", help="Re-optimize basket size when merging TTree")
parser.add_argument("-v", help="Explicitly set the verbosity level: 0 request no output, 99 is the default")
parser.add_argument("-j", help="Parallelize the execution in multiple processes")
parser.add_argument("-dbg", help="Parallelize the execution in multiple processes in debug mode (Does not delete partial files stored inside working directory)")
parser.add_argument("-d", help="Carry out the partial multiprocess execution in the specified directory")
parser.add_argument("-n", help="Open at most 'maxopenedfiles' at once (use 0 to request to use the system maximum)")
parser.add_argument("-cachesize", help="Resize the prefetching cache use to speed up I/O operations(use 0 to disable)")
parser.add_argument("-experimental-io-features", help="Used with an argument provided, enables the corresponding experimental feature for output trees")
parser.add_argument("-f", help="Gives the ability to specify the compression level of the target file(by default 4) ")
parser.add_argument("-fk", help="""Sets the target file to contain the baskets with the same compression
as the input files (unless -O is specified). Compresses the meta data
using the compression level specified in the first input or the
compression setting after fk (for example 206 when using -fk206)""")
parser.add_argument("-ff", help="The compression level use is the one specified in the first input")
parser.add_argument("-f0", help="Do not compress the target file")
parser.add_argument("-f6", help="Use compression level 6. (See TFile::SetCompressionSettings for the support range of value.)")
parser.add_argument("TARGET", help="Target file")
parser.add_argument("SOURCES", help="Source files")
return parser
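# A minimal sketch of exercising the parser; hadd itself is a C++ program, so
# this module only generates help text.
def _example_print_help():
    parser = get_argparse()
    parser.print_help()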
|
ccwang002/biocloud-server-kai | refs/heads/master | src/biocloud/urls.py | 1 | """biocloud URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from core.views import IndexView
urlpatterns = [
url(r'^$', IndexView.as_view(), name='index'),
url(r'^dashboard/', include('dashboard.urls')),
url(r'^accounts/', include('users.urls')),
url(r'^analysis/', include('analyses.urls')),
url(r'^data-sources/', include('data_sources.urls')),
url(r'^experiments/', include('experiments.urls')),
url(r'^access/', include('analyses.access_urls')),
url(r'^admin/', admin.site.urls),
]
|
NeuroDataDesign/seelviz | refs/heads/gh-pages | Flask/env/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/misc.py | 1428 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Backports for individual classes and functions."""
import os
import sys
__all__ = ['cache_from_source', 'callable', 'fsencode']
try:
from imp import cache_from_source
except ImportError:
def cache_from_source(py_file, debug=__debug__):
ext = debug and 'c' or 'o'
return py_file + ext
try:
callable = callable
except NameError:
from collections import Callable
def callable(obj):
return isinstance(obj, Callable)
try:
fsencode = os.fsencode
except AttributeError:
def fsencode(filename):
if isinstance(filename, bytes):
return filename
elif isinstance(filename, str):
return filename.encode(sys.getfilesystemencoding())
else:
raise TypeError("expect bytes or str, not %s" %
type(filename).__name__)
|
RobertABT/heightmap | refs/heads/master | build/matplotlib/lib/mpl_toolkits/axes_grid/axisline_style.py | 9 | from mpl_toolkits.axisartist.axisline_style import *
|
rismalrv/edx-platform | refs/heads/master | cms/djangoapps/contentstore/course_info_model.py | 112 | """
Views for viewing, adding, updating and deleting course updates.
Current db representation:
{
"_id" : locationjson,
"definition" : {
"data" : "<ol>[<li><h2>date</h2>content</li>]</ol>"},
"items" : [{"id": ID, "date": DATE, "content": CONTENT}]
"metadata" : ignored
}
}
"""
import re
import logging
from django.http import HttpResponseBadRequest
from django.utils.translation import ugettext as _
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.django import modulestore
from xmodule.html_module import CourseInfoModule
from openedx.core.lib.xblock_utils import get_course_update_items
from cms.djangoapps.contentstore.push_notification import enqueue_push_course_update
# This should be in a class which inherits from XmlDescriptor
log = logging.getLogger(__name__)
def get_course_updates(location, provided_id, user_id):
"""
Retrieve the relevant course_info updates and unpack into the model which the client expects:
[{id : index, date : string, content : html string}]
"""
try:
course_updates = modulestore().get_item(location)
except ItemNotFoundError:
course_updates = modulestore().create_item(user_id, location.course_key, location.block_type, location.block_id)
course_update_items = get_course_update_items(course_updates, _get_index(provided_id))
return _get_visible_update(course_update_items)
def update_course_updates(location, update, passed_id=None, user=None):
"""
Either add or update the given course update.
Add:
If the passed_id is absent or None, the course update is added.
If push_notification_selected is set in the update, a celery task for the push notification is created.
Update:
It will update it if it has a passed_id which has a valid value.
Until updates have distinct values, the passed_id is the location url + an index into the html structure.
"""
try:
course_updates = modulestore().get_item(location)
except ItemNotFoundError:
course_updates = modulestore().create_item(user.id, location.course_key, location.block_type, location.block_id)
course_update_items = list(reversed(get_course_update_items(course_updates)))
if passed_id is not None:
passed_index = _get_index(passed_id)
# oldest update at start of list
if 0 < passed_index <= len(course_update_items):
course_update_dict = course_update_items[passed_index - 1]
course_update_dict["date"] = update["date"]
course_update_dict["content"] = update["content"]
course_update_items[passed_index - 1] = course_update_dict
else:
return HttpResponseBadRequest(_("Invalid course update id."))
else:
course_update_dict = {
"id": len(course_update_items) + 1,
"date": update["date"],
"content": update["content"],
"status": CourseInfoModule.STATUS_VISIBLE
}
course_update_items.append(course_update_dict)
enqueue_push_course_update(update, location.course_key)
# update db record
save_course_update_items(location, course_updates, course_update_items, user)
# remove status key
if "status" in course_update_dict:
del course_update_dict["status"]
return course_update_dict
def _make_update_dict(update):
"""
    Return a course update item as a dictionary with the required keys ('id', 'date' and 'content').
"""
return {
"id": update["id"],
"date": update["date"],
"content": update["content"],
}
def _get_visible_update(course_update_items):
"""
Filter course update items which have status "deleted".
"""
if isinstance(course_update_items, dict):
# single course update item
if course_update_items.get("status") != CourseInfoModule.STATUS_DELETED:
return _make_update_dict(course_update_items)
else:
# requested course update item has been deleted (soft delete)
return {"error": _("Course update not found."), "status": 404}
return ([_make_update_dict(update) for update in course_update_items
if update.get("status") != CourseInfoModule.STATUS_DELETED])
# pylint: disable=unused-argument
def delete_course_update(location, update, passed_id, user):
"""
Don't delete course update item from db.
    Delete the given course_info update by setting its "status" flag to 'deleted'.
Returns the resulting course_updates.
"""
if not passed_id:
return HttpResponseBadRequest()
try:
course_updates = modulestore().get_item(location)
except ItemNotFoundError:
return HttpResponseBadRequest()
course_update_items = list(reversed(get_course_update_items(course_updates)))
passed_index = _get_index(passed_id)
# delete update item from given index
if 0 < passed_index <= len(course_update_items):
course_update_item = course_update_items[passed_index - 1]
# soft delete course update item
course_update_item["status"] = CourseInfoModule.STATUS_DELETED
course_update_items[passed_index - 1] = course_update_item
# update db record
save_course_update_items(location, course_updates, course_update_items, user)
return _get_visible_update(course_update_items)
else:
return HttpResponseBadRequest(_("Invalid course update id."))
def _get_index(passed_id=None):
"""
From the url w/ index appended, get the index.
"""
if passed_id:
index_matcher = re.search(r'.*?/?(\d+)$', passed_id)
if index_matcher:
return int(index_matcher.group(1))
# return 0 if no index found
return 0
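# A brief sketch (not part of the original module) of _get_index behavior;
# the update ids below are illustrative only.
def _example_get_index():
    assert _get_index('i4x://org/course/course_info/updates/3') == 3
    assert _get_index('3') == 3
    assert _get_index() == 0   # no id supplied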
def _get_html(course_updates_items):
"""
Method to create course_updates_html from course_updates items
"""
list_items = []
for update in reversed(course_updates_items):
# filter course update items which have status "deleted".
if update.get("status") != CourseInfoModule.STATUS_DELETED:
list_items.append(u"<article><h2>{date}</h2>{content}</article>".format(**update))
return u"<section>{list_items}</section>".format(list_items="".join(list_items))
def save_course_update_items(location, course_updates, course_update_items, user=None):
"""
Save list of course_updates data dictionaries in new field ("course_updates.items")
and html related to course update in 'data' ("course_updates.data") field.
"""
course_updates.items = course_update_items
course_updates.data = _get_html(course_update_items)
# update db record
modulestore().update_item(course_updates, user.id)
return course_updates
|
sangwonl/stage34 | refs/heads/develop | webapp/api/admin.py | 1 | from __future__ import unicode_literals
from django import forms
from django.contrib import admin
from django.contrib.auth.models import Group
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from django.contrib.auth import get_user_model
from api.models.resources import Organization, Membership, Stage
User = get_user_model()
class UserCreationForm(forms.ModelForm):
"""
A form for creating new users. Includes all the required
fields, plus a repeated password.
"""
password1 = forms.CharField(required=False, label='Password', widget=forms.PasswordInput)
password2 = forms.CharField(required=False, label='Password Confirmation', widget=forms.PasswordInput)
class Meta:
model = User
fields = ('email',)
def clean_password2(self):
# Check that the two password entries match
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if password1 and password2 and password1 != password2:
            raise forms.ValidationError('Passwords do not match')
return password2
def save(self, commit=True):
# Save the provided password in hashed format
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data['password1'])
if commit:
user.save()
return user
class UserChangeForm(forms.ModelForm):
"""
A form for updating users. Includes all the fields on
the user, but replaces the password field with admin's
password hash display field.
"""
password = ReadOnlyPasswordHashField()
class Meta:
model = User
fields = ('email', 'password', 'is_active', 'is_admin')
def clean_password(self):
# Regardless of what the user provides, return the initial value.
# This is done here, rather than on the field, because the
# field does not have access to the initial value
return self.initial['password']
class UserAdmin(BaseUserAdmin):
# The forms to add and change user instances
form = UserChangeForm
add_form = UserCreationForm
# The fields to be used in displaying the User model.
# These override the definitions on the base UserAdmin
# that reference specific fields on auth.User.
list_display = ('email', 'is_admin')
list_filter = ('is_admin',)
fieldsets = (
(None, {'fields': ('email', 'password')}),
# ('Personal info', {'fields': ('date_of_birth',)}),
('Permissions', {'fields': ('is_admin', 'is_superuser',)}),
)
# add_fieldsets is not a standard ModelAdmin attribute. UserAdmin
# overrides get_fieldsets to use this attribute when creating a user.
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2')
}),
)
search_fields = ('email',)
ordering = ('email',)
filter_horizontal = ()
class OrganizationAdmin(admin.ModelAdmin):
pass
class MembershipAdmin(admin.ModelAdmin):
pass
class StageAdmin(admin.ModelAdmin):
pass
admin.site.register(User, UserAdmin)
admin.site.register(Organization, OrganizationAdmin)
admin.site.register(Membership, MembershipAdmin)
admin.site.register(Stage, StageAdmin)
# ... and, since we're not using Django's built-in permissions,
# unregister the Group model from admin.
admin.site.unregister(Group)
|
leki75/ansible | refs/heads/devel | lib/ansible/modules/network/nxos/nxos_ntp_auth.py | 50 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_ntp_auth
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages NTP authentication.
description:
- Manages NTP authentication.
author:
- Jason Edelman (@jedelman8)
notes:
- If C(state=absent), the module will attempt to remove the given key configuration.
If a matching key configuration isn't found on the device, the module will fail.
- If C(state=absent) and C(authentication=on), authentication will be turned off.
- If C(state=absent) and C(authentication=off), authentication will be turned on.
options:
key_id:
description:
- Authentication key identifier (numeric).
required: true
md5string:
description:
- MD5 String.
required: true
default: null
auth_type:
description:
- Whether the given md5string is in cleartext or
has been encrypted. If in cleartext, the device
will encrypt it before storing it.
required: false
default: text
choices: ['text', 'encrypt']
trusted_key:
description:
- Whether the given key is required to be supplied by a time source
for the device to synchronize to the time source.
required: false
default: false
choices: ['true', 'false']
authentication:
description:
- Turns NTP authentication on or off.
required: false
default: null
choices: ['on', 'off']
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# Basic NTP authentication configuration
- nxos_ntp_auth:
key_id: 32
md5string: hello
auth_type: text
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"auth_type": "text", "authentication": "off",
"key_id": "32", "md5string": "helloWorld",
"trusted_key": "true"}
existing:
description:
- k/v pairs of existing ntp authentication
returned: always
type: dict
sample: {"authentication": "off", "trusted_key": "false"}
end_state:
description: k/v pairs of ntp authentication after module execution
returned: always
type: dict
sample: {"authentication": "off", "key_id": "32",
"md5string": "kapqgWjwdg", "trusted_key": "true"}
state:
description: state as sent in from the playbook
returned: always
type: string
sample: "present"
updates:
description: command sent to the device
returned: always
type: list
sample: ["ntp authentication-key 32 md5 helloWorld 0", "ntp trusted-key 32"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
import re
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
if 'show run' not in command:
command += ' | json'
cmds = [command]
body = run_commands(module, cmds)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = run_commands(module, cmds)
return body
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_ntp_auth(module):
command = 'show ntp authentication-status'
body = execute_show_command(command, module)[0]
ntp_auth_str = body['authentication']
if 'enabled' in ntp_auth_str:
ntp_auth = True
else:
ntp_auth = False
return ntp_auth
def get_ntp_trusted_key(module):
trusted_key_list = []
command = 'show run | inc ntp.trusted-key'
trusted_key_str = execute_show_command(
command, module, command_type='cli_show_ascii')[0]
if trusted_key_str:
trusted_keys = trusted_key_str.splitlines()
else:
trusted_keys = []
for line in trusted_keys:
if line:
trusted_key_list.append(str(line.split()[2]))
return trusted_key_list
def get_ntp_auth_key(key_id, module):
authentication_key = {}
command = 'show run | inc ntp.authentication-key.{0}'.format(key_id)
auth_regex = (".*ntp\sauthentication-key\s(?P<key_id>\d+)\s"
"md5\s(?P<md5string>\S+).*")
body = execute_show_command(command, module, command_type='cli_show_ascii')
try:
match_authentication = re.match(auth_regex, body[0], re.DOTALL)
group_authentication = match_authentication.groupdict()
key_id = group_authentication["key_id"]
md5string = group_authentication['md5string']
authentication_key['key_id'] = key_id
authentication_key['md5string'] = md5string
except (AttributeError, TypeError):
authentication_key = {}
return authentication_key
def get_ntp_auth_info(key_id, module):
auth_info = get_ntp_auth_key(key_id, module)
trusted_key_list = get_ntp_trusted_key(module)
auth_power = get_ntp_auth(module)
if key_id in trusted_key_list:
auth_info['trusted_key'] = 'true'
else:
auth_info['trusted_key'] = 'false'
if auth_power:
auth_info['authentication'] = 'on'
else:
auth_info['authentication'] = 'off'
return auth_info
def auth_type_to_num(auth_type):
    if auth_type == 'encrypt':
return '7'
else:
return '0'
def set_ntp_auth_key(key_id, md5string, auth_type, trusted_key, authentication):
ntp_auth_cmds = []
auth_type_num = auth_type_to_num(auth_type)
ntp_auth_cmds.append(
'ntp authentication-key {0} md5 {1} {2}'.format(
key_id, md5string, auth_type_num))
if trusted_key == 'true':
ntp_auth_cmds.append(
'ntp trusted-key {0}'.format(key_id))
elif trusted_key == 'false':
ntp_auth_cmds.append(
'no ntp trusted-key {0}'.format(key_id))
if authentication == 'on':
ntp_auth_cmds.append(
'ntp authenticate')
elif authentication == 'off':
ntp_auth_cmds.append(
'no ntp authenticate')
return ntp_auth_cmds
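# A brief illustration (values are made up) of the commands built above:
#
#     set_ntp_auth_key('32', 'helloWorld', 'text', 'true', 'on')
#     # -> ['ntp authentication-key 32 md5 helloWorld 0',
#     #     'ntp trusted-key 32',
#     #     'ntp authenticate']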
def remove_ntp_auth_key(key_id, md5string, auth_type, trusted_key, authentication):
auth_remove_cmds = []
auth_type_num = auth_type_to_num(auth_type)
auth_remove_cmds.append(
'no ntp authentication-key {0} md5 {1} {2}'.format(
key_id, md5string, auth_type_num))
if authentication == 'on':
auth_remove_cmds.append(
'no ntp authenticate')
elif authentication == 'off':
auth_remove_cmds.append(
'ntp authenticate')
return auth_remove_cmds
def main():
argument_spec = dict(
key_id=dict(required=True, type='str'),
md5string=dict(required=True, type='str'),
auth_type=dict(choices=['text', 'encrypt'], default='text'),
trusted_key=dict(choices=['true', 'false'], default='false'),
authentication=dict(choices=['on', 'off']),
state=dict(choices=['absent', 'present'], default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
key_id = module.params['key_id']
md5string = module.params['md5string']
auth_type = module.params['auth_type']
trusted_key = module.params['trusted_key']
authentication = module.params['authentication']
state = module.params['state']
args = dict(key_id=key_id, md5string=md5string,
auth_type=auth_type, trusted_key=trusted_key,
authentication=authentication)
changed = False
proposed = dict((k, v) for k, v in args.items() if v is not None)
existing = get_ntp_auth_info(key_id, module)
end_state = existing
delta = dict(set(proposed.items()).difference(existing.items()))
commands = []
if state == 'present':
if delta:
command = set_ntp_auth_key(
key_id, md5string, auth_type, trusted_key, delta.get('authentication'))
if command:
commands.append(command)
elif state == 'absent':
if existing:
auth_toggle = None
if authentication == existing.get('authentication'):
auth_toggle = authentication
command = remove_ntp_auth_key(
key_id, md5string, auth_type, trusted_key, auth_toggle)
if command:
commands.append(command)
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
load_config(module, cmds)
end_state = get_ntp_auth_info(key_id, module)
delta = dict(set(end_state.items()).difference(existing.items()))
if delta or (len(existing) != len(end_state)):
changed = True
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
results['end_state'] = end_state
module.exit_json(**results)
if __name__ == '__main__':
main()
|
ingokegel/intellij-community | refs/heads/master | python/testData/mover/insideDocComment.py | 80 | def fcn(self, foo, bar):
"""
:type <caret>foo: int
:type bar: str
"""
self.foo = foo
self.bar = bar |
ssbrewster/peanuts | refs/heads/master | peanuts/config/production.py | 1 | # -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use sendgrid to send emails
- Use MEMCACHIER on Heroku
'''
from configurations import values
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
try:
from S3 import CallingFormat
AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
except ImportError:
# TODO: Fix this where even if in Dev this class is called.
pass
from .common import Common
class Production(Common):
# INSTALLED_APPS
INSTALLED_APPS = Common.INSTALLED_APPS
# END INSTALLED_APPS
# SECRET KEY
SECRET_KEY = values.SecretValue()
# END SECRET KEY
# django-secure
INSTALLED_APPS += ("djangosecure", )
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = values.BooleanValue(True)
SECURE_FRAME_DENY = values.BooleanValue(True)
SECURE_CONTENT_TYPE_NOSNIFF = values.BooleanValue(True)
SECURE_BROWSER_XSS_FILTER = values.BooleanValue(True)
SESSION_COOKIE_SECURE = values.BooleanValue(False)
SESSION_COOKIE_HTTPONLY = values.BooleanValue(True)
SECURE_SSL_REDIRECT = values.BooleanValue(True)
# end django-secure
# SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["*"]
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
# STORAGE CONFIGURATION
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
STATICFILES_STORAGE = DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = values.SecretValue()
AWS_SECRET_ACCESS_KEY = values.SecretValue()
AWS_STORAGE_BUCKET_NAME = values.SecretValue()
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
# see: https://github.com/antonagestam/collectfast
AWS_PRELOAD_METADATA = True
INSTALLED_APPS += ("collectfast", )
# AWS cache settings, don't change unless you know what you're doing:
    AWS_EXPIRY = 60 * 60 * 24 * 7
    AWS_HEADERS = {
        'Cache-Control': 'max-age=%d, s-maxage=%d, must-revalidate' % (
            AWS_EXPIRY, AWS_EXPIRY)
}
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# END STORAGE CONFIGURATION
# EMAIL
DEFAULT_FROM_EMAIL = values.Value('Peanuts <[email protected]>')
EMAIL_HOST = values.Value('smtp.sendgrid.com')
EMAIL_HOST_PASSWORD = values.SecretValue(environ_prefix="", environ_name="SENDGRID_PASSWORD")
EMAIL_HOST_USER = values.SecretValue(environ_prefix="", environ_name="SENDGRID_USERNAME")
EMAIL_PORT = values.IntegerValue(587, environ_prefix="", environ_name="EMAIL_PORT")
EMAIL_SUBJECT_PREFIX = values.Value('[Peanuts] ', environ_name="EMAIL_SUBJECT_PREFIX")
EMAIL_USE_TLS = True
SERVER_EMAIL = EMAIL_HOST_USER
# END EMAIL
# TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
# END TEMPLATE CONFIGURATION
# CACHING
    # Only do this here because, thanks to django-pylibmc-sasl and pylibmc,
    # memcacheify is painful to install on Windows.
try:
# See: https://github.com/rdegges/django-heroku-memcacheify
from memcacheify import memcacheify
CACHES = memcacheify()
except ImportError:
CACHES = values.CacheURLValue(default="memcached://127.0.0.1:11211")
# END CACHING
    # Your production stuff: Below this line define 3rd party library settings
|
adnanh/zulip | refs/heads/master | zerver/decorator.py | 116 | from __future__ import absolute_import
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from django.http import QueryDict, HttpResponseNotAllowed
from django.http.multipartparser import MultiPartParser
from zerver.models import UserProfile, get_client, get_user_profile_by_email
from zerver.lib.response import json_error, json_unauthorized
from django.utils.timezone import now
from django.conf import settings
import ujson
from StringIO import StringIO
from zerver.lib.queue import queue_json_publish
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.lib.utils import statsd
from zerver.exceptions import RateLimited
from zerver.lib.rate_limiter import incr_ratelimit, is_ratelimited, \
api_calls_left
from functools import wraps
import base64
import logging
import cProfile
from zerver.lib.mandrill_client import get_mandrill_client
if settings.ZULIP_COM:
from zilencer.models import get_deployment_by_domain, Deployment
else:
from mock import Mock
get_deployment_by_domain = Mock()
Deployment = Mock()
def get_deployment_or_userprofile(role):
return get_user_profile_by_email(role) if "@" in role else get_deployment_by_domain(role)
class _RespondAsynchronously(object):
pass
# Return RespondAsynchronously from an @asynchronous view if the
# response will be provided later by calling handler.zulip_finish(),
# or has already been provided this way. We use this for longpolling
# mode.
RespondAsynchronously = _RespondAsynchronously()
def asynchronous(method):
@wraps(method)
def wrapper(request, *args, **kwargs):
return method(request, handler=request._tornado_handler, *args, **kwargs)
if getattr(method, 'csrf_exempt', False):
wrapper.csrf_exempt = True
return wrapper
def update_user_activity(request, user_profile):
# update_active_status also pushes to rabbitmq, and it seems
# redundant to log that here as well.
if request.META["PATH_INFO"] == '/json/update_active_status':
return
if hasattr(request, '_query'):
query = request._query
else:
query = request.META['PATH_INFO']
    event = {'query': query,
             'user_profile_id': user_profile.id,
             'time': datetime_to_timestamp(now()),
             'client': request.client.name}
queue_json_publish("user_activity", event, lambda event: None)
# Based on django.views.decorators.http.require_http_methods
def require_post(func):
@wraps(func)
def wrapper(request, *args, **kwargs):
if (request.method != "POST"
and not (request.method == "SOCKET"
and request.META['zulip.emulated_method'] == "POST")):
if request.method == "SOCKET":
err_method = "SOCKET/%s" % (request.META['zulip.emulated_method'],)
else:
err_method = request.method
logging.warning('Method Not Allowed (%s): %s', err_method, request.path,
extra={'status_code': 405, 'request': request})
return HttpResponseNotAllowed(["POST"])
return func(request, *args, **kwargs)
return wrapper
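# Typical usage (the view below is hypothetical):
#
#     @require_post
#     def submit_feedback(request):
#         ...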
def require_realm_admin(func):
@wraps(func)
def wrapper(request, user_profile, *args, **kwargs):
if not user_profile.has_perm('administer', user_profile.realm):
raise JsonableError("Must be a realm administrator")
return func(request, user_profile, *args, **kwargs)
return wrapper
from zerver.lib.user_agent import parse_user_agent
def get_client_name(request, is_json_view):
# If the API request specified a client in the request content,
# that has priority. Otherwise, extract the client from the
# User-Agent.
if 'client' in request.REQUEST:
return request.REQUEST['client']
elif "HTTP_USER_AGENT" in request.META:
user_agent = parse_user_agent(request.META["HTTP_USER_AGENT"])
# We could check for a browser's name being "Mozilla", but
# e.g. Opera and MobileSafari don't set that, and it seems
# more robust to just key off whether it was a json view
if user_agent["name"] != "ZulipDesktop" and is_json_view:
            # Avoid changing the client string for browsers. Once this
            # is out to prod, we can rename the field to something like
            # Browser for consistency.
return "website"
else:
return user_agent["name"]
else:
# In the future, we will require setting USER_AGENT, but for
# now we just want to tag these requests so we can review them
# in logs and figure out the extent of the problem
if is_json_view:
return "website"
else:
return "Unspecified"
def process_client(request, user_profile, is_json_view=False):
client_name = get_client_name(request, is_json_view)
# Transitional hack for early 2014. Eventually the ios clients
# will all report ZulipiOS, and we can remove the next couple lines.
if client_name == 'ios':
client_name = 'ZulipiOS'
request.client = get_client(client_name)
update_user_activity(request, user_profile)
def validate_api_key(role, api_key):
# Remove whitespace to protect users from trivial errors.
role, api_key = role.strip(), api_key.strip()
try:
profile = get_deployment_or_userprofile(role)
except UserProfile.DoesNotExist:
raise JsonableError("Invalid user: %s" % (role,))
except Deployment.DoesNotExist:
raise JsonableError("Invalid deployment: %s" % (role,))
if api_key != profile.api_key:
if len(api_key) != 32:
reason = "Incorrect API key length (keys should be 32 characters long)"
else:
reason = "Invalid API key"
raise JsonableError(reason + " for role '%s'" % (role,))
if not profile.is_active:
raise JsonableError("Account not active")
try:
if profile.realm.deactivated:
raise JsonableError("Realm for account has been deactivated")
except AttributeError:
# Deployment objects don't have realms
pass
return profile
# Use this for webhook views that don't get an email passed in.
def api_key_only_webhook_view(view_func):
@csrf_exempt
@has_request_variables
@wraps(view_func)
def _wrapped_view_func(request, api_key=REQ,
*args, **kwargs):
try:
user_profile = UserProfile.objects.get(api_key=api_key, is_active=True)
except UserProfile.DoesNotExist:
raise JsonableError("Invalid API key")
request.user = user_profile
request._email = user_profile.email
process_client(request, user_profile)
rate_limit_user(request, user_profile, domain='all')
return view_func(request, user_profile, *args, **kwargs)
return _wrapped_view_func
def zulip_internal(view_func):
@login_required(login_url = settings.HOME_NOT_LOGGED_IN)
@wraps(view_func)
def _wrapped_view_func(request, *args, **kwargs):
request._query = view_func.__name__
if request.user.realm.domain != 'zulip.com':
return HttpResponseRedirect(settings.HOME_NOT_LOGGED_IN)
request._email = request.user.email
process_client(request, request.user)
return view_func(request, *args, **kwargs)
return _wrapped_view_func
# authenticated_api_view will add the authenticated user's
# user_profile to the view function's arguments list, since we have to
# look it up anyway. It is deprecated in favor of the REST API
# versions.
def authenticated_api_view(view_func):
@csrf_exempt
@require_post
@has_request_variables
@wraps(view_func)
def _wrapped_view_func(request, email=REQ, api_key=REQ('api_key', default=None),
api_key_legacy=REQ('api-key', default=None),
*args, **kwargs):
if not api_key and not api_key_legacy:
raise RequestVariableMissingError("api_key")
elif not api_key:
api_key = api_key_legacy
user_profile = validate_api_key(email, api_key)
request.user = user_profile
request._email = user_profile.email
process_client(request, user_profile)
# Apply rate limiting
limited_func = rate_limit()(view_func)
return limited_func(request, user_profile, *args, **kwargs)
return _wrapped_view_func
# A more REST-y authentication decorator, using, in particular, HTTP Basic
# authentication.
def authenticated_rest_api_view(view_func):
@csrf_exempt
@wraps(view_func)
def _wrapped_view_func(request, *args, **kwargs):
# First try block attempts to get the credentials we need to do authentication
try:
# Grab the base64-encoded authentication string, decode it, and split it into
# the email and API key
auth_type, encoded_value = request.META['HTTP_AUTHORIZATION'].split()
# case insensitive per RFC 1945
if auth_type.lower() != "basic":
return json_error("Only Basic authentication is supported.")
role, api_key = base64.b64decode(encoded_value).split(":")
except ValueError:
return json_error("Invalid authorization header for basic auth")
except KeyError:
return json_unauthorized("Missing authorization header for basic auth")
# Now we try to do authentication or die
try:
# Could be a UserProfile or a Deployment
profile = validate_api_key(role, api_key)
except JsonableError, e:
return json_unauthorized(e.error)
request.user = profile
process_client(request, profile)
if isinstance(profile, UserProfile):
request._email = profile.email
else:
request._email = "deployment:" + role
profile.rate_limits = ""
# Apply rate limiting
return rate_limit()(view_func)(request, profile, *args, **kwargs)
return _wrapped_view_func
def process_as_post(view_func):
@wraps(view_func)
def _wrapped_view_func(request, *args, **kwargs):
# Adapted from django/http/__init__.py.
# So by default Django doesn't populate request.POST for anything besides
# POST requests. We want this dict populated for PATCH/PUT, so we have to
# do it ourselves.
#
        # This will not be required in the future; a bug will be filed against
        # Django upstream.
if not request.POST:
# Only take action if POST is empty.
if request.META.get('CONTENT_TYPE', '').startswith('multipart'):
# Note that request._files is just the private attribute that backs the
# FILES property, so we are essentially setting request.FILES here. (In
# Django 1.5 FILES was still a read-only property.)
request.POST, request._files = MultiPartParser(request.META, StringIO(request.body),
request.upload_handlers, request.encoding).parse()
else:
request.POST = QueryDict(request.body, encoding=request.encoding)
return view_func(request, *args, **kwargs)
return _wrapped_view_func
def authenticate_log_and_execute_json(request, view_func, *args, **kwargs):
if not request.user.is_authenticated():
return json_error("Not logged in", status=401)
user_profile = request.user
process_client(request, user_profile, True)
request._email = user_profile.email
return view_func(request, user_profile, *args, **kwargs)
# Checks if the request is a POST request and that the user is logged
# in. If not, return an error (the @login_required behavior of
# redirecting to a login page doesn't make sense for json views)
def authenticated_json_post_view(view_func):
@require_post
@has_request_variables
@wraps(view_func)
def _wrapped_view_func(request,
*args, **kwargs):
return authenticate_log_and_execute_json(request, view_func, *args, **kwargs)
return _wrapped_view_func
def authenticated_json_view(view_func):
@wraps(view_func)
def _wrapped_view_func(request,
*args, **kwargs):
return authenticate_log_and_execute_json(request, view_func, *args, **kwargs)
return _wrapped_view_func
# These views are used by the main Django server to notify the Tornado server
# of events. We protect them from the outside world by checking a shared
# secret, and also the originating IP (for now).
def authenticate_notify(request):
return (request.META['REMOTE_ADDR'] in ('127.0.0.1', '::1')
and request.POST.get('secret') == settings.SHARED_SECRET)
def internal_notify_view(view_func):
@csrf_exempt
@require_post
@wraps(view_func)
def _wrapped_view_func(request, *args, **kwargs):
if not authenticate_notify(request):
return json_error('Access denied', status=403)
if not hasattr(request, '_tornado_handler'):
# We got called through the non-Tornado server somehow.
# This is not a security check; it's an internal assertion
# to help us find bugs.
            raise RuntimeError('notify view called with no Tornado handler')
request._email = "internal"
return view_func(request, *args, **kwargs)
return _wrapped_view_func
class JsonableError(Exception):
def __init__(self, error):
self.error = error
def __str__(self):
return self.to_json_error_msg()
def to_json_error_msg(self):
return self.error
class RequestVariableMissingError(JsonableError):
def __init__(self, var_name):
self.var_name = var_name
def to_json_error_msg(self):
return "Missing '%s' argument" % (self.var_name,)
class RequestVariableConversionError(JsonableError):
def __init__(self, var_name, bad_value):
self.var_name = var_name
self.bad_value = bad_value
def to_json_error_msg(self):
return "Bad value for '%s': %s" % (self.var_name, self.bad_value)
# Used in conjunction with @has_request_variables, below
class REQ(object):
# NotSpecified is a sentinel value for determining whether a
# default value was specified for a request variable. We can't
# use None because that could be a valid, user-specified default
class _NotSpecified(object):
pass
NotSpecified = _NotSpecified()
def __init__(self, whence=None, converter=None, default=NotSpecified, validator=None):
"""
whence: the name of the request variable that should be used
for this parameter. Defaults to a request variable of the
same name as the parameter.
converter: a function that takes a string and returns a new
value. If specified, this will be called on the request
variable value before passing to the function
default: a value to be used for the argument if the parameter
is missing in the request
validator: similar to converter, but takes an already parsed JSON
data structure. If specified, we will parse the JSON request
variable value before passing to the function
"""
self.post_var_name = whence
self.func_var_name = None
self.converter = converter
self.validator = validator
self.default = default
if converter and validator:
raise Exception('converter and validator are mutually exclusive')
# Extracts variables from the request object and passes them as
# named function arguments. The request object must be the first
# argument to the function.
#
# To use, assign a function parameter a default value that is an
# instance of the REQ class. That parameter will then be automatically
# populated from the HTTP request. The request object must be the
# first argument to the decorated function.
#
# This should generally be the innermost (syntactically bottommost)
# decorator applied to a view, since other decorators won't preserve
# the default parameter values used by has_request_variables.
#
# Note that this can't be used in helper functions which are not
# expected to call json_error or json_success, as it uses json_error
# internally when it encounters an error
def has_request_variables(view_func):
num_params = view_func.func_code.co_argcount
if view_func.func_defaults is None:
num_default_params = 0
else:
num_default_params = len(view_func.func_defaults)
default_param_names = view_func.func_code.co_varnames[num_params - num_default_params:]
default_param_values = view_func.func_defaults
if default_param_values is None:
default_param_values = []
post_params = []
for (name, value) in zip(default_param_names, default_param_values):
if isinstance(value, REQ):
value.func_var_name = name
if value.post_var_name is None:
value.post_var_name = name
post_params.append(value)
elif value == REQ:
# If the function definition does not actually instantiate
# a REQ object but instead uses the REQ class itself as a
# value, we instantiate it as a convenience
post_var = value(name)
post_var.func_var_name = name
post_params.append(post_var)
@wraps(view_func)
def _wrapped_view_func(request, *args, **kwargs):
for param in post_params:
if param.func_var_name in kwargs:
continue
default_assigned = False
try:
val = request.REQUEST[param.post_var_name]
except KeyError:
if param.default is REQ.NotSpecified:
raise RequestVariableMissingError(param.post_var_name)
val = param.default
default_assigned = True
if param.converter is not None and not default_assigned:
try:
val = param.converter(val)
except JsonableError:
raise
except:
raise RequestVariableConversionError(param.post_var_name, val)
# Validators are like converters, but they don't handle JSON parsing; we do.
if param.validator is not None and not default_assigned:
try:
val = ujson.loads(val)
except:
raise JsonableError('argument "%s" is not valid json.' % (param.post_var_name,))
error = param.validator(param.post_var_name, val)
if error:
raise JsonableError(error)
kwargs[param.func_var_name] = val
return view_func(request, *args, **kwargs)
return _wrapped_view_func
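# A minimal sketch of a view using REQ with has_request_variables; the view
# name and parameters below are illustrative only.
#
#     @has_request_variables
#     def update_widget(request, user_profile, title=REQ,
#                       count=REQ(converter=to_non_negative_int, default=0)):
#         ...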
# Converter functions for use with has_request_variables
def to_non_negative_int(x):
x = int(x)
if x < 0:
raise ValueError("argument is negative")
return x
def to_non_negative_float(x):
x = float(x)
if x < 0:
raise ValueError("argument is negative")
return x
def flexible_boolean(boolean):
"""Returns True for any of "1", "true", or "True". Returns False otherwise."""
if boolean in ("1", "true", "True"):
return True
else:
return False
def statsd_increment(counter, val=1):
"""Increments a statsd counter on completion of the
decorated function.
Pass the name of the counter to this decorator-returning function."""
def wrapper(func):
@wraps(func)
def wrapped_func(*args, **kwargs):
ret = func(*args, **kwargs)
statsd.incr(counter, val)
return ret
return wrapped_func
return wrapper
def rate_limit_user(request, user, domain):
"""Returns whether or not a user was rate limited. Will raise a RateLimited exception
if the user has been rate limited, otherwise returns and modifies request to contain
the rate limit information"""
ratelimited, time = is_ratelimited(user, domain)
request._ratelimit_applied_limits = True
request._ratelimit_secs_to_freedom = time
request._ratelimit_over_limit = ratelimited
# Abort this request if the user is over her rate limits
if ratelimited:
statsd.incr("ratelimiter.limited.%s.%s" % (type(user), user.id))
raise RateLimited()
incr_ratelimit(user, domain)
calls_remaining, time_reset = api_calls_left(user, domain)
request._ratelimit_remaining = calls_remaining
request._ratelimit_secs_to_freedom = time_reset
def rate_limit(domain='all'):
"""Rate-limits a view. Takes an optional 'domain' param if you wish to rate limit different
types of API calls independently.
Returns a decorator"""
def wrapper(func):
@wraps(func)
def wrapped_func(request, *args, **kwargs):
# Don't rate limit requests from Django that come from our own servers,
# and don't rate-limit dev instances
no_limits = False
if request.client and request.client.name.lower() == 'internal' and \
(request.META['REMOTE_ADDR'] in ['::1', '127.0.0.1'] or settings.DEBUG):
no_limits = True
if no_limits:
return func(request, *args, **kwargs)
try:
user = request.user
except:
user = None
# Rate-limiting data is stored in redis
# We also only support rate-limiting authenticated
# views right now.
# TODO(leo) - implement per-IP non-authed rate limiting
if not settings.RATE_LIMITING or not user:
if not user:
logging.error("Requested rate-limiting on %s but user is not authenticated!" % \
func.__name__)
return func(request, *args, **kwargs)
rate_limit_user(request, user, domain)
return func(request, *args, **kwargs)
return wrapped_func
return wrapper
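# A brief sketch of applying the rate limiter to a view (the view below is
# hypothetical):
#
#     @rate_limit(domain='all')
#     def my_view(request, user_profile):
#         ...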
def profiled(func):
"""
This decorator should obviously be used only in a dev environment.
It works best when surrounding a function that you expect to be
called once. One strategy is to write a test case in zerver/tests.py
and wrap the test case with the profiled decorator.
You can run a single test case like this:
# edit zerver/tests.py and place @profiled above the test case below
./tools/test-backend zerver.RateLimitTests.test_ratelimit_decrease
Then view the results like this:
./tools/show-profile-results.py test_ratelimit_decrease.profile
"""
@wraps(func)
def wrapped_func(*args, **kwargs):
fn = func.__name__ + ".profile"
prof = cProfile.Profile()
retval = prof.runcall(func, *args, **kwargs)
prof.dump_stats(fn)
return retval
return wrapped_func
def uses_mandrill(func):
"""
This decorator takes a function with keyword argument "mail_client" and
fills it in with the mail_client for the Mandrill account.
"""
@wraps(func)
def wrapped_func(*args, **kwargs):
kwargs['mail_client'] = get_mandrill_client()
return func(*args, **kwargs)
return wrapped_func
|
wemanuel/smry | refs/heads/master | server-auth/ls/google-cloud-sdk/lib/googlecloudapis/computeaccounts/staging_alpha/__init__.py | 4 | """Common imports for generated computeaccounts client library."""
# pylint:disable=wildcard-import
import pkgutil
from googlecloudapis.apitools.base.py import *
from googlecloudapis.computeaccounts.staging_alpha.computeaccounts_staging_alpha_client import *
from googlecloudapis.computeaccounts.staging_alpha.computeaccounts_staging_alpha_messages import *
__path__ = pkgutil.extend_path(__path__, __name__)
|
awkspace/ansible | refs/heads/devel | lib/ansible/module_utils/netapp_module.py | 25 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2018, Laurent Nicolas <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
''' Support class for NetApp ansible modules '''
import ansible.module_utils.netapp as netapp_utils
def cmp(a, b):
"""
Python 3 does not have a cmp function, this will do the cmp.
:param a: first object to check
:param b: second object to check
:return:
"""
# convert to lower case for string comparison.
if a is None:
return -1
if type(a) is str and type(b) is str:
a = a.lower()
b = b.lower()
# if list has string element, convert string to lower case.
if type(a) is list and type(b) is list:
a = [x.lower() if type(x) is str else x for x in a]
b = [x.lower() if type(x) is str else x for x in b]
a.sort()
b.sort()
return (a > b) - (a < b)
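# A few illustrative cases for the cmp() shim above (not part of the original
# module):
#
#     cmp('ABC', 'abc') == 0             # strings compare case-insensitively
#     cmp(['b', 'A'], ['a', 'B']) == 0   # lists are lower-cased and sorted
#     cmp(None, 'x') == -1               # None sorts first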
class NetAppModule(object):
'''
Common class for NetApp modules
set of support functions to derive actions based
on the current state of the system, and a desired state
'''
def __init__(self):
self.log = list()
self.changed = False
        self.parameters = {'name': 'not initialized'}
self.zapi_string_keys = dict()
self.zapi_bool_keys = dict()
self.zapi_list_keys = dict()
self.zapi_int_keys = dict()
self.zapi_required = dict()
def set_parameters(self, ansible_params):
self.parameters = dict()
for param in ansible_params:
if ansible_params[param] is not None:
self.parameters[param] = ansible_params[param]
return self.parameters
def get_value_for_bool(self, from_zapi, value):
"""
Convert boolean values to string or vice-versa
If from_zapi = True, value is converted from string (as it appears in ZAPI) to boolean
If from_zapi = False, value is converted from boolean to string
For get() method, from_zapi = True
For modify(), create(), from_zapi = False
:param from_zapi: convert the value from ZAPI or to ZAPI acceptable type
:param value: value of the boolean attribute
:return: string or boolean
"""
if value is None:
return None
if from_zapi:
return True if value == 'true' else False
else:
return 'true' if value else 'false'
def get_value_for_int(self, from_zapi, value):
"""
Convert integer values to string or vice-versa
If from_zapi = True, value is converted from string (as it appears in ZAPI) to integer
If from_zapi = False, value is converted from integer to string
For get() method, from_zapi = True
For modify(), create(), from_zapi = False
:param from_zapi: convert the value from ZAPI or to ZAPI acceptable type
:param value: value of the integer attribute
:return: string or integer
"""
if value is None:
return None
if from_zapi:
return int(value)
else:
return str(value)
def get_value_for_list(self, from_zapi, zapi_parent, zapi_child=None, data=None):
"""
Convert a python list() to NaElement or vice-versa
If from_zapi = True, value is converted from NaElement (parent-children structure) to list()
If from_zapi = False, value is converted from list() to NaElement
:param zapi_parent: ZAPI parent key or the ZAPI parent NaElement
:param zapi_child: ZAPI child key
:param data: list() to be converted to NaElement parent-children object
:param from_zapi: convert the value from ZAPI or to ZAPI acceptable type
:return: list() or NaElement
"""
if from_zapi:
if zapi_parent is None:
return []
else:
return [zapi_child.get_content() for zapi_child in zapi_parent.get_children()]
else:
zapi_parent = netapp_utils.zapi.NaElement(zapi_parent)
for item in data:
zapi_parent.add_new_child(zapi_child, item)
return zapi_parent
def get_cd_action(self, current, desired):
        ''' takes a desired state and a current state, and returns an action:
create, delete, None
eg:
is_present = 'absent'
some_object = self.get_object(source)
if some_object is not None:
is_present = 'present'
action = cd_action(current=is_present, desired = self.desired.state())
'''
if 'state' in desired:
desired_state = desired['state']
else:
desired_state = 'present'
if current is None and desired_state == 'absent':
return None
if current is not None and desired_state == 'present':
return None
# change in state
self.changed = True
if current is not None:
return 'delete'
return 'create'
@staticmethod
def check_keys(current, desired):
''' TODO: raise an error if keys do not match
with the exception of:
new_name, state in desired
'''
pass
def get_modified_attributes(self, current, desired, get_list_diff=False):
        ''' takes two dicts of attributes and returns a dict of attributes that are
not in the current state
It is expected that all attributes of interest are listed in current and
desired.
:param: current: current attributes in ONTAP
:param: desired: attributes from playbook
:param: get_list_diff: specifies whether to have a diff of desired list w.r.t current list for an attribute
:return: dict of attributes to be modified
:rtype: dict
NOTE: depending on the attribute, the caller may need to do a modify or a
different operation (eg move volume if the modified attribute is an
aggregate name)
'''
# if the object does not exist, we can't modify it
modified = dict()
if current is None:
return modified
# error out if keys do not match
self.check_keys(current, desired)
# collect changed attributes
for key, value in current.items():
if key in desired and desired[key] is not None:
if type(value) is list:
value.sort()
desired[key].sort()
if cmp(value, desired[key]) != 0:
if not get_list_diff:
modified[key] = desired[key]
else:
modified[key] = [item for item in desired[key] if item not in value]
if modified:
self.changed = True
return modified
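    # A brief illustration (values are made up) of the diff returned above:
    #
    #     current = {'size': 100, 'state': 'online'}
    #     desired = {'size': 200, 'state': 'online'}
    #     get_modified_attributes(current, desired)  # -> {'size': 200}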

    def is_rename_action(self, source, target):
        ''' takes a source and target object, and returns True
            if a rename is required
            eg:
            source = self.get_object(source_name)
            target = self.get_object(target_name)
            action = self.is_rename_action(source, target)
            :return: None for error, True for rename action, False otherwise
        '''
        if source is None and target is None:
            # error, do nothing
            # cannot rename a non-existent resource
            # alternatively we could create B
            return None
        if source is not None and target is not None:
            # error, do nothing
            # idempotency (or) new_name_is_already_in_use
            # alternatively we could delete B and rename A to B
            return False
        if source is None and target is not None:
            # do nothing, maybe the rename was already done
            return False
        # source is not None and target is None:
        # rename is in order
        self.changed = True
        return True
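    # Illustrative outcome table (not part of the original module):
    #   source     target     result
    #   None       None       None   (error: nothing to rename)
    #   not None   not None   False  (already renamed, or new name in use)
    #   None       not None   False  (rename may already have happened)
    #   not None   None       True   (rename is in order)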
|
podemos-info/odoo | refs/heads/6.1 | addons/purchase_requisition/wizard/__init__.py | 65 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import purchase_requisition_partner
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
abdellatifkarroum/odoo | refs/heads/8.0 | addons/l10n_gr/__init__.py | 438 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#import sandwich_wizard
#import order_create
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Edraak/circleci-edx-platform | refs/heads/circleci-master | lms/envs/edraak_devstack.py | 2 | from .devstack import *
from .edraak_common import *
# WARNING: Don't just add/delete settings from here. Make sure the settings are
# reflected in `cms/envs/edraak_devstack.py`
|
zerodb/zerodb | refs/heads/master | zerodb/transform/interfaces.py | 2 | from zope.interface import Attribute, Interface


class ICompressor(Interface):
    """Compressing or decompressing data"""
    name = Attribute("Signature that object is compressed with this algorithm. Recorded as '.cname$'")
    _compress = Attribute("Low-level compress function")
    _decompress = Attribute("Low-level decompress function")

    def compress(data):
        """Compresses data"""

    def decompress(data):
        """Decompresses data"""

    def register(default):
        """Register utility"""


class IEncrypterClass(Interface):
    """Class which marks encrypting interface, not encrypting object"""


class IEncrypter(Interface):
    """Encrypting or decrypting data"""
    name = Attribute("Signature that object is encrypted with this algorithm. Recorded as '.ename$'")
    attributes = Attribute("List of attributes to consume from init")

    def encrypt(data):
        """Encrypts data"""

    def decrypt(data):
        """Decrypts data"""

    def _encrypt(data):
        """Low level encrypt interface"""

    def _decrypt(data):
        """Low level decrypt interface"""

    def _init_encryption(**kw):
        """Extra functions to init encryption"""
|