repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
amarmaduke/ankipac | ffs/parser.py | 2 | 7836 |
import os
def split_path(path):
folders = []
while True:
path, folder = os.path.split(path)
if folder != "":
folders.append(folder)
else:
if path != "":
folders.append(path)
break
folders.reverse()
return folders
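# Illustrative: split_path("a/b/c.txt") -> ["a", "b", "c.txt"]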
def lex_file(path):
stream = []
try:
text = ""
with open(path, 'r') as f:
for line in f:
for char in line:
if text[-3:] == "\[[":
text = text.replace("\[[", "[[")
elif text[-3:] == "\]]":
text = text.replace("\]]", "]]")
elif text[-3:] == "\{{":
text = text.replace("\{{", "{{")
elif text[-3:] == "\}}":
text = text.replace("\}}", "}}")
elif text[-2:] == "[[": # key start
stream.append(text[:-2])
stream.append("[[")
text = ""
elif text[-2:] == "]]": # key end
stream.append(text[:-2])
stream.append("]]")
text = ""
elif text[-2:] == "{{": # left macro expansion
stream.append(text[:-2])
stream.append("{{")
text = ""
elif text[-2:] == "}}": # right macro expansion
stream.append(text[:-2])
stream.append("}}")
text = ""
text = text + char
stream.append(text)
except IOError as error:
pass
return stream
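# Illustrative: lexing a file whose only line is "[[Front]] hello" yields
# roughly ["", "[[", "Front", "]]", " hello\n"]; escaped forms such as \[[
# pass through as literal brackets inside a text token.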
class Tree:
def __init__(self, path):
initial_name = split_path(path)[-1]
self.macros = {}
self.options = {}
self.model = {}
self.children = []
self.parent = None
self.file_paths = []
self.files = []
self.name = initial_name
for name in os.listdir(path):
next_path = os.path.join(path, name)
if os.path.isdir(next_path):
tree = Tree(next_path)
tree.parent = self
self.children.append(tree)
else:
if name == "macros":
self.macros = self.parse_file(next_path, False)
elif name == "options":
self.options = self.parse_file(next_path, False)
elif name == "model":
self.model = self.parse_file(next_path, True)
if "name" not in self.model:
raise ValueError("Model file must specify a `name`")
else:
self.file_paths.append(next_path)
def get_full_name(self):
result = []
full_name = ""
prefix_name = ""
tree = self
while tree.parent:
result.append(tree.name)
tree = tree.parent
result.append(tree.name)
for name in reversed(result):
full_name = full_name + name + "::"
for name in reversed(result):
prefix_name = prefix_name + name + "/"
return full_name[:-2], prefix_name
def expand_macro(self, name):
tree = self
while tree.parent:
if name in tree.macros:
return tree.macros[name]
tree = tree.parent
if name in tree.macros:
return tree.macros[name]
return None
def find_model(self):
tree = self
while tree.parent:
if tree.model:
return tree.model
tree = tree.parent
if tree.model:
return tree.model
raise Exception("No model could be found for file")
def fix_expanded_stream(self, stream):
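        # Re-merge adjacent plain-text tokens (macro expansion can split the
        # value text) while keeping [[ and ]] as standalone delimiter tokens.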
result = []
text = ""
for token in stream:
if token == "[[":
result.append(text)
text = ""
result.append("[[")
elif token == "]]":
result.append(text)
text = ""
result.append("]]")
else:
text = text + token
result.append(text)
return result
def parse_file(self, path, allow_expansion=True):
stream = lex_file(path)
if len(stream) == 0:
raise ValueError("Lexer error, are you doing \
`[[key]] value {\{macro\}} value` ? file: {0}".format(path))
estream = []
ignore = []
text = {}
if allow_expansion:
for i in range(len(stream)):
if stream[i] == "{{":
if i + 1 >= len(stream):
raise ValueError( \
"Expected macro name after {{, \
file: {0}".format(path))
elif stream[i + 1] == "{{":
raise ValueError( \
"Can't have nested macros, file: {0}".format(path))
elif stream[i + 1] == "}}":
raise ValueError( \
"Macro name must be nonempty, \
file: {0}".format(path))
if i + 2 >= len(stream) or stream[i + 2] != "}}":
raise ValueError( \
"Expected closing }}, file: {0}".format(path))
value = self.expand_macro(stream[i + 1].strip())
if value:
estream.append(value)
ignore.append(i + 1)
else:
raise ValueError( \
"Macro name does not exist, \
file: {0}".format(path))
elif stream[i] != "}}" and i not in ignore:
estream.append(stream[i])
estream = self.fix_expanded_stream(estream)
else:
estream = stream
for i in range(len(estream)):
if estream[i] == "[[":
if i + 1 >= len(estream):
raise ValueError( \
"Expected key name after [[, file: {0}".format(path))
elif estream[i + 1] == "[[":
raise ValueError( \
"Can't have nested key declarations, \
file: {0}".format(path))
elif estream[i + 1] == "]]":
raise ValueError( \
"Key name must be nonempty, file: {0}".format(path))
if i + 2 >= len(estream) or estream[i + 2] != "]]":
raise ValueError( \
"Expected closing ]], file: {0}".format(path))
if i + 3 >= len(estream) or \
estream[i + 3] == "[[" or estream[i + 3] == "]]":
raise ValueError(
"Expected field value after key declaration, \
file: {0}".format(path))
text[estream[i + 1].strip()] = \
estream[i + 3].strip().encode("utf8")
if not text:
raise ValueError("Unexpected parser error, file: {0}".format(path))
return text
def parse(self):
for path in self.file_paths:
f = self.parse_file(path)
full_name, prefix_name = self.get_full_name()
f["Filename"] = prefix_name + split_path(path)[-1]
f["ffsDeckname"] = full_name
f["ffsModel"] = self.find_model()
self.files.append(f)
for child in self.children:
f = child.parse()
self.files.extend(f)
return self.files
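# Illustrative usage (assumed entry point; not part of this file):
#   tree = Tree("decks")    # walk a directory of note files
#   notes = tree.parse()    # list of field dicts with Filename/ffsDeckname/ffsModel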
| mit | 6,604,012,388,414,562,000 | 35.788732 | 79 | 0.41437 | false |
google-code-export/pyglet | contrib/layout/examples/xhtml_replaced.py | 29 | 4059 |
#!/usr/bin/env python
'''Example of a custom replaced element for XHTML layout.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: xml_css.py 322 2006-12-26 12:53:18Z Alex.Holkner $'
from ctypes import *
from pyglet.gl import *
from pyglet.window import Window
from pyglet import clock
from layout import *
from layout.frame import *
data = '''<?xml version="1.0"?>
<html>
<head>
<style>
.big { width:100%;border:1px solid;background-color:aqua }
.big:hover {background-color:fuschia}
</style>
</head>
<body>
<h1>Replaced element example</h1>
<p>Replaced elements are drawn by the application. You write a class
and attach it to the layout (or at a lower level, to the frame builder)
to handle desired element names.
pyglet includes such a factory to create image replaced element
frames.</p>
<p>Here we've created a custom replaced element tag: "<cube>":
<cube/>. Layout is handled by pyglet/contrib/layout, and rendering is handled by
the application. Of course, the usual CSS properties can be applied:</p>
<p><cube class="big" /></p>
<p>If you click on the cube you might even get an event handled.</p>
</body>
</html> '''
class CubeDrawable(ReplacedElementDrawable):
intrinsic_width = 32
intrinsic_height = 32
intrinsic_ratio = 1.
angle = 0
def __init__(self):
from model.geometric import cube_list
self.cube = cube_list()
def draw(self, frame, render_device, left, top, right, bottom):
glPushAttrib(GL_CURRENT_BIT | GL_LIGHTING_BIT | GL_ENABLE_BIT)
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
glEnable(GL_COLOR_MATERIAL)
glEnable(GL_DEPTH_TEST)
glColor3f(1, .5, .5)
# This is a somewhat complicated way of setting up a projection
# into the render box. It's a bit hacky, really it should read
# the modelview (instead of window.height + offset_top). We
# should encapsulate this somewhere because it's useful.
# Should also be setting up clipping planes around the render area.
glMatrixMode(GL_PROJECTION)
glPushMatrix()
glLoadIdentity()
width = right - left
height = top - bottom
centerx = left + width / 2
centery = bottom + height / 2 + window.height
glTranslatef(-1, -1, 0)
glTranslatef(centerx * 2 / window.width,
centery * 2 / window.height, 0)
glScalef(width / window.width, height / window.height, 1)
gluPerspective(70., width / float(height), 1, 100)
glMatrixMode(GL_MODELVIEW)
glPushMatrix()
glLoadIdentity()
# Done setting up the projection, everything hereon is "normal"
vec4 = (c_float * 4)
glLightfv(GL_LIGHT0, GL_POSITION, vec4(1, 2, 1, 0))
glLightfv(GL_LIGHT0, GL_DIFFUSE, vec4(.7, .7, .7, 1))
glTranslatef(0, 0, -3)
glRotatef(30, 1, 0, 0)
glRotatef(self.angle, 0, 1, 0)
self.cube.draw()
glPopMatrix()
glMatrixMode(GL_PROJECTION)
glPopMatrix()
glMatrixMode(GL_MODELVIEW)
glPopAttrib()
class CubeReplacedElementFactory(ReplacedElementFactory):
accept_names = ['cube']
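    # As the XHTML data above describes, the frame builder consults
    # accept_names when it meets an element tag and asks this factory for a
    # drawable to render each matching replaced element.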
def create_drawable(self, element):
return CubeDrawable()
# Create a window, attach the usual event handlers
window = Window(visible=False, resizable=True)
layout = Layout()
layout.add_replaced_element_factory(CubeReplacedElementFactory())
layout.set_xhtml(data)
window.push_handlers(layout)
@select('cube')
def on_mouse_press(element, x, y, button, modifiers):
global rate
rate = -rate
layout.push_handlers(on_mouse_press)
glClearColor(1, 1, 1, 1)
window.set_visible()
rate = 50
while not window.has_exit:
dt = clock.tick()
CubeDrawable.angle += dt * rate
print 'FPS = %.2f\r' % clock.get_fps(),
window.dispatch_events()
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity()
layout.draw()
window.flip()
| bsd-3-clause | 4,457,149,742,771,977,700 | 27.787234 | 85 | 0.642523 | false |
nataddrho/DigiCue-USB | Python3/src/venv/Lib/site-packages/pip/_internal/utils/pkg_resources.py | 1 | 1238 |
from pip._vendor.pkg_resources import yield_lines
from pip._vendor.six import ensure_str
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Dict, Iterable, List
class DictMetadata:
"""IMetadataProvider that reads metadata files from a dictionary.
"""
def __init__(self, metadata):
# type: (Dict[str, bytes]) -> None
self._metadata = metadata
def has_metadata(self, name):
# type: (str) -> bool
return name in self._metadata
def get_metadata(self, name):
# type: (str) -> str
try:
return ensure_str(self._metadata[name])
except UnicodeDecodeError as e:
# Mirrors handling done in pkg_resources.NullProvider.
e.reason += f" in {name} file"
raise
def get_metadata_lines(self, name):
# type: (str) -> Iterable[str]
return yield_lines(self.get_metadata(name))
def metadata_isdir(self, name):
# type: (str) -> bool
return False
def metadata_listdir(self, name):
# type: (str) -> List[str]
return []
def run_script(self, script_name, namespace):
# type: (str, str) -> None
pass
| mit | -5,722,745,296,286,096,000 | 27.136364 | 69 | 0.600162 | false |
lip6-mptcp/ns3mptcp | src/flow-monitor/examples/wifi-olsr-flowmon.py | 108 | 7439 |
# -*- Mode: Python; -*-
# Copyright (c) 2009 INESC Porto
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Gustavo Carneiro <[email protected]>
import sys
import ns.applications
import ns.core
import ns.flow_monitor
import ns.internet
import ns.mobility
import ns.network
import ns.olsr
import ns.wifi
try:
import ns.visualizer
except ImportError:
pass
DISTANCE = 100 # (m)
NUM_NODES_SIDE = 3
def main(argv):
cmd = ns.core.CommandLine()
cmd.NumNodesSide = None
cmd.AddValue("NumNodesSide", "Grid side number of nodes (total number of nodes will be this number squared)")
cmd.Results = None
cmd.AddValue("Results", "Write XML results to file")
cmd.Plot = None
cmd.AddValue("Plot", "Plot the results using the matplotlib python module")
cmd.Parse(argv)
wifi = ns.wifi.WifiHelper.Default()
wifiMac = ns.wifi.NqosWifiMacHelper.Default()
wifiPhy = ns.wifi.YansWifiPhyHelper.Default()
wifiChannel = ns.wifi.YansWifiChannelHelper.Default()
wifiPhy.SetChannel(wifiChannel.Create())
ssid = ns.wifi.Ssid("wifi-default")
wifi.SetRemoteStationManager("ns3::ArfWifiManager")
wifiMac.SetType ("ns3::AdhocWifiMac",
"Ssid", ns.wifi.SsidValue(ssid))
internet = ns.internet.InternetStackHelper()
list_routing = ns.internet.Ipv4ListRoutingHelper()
olsr_routing = ns.olsr.OlsrHelper()
static_routing = ns.internet.Ipv4StaticRoutingHelper()
list_routing.Add(static_routing, 0)
list_routing.Add(olsr_routing, 100)
internet.SetRoutingHelper(list_routing)
ipv4Addresses = ns.internet.Ipv4AddressHelper()
ipv4Addresses.SetBase(ns.network.Ipv4Address("10.0.0.0"), ns.network.Ipv4Mask("255.255.255.0"))
port = 9 # Discard port(RFC 863)
onOffHelper = ns.applications.OnOffHelper("ns3::UdpSocketFactory",
ns.network.Address(ns.network.InetSocketAddress(ns.network.Ipv4Address("10.0.0.1"), port)))
onOffHelper.SetAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate("100kbps")))
onOffHelper.SetAttribute("OnTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=1]"))
onOffHelper.SetAttribute("OffTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=0]"))
addresses = []
nodes = []
if cmd.NumNodesSide is None:
num_nodes_side = NUM_NODES_SIDE
else:
num_nodes_side = int(cmd.NumNodesSide)
for xi in range(num_nodes_side):
for yi in range(num_nodes_side):
node = ns.network.Node()
nodes.append(node)
internet.Install(ns.network.NodeContainer(node))
mobility = ns.mobility.ConstantPositionMobilityModel()
mobility.SetPosition(ns.core.Vector(xi*DISTANCE, yi*DISTANCE, 0))
node.AggregateObject(mobility)
devices = wifi.Install(wifiPhy, wifiMac, node)
ipv4_interfaces = ipv4Addresses.Assign(devices)
addresses.append(ipv4_interfaces.GetAddress(0))
for i, node in enumerate(nodes):
destaddr = addresses[(len(addresses) - 1 - i) % len(addresses)]
#print i, destaddr
onOffHelper.SetAttribute("Remote", ns.network.AddressValue(ns.network.InetSocketAddress(destaddr, port)))
app = onOffHelper.Install(ns.network.NodeContainer(node))
urv = ns.core.UniformRandomVariable()
app.Start(ns.core.Seconds(urv.GetValue(20, 30)))
#internet.EnablePcapAll("wifi-olsr")
flowmon_helper = ns.flow_monitor.FlowMonitorHelper()
#flowmon_helper.SetMonitorAttribute("StartTime", ns.core.TimeValue(ns.core.Seconds(31)))
monitor = flowmon_helper.InstallAll()
monitor = flowmon_helper.GetMonitor()
monitor.SetAttribute("DelayBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("JitterBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("PacketSizeBinWidth", ns.core.DoubleValue(20))
ns.core.Simulator.Stop(ns.core.Seconds(44.0))
ns.core.Simulator.Run()
def print_stats(os, st):
print >> os, " Tx Bytes: ", st.txBytes
print >> os, " Rx Bytes: ", st.rxBytes
print >> os, " Tx Packets: ", st.txPackets
print >> os, " Rx Packets: ", st.rxPackets
print >> os, " Lost Packets: ", st.lostPackets
if st.rxPackets > 0:
print >> os, " Mean{Delay}: ", (st.delaySum.GetSeconds() / st.rxPackets)
print >> os, " Mean{Jitter}: ", (st.jitterSum.GetSeconds() / (st.rxPackets-1))
print >> os, " Mean{Hop Count}: ", float(st.timesForwarded) / st.rxPackets + 1
if 0:
print >> os, "Delay Histogram"
for i in range(st.delayHistogram.GetNBins () ):
print >> os, " ",i,"(", st.delayHistogram.GetBinStart (i), "-", \
st.delayHistogram.GetBinEnd (i), "): ", st.delayHistogram.GetBinCount (i)
print >> os, "Jitter Histogram"
for i in range(st.jitterHistogram.GetNBins () ):
print >> os, " ",i,"(", st.jitterHistogram.GetBinStart (i), "-", \
st.jitterHistogram.GetBinEnd (i), "): ", st.jitterHistogram.GetBinCount (i)
print >> os, "PacketSize Histogram"
for i in range(st.packetSizeHistogram.GetNBins () ):
print >> os, " ",i,"(", st.packetSizeHistogram.GetBinStart (i), "-", \
st.packetSizeHistogram.GetBinEnd (i), "): ", st.packetSizeHistogram.GetBinCount (i)
for reason, drops in enumerate(st.packetsDropped):
print " Packets dropped by reason %i: %i" % (reason, drops)
#for reason, drops in enumerate(st.bytesDropped):
# print "Bytes dropped by reason %i: %i" % (reason, drops)
monitor.CheckForLostPackets()
classifier = flowmon_helper.GetClassifier()
if cmd.Results is None:
for flow_id, flow_stats in monitor.GetFlowStats():
t = classifier.FindFlow(flow_id)
proto = {6: 'TCP', 17: 'UDP'} [t.protocol]
print "FlowID: %i (%s %s/%s --> %s/%i)" % \
(flow_id, proto, t.sourceAddress, t.sourcePort, t.destinationAddress, t.destinationPort)
print_stats(sys.stdout, flow_stats)
else:
print monitor.SerializeToXmlFile(cmd.Results, True, True)
if cmd.Plot is not None:
import pylab
delays = []
for flow_id, flow_stats in monitor.GetFlowStats():
tupl = classifier.FindFlow(flow_id)
if tupl.protocol == 17 and tupl.sourcePort == 698:
continue
delays.append(flow_stats.delaySum.GetSeconds() / flow_stats.rxPackets)
pylab.hist(delays, 20)
pylab.xlabel("Delay (s)")
pylab.ylabel("Number of Flows")
pylab.show()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
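# Illustrative invocation (assumes the ns-3 Python bindings are importable):
#   python wifi-olsr-flowmon.py --NumNodesSide=4 --Results=flows.xml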
| gpl-2.0 | 9,155,627,021,431,760,000 | 38.994624 | 125 | 0.646727 | false |
wujuguang/motor | motor/motor_asyncio.py | 1 | 1907 |
# Copyright 2011-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Asyncio support for Motor, an asynchronous driver for MongoDB."""
from . import core, motor_gridfs
from .frameworks import asyncio as asyncio_framework
from .metaprogramming import create_class_with_framework
__all__ = ['AsyncIOMotorClient']
def create_asyncio_class(cls):
return create_class_with_framework(cls, asyncio_framework, 'motor_asyncio')
AsyncIOMotorClient = create_asyncio_class(core.AgnosticClient)
AsyncIOMotorClientSession = create_asyncio_class(core.AgnosticClientSession)
AsyncIOMotorDatabase = create_asyncio_class(
core.AgnosticDatabase)
AsyncIOMotorCollection = create_asyncio_class(
core.AgnosticCollection)
AsyncIOMotorCursor = create_asyncio_class(
core.AgnosticCursor)
AsyncIOMotorCommandCursor = create_asyncio_class(
core.AgnosticCommandCursor)
AsyncIOMotorLatentCommandCursor = create_asyncio_class(
core.AgnosticLatentCommandCursor)
AsyncIOMotorChangeStream = create_asyncio_class(
core.AgnosticChangeStream)
AsyncIOMotorGridFSBucket = create_asyncio_class(
motor_gridfs.AgnosticGridFSBucket)
AsyncIOMotorGridIn = create_asyncio_class(
motor_gridfs.AgnosticGridIn)
AsyncIOMotorGridOut = create_asyncio_class(
motor_gridfs.AgnosticGridOut)
AsyncIOMotorGridOutCursor = create_asyncio_class(
motor_gridfs.AgnosticGridOutCursor)
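# Illustrative usage (assumes a MongoDB server on localhost:27017):
#   client = AsyncIOMotorClient("mongodb://localhost:27017")
#   db = client.test_database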
| apache-2.0 | -4,334,285,275,192,233,500 | 25.859155 | 79 | 0.787625 | false |
myhdl/myhdl | myhdl/test/conversion/toVHDL/test_keywords.py | 4 | 4751 |
import myhdl
from myhdl import *
from myhdl import ToVHDLWarning
import pytest
import tempfile
import shutil
import sys
import string
import importlib
import os
from keyword import kwlist as python_kwlist
import warnings
_vhdl_keywords = ["abs", "access", "after", "alias", "all",
"and", "architecture", "array", "assert",
"attribute", "begin", "block", "body", "buffer",
"bus", "case", "component", "configuration",
"constant", "disconnect", "downto", "else",
"elseif", "end", "entity", "exit", "file", "for",
"function", "generate", "generic", "group",
"guarded", "if", "impure", "in", "inertial",
"inout", "is", "label", "library", "linkage",
"literal", "loop", "map", "mod", "nand", "new",
"next", "nor", "not", "null", "of", "on", "open",
"or", "others", "out", "package", "port",
"postponed", "procedure", "process", "pure",
"range", "record", "register", "reject", "rem",
"report", "return", "rol", "ror", "select",
"severity", "signal", "shared", "sla", "sll", "sra",
"srl", "subtype", "then", "to", "transport", "type",
"unaffected", "units", "until", "use", "variable",
"wait", "when", "while", "with", "xnor", "xor"];
keyword_code = """
from myhdl import *
@block
def invalid_import_keyword(input_sig, output_sig):
${keyword} = Signal(False)
@always_comb
def do_something():
${keyword}.next = input_sig and input_sig
@always_comb
def something_else():
output_sig.next = ${keyword}
return do_something, something_else
"""
@block
def invalid_signal_underscore(input_sig, output_sig):
_foo = Signal(bool(0))
@always_comb
def do_something():
_foo.next = input_sig and input_sig
@always_comb
def something_else():
output_sig.next = _foo
return do_something, something_else
@block
def invalid_function_underscore(clock, input_sig, output_sig):
ttt = Signal(bool(0))
block1 = invalid_signal_underscore(input_sig, ttt)
@always(clock.posedge)
def do_something():
output_sig.next = ttt
return block1, do_something
@block
def valid(input_sig, output_sig):
@always_comb
def do_something():
output_sig.next = input_sig
return do_something
def test_multiple_conversion():
sig_1 = Signal(True)
sig_2 = Signal(True)
a_block = valid(sig_1, sig_2)
    # Multiple conversions of a valid block should pass without warnings
    # (warnings are escalated to errors here)
with warnings.catch_warnings() as w:
warnings.simplefilter('error')
a_block.convert(hdl='VHDL')
a_block.convert(hdl='VHDL')
def test_invalid_keyword_name():
sig_1 = Signal(True)
sig_2 = Signal(True)
temp_directory = tempfile.mkdtemp()
sys.path.append(temp_directory)
keyword_template = string.Template(keyword_code)
try:
for keyword in _vhdl_keywords:
if keyword in python_kwlist:
continue
fd, full_filename = tempfile.mkstemp(
suffix='.py', dir=temp_directory)
os.write(fd, keyword_template.substitute(keyword=keyword).encode('utf-8'))
os.close(fd)
module_name = os.path.basename(full_filename)[:-3] # chop off .py
keyword_import = importlib.import_module(module_name)
a_block = keyword_import.invalid_import_keyword(sig_1, sig_2)
with pytest.warns(ToVHDLWarning):
a_block.convert(hdl='VHDL')
finally:
sys.path.pop()
shutil.rmtree(temp_directory)
def test_invalid_signal_underscore_name():
sig_1 = Signal(True)
sig_2 = Signal(True)
a_block = invalid_signal_underscore(sig_1, sig_2)
    # Converting a block with an underscore-prefixed signal name should warn
with pytest.warns(ToVHDLWarning):
a_block.convert(hdl='VHDL')
def test_invalid_function_underscore_name():
sig_1 = Signal(True)
sig_2 = Signal(True)
clock = Signal(True)
a_block = invalid_function_underscore(clock, sig_1, sig_2)
    # Converting a block with an underscore-prefixed subblock name should warn
with pytest.warns(ToVHDLWarning):
a_block.convert(hdl='VHDL')
if __name__ == '__main__':
sig_1 = Signal(True)
sig_2 = Signal(True)
a_block = invalid_signal_underscore(sig_1, sig_2)
a_block.convert(hdl='VHDL')
clock = Signal(True)
a_block = invalid_function_underscore(clock, sig_1, sig_2)
    # Converting a block with an underscore-prefixed subblock name should warn
a_block.convert(hdl='VHDL')
| lgpl-2.1 | 6,288,723,911,320,395,000 | 25.994318 | 86 | 0.585982 | false |
apparena/docs | readthedocs/projects/views/private.py | 1 | 22444 |
import logging
import shutil
import json
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotAllowed, Http404
from django.db.models import Q
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.views.generic import View, ListView, TemplateView
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from django.contrib.formtools.wizard.views import SessionWizardView
from bookmarks.models import Bookmark
from builds import utils as build_utils
from builds.models import Version
from builds.forms import AliasForm, VersionForm
from builds.filters import VersionFilter
from builds.models import VersionAlias
from core.utils import trigger_build
from oauth.models import GithubProject, BitbucketProject
from oauth import utils as oauth_utils
from projects.forms import (ProjectBackendForm, ProjectBasicsForm,
ProjectExtraForm, ProjectAdvancedForm,
UpdateProjectForm, SubprojectForm,
build_versions_form, UserForm, EmailHookForm,
TranslationForm, RedirectForm, WebHookForm)
from projects.models import Project, EmailHook, WebHook
from projects import constants
try:
from readthedocs.projects.signals import project_import
except:
from projects.signals import project_import
log = logging.getLogger(__name__)
class ProjectDashboard(ListView):
"""
A dashboard! If you aint know what that means you aint need to.
Essentially we show you an overview of your content.
"""
model = Project
template_name = 'projects/project_dashboard.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(ProjectDashboard, self).dispatch(*args, **kwargs)
def get_queryset(self):
return Project.objects.dashboard(self.request.user)
def get_context_data(self, **kwargs):
context = super(ProjectDashboard, self).get_context_data(**kwargs)
filter = VersionFilter(constants.IMPORTANT_VERSION_FILTERS, queryset=self.get_queryset())
context['filter'] = filter
bookmarks = Bookmark.objects.filter(user=self.request.user)
        if bookmarks.exists():  # exists() must be called; the bound method is always truthy
context['bookmark_list'] = bookmarks[:3]
else:
bookmarks = None
return context
@login_required
def project_manage(request, project_slug):
"""
The management view for a project, where you will have links to edit
the projects' configuration, edit the files associated with that
project, etc.
Now redirects to the normal /projects/<slug> view.
"""
return HttpResponseRedirect(reverse('projects_detail',
args=[project_slug]))
@login_required
def project_edit(request, project_slug):
"""
Edit an existing project - depending on what type of project is being
edited (created or imported) a different form will be displayed
"""
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
form_class = UpdateProjectForm
form = form_class(instance=project, data=request.POST or None,
user=request.user)
if request.method == 'POST' and form.is_valid():
form.save()
messages.success(request, _('Project settings updated'))
project_dashboard = reverse('projects_detail', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
return render_to_response(
'projects/project_edit.html',
{'form': form, 'project': project},
context_instance=RequestContext(request)
)
@login_required
def project_advanced(request, project_slug):
"""
Edit an existing project - depending on what type of project is being
edited (created or imported) a different form will be displayed
"""
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
form_class = ProjectAdvancedForm
form = form_class(instance=project, data=request.POST or None, initial={
'num_minor': 2, 'num_major': 2, 'num_point': 2})
if request.method == 'POST' and form.is_valid():
form.save()
messages.success(request, _('Project settings updated'))
project_dashboard = reverse('projects_detail', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
return render_to_response(
'projects/project_advanced.html',
{'form': form, 'project': project},
context_instance=RequestContext(request)
)
@login_required
def project_versions(request, project_slug):
"""
Shows the available versions and lets the user choose which ones he would
like to have built.
"""
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
if not project.is_imported:
raise Http404
form_class = build_versions_form(project)
form = form_class(data=request.POST or None)
if request.method == 'POST' and form.is_valid():
form.save()
messages.success(request, _('Project versions updated'))
project_dashboard = reverse('projects_detail', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
return render_to_response(
'projects/project_versions.html',
{'form': form, 'project': project},
context_instance=RequestContext(request)
)
@login_required
def project_version_detail(request, project_slug, version_slug):
project = get_object_or_404(Project.objects.for_admin_user(request.user), slug=project_slug)
version = get_object_or_404(Version.objects.public(user=request.user, project=project, only_active=False), slug=version_slug)
form = VersionForm(request.POST or None, instance=version)
if request.method == 'POST' and form.is_valid():
form.save()
url = reverse('project_version_list', args=[project.slug])
return HttpResponseRedirect(url)
return render_to_response(
'projects/project_version_detail.html',
{'form': form, 'project': project, 'version': version},
context_instance=RequestContext(request)
)
@login_required
def project_delete(request, project_slug):
"""
Make a project as deleted on POST, otherwise show a form asking for
confirmation of delete.
"""
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
if request.method == 'POST':
# Remove the repository checkout
shutil.rmtree(project.doc_path, ignore_errors=True)
# Delete the project and everything related to it
project.delete()
messages.success(request, _('Project deleted'))
project_dashboard = reverse('projects_dashboard')
return HttpResponseRedirect(project_dashboard)
return render_to_response(
'projects/project_delete.html',
{'project': project},
context_instance=RequestContext(request)
)
class ImportWizardView(SessionWizardView):
'''Project import wizard'''
form_list = [('basics', ProjectBasicsForm),
('extra', ProjectExtraForm)]
condition_dict = {'extra': lambda self: self.is_advanced()}
def get_form_kwargs(self, step):
'''Get args to pass into form instantiation'''
kwargs = {}
kwargs['user'] = self.request.user
if step == 'basics':
kwargs['show_advanced'] = True
if step == 'extra':
extra_form = self.get_form_from_step('basics')
project = extra_form.save(commit=False)
kwargs['instance'] = project
return kwargs
def get_form_from_step(self, step):
form = self.form_list[step](
data=self.get_cleaned_data_for_step(step),
**self.get_form_kwargs(step)
)
form.full_clean()
return form
def get_template_names(self):
'''Return template names based on step name'''
return 'projects/import_{0}.html'.format(self.steps.current, 'base')
def done(self, form_list, **kwargs):
'''Save form data as object instance
Don't save form data directly, instead bypass documentation building and
other side effects for now, by signalling a save without commit. Then,
finish by added the members to the project and saving.
'''
# expect the first form
basics_form = form_list[0]
# Save the basics form to create the project instance, then alter
# attributes directly from other forms
project = basics_form.save()
for form in form_list[1:]:
for (field, value) in form.cleaned_data.items():
setattr(project, field, value)
else:
basic_only = True
project.save()
project_import.send(sender=project, request=self.request)
trigger_build(project, basic=basic_only)
return HttpResponseRedirect(reverse('projects_detail',
args=[project.slug]))
def is_advanced(self):
'''Determine if the user selected the `show advanced` field'''
data = self.get_cleaned_data_for_step('basics') or {}
return data.get('advanced', True)
class ImportView(TemplateView):
'''On GET, show the source select template, on POST, mock out a wizard
If we are accepting POST data, use the fields to seed the initial data in
:py:cls:`ImportWizardView`. The import templates will redirect the form to
`/dashboard/import`
'''
template_name = 'projects/project_import.html'
wizard_class = ImportWizardView
def post(self, request, *args, **kwargs):
initial_data = {}
initial_data['basics'] = {}
for key in ['name', 'repo', 'repo_type']:
initial_data['basics'][key] = request.POST.get(key)
initial_data['extra'] = {}
for key in ['description', 'project_url']:
initial_data['extra'][key] = request.POST.get(key)
request.method = 'GET'
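        # Replay the POST as a GET so the wizard starts at its first step,
        # seeded with the submitted values as initial data.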
return self.wizard_class.as_view(initial_dict=initial_data)(request)
class ImportDemoView(View):
'''View to pass request on to import form to import demo project'''
form_class = ProjectBasicsForm
request = None
args = None
kwargs = None
def get(self, request, *args, **kwargs):
'''Process link request as a form post to the project import form'''
self.request = request
self.args = args
self.kwargs = kwargs
data = self.get_form_data()
project = (Project.objects.for_admin_user(request.user)
.filter(repo=data['repo']).first())
if project is not None:
messages.success(
request, _('The demo project is already imported!'))
else:
kwargs = self.get_form_kwargs()
form = self.form_class(data=data, **kwargs)
if form.is_valid():
project = form.save()
project.save()
trigger_build(project, basic=True)
messages.success(
request, _('Your demo project is currently being imported'))
else:
for (_f, msg) in form.errors.items():
log.error(msg)
messages.error(request,
_('There was a problem adding the demo project'))
return HttpResponseRedirect(reverse('projects_dashboard'))
return HttpResponseRedirect(reverse('projects_detail',
args=[project.slug]))
def get_form_data(self):
'''Get form data to post to import form'''
return {
'name': '{0}-demo'.format(self.request.user.username),
'repo_type': 'git',
'repo': 'https://github.com/readthedocs/template.git'
}
def get_form_kwargs(self):
'''Form kwargs passed in during instantiation'''
return {'user': self.request.user}
@login_required
def edit_alias(request, project_slug, id=None):
proj = get_object_or_404(Project.objects.for_admin_user(request.user), slug=project_slug)
if id:
alias = proj.aliases.get(pk=id)
form = AliasForm(instance=alias, data=request.POST or None)
else:
form = AliasForm(request.POST or None)
if request.method == 'POST' and form.is_valid():
alias = form.save()
return HttpResponseRedirect(alias.project.get_absolute_url())
return render_to_response(
'projects/alias_edit.html',
{'form': form},
context_instance=RequestContext(request)
)
class AliasList(ListView):
model = VersionAlias
template_context_name = 'alias'
template_name = 'projects/alias_list.html',
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(AliasList, self).dispatch(*args, **kwargs)
def get_queryset(self):
self.project = get_object_or_404(Project.objects.for_admin_user(self.request.user), slug=self.kwargs.get('project_slug'))
return self.project.aliases.all()
@login_required
def project_subprojects(request, project_slug):
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
form = SubprojectForm(data=request.POST or None, parent=project)
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse(
'projects_subprojects', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
subprojects = project.subprojects.all()
return render_to_response(
'projects/project_subprojects.html',
{'form': form, 'project': project, 'subprojects': subprojects},
context_instance=RequestContext(request)
)
@login_required
def project_subprojects_delete(request, project_slug, child_slug):
parent = get_object_or_404(Project.objects.for_admin_user(request.user), slug=project_slug)
child = get_object_or_404(Project.objects.for_admin_user(request.user), slug=child_slug)
parent.remove_subproject(child)
return HttpResponseRedirect(reverse('projects_subprojects',
args=[parent.slug]))
@login_required
def project_users(request, project_slug):
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
form = UserForm(data=request.POST or None, project=project)
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse('projects_users', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
users = project.users.all()
return render_to_response(
'projects/project_users.html',
{'form': form, 'project': project, 'users': users},
context_instance=RequestContext(request)
)
@login_required
def project_users_delete(request, project_slug):
if request.method != 'POST':
raise Http404
project = get_object_or_404(Project.objects.for_admin_user(request.user), slug=project_slug)
user = get_object_or_404(User.objects.all(), username=request.POST.get('username'))
if user == request.user:
raise Http404
project.users.remove(user)
project_dashboard = reverse('projects_users', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
@login_required
def project_notifications(request, project_slug):
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
email_form = EmailHookForm(data=request.POST or None, project=project)
webhook_form = WebHookForm(data=request.POST or None, project=project)
if request.method == 'POST':
if email_form.is_valid():
email_form.save()
if webhook_form.is_valid():
webhook_form.save()
project_dashboard = reverse('projects_notifications',
args=[project.slug])
return HttpResponseRedirect(project_dashboard)
emails = project.emailhook_notifications.all()
urls = project.webhook_notifications.all()
return render_to_response(
'projects/project_notifications.html',
{
'email_form': email_form,
'webhook_form': webhook_form,
'project': project,
'emails': emails,
'urls': urls,
},
context_instance=RequestContext(request)
)
@login_required
def project_notifications_delete(request, project_slug):
if request.method != 'POST':
raise Http404
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
try:
project.emailhook_notifications.get(email=request.POST.get('email')).delete()
except EmailHook.DoesNotExist:
try:
project.webhook_notifications.get(url=request.POST.get('email')).delete()
except WebHook.DoesNotExist:
raise Http404
project_dashboard = reverse('projects_notifications', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
@login_required
def project_translations(request, project_slug):
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
form = TranslationForm(data=request.POST or None, parent=project)
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse('projects_translations',
args=[project.slug])
return HttpResponseRedirect(project_dashboard)
lang_projects = project.translations.all()
return render_to_response(
'projects/project_translations.html',
{'form': form, 'project': project, 'lang_projects': lang_projects},
context_instance=RequestContext(request)
)
@login_required
def project_translations_delete(request, project_slug, child_slug):
project = get_object_or_404(Project.objects.for_admin_user(request.user), slug=project_slug)
subproj = get_object_or_404(Project.objects.for_admin_user(request.user), slug=child_slug)
project.translations.remove(subproj)
project_dashboard = reverse('projects_translations', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
@login_required
def project_redirects(request, project_slug):
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
form = RedirectForm(data=request.POST or None, project=project)
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse('projects_redirects', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
redirects = project.redirects.all()
return render_to_response(
'projects/project_redirects.html',
{'form': form, 'project': project, 'redirects': redirects},
context_instance=RequestContext(request)
)
@login_required
def project_redirects_delete(request, project_slug):
if request.method != 'POST':
return HttpResponseNotAllowed('Only POST is allowed')
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
redirect = get_object_or_404(project.redirects,
pk=request.POST.get('id_pk'))
if redirect.project == project:
redirect.delete()
else:
raise Http404
return HttpResponseRedirect(reverse('projects_redirects',
args=[project.slug]))
@login_required
def project_import_github(request, sync=False):
'''Show form that prefills import form with data from GitHub'''
github_connected = oauth_utils.import_github(user=request.user, sync=sync)
repos = GithubProject.objects.filter(users__in=[request.user])
# Find existing projects that match a repo url
for repo in repos:
ghetto_repo = repo.git_url.replace('git://', '').replace('.git', '')
projects = (Project
.objects
.public(request.user)
.filter(Q(repo__endswith=ghetto_repo) |
Q(repo__endswith=ghetto_repo + '.git')))
if projects:
repo.matches = [project.slug for project in projects]
else:
repo.matches = []
return render_to_response(
'projects/project_import_github.html',
{
'repos': repos,
'github_connected': github_connected,
'sync': sync,
},
context_instance=RequestContext(request)
)
@login_required
def project_import_bitbucket(request, sync=False):
'''Show form that prefills import form with data from BitBucket'''
bitbucket_connected = oauth_utils.import_bitbucket(user=request.user, sync=sync)
repos = BitbucketProject.objects.filter(users__in=[request.user])
# Find existing projects that match a repo url
for repo in repos:
ghetto_repo = repo.git_url.replace('git://', '').replace('.git', '')
projects = (Project
.objects
.public(request.user)
.filter(Q(repo__endswith=ghetto_repo) |
Q(repo__endswith=ghetto_repo + '.git')))
if projects:
repo.matches = [project.slug for project in projects]
else:
repo.matches = []
return render_to_response(
'projects/project_import_bitbucket.html',
{
'repos': repos,
'bitbucket_connected': bitbucket_connected,
'sync': sync,
},
context_instance=RequestContext(request)
)
| mit | -8,045,143,523,820,614,000 | 35.083601 | 129 | 0.638567 | false |
tigerneil/libcluster | python/testapi.py | 2 | 4409 |
#! /usr/bin/env python
# libcluster -- A collection of hierarchical Bayesian clustering algorithms.
# Copyright (C) 2013 Daniel M. Steinberg ([email protected])
#
# This file is part of libcluster.
#
# libcluster is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# libcluster is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with libcluster. If not, see <http://www.gnu.org/licenses/>.
""" Script to make sure libcluster runs properly using the python API.
Author: Daniel Steinberg
Date: 13/10/2013
"""
import numpy as np
import libclusterpy as lc
# Top level cluster parameters -- Globals.... whatev...
means = np.array([[0, 0], [5, 5], [-5, -5]])
sigma = [np.eye(2)] * 3
beta = np.array([[1.0/3, 1.0/3, 1.0/3],
[1.0/2, 1.0/4, 1.0/4],
[1.0/4, 1.0/4, 1.0/2]])
def testmixtures():
""" The test function. """
print "Testing mixtures ------------------\n"
# Create points from clusters
W = gengmm(10000)
# Test VDP
print "------------ Test VDP -------------"
f, qZ, w, mu, cov = lc.learnVDP(W, verbose=True)
print ""
printgmm(w, mu, cov)
# Test BGMM
print "------------ Test BGMM ------------"
f, qZ, w, mu, cov = lc.learnBGMM(W, verbose=True)
print ""
printgmm(w, mu, cov)
def testgroupmix():
print "Testing group mixtures ------------\n"
# Create points from clusters
J = 4 # Groups
W = [gengmm(2000) for j in range(0, J)]
# Test GMC
print "------------ Test GMC -------------"
f, qZ, w, mu, cov = lc.learnGMC(W, verbose=True)
print ""
printgmm(w, mu, cov)
# Test SGMC
print "------------ Test SGMC ------------"
f, qZ, w, mu, cov = lc.learnSGMC(W, verbose=True)
print ""
printgmm(w, mu, cov)
def testmultmix():
""" The the models that cluster at multiple levels. Just using J=1. """
# Generate top-level clusters
I = 200
Ni = 100
betas, Y = gensetweights(I)
# Create points from clusters
W = np.zeros((I, means.shape[1]))
X = []
for i in xrange(0, I):
W[i, :] = np.random.multivariate_normal(means[Y[i]], sigma[Y[i]], 1)
X.append(gengmm(Ni, betas[i, :]))
# Test SCM
print "------------ Test SCM -------------"
f, qY, qZ, wi, ws, mu, cov = lc.learnSCM([X], trunc=30, verbose=True)
print ""
printgmm(ws, mu, cov)
# Test MCM
print "------------ Test MCM -------------"
f, qY, qZ, wi, ws, mui, mus, covi, covs = lc.learnMCM([W], [X], trunc=30,
verbose=True)
print "\nTop level mixtures:"
printgmm(wi, mui, covi)
print "Bottom level mixtures:"
printgmm(ws, mus, covs)
def gengmm(N, weights=None):
""" Make a random GMM with N observations. """
K = len(sigma)
pi = np.random.rand(K) if weights is None else weights
pi /= pi.sum()
Nk = np.round(pi * N)
Nk[-1] = N - Nk[0:-1].sum()
X = [np.random.multivariate_normal(means[k, :], sigma[k], int(Nk[k]))
for k in range(0, K)]
return np.concatenate(X)
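# Illustrative: gengmm(300) draws 300 2-D points from the three Gaussians
# defined at the top of this script, with random mixture weights unless
# `weights` is given.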
def gensetweights(I):
""" Generate sets of similar weights. """
T = beta.shape[0]
pi = np.random.rand(T)
pi /= pi.sum()
Nt = np.round(pi * I)
Nt[-1] = I - Nt[0:-1].sum()
betas = []
Y = []
for t in xrange(0, T):
        # np.round returns floats; list repetition needs an int count
        Y += int(Nt[t]) * [t]
        betas.append(int(Nt[t]) * [beta[t, :]])
return np.concatenate(betas), Y
def printgmm(W, Mu, Cov):
""" Print the parameters of a GMM. """
Wnp = np.array(W)
for i, (mu, cov) in enumerate(zip(Mu, Cov)):
print "Mixture {0}:".format(i)
if Wnp.ndim == 2:
print " weight --\n{0}".format(Wnp[i, :])
elif Wnp.ndim == 3:
print " group weights --\n{0}".format(Wnp[:, i, :])
print " mean --\n{0}\n cov --\n{1}\n".format(mu, cov)
if __name__ == "__main__":
testmixtures()
testgroupmix()
testmultmix()
| gpl-3.0 | 8,875,051,140,514,799,000 | 25.884146 | 77 | 0.557496 | false |
eykd/owyl | src/owyl/core.py | 1 | 13782 |
# -*- coding: utf-8 -*-
"""core -- core behaviors for Owyl.
Copyright 2008 David Eyk. All rights reserved.
$Author$\n
$Rev$\n
$Date$
"""
__author__ = "$Author$"[9:-2]
__revision__ = "$Rev$"[6:-2]
__date__ = "$Date$"[7:-2]
import logging
try:
from mx.Stack import Stack, EmptyError
except ImportError:
from stack import Stack, EmptyError
RETURN_VALUES = set((True, False, None))
__all__ = ['wrap', 'task', 'taskmethod', 'parent_task', 'parent_taskmethod', 'visit',
'succeed', 'fail', 'succeedAfter', 'failAfter',
'sequence', 'selector', 'parallel', 'PARALLEL_SUCCESS',
'queue', 'parallel_queue',
'throw', 'catch',
'log',]
def wrap(func, *args, **kwargs):
"""Wrap a callable as a task. Yield the boolean of its result.
"""
def initTask(**initkwargs):
def makeIterator(**runkwargs):
result = func(*args, **kwargs)
yield bool(result)
try: makeIterator.__name__ = func.__name__
except AttributeError: pass
try: makeIterator.__doc__ = func.__doc__
except AttributeError: pass
return makeIterator
try: initTask.__doc__ = func.__doc__
except AttributeError: pass
try: initTask.__name__ = func.__name__
except AttributeError: pass
return initTask
def task(func):
"""Task decorator.
Decorate a generator function to produce a re-usable generator
factory for the given task.
"""
def initTask(**initkwargs):
def makeIterator(**runkwargs):
runkwargs.update(initkwargs)
iterator = func(**runkwargs)
return iterator
try: makeIterator.__name__ = func.__name__
except AttributeError: pass
try: makeIterator.__doc__ = func.__doc__
except AttributeError: pass
return makeIterator
try: initTask.__doc__ = func.__doc__
except AttributeError: pass
try: initTask.__name__ = func.__name__
except AttributeError: pass
return initTask
def taskmethod(func):
"""Task decorator.
Decorate a generator function to produce a re-usable generator
factory for the given task.
"""
def initTask(self, **initkwargs):
def makeIterator(**runkwargs):
runkwargs.update(initkwargs)
iterator = func(self, **runkwargs)
return iterator
try: makeIterator.__name__ = func.__name__
except AttributeError: pass
try: makeIterator.__doc__ = func.__doc__
except AttributeError: pass
return makeIterator
try: initTask.__doc__ = func.__doc__
except AttributeError: pass
try: initTask.__name__ = func.__name__
except AttributeError: pass
return initTask
def parent_task(func):
"""Parent task decorator.
A parent task is a task that accepts children.
Decorate a generator function to produce a re-usable generator
factory for the given task.
"""
def initTask(*children, **initkwargs):
def makeIterator(**runkwargs):
runkwargs.update(initkwargs)
iterator = func(*children, **runkwargs)
return iterator
try: makeIterator.__name__ = func.__name__
except AttributeError: pass
try: makeIterator.__doc__ = func.__doc__
except AttributeError: pass
return makeIterator
try: initTask.__doc__ = func.__doc__
except AttributeError: pass
try: initTask.__name__ = func.__name__
except AttributeError: pass
return initTask
def parent_taskmethod(func):
"""Parent task decorator.
A parent task is a task that accepts children.
Decorate a generator function to produce a re-usable generator
factory for the given task.
"""
def initTask(self, *children, **initkwargs):
def makeIterator(**runkwargs):
runkwargs.update(initkwargs)
iterator = func(self, *children, **runkwargs)
return iterator
try: makeIterator.__name__ = func.__name__
except AttributeError: pass
try: makeIterator.__doc__ = func.__doc__
except AttributeError: pass
return makeIterator
try: initTask.__doc__ = func.__doc__
except AttributeError: pass
try: initTask.__name__ = func.__name__
except AttributeError: pass
return initTask
def visit(tree, **kwargs):
"""Iterate over a tree of nested iterators.
Apply the U{Visitor
Pattern<http://en.wikipedia.org/wiki/Visitor_pattern>} to a tree
of nested iterators. Iterators should yield True, False, None, or
a child iterator. Values of True or False are passed back to the
parent iterator. A value of None is silently ignored, and the
current iterator will be queried again on the next pass.
The visitor will yield None until the tree raises StopIteration,
upon which the visitor will yield the last value yielded by the
tree, and terminate itself with StopIteration.
The visitor is essentially a micro-scheduler for a Behavior Tree
implemented as a tree of nested iterators. For more information,
see the discussion at
U{http://aigamedev.com/programming-tips/scheduler}.
"""
s = Stack()
return_values = RETURN_VALUES
current = tree(**kwargs)
send_value = None
send_ok = False
while True:
try:
if send_ok:
child = current.send(send_value)
send_value = None
send_ok = False
else:
child = current.next()
if child in return_values:
send_value = child
yield send_value
else:
# Descend into child node
s.push(current)
current = child
except StopIteration:
try:
current = s.pop()
send_ok = True
except EmptyError:
raise StopIteration
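# Illustrative usage (hypothetical tree):
#   tree = sequence(succeed(), succeedAfter(after=2))
#   results = list(visit(tree))   # step the tree to completion
#   results[-1]                   # overall result of the tree (True here)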
@task
def succeed(**kwargs):
"""Always succeed.
"""
yield True
@task
def fail(**kwargs):
"""Always fail.
"""
yield False
@task
def stall(**kwargs):
"""Wrap a callable as a task. Yield the boolean of its result after 'after' iterations.
Yields 'None' 'after' times.
@keyword func: The callable to run.
@type func: callable
@keyword after: Run the callable after this many iterations.
@type after: int
"""
func = kwargs.pop('func')
after = kwargs.pop('after', 1)
for x in xrange(after):
yield None
yield bool(func())
@task
def succeedAfter(**kwargs):
"""Succeed after a given number of iterations.
Yields 'None' 'after' times.
@keyword after: How many iterations to succeed after.
@type after: int
"""
after = kwargs.pop('after', 1)
for x in xrange(after):
yield None
yield True
@task
def failAfter(**kwargs):
"""Fail after a given number of iterations.
Yields 'None' 'after' times.
@keyword after: How many iterations to fail after.
@type after: int
"""
after = kwargs.pop('after', 1)
for x in xrange(after):
yield None
yield False
@parent_task
def sequence(*children, **kwargs):
"""Run tasks in sequence until one fails.
The sequence will run each task in sequence until one fails,
returning a failure. If all fail, returns a success.
For more information, see the discussion at
U{http://aigamedev.com/hierarchical-logic/sequence}.
@param children: tasks to run in sequence as children.
"""
final_value = True
for child in children:
result = yield child(**kwargs)
if not result and result is not None:
final_value = False
break
yield final_value
@parent_task
def queue(queue, **kwargs):
"""Run tasks in the queue in sequence.
The queue will run each task in the queue in sequence. If the
queue is empty, it will stall until the queue receives new items.
Note: the queue task *never* returns a success or failure code.
The queue should be an object implementing pop(). If the queue has
items in it, it should evaluate to True, otherwise False. The
queue task will pop the next task in the queue and evaluate it in
the normal fashion.
@param queue: task queue.
@type queue: A sequence object implementing pop()
"""
while True:
if queue:
child = queue.pop()
yield child(**kwargs)
else:
yield None
@parent_task
def parallel_queue(queue, **kwargs):
"""Run tasks in the queue in parallel.
The queue will run each task in the queue in parallel. If the
queue is empty, it will stall until the queue receives new items.
Note: the queue task *never* returns a success or failure code.
The queue should be an object implementing pop(). If the queue has
items in it, it should evaluate to True, otherwise False. The
queue task will pop the next task in the queue and evaluate it in
the normal fashion.
@param queue: task queue.
"""
visits = [] # Canonical list of visited children
visiting = [] # Working list of visited children
while True:
if queue:
child = queue.pop()
visits.append(visit(child, **kwargs))
visiting[:] = visits # Se we can remove from visits
for child in visiting:
try:
child.next()
except StopIteration:
visits.remove(child)
yield None
@parent_task
def selector(*children, **kwargs):
"""Run tasks in sequence until one succeeds.
The selector will run each task in sequence until one succeeds,
returning a success. If all fail, returns a failure.
For more information, see the discussion at
U{http://aigamedev.com/hierarchical-logic/selector}.
@param children: child tasks to select from.
"""
final_value = False
for child in children:
result = (yield child(**kwargs))
if result:
final_value = True
break
yield final_value
class Enum(object):
"""Enum/namespace class. Cannot be implemented.
Subclass and add class variables.
"""
def __init__(self):
raise NotImplementedError("_Enum class object. Do not instantiate.")
class PARALLEL_SUCCESS(Enum):
"""Success policy enumerator for parallel behavior.
C{REQUIRE_ALL}: All child tasks must succeed.
C{REQUIRE_ONE}: Only one child task must succeed.
"""
REQUIRE_ALL = "ALL"
REQUIRE_ONE = "ONE"
@parent_task
def parallel(*children, **kwargs):
"""Run tasks in parallel until the success policy is fulfilled or broken.
If the success policy is met, return a success. If the policy is
broken, return a failure.
For more information, see the discussion at
U{aigamedev.com/hierarchical-logic/parallel}.
@param children: tasks to run in parallel as children.
@keyword policy: The success policy. All must succeed,
or only one must succeed.
@type policy: C{PARALLEL_SUCCESS.REQUIRE_ALL} or
C{PARALLEL_SUCCESS.REQUIRE_ONE}.
"""
return_values = set((True, False))
policy = kwargs.pop('policy', PARALLEL_SUCCESS.REQUIRE_ONE)
all_must_succeed = (policy == PARALLEL_SUCCESS.REQUIRE_ALL)
visits = [visit(arg, **kwargs) for arg in children]
final_value = True
while True:
try:
# Run one step on each child per iteration.
for child in visits:
result = child.next()
if result in return_values:
if not result and all_must_succeed:
final_value = False
break
elif result and not all_must_succeed:
final_value = True
break
else:
final_value = result
yield None
except StopIteration:
break
except EmptyError:
break
yield final_value
@task
def throw(**kwargs):
"""Throw (raise) an exception.
@keyword throws: An Exception to throw.
@type throws: C{Exception}
@keyword throws_message: Text to instantiate C{throws} with.
@type throws_message: C{str}
"""
throws = kwargs.pop('throws', Exception)
throws_message = kwargs.pop('throws_message', '')
class gen(object):
def __iter__(self):
return self
def next(self):
raise throws(throws_message)
return gen()
@parent_task
def catch(child, **kwargs):
"""Catch a raised exception from child and run an alternate branch.
Note: this will not catch exceptions raised in the branch.
@keyword caught: An Exception to catch.
@type caught: C{Exception}
@keyword branch: An alternate tree to visit when caught.
"""
caught = kwargs.pop('caught', Exception)
branch = kwargs.pop('branch', fail())
result = None
tree = visit(child, **kwargs)
try:
while result is None:
result = tree.next()
yield None
except caught:
while result is None:
result = (yield branch(**kwargs))
yield result
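# Illustrative: when stepped with visit(),
#   catch(throw(throws=KeyError), caught=KeyError, branch=succeed())
# swallows the KeyError and its final yielded value is the branch's True.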
@parent_task
def log(message, **kwargs):
"""Log a message to the given logger.
@keyword name: The name of the logger to use.
@type name: str
@keyword level: The logging level to use.
@default level: logging.DEBUG
"""
name = kwargs.pop('name', None)
if name is None:
logger = logging.getLogger()
else:
logger = logging.getLogger(name)
level = kwargs.pop('level', logging.DEBUG)
logger.log(level, message)
yield True
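# Illustrative only: log() builds a task like any other, so it can be
# dropped into a tree to trace visits. The logger name 'ai' is a made-up
# example, not something this module defines.
#
#     tree = log('tick started', name='ai', level=logging.INFO)
#     # visiting the tree emits the message once and then yields True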
| bsd-3-clause | 4,855,135,930,262,106,000 | 27.126531 | 91 | 0.61145 | false |
kuhli/flask | flask/__init__.py | 142 | 1676 | # -*- coding: utf-8 -*-
"""
flask
~~~~~
A microframework based on Werkzeug. It's extensively documented
and follows best practice patterns.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
__version__ = '0.11.dev0'
# utilities we import from Werkzeug and Jinja2 that are unused
# in the module but are exported as public interface.
from werkzeug.exceptions import abort
from werkzeug.utils import redirect
from jinja2 import Markup, escape
from .app import Flask, Request, Response
from .config import Config
from .helpers import url_for, flash, send_file, send_from_directory, \
get_flashed_messages, get_template_attribute, make_response, safe_join, \
stream_with_context
from .globals import current_app, g, request, session, _request_ctx_stack, \
_app_ctx_stack
from .ctx import has_request_context, has_app_context, \
after_this_request, copy_current_request_context
from .blueprints import Blueprint
from .templating import render_template, render_template_string
# the signals
from .signals import signals_available, template_rendered, request_started, \
request_finished, got_request_exception, request_tearing_down, \
appcontext_tearing_down, appcontext_pushed, \
appcontext_popped, message_flashed, before_render_template
# We're not exposing the actual json module but a convenient wrapper around
# it.
from . import json
# This was the only thing that flask used to export at one point and it had
# a more generic name.
jsonify = json.jsonify
# backwards compat, goes away in 1.0
from .sessions import SecureCookieSession as Session
json_available = True
| bsd-3-clause | 8,580,452,336,733,166,000 | 33.204082 | 78 | 0.745823 | false |
lipro-yocto/git-repo | tests/test_git_trace2_event_log.py | 1 | 7135 | # Copyright (C) 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittests for the git_trace2_event_log.py module."""
import json
import os
import tempfile
import unittest
from unittest import mock
import git_trace2_event_log
class EventLogTestCase(unittest.TestCase):
"""TestCase for the EventLog module."""
PARENT_SID_KEY = 'GIT_TRACE2_PARENT_SID'
PARENT_SID_VALUE = 'parent_sid'
SELF_SID_REGEX = r'repo-\d+T\d+Z-.*'
FULL_SID_REGEX = r'^%s/%s' % (PARENT_SID_VALUE, SELF_SID_REGEX)
def setUp(self):
"""Load the event_log module every time."""
self._event_log_module = None
# By default we initialize with the expected case where
# repo launches us (so GIT_TRACE2_PARENT_SID is set).
env = {
self.PARENT_SID_KEY: self.PARENT_SID_VALUE,
}
self._event_log_module = git_trace2_event_log.EventLog(env=env)
self._log_data = None
def verifyCommonKeys(self, log_entry, expected_event_name, full_sid=True):
"""Helper function to verify common event log keys."""
self.assertIn('event', log_entry)
self.assertIn('sid', log_entry)
self.assertIn('thread', log_entry)
self.assertIn('time', log_entry)
# Do basic data format validation.
self.assertEqual(expected_event_name, log_entry['event'])
if full_sid:
self.assertRegex(log_entry['sid'], self.FULL_SID_REGEX)
else:
self.assertRegex(log_entry['sid'], self.SELF_SID_REGEX)
self.assertRegex(log_entry['time'], r'^\d+-\d+-\d+T\d+:\d+:\d+\.\d+Z$')
def readLog(self, log_path):
"""Helper function to read log data into a list."""
log_data = []
with open(log_path, mode='rb') as f:
for line in f:
log_data.append(json.loads(line))
return log_data
def test_initial_state_with_parent_sid(self):
"""Test initial state when 'GIT_TRACE2_PARENT_SID' is set by parent."""
self.assertRegex(self._event_log_module.full_sid, self.FULL_SID_REGEX)
def test_initial_state_no_parent_sid(self):
"""Test initial state when 'GIT_TRACE2_PARENT_SID' is not set."""
# Setup an empty environment dict (no parent sid).
self._event_log_module = git_trace2_event_log.EventLog(env={})
self.assertRegex(self._event_log_module.full_sid, self.SELF_SID_REGEX)
def test_version_event(self):
"""Test 'version' event data is valid.
Verify that the 'version' event is written even when no other
    events are added.
Expected event log:
<version event>
"""
with tempfile.TemporaryDirectory(prefix='event_log_tests') as tempdir:
log_path = self._event_log_module.Write(path=tempdir)
self._log_data = self.readLog(log_path)
# A log with no added events should only have the version entry.
self.assertEqual(len(self._log_data), 1)
version_event = self._log_data[0]
self.verifyCommonKeys(version_event, expected_event_name='version')
# Check for 'version' event specific fields.
self.assertIn('evt', version_event)
self.assertIn('exe', version_event)
# Verify "evt" version field is a string.
self.assertIsInstance(version_event['evt'], str)
def test_start_event(self):
"""Test and validate 'start' event data is valid.
Expected event log:
<version event>
<start event>
"""
self._event_log_module.StartEvent()
with tempfile.TemporaryDirectory(prefix='event_log_tests') as tempdir:
log_path = self._event_log_module.Write(path=tempdir)
self._log_data = self.readLog(log_path)
self.assertEqual(len(self._log_data), 2)
start_event = self._log_data[1]
self.verifyCommonKeys(self._log_data[0], expected_event_name='version')
self.verifyCommonKeys(start_event, expected_event_name='start')
# Check for 'start' event specific fields.
self.assertIn('argv', start_event)
self.assertTrue(isinstance(start_event['argv'], list))
def test_exit_event_result_none(self):
"""Test 'exit' event data is valid when result is None.
We expect None result to be converted to 0 in the exit event data.
Expected event log:
<version event>
<exit event>
"""
self._event_log_module.ExitEvent(None)
with tempfile.TemporaryDirectory(prefix='event_log_tests') as tempdir:
log_path = self._event_log_module.Write(path=tempdir)
self._log_data = self.readLog(log_path)
self.assertEqual(len(self._log_data), 2)
exit_event = self._log_data[1]
self.verifyCommonKeys(self._log_data[0], expected_event_name='version')
self.verifyCommonKeys(exit_event, expected_event_name='exit')
# Check for 'exit' event specific fields.
self.assertIn('code', exit_event)
# 'None' result should convert to 0 (successful) return code.
self.assertEqual(exit_event['code'], 0)
def test_exit_event_result_integer(self):
"""Test 'exit' event data is valid when result is an integer.
Expected event log:
<version event>
<exit event>
"""
self._event_log_module.ExitEvent(2)
with tempfile.TemporaryDirectory(prefix='event_log_tests') as tempdir:
log_path = self._event_log_module.Write(path=tempdir)
self._log_data = self.readLog(log_path)
self.assertEqual(len(self._log_data), 2)
exit_event = self._log_data[1]
self.verifyCommonKeys(self._log_data[0], expected_event_name='version')
self.verifyCommonKeys(exit_event, expected_event_name='exit')
# Check for 'exit' event specific fields.
self.assertIn('code', exit_event)
self.assertEqual(exit_event['code'], 2)
def test_write_with_filename(self):
"""Test Write() with a path to a file exits with None."""
self.assertIsNone(self._event_log_module.Write(path='path/to/file'))
def test_write_with_git_config(self):
"""Test Write() uses the git config path when 'git config' call succeeds."""
with tempfile.TemporaryDirectory(prefix='event_log_tests') as tempdir:
with mock.patch.object(self._event_log_module,
'_GetEventTargetPath', return_value=tempdir):
self.assertEqual(os.path.dirname(self._event_log_module.Write()), tempdir)
def test_write_no_git_config(self):
"""Test Write() with no git config variable present exits with None."""
with mock.patch.object(self._event_log_module,
'_GetEventTargetPath', return_value=None):
self.assertIsNone(self._event_log_module.Write())
def test_write_non_string(self):
"""Test Write() with non-string type for |path| throws TypeError."""
with self.assertRaises(TypeError):
self._event_log_module.Write(path=1234)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 3,565,504,848,270,866,400 | 36.952128 | 82 | 0.682551 | false |
zqhuang/COOP | lib/submit.py | 1 | 2610 | #!/usr/bin/env python
### submit cosmomc jobs
#### by Zhiqi Huang ([email protected])
import re
import os
import sys
import glob
import string
def search_value(fname, pattern):
fp = open(fname, 'r')
file_content = fp.read()
fp.close()
m = re.search(pattern, file_content, flags = re.M + re.I)
if m:
return m.group(1)
else:
return ""
if(len(sys.argv) < 2):
print "Syntax: "
print "python submit.py [R-1 threshold]"
sys.exit()
inifile = sys.argv[1]
if( not os.path.isfile(inifile)):
print inifile + " does not exist"
sys.exit()
threshold = 0.03
if(len(sys.argv) >= 3):
try:
threshold = float(sys.argv[2])
except:
print "R-1 threshold format is incorrect"
sys.exit()
fileroot = search_value(inifile, r'file\_root\s*\=\s*(\S+)')
rootdir = search_value(inifile, r'root\_dir\s*\=\s*(\S+)')
covmat = search_value(inifile, r'propose\_matrix\s*\=\s*(\S+)')
if(fileroot == ''):
print "ini file does not contain key file_root"
sys.exit()
fileroot = rootdir+fileroot
print "file_root = " + fileroot
if(covmat != ''):
if(os.path.isfile(covmat)):
print "propose_matrix = " + covmat
else:
print "propose_matrix " + covmat + " does not exist"
sys.exit()
else:
print "no propose_matrix"
if(os.path.isfile(fileroot + r'.converge_stat')):
fp = open(fileroot + r'.converge_stat', 'r')
conv = fp.read()
fp.close()
try:
rm = float(conv)
except:
rm = 1000.
if(rm < threshold):
print "chains are already converged, not submitting the job."
sys.exit()
print "submitting " + inifile
current_path = os.getcwd()
patterns = [r'.*\/', r'scan\_', r'fixrp(\d\d)\d', r'dpp', r'dpl', r'qcdm\_1param', r'qcdm\_3param', 'lowTEB_', 'plikTTTEEE', 'plikTT', 'plikTEEE', 'plikEE', 'BAO_JLA_HSTlow', 'BAO', 'lens', 'RSD_WL', 'RSD', 'WL', r'\.ini']
repls = ['', '', r'r\1', 'S', 'L', 'q1', 'q3', '', 'A', 'T', 'TE', 'E', 'pr', 'B', 'l', 'RW', 'R','W', '']
jobname = inifile
for i in range(len(patterns)):
jobname = re.sub(patterns[i], repls[i], jobname)
fp = open(r'scripts/' + jobname + r'.jb', 'w')
fp.write(r'#!/bin/csh -f' + "\n" + r'#PBS -N '+jobname + "\n" + r'#PBS -l nodes=8:ppn=16' + "\n" + r'#PBS -q hpq' + "\n" + r'#PBS -l walltime=48:00:00' + "\n" + r'#PBS -r n' + "\n" + r'#PBS -j oe'+ "\n" + r'cd ' + current_path + "\n" + 'mpirun -pernode ./cosmomc ' + inifile + ' > ./scripts/'+jobname+r'.log' + "\n")
fp.close()
os.chdir('scripts')
os.system('qsub ' + jobname + r'.jb')
os.chdir(current_path)
| gpl-3.0 | 6,965,492,952,397,711,000 | 30.071429 | 317 | 0.564368 | false |
Java1Guy/ansible-modules-extras | cloud/cloudstack/cs_portforward.py | 11 | 13884 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cs_portforward
short_description: Manages port forwarding rules on Apache CloudStack based clouds.
description:
- Create, update and remove port forwarding rules.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
ip_address:
description:
- Public IP address the rule is assigned to.
required: true
vm:
description:
- Name of virtual machine which we make the port forwarding rule for.
- Required if C(state=present).
required: false
default: null
state:
description:
- State of the port forwarding rule.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
protocol:
description:
- Protocol of the port forwarding rule.
required: false
default: 'tcp'
choices: [ 'tcp', 'udp' ]
public_port:
description:
- Start public port for this rule.
required: true
public_end_port:
description:
- End public port for this rule.
      - If not specified, equal to C(public_port).
required: false
default: null
private_port:
description:
- Start private port for this rule.
required: true
private_end_port:
description:
- End private port for this rule.
      - If not specified, equal to C(private_port).
required: false
default: null
open_firewall:
description:
      - Whether the firewall rule for the public port should be created while creating the new rule.
- Use M(cs_firewall) for managing firewall rules.
required: false
default: false
vm_guest_ip:
description:
- VM guest NIC secondary IP address for the port forwarding rule.
required: false
    default: null
domain:
description:
- Domain the C(vm) is related to.
required: false
default: null
account:
description:
- Account the C(vm) is related to.
required: false
default: null
project:
description:
- Name of the project the C(vm) is located in.
required: false
default: null
zone:
description:
- Name of the zone in which the virtual machine is in.
- If not set, default zone is used.
required: false
default: null
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# 1.2.3.4:80 -> web01:8080
- local_action:
module: cs_portforward
ip_address: 1.2.3.4
vm: web01
public_port: 80
private_port: 8080
# forward SSH and open firewall
- local_action:
module: cs_portforward
ip_address: '{{ public_ip }}'
vm: '{{ inventory_hostname }}'
public_port: '{{ ansible_ssh_port }}'
private_port: 22
open_firewall: true
# forward DNS traffic, but do not open firewall
- local_action:
module: cs_portforward
ip_address: 1.2.3.4
vm: '{{ inventory_hostname }}'
public_port: 53
private_port: 53
protocol: udp
open_firewall: true
# remove ssh port forwarding
- local_action:
module: cs_portforward
ip_address: 1.2.3.4
public_port: 22
private_port: 22
state: absent
'''
RETURN = '''
---
id:
  description: UUID of the port forwarding rule.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
ip_address:
description: Public IP address.
returned: success
type: string
sample: 1.2.3.4
protocol:
description: Protocol.
returned: success
type: string
sample: tcp
private_port:
description: Start port on the virtual machine's IP address.
returned: success
type: int
sample: 80
private_end_port:
description: End port on the virtual machine's IP address.
returned: success
type: int
public_port:
description: Start port on the public IP address.
returned: success
type: int
sample: 80
public_end_port:
description: End port on the public IP address.
returned: success
type: int
sample: 80
tags:
description: Tags related to the port forwarding.
returned: success
type: list
sample: []
vm_name:
description: Name of the virtual machine.
returned: success
type: string
sample: web-01
vm_display_name:
description: Display name of the virtual machine.
returned: success
type: string
sample: web-01
vm_guest_ip:
description: IP of the virtual machine.
returned: success
type: string
sample: 10.101.65.152
'''
try:
from cs import CloudStack, CloudStackException, read_config
has_lib_cs = True
except ImportError:
has_lib_cs = False
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackPortforwarding(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackPortforwarding, self).__init__(module)
self.returns = {
'virtualmachinedisplayname': 'vm_display_name',
'virtualmachinename': 'vm_name',
'ipaddress': 'ip_address',
'vmguestip': 'vm_guest_ip',
'publicip': 'public_ip',
'protocol': 'protocol',
}
        # these values will be cast to int
        self.returns_to_int = {
            'publicport': 'public_port',
            'publicendport': 'public_end_port',
            'privateport': 'private_port',
            'privateendport': 'private_end_port',
        }
self.portforwarding_rule = None
self.vm_default_nic = None
def get_vm_guest_ip(self):
vm_guest_ip = self.module.params.get('vm_guest_ip')
default_nic = self.get_vm_default_nic()
if not vm_guest_ip:
return default_nic['ipaddress']
for secondary_ip in default_nic['secondaryip']:
if vm_guest_ip == secondary_ip['ipaddress']:
return vm_guest_ip
self.module.fail_json(msg="Secondary IP '%s' not assigned to VM" % vm_guest_ip)
def get_vm_default_nic(self):
if self.vm_default_nic:
return self.vm_default_nic
nics = self.cs.listNics(virtualmachineid=self.get_vm(key='id'))
if nics:
for n in nics['nic']:
if n['isdefault']:
self.vm_default_nic = n
return self.vm_default_nic
self.module.fail_json(msg="No default IP address of VM '%s' found" % self.module.params.get('vm'))
def get_portforwarding_rule(self):
if not self.portforwarding_rule:
protocol = self.module.params.get('protocol')
public_port = self.module.params.get('public_port')
public_end_port = self.get_or_fallback('public_end_port', 'public_port')
private_port = self.module.params.get('private_port')
private_end_port = self.get_or_fallback('private_end_port', 'private_port')
args = {}
args['ipaddressid'] = self.get_ip_address(key='id')
args['projectid'] = self.get_project(key='id')
portforwarding_rules = self.cs.listPortForwardingRules(**args)
if portforwarding_rules and 'portforwardingrule' in portforwarding_rules:
for rule in portforwarding_rules['portforwardingrule']:
if protocol == rule['protocol'] \
and public_port == int(rule['publicport']):
self.portforwarding_rule = rule
break
return self.portforwarding_rule
def present_portforwarding_rule(self):
portforwarding_rule = self.get_portforwarding_rule()
if portforwarding_rule:
portforwarding_rule = self.update_portforwarding_rule(portforwarding_rule)
else:
portforwarding_rule = self.create_portforwarding_rule()
return portforwarding_rule
def create_portforwarding_rule(self):
args = {}
args['protocol'] = self.module.params.get('protocol')
args['publicport'] = self.module.params.get('public_port')
args['publicendport'] = self.get_or_fallback('public_end_port', 'public_port')
args['privateport'] = self.module.params.get('private_port')
args['privateendport'] = self.get_or_fallback('private_end_port', 'private_port')
args['openfirewall'] = self.module.params.get('open_firewall')
args['vmguestip'] = self.get_vm_guest_ip()
args['ipaddressid'] = self.get_ip_address(key='id')
args['virtualmachineid'] = self.get_vm(key='id')
portforwarding_rule = None
self.result['changed'] = True
if not self.module.check_mode:
portforwarding_rule = self.cs.createPortForwardingRule(**args)
poll_async = self.module.params.get('poll_async')
if poll_async:
portforwarding_rule = self._poll_job(portforwarding_rule, 'portforwardingrule')
return portforwarding_rule
def update_portforwarding_rule(self, portforwarding_rule):
args = {}
args['protocol'] = self.module.params.get('protocol')
args['publicport'] = self.module.params.get('public_port')
args['publicendport'] = self.get_or_fallback('public_end_port', 'public_port')
args['privateport'] = self.module.params.get('private_port')
args['privateendport'] = self.get_or_fallback('private_end_port', 'private_port')
args['openfirewall'] = self.module.params.get('open_firewall')
args['vmguestip'] = self.get_vm_guest_ip()
args['ipaddressid'] = self.get_ip_address(key='id')
args['virtualmachineid'] = self.get_vm(key='id')
if self._has_changed(args, portforwarding_rule):
self.result['changed'] = True
if not self.module.check_mode:
                # API broken in 4.2.1? Workaround: use remove/create instead of update
# portforwarding_rule = self.cs.updatePortForwardingRule(**args)
self.absent_portforwarding_rule()
portforwarding_rule = self.cs.createPortForwardingRule(**args)
poll_async = self.module.params.get('poll_async')
if poll_async:
portforwarding_rule = self._poll_job(portforwarding_rule, 'portforwardingrule')
return portforwarding_rule
def absent_portforwarding_rule(self):
portforwarding_rule = self.get_portforwarding_rule()
if portforwarding_rule:
self.result['changed'] = True
args = {}
args['id'] = portforwarding_rule['id']
if not self.module.check_mode:
res = self.cs.deletePortForwardingRule(**args)
poll_async = self.module.params.get('poll_async')
if poll_async:
self._poll_job(res, 'portforwardingrule')
return portforwarding_rule
def get_result(self, portforwarding_rule):
super(AnsibleCloudStackPortforwarding, self).get_result(portforwarding_rule)
if portforwarding_rule:
# Bad bad API does not always return int when it should.
for search_key, return_key in self.returns_to_int.iteritems():
if search_key in portforwarding_rule:
self.result[return_key] = int(portforwarding_rule[search_key])
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
ip_address = dict(required=True),
protocol= dict(choices=['tcp', 'udp'], default='tcp'),
public_port = dict(type='int', required=True),
public_end_port = dict(type='int', default=None),
private_port = dict(type='int', required=True),
private_end_port = dict(type='int', default=None),
state = dict(choices=['present', 'absent'], default='present'),
open_firewall = dict(choices=BOOLEANS, default=False),
vm_guest_ip = dict(default=None),
vm = dict(default=None),
zone = dict(default=None),
domain = dict(default=None),
account = dict(default=None),
project = dict(default=None),
poll_async = dict(choices=BOOLEANS, default=True),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
if not has_lib_cs:
module.fail_json(msg="python library cs required: pip install cs")
try:
acs_pf = AnsibleCloudStackPortforwarding(module)
state = module.params.get('state')
if state in ['absent']:
pf_rule = acs_pf.absent_portforwarding_rule()
else:
pf_rule = acs_pf.present_portforwarding_rule()
result = acs_pf.get_result(pf_rule)
except CloudStackException, e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 | 753,207,023,258,316,000 | 32.290168 | 106 | 0.621524 | false |
dpniel/snapcraft | snapcraft/tests/plugins/test_nodejs.py | 4 | 11845 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from os import path
from unittest import mock
from testtools.matchers import HasLength
import snapcraft
from snapcraft.plugins import nodejs
from snapcraft import tests
class NodePluginTestCase(tests.TestCase):
def setUp(self):
super().setUp()
self.project_options = snapcraft.ProjectOptions()
patcher = mock.patch('snapcraft.internal.common.run')
self.run_mock = patcher.start()
self.addCleanup(patcher.stop)
patcher = mock.patch('snapcraft.sources.Tar')
self.tar_mock = patcher.start()
self.addCleanup(patcher.stop)
patcher = mock.patch('sys.stdout')
patcher.start()
self.addCleanup(patcher.stop)
def test_pull_local_sources(self):
class Options:
source = '.'
node_packages = []
node_engine = '4'
npm_run = []
plugin = nodejs.NodePlugin('test-part', Options(),
self.project_options)
os.makedirs(plugin.sourcedir)
plugin.pull()
self.assertFalse(self.run_mock.called, 'run() was called')
self.tar_mock.assert_has_calls([
mock.call(
nodejs.get_nodejs_release(
plugin.options.node_engine, plugin.project.deb_arch),
path.join(self.parts_dir, 'test-part', 'npm')),
mock.call().download()])
def test_build_local_sources(self):
class Options:
source = '.'
node_packages = []
node_engine = '4'
npm_run = []
plugin = nodejs.NodePlugin('test-part', Options(),
self.project_options)
os.makedirs(plugin.builddir)
open(os.path.join(plugin.builddir, 'package.json'), 'w').close()
plugin.build()
self.run_mock.assert_has_calls([
mock.call(['npm', '--cache-min=Infinity', 'install'],
cwd=plugin.builddir),
mock.call(['npm', '--cache-min=Infinity', 'install', '--global'],
cwd=plugin.builddir)])
self.tar_mock.assert_has_calls([
mock.call(
nodejs.get_nodejs_release(
plugin.options.node_engine, plugin.project.deb_arch),
path.join(self.parts_dir, 'test-part', 'npm')),
mock.call().provision(
plugin.installdir, clean_target=False, keep_tarball=True)])
def test_pull_and_build_node_packages_sources(self):
class Options:
source = None
node_packages = ['my-pkg']
node_engine = '4'
npm_run = []
plugin = nodejs.NodePlugin('test-part', Options(),
self.project_options)
os.makedirs(plugin.sourcedir)
plugin.pull()
plugin.build()
self.run_mock.assert_has_calls([
mock.call(['npm', '--cache-min=Infinity', 'install', '--global',
'my-pkg'], cwd=plugin.builddir)])
self.tar_mock.assert_has_calls([
mock.call(
nodejs.get_nodejs_release(
plugin.options.node_engine, plugin.project.deb_arch),
path.join(self.parts_dir, 'test-part', 'npm')),
mock.call().download(),
mock.call().provision(
plugin.installdir, clean_target=False, keep_tarball=True)])
def test_build_executes_npm_run_commands(self):
class Options:
source = '.'
node_packages = []
node_engine = '4'
npm_run = ['command_one', 'avocado']
plugin = nodejs.NodePlugin('test-part', Options(),
self.project_options)
os.makedirs(plugin.sourcedir)
open(os.path.join(plugin.sourcedir, 'package.json'), 'w').close()
plugin.build()
self.run_mock.assert_has_calls([
mock.call(['npm', 'run', 'command_one'],
cwd=plugin.builddir),
mock.call(['npm', 'run', 'avocado'],
cwd=plugin.builddir)])
@mock.patch('snapcraft.ProjectOptions.deb_arch', 'fantasy-arch')
def test_unsupported_arch_raises_exception(self):
class Options:
source = None
node_packages = []
node_engine = '4'
npm_run = []
raised = self.assertRaises(
EnvironmentError,
nodejs.NodePlugin,
'test-part', Options(),
self.project_options)
self.assertEqual(raised.__str__(),
'architecture not supported (fantasy-arch)')
def test_schema(self):
schema = nodejs.NodePlugin.schema()
properties = schema['properties']
self.assertTrue('node-packages' in properties,
'Expected "node-packages" to be included in '
'properties')
node_packages = properties['node-packages']
self.assertTrue(
'type' in node_packages,
'Expected "type" to be included in "node-packages"')
self.assertEqual(node_packages['type'], 'array',
'Expected "node-packages" "type" to be "array", but '
'it was "{}"'.format(node_packages['type']))
self.assertTrue(
'minitems' in node_packages,
'Expected "minitems" to be included in "node-packages"')
self.assertEqual(node_packages['minitems'], 1,
'Expected "node-packages" "minitems" to be 1, but '
'it was "{}"'.format(node_packages['minitems']))
self.assertTrue(
'uniqueItems' in node_packages,
'Expected "uniqueItems" to be included in "node-packages"')
self.assertTrue(
node_packages['uniqueItems'],
'Expected "node-packages" "uniqueItems" to be "True"')
        self.assertTrue('npm-run' in properties,
                        'Expected "npm-run" to be included in '
                        'properties')
npm_run = properties['npm-run']
self.assertTrue(
'type' in npm_run,
'Expected "type" to be included in "npm-run"')
self.assertEqual(npm_run['type'], 'array',
'Expected "npm-run" "type" to be "array", but '
'it was "{}"'.format(npm_run['type']))
self.assertTrue(
'minitems' in npm_run,
'Expected "minitems" to be included in "npm-run"')
self.assertEqual(npm_run['minitems'], 1,
'Expected "npm-run" "minitems" to be 1, but '
'it was "{}"'.format(npm_run['minitems']))
self.assertTrue(
'uniqueItems' in npm_run,
'Expected "uniqueItems" to be included in "npm-run"')
self.assertFalse(
npm_run['uniqueItems'],
'Expected "npm-run" "uniqueItems" to be "False"')
self.assertTrue('node-engine' in properties,
'Expected "node-engine" to be included in '
'properties')
node_engine_type = properties['node-engine']['type']
self.assertEqual(node_engine_type, 'string',
'Expected "node_engine" "type" to be '
'"string", but it was "{}"'
.format(node_engine_type))
def test_get_build_properties(self):
expected_build_properties = ['node-packages', 'npm-run']
resulting_build_properties = nodejs.NodePlugin.get_build_properties()
self.assertThat(resulting_build_properties,
HasLength(len(expected_build_properties)))
for property in expected_build_properties:
self.assertIn(property, resulting_build_properties)
def test_get_pull_properties(self):
expected_pull_properties = ['node-engine']
resulting_pull_properties = nodejs.NodePlugin.get_pull_properties()
self.assertThat(resulting_pull_properties,
HasLength(len(expected_pull_properties)))
for property in expected_pull_properties:
self.assertIn(property, resulting_pull_properties)
@mock.patch('snapcraft.BasePlugin.schema')
def test_required_not_in_parent_schema(self, schema_mock):
schema_mock.return_value = {
'properties': {},
'pull-properties': [],
'build-properties': []
}
self.assertTrue('required' not in nodejs.NodePlugin.schema())
def test_clean_pull_step(self):
class Options:
source = '.'
node_packages = []
node_engine = '4'
npm_run = []
plugin = nodejs.NodePlugin('test-part', Options(),
self.project_options)
os.makedirs(plugin.sourcedir)
plugin.pull()
self.assertTrue(os.path.exists(plugin._npm_dir))
plugin.clean_pull()
self.assertFalse(os.path.exists(plugin._npm_dir))
class NodeReleaseTestCase(tests.TestCase):
scenarios = [
('i686', dict(
architecture=('32bit', 'ELF'),
machine='i686',
engine='4.4.4',
expected_url=(
'https://nodejs.org/dist/v4.4.4/'
'node-v4.4.4-linux-x86.tar.gz'))),
('x86_64', dict(
architecture=('64bit', 'ELF'),
machine='x86_64',
engine='4.4.4',
expected_url=(
'https://nodejs.org/dist/v4.4.4/'
'node-v4.4.4-linux-x64.tar.gz'))),
('i686-on-x86_64', dict(
architecture=('32bit', 'ELF'),
machine='x86_64',
engine='4.4.4',
expected_url=(
'https://nodejs.org/dist/v4.4.4/'
'node-v4.4.4-linux-x86.tar.gz'))),
('armv7l', dict(
architecture=('32bit', 'ELF'),
machine='armv7l',
engine='4.4.4',
expected_url=(
'https://nodejs.org/dist/v4.4.4/'
'node-v4.4.4-linux-armv7l.tar.gz'))),
('aarch64', dict(
architecture=('64bit', 'ELF'),
machine='aarch64',
engine='4.4.4',
expected_url=(
'https://nodejs.org/dist/v4.4.4/'
'node-v4.4.4-linux-arm64.tar.gz'))),
('armv7l-on-aarch64', dict(
architecture=('32bit', 'ELF'),
machine='aarch64',
engine='4.4.4',
expected_url=(
'https://nodejs.org/dist/v4.4.4/'
'node-v4.4.4-linux-armv7l.tar.gz'))),
]
@mock.patch('platform.architecture')
@mock.patch('platform.machine')
def test_get_nodejs_release(self, machine_mock, architecture_mock):
machine_mock.return_value = self.machine
architecture_mock.return_value = self.architecture
project = snapcraft.ProjectOptions()
node_url = nodejs.get_nodejs_release(self.engine, project.deb_arch)
self.assertEqual(node_url, self.expected_url)
| gpl-3.0 | 1,353,711,722,917,171,000 | 34.785498 | 78 | 0.543183 | false |
Johnetordoff/osf.io | scripts/clear_sessions.py | 12 | 1349 | import sys
import time
import logging
import datetime
from django.db import transaction
from django.utils import timezone
from framework.celery_tasks import app as celery_app
from website.app import setup_django
setup_django()
from osf.models import Session
from scripts.utils import add_file_logger
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
SESSION_AGE_THRESHOLD = 30
def main(dry_run=True):
old_sessions = Session.objects.filter(modified__lt=timezone.now() - datetime.timedelta(days=SESSION_AGE_THRESHOLD))
if dry_run:
logger.warn('Dry run mode, will delete sessions and then abort the transaction')
logger.info('Preparing to delete Session objects older than {} days'.format(SESSION_AGE_THRESHOLD))
with transaction.atomic():
start = time.time()
sessions_deleted = old_sessions.delete()[1]['osf.Session']
end = time.time()
logger.info('Deleting {} Session objects took {} seconds'.format(sessions_deleted, end - start))
if dry_run:
raise Exception('Dry run, aborting the transaction!')
@celery_app.task(name='scripts.clear_sessions')
def run_main(dry_run=True):
if not dry_run:
add_file_logger(logger, __file__)
main(dry_run=dry_run)
if __name__ == '__main__':
run_main(dry_run='--dry' in sys.argv)
| apache-2.0 | 4,827,146,905,656,189,000 | 26.530612 | 119 | 0.699778 | false |
pwong-mapr/private-hue | desktop/core/ext-py/Django-1.4.5/tests/regressiontests/localflavor/cl/tests.py | 33 | 2239 | from django.contrib.localflavor.cl.forms import CLRutField, CLRegionSelect
from django.test import SimpleTestCase
class CLLocalFlavorTests(SimpleTestCase):
def test_CLRegionSelect(self):
f = CLRegionSelect()
out = u'''<select name="foo">
<option value="RM">Regi\xf3n Metropolitana de Santiago</option>
<option value="I">Regi\xf3n de Tarapac\xe1</option>
<option value="II">Regi\xf3n de Antofagasta</option>
<option value="III">Regi\xf3n de Atacama</option>
<option value="IV">Regi\xf3n de Coquimbo</option>
<option value="V">Regi\xf3n de Valpara\xedso</option>
<option value="VI">Regi\xf3n del Libertador Bernardo O'Higgins</option>
<option value="VII">Regi\xf3n del Maule</option>
<option value="VIII">Regi\xf3n del B\xedo B\xedo</option>
<option value="IX">Regi\xf3n de la Araucan\xeda</option>
<option value="X">Regi\xf3n de los Lagos</option>
<option value="XI">Regi\xf3n de Ays\xe9n del General Carlos Ib\xe1\xf1ez del Campo</option>
<option value="XII">Regi\xf3n de Magallanes y la Ant\xe1rtica Chilena</option>
<option value="XIV">Regi\xf3n de Los R\xedos</option>
<option value="XV">Regi\xf3n de Arica-Parinacota</option>
</select>'''
self.assertHTMLEqual(f.render('foo', 'bar'), out)
def test_CLRutField(self):
error_invalid = [u'The Chilean RUT is not valid.']
error_format = [u'Enter a valid Chilean RUT. The format is XX.XXX.XXX-X.']
valid = {
'11-6': '11-6',
'116': '11-6',
'767484100': '76.748.410-0',
'78.412.790-7': '78.412.790-7',
'8.334.6043': '8.334.604-3',
'76793310-K': '76.793.310-K',
'76793310-k': '76.793.310-K',
}
invalid = {
'11.111.111-0': error_invalid,
'111': error_invalid,
}
self.assertFieldOutput(CLRutField, valid, invalid)
# deal with special "Strict Mode".
invalid = {
'11-6': error_format,
'767484100': error_format,
'8.334.6043': error_format,
'76793310-K': error_format,
'11.111.111-0': error_invalid
}
self.assertFieldOutput(CLRutField,
{}, invalid, field_kwargs={"strict": True}
)
| apache-2.0 | 4,824,381,532,471,481,000 | 38.982143 | 91 | 0.620813 | false |
arnaud-morvan/QGIS | python/plugins/processing/algs/grass7/ext/r_li_cwed_ascii.py | 12 | 1536 | # -*- coding: utf-8 -*-
"""
***************************************************************************
r_li_cwed_ascii.py
------------------
Date : February 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from .r_li import checkMovingWindow, configFile, moveOutputTxtFile
def checkParameterValuesBeforeExecuting(alg, parameters, context):
return checkMovingWindow(alg, parameters, context, True)
def processCommand(alg, parameters, context, feedback):
configFile(alg, parameters, context, feedback, True)
def processOutputs(alg, parameters, context, feedback):
moveOutputTxtFile(alg, parameters, context)
| gpl-2.0 | -44,795,668,410,692,376 | 37.25 | 75 | 0.488235 | false |
ariandi/ktmavia | backend/web/ionicons-2.0.1/builder/scripts/generate_font.py | 348 | 5381 | # Font generation script from FontCustom
# https://github.com/FontCustom/fontcustom/
# http://fontcustom.com/
import fontforge
import os
import md5
import subprocess
import tempfile
import json
import copy
SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
INPUT_SVG_DIR = os.path.join(SCRIPT_PATH, '..', '..', 'src')
OUTPUT_FONT_DIR = os.path.join(SCRIPT_PATH, '..', '..', 'fonts')
MANIFEST_PATH = os.path.join(SCRIPT_PATH, '..', 'manifest.json')
BUILD_DATA_PATH = os.path.join(SCRIPT_PATH, '..', 'build_data.json')
AUTO_WIDTH = True
KERNING = 15
cp = 0xf100
m = md5.new()
f = fontforge.font()
f.encoding = 'UnicodeFull'
f.design_size = 16
f.em = 512
f.ascent = 448
f.descent = 64
manifest_file = open(MANIFEST_PATH, 'r')
manifest_data = json.loads(manifest_file.read())
manifest_file.close()
print "Load Manifest, Icons: %s" % ( len(manifest_data['icons']) )
build_data = copy.deepcopy(manifest_data)
build_data['icons'] = []
font_name = manifest_data['name']
m.update(font_name + ';')
m.update(manifest_data['prefix'] + ';')
for dirname, dirnames, filenames in os.walk(INPUT_SVG_DIR):
for filename in filenames:
name, ext = os.path.splitext(filename)
filePath = os.path.join(dirname, filename)
size = os.path.getsize(filePath)
if ext in ['.svg', '.eps']:
# see if this file is already in the manifest
chr_code = None
for ionicon in manifest_data['icons']:
if ionicon['name'] == name:
chr_code = ionicon['code']
break
if chr_code is None:
# this is a new src icon
print 'New Icon: \n - %s' % (name)
while True:
chr_code = '0x%x' % (cp)
already_exists = False
for ionicon in manifest_data['icons']:
if ionicon.get('code') == chr_code:
already_exists = True
cp += 1
chr_code = '0x%x' % (cp)
continue
if not already_exists:
break
print ' - %s' % chr_code
manifest_data['icons'].append({
'name': name,
'code': chr_code
})
build_data['icons'].append({
'name': name,
'code': chr_code
})
      tmpsvgfile = None
      if ext in ['.svg']:
# hack removal of <switch> </switch> tags
svgfile = open(filePath, 'r+')
tmpsvgfile = tempfile.NamedTemporaryFile(suffix=ext, delete=False)
svgtext = svgfile.read()
svgfile.seek(0)
# replace the <switch> </switch> tags with 'nothing'
svgtext = svgtext.replace('<switch>', '')
svgtext = svgtext.replace('</switch>', '')
tmpsvgfile.file.write(svgtext)
svgfile.close()
tmpsvgfile.file.close()
filePath = tmpsvgfile.name
# end hack
m.update(name + str(size) + ';')
glyph = f.createChar( int(chr_code, 16) )
glyph.importOutlines(filePath)
# if we created a temporary file, let's clean it up
if tmpsvgfile:
os.unlink(tmpsvgfile.name)
# set glyph size explicitly or automatically depending on autowidth
if AUTO_WIDTH:
glyph.left_side_bearing = glyph.right_side_bearing = 0
glyph.round()
# resize glyphs if autowidth is enabled
if AUTO_WIDTH:
f.autoWidth(0, 0, 512)
fontfile = '%s/ionicons' % (OUTPUT_FONT_DIR)
build_hash = m.hexdigest()
if build_hash == manifest_data.get('build_hash'):
print "Source files unchanged, did not rebuild fonts"
else:
manifest_data['build_hash'] = build_hash
f.fontname = font_name
f.familyname = font_name
f.fullname = font_name
f.generate(fontfile + '.ttf')
f.generate(fontfile + '.svg')
# Fix SVG header for webkit
# from: https://github.com/fontello/font-builder/blob/master/bin/fontconvert.py
svgfile = open(fontfile + '.svg', 'r+')
svgtext = svgfile.read()
svgfile.seek(0)
svgfile.write(svgtext.replace('''<svg>''', '''<svg xmlns="http://www.w3.org/2000/svg">'''))
svgfile.close()
scriptPath = os.path.dirname(os.path.realpath(__file__))
try:
subprocess.Popen([scriptPath + '/sfnt2woff', fontfile + '.ttf'], stdout=subprocess.PIPE)
except OSError:
# If the local version of sfnt2woff fails (i.e., on Linux), try to use the
# global version. This allows us to avoid forcing OS X users to compile
# sfnt2woff from source, simplifying install.
subprocess.call(['sfnt2woff', fontfile + '.ttf'])
# eotlitetool.py script to generate IE7-compatible .eot fonts
subprocess.call('python ' + scriptPath + '/eotlitetool.py ' + fontfile + '.ttf -o ' + fontfile + '.eot', shell=True)
subprocess.call('mv ' + fontfile + '.eotlite ' + fontfile + '.eot', shell=True)
# Hint the TTF file
subprocess.call('ttfautohint -s -f -n ' + fontfile + '.ttf ' + fontfile + '-hinted.ttf > /dev/null 2>&1 && mv ' + fontfile + '-hinted.ttf ' + fontfile + '.ttf', shell=True)
manifest_data['icons'] = sorted(manifest_data['icons'], key=lambda k: k['name'])
build_data['icons'] = sorted(build_data['icons'], key=lambda k: k['name'])
print "Save Manifest, Icons: %s" % ( len(manifest_data['icons']) )
f = open(MANIFEST_PATH, 'w')
f.write( json.dumps(manifest_data, indent=2, separators=(',', ': ')) )
f.close()
print "Save Build, Icons: %s" % ( len(build_data['icons']) )
f = open(BUILD_DATA_PATH, 'w')
f.write( json.dumps(build_data, indent=2, separators=(',', ': ')) )
f.close()
| bsd-3-clause | 676,173,602,519,459,700 | 30.104046 | 174 | 0.616056 | false |
srowe/xen-api | ocaml/idl/binding_sanity_checks/sharedstorage.py | 34 | 1972 | #!/usr/bin/env python
import datetime
import XenAPI
import sanitychecklib
#parameters for the shared storage to be created
storage_type='nfs'
device_config={'server':sanitychecklib.network_storage_server, 'serverpath':sanitychecklib.network_storage_path }
physical_size = '100000'
name_label = 'created by sharedstorage.py '+ datetime.datetime.now().strftime("%X on %a %x")
name_description = 'shared storage created for testing purposes by the script sharedstorage.py'
content_type = 'content type field, wonder what goes here'
shared=True
sm_config={}
#log in
session=sanitychecklib.getsession()
sx=session.xenapi
server=sanitychecklib.server
#find the reference to our host
hosts=sx.host.get_all()
print "According to "+server+" the following hosts exist: ", [sx.host.get_name_label(x) for x in hosts], "\n"
host = [x for x in hosts if sx.host.get_name_label(x)==server][0]
print "We assume that the reference", host, "is the server itself, since its name label is\"%s\"\n" % sx.host.get_name_label(host)
#create a new networked storage repository
new_sr=sx.SR.create( host, device_config, physical_size, name_label, name_description, storage_type, content_type, shared, sm_config)
new_sr_record = sx.SR.get_record(new_sr)
print "Created new shared storage:"
print new_sr_record
#when an sr is created, it appears with PBDs already attached.
print "\nPBD(s) created along with the new SR"
for pbd in new_sr_record['PBDs']:
print " ", sx.PBD.get_record(new_sr_record['PBDs'][0])
#now we should set this as the default for our pool
pools=sx.pool.get_all()
print "There are ", len(pools), "pools"
our_pool=pools[0]
print "Assuming our pool is ",our_pool
print "Setting new storage to be the default sr for this pool"
sx.pool.set_default_SR(our_pool, new_sr)
print "Setting it to be the default for suspend and crashdump images too"
sx.pool.set_suspend_image_SR(our_pool, new_sr)
sx.pool.set_crash_dump_SR(our_pool, new_sr)
#log out
session.logout()
| lgpl-2.1 | 4,057,534,475,928,699,000 | 35.518519 | 138 | 0.743915 | false |
datalyze-solutions/pandas-qt | pandasqt/views/EditDialogs.py | 4 | 8445 | import re
from pandasqt.compat import QtCore, QtGui, Qt, Slot, Signal
from pandasqt.models.SupportedDtypes import SupportedDtypes
import numpy
from pandas import Timestamp
from pandas.tslib import NaTType
class DefaultValueValidator(QtGui.QValidator):
def __init__(self, parent=None):
super(DefaultValueValidator, self).__init__(parent)
self.dtype = None
self.intPattern = re.compile('[-+]?\d+')
self.uintPattern = re.compile('\d+')
self.floatPattern = re.compile('[+-]? *(?:\d+(?:\.\d*)?|\.\d+)')
self.boolPattern = re.compile('(1|t|0|f){1}$')
@Slot(numpy.dtype)
def validateType(self, dtype):
self.dtype = dtype
def fixup(self, string):
pass
def validate(self, s, pos):
if not s:
            # s is empty
return (QtGui.QValidator.Acceptable, s, pos)
if self.dtype in SupportedDtypes.strTypes():
return (QtGui.QValidator.Acceptable, s, pos)
elif self.dtype in SupportedDtypes.boolTypes():
match = re.match(self.boolPattern, s)
if match:
return (QtGui.QValidator.Acceptable, s, pos)
else:
return (QtGui.QValidator.Invalid, s, pos)
elif self.dtype in SupportedDtypes.datetimeTypes():
try:
ts = Timestamp(s)
except ValueError, e:
return (QtGui.QValidator.Intermediate, s, pos)
return (QtGui.QValidator.Acceptable, s, pos)
else:
dtypeInfo = None
if self.dtype in SupportedDtypes.intTypes():
match = re.search(self.intPattern, s)
if match:
try:
value = int(match.string)
except ValueError, e:
return (QtGui.QValidator.Invalid, s, pos)
dtypeInfo = numpy.iinfo(self.dtype)
elif self.dtype in SupportedDtypes.uintTypes():
match = re.search(self.uintPattern, s)
if match:
try:
value = int(match.string)
except ValueError, e:
return (QtGui.QValidator.Invalid, s, pos)
dtypeInfo = numpy.iinfo(self.dtype)
elif self.dtype in SupportedDtypes.floatTypes():
match = re.search(self.floatPattern, s)
if match:
try:
value = float(match.string)
except ValueError, e:
return (QtGui.QValidator.Invalid, s, pos)
dtypeInfo = numpy.finfo(self.dtype)
if dtypeInfo is not None:
if value >= dtypeInfo.min and value <= dtypeInfo.max:
return (QtGui.QValidator.Acceptable, s, pos)
else:
return (QtGui.QValidator.Invalid, s, pos)
else:
return (QtGui.QValidator.Invalid, s, pos)
return (QtGui.QValidator.Invalid, s, pos)
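# A minimal sketch of using the validator on its own (this mirrors what
# AddAttributesDialog below does; the int64 dtype is just an example):
#
#     edit = QtGui.QLineEdit()
#     validator = DefaultValueValidator(edit)
#     validator.validateType(numpy.dtype(numpy.int64))
#     edit.setValidator(validator)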
class AddAttributesDialog(QtGui.QDialog):
accepted = Signal(str, object, object)
def __init__(self, parent=None):
super(AddAttributesDialog, self).__init__(parent)
self.initUi()
def initUi(self):
self.setModal(True)
self.resize(303, 168)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
self.setSizePolicy(sizePolicy)
self.verticalLayout = QtGui.QVBoxLayout(self)
self.dialogHeading = QtGui.QLabel(self.tr('Add a new attribute column'), self)
self.gridLayout = QtGui.QGridLayout()
self.columnNameLineEdit = QtGui.QLineEdit(self)
self.columnNameLabel = QtGui.QLabel(self.tr('Name'), self)
self.dataTypeComboBox = QtGui.QComboBox(self)
self.dataTypeComboBox.addItems(SupportedDtypes.names())
self.columnTypeLabel = QtGui.QLabel(self.tr('Type'), self)
self.defaultValueLineEdit = QtGui.QLineEdit(self)
self.lineEditValidator = DefaultValueValidator(self)
self.defaultValueLineEdit.setValidator(self.lineEditValidator)
self.defaultValueLabel = QtGui.QLabel(self.tr('Inital Value(s)'), self)
self.gridLayout.addWidget(self.columnNameLabel, 0, 0, 1, 1)
self.gridLayout.addWidget(self.columnNameLineEdit, 0, 1, 1, 1)
self.gridLayout.addWidget(self.columnTypeLabel, 1, 0, 1, 1)
self.gridLayout.addWidget(self.dataTypeComboBox, 1, 1, 1, 1)
self.gridLayout.addWidget(self.defaultValueLabel, 2, 0, 1, 1)
self.gridLayout.addWidget(self.defaultValueLineEdit, 2, 1, 1, 1)
self.buttonBox = QtGui.QDialogButtonBox(self)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel | QtGui.QDialogButtonBox.Ok)
self.verticalLayout.addWidget(self.dialogHeading)
self.verticalLayout.addLayout(self.gridLayout)
self.verticalLayout.addWidget(self.buttonBox)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
self.dataTypeComboBox.currentIndexChanged.connect(self.updateValidatorDtype)
self.updateValidatorDtype(self.dataTypeComboBox.currentIndex())
def accept(self):
super(AddAttributesDialog, self).accept()
newColumn = self.columnNameLineEdit.text()
dtype = SupportedDtypes.dtype(self.dataTypeComboBox.currentText())
defaultValue = self.defaultValueLineEdit.text()
try:
if dtype in SupportedDtypes.intTypes() + SupportedDtypes.uintTypes():
defaultValue = int(defaultValue)
elif dtype in SupportedDtypes.floatTypes():
defaultValue = float(defaultValue)
elif dtype in SupportedDtypes.boolTypes():
defaultValue = defaultValue.lower() in ['t', '1']
elif dtype in SupportedDtypes.datetimeTypes():
defaultValue = Timestamp(defaultValue)
if isinstance(defaultValue, NaTType):
defaultValue = Timestamp('')
else:
defaultValue = dtype.type()
except ValueError, e:
defaultValue = dtype.type()
self.accepted.emit(newColumn, dtype, defaultValue)
@Slot(int)
def updateValidatorDtype(self, index):
(dtype, name) = SupportedDtypes.tupleAt(index)
self.defaultValueLineEdit.clear()
self.lineEditValidator.validateType(dtype)
class RemoveAttributesDialog(QtGui.QDialog):
accepted = Signal(list)
def __init__(self, columns, parent=None):
super(RemoveAttributesDialog, self).__init__(parent)
self.columns = columns
self.initUi()
def initUi(self):
self.setWindowTitle(self.tr('Remove Attributes'))
self.setModal(True)
self.resize(366, 274)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Expanding)
self.setSizePolicy(sizePolicy)
self.gridLayout = QtGui.QGridLayout(self)
self.dialogHeading = QtGui.QLabel(self.tr('Select the attribute column(s) which shall be removed'), self)
self.listView = QtGui.QListView(self)
model = QtGui.QStandardItemModel()
for column in self.columns:
item = QtGui.QStandardItem(column)
model.appendRow(item)
self.listView.setModel(model)
self.listView.setSelectionMode(QtGui.QListView.MultiSelection)
self.buttonBox = QtGui.QDialogButtonBox(self)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel | QtGui.QDialogButtonBox.Ok)
self.gridLayout.addWidget(self.dialogHeading, 0, 0, 1, 1)
self.gridLayout.addWidget(self.listView, 1, 0, 1, 1)
self.gridLayout.addWidget(self.buttonBox, 2, 0, 1, 1)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
def accept(self):
selection = self.listView.selectedIndexes()
names = []
for index in selection:
position = index.row()
names.append((position, index.data(QtCore.Qt.DisplayRole)))
super(RemoveAttributesDialog, self).accept()
self.accepted.emit(names) | mit | 8,912,547,466,978,800,000 | 34.940426 | 113 | 0.619183 | false |
chiragjogi/odoo | addons/stock_account/__init__.py | 384 | 1060 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import product
import stock_account
import stock
import wizard
import res_config
| agpl-3.0 | -8,533,595,445,704,118,000 | 39.769231 | 78 | 0.616981 | false |
felipsmartins/namebench | tools/check_dns_servers.py | 174 | 3835 | #!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for checking a lot of DNS servers from stdin for possible inclusion."""
__author__ = '[email protected] (Thomas Stromberg)'
import csv
import re
import sys
import GeoIP
sys.path.append('..')
sys.path.append('/Users/tstromberg/namebench')
import third_party
from libnamebench import nameserver_list
from libnamebench import config
from libnamebench import addr_util
import check_nameserver_popularity
gi = GeoIP.open('/usr/local/share/GeoLiteCity.dat', GeoIP.GEOIP_MEMORY_CACHE)
asn_lookup = GeoIP.open('/usr/local/share/GeoIPASNum.dat', GeoIP.GEOIP_MEMORY_CACHE)
existing_nameservers = config.GetLocalNameServerList()
check_ns = []
output = csv.writer(open('output.csv', 'w'))
for line in sys.stdin:
ips = addr_util.ExtractIPsFromString(line)
for ip in ips:
print ip
# disable IPV6 until we can improve our regular expression matching
if ':' in ip:
continue
if ip not in existing_nameservers:
check_ns.append((ip, ip))
if not check_ns:
print "no new servers to check"
sys.exit(1)
else:
print "%s servers to check" % len(check_ns)
print '-' * 80
nameserver_list.MAX_INITIAL_HEALTH_THREAD_COUNT = 100
nameservers = nameserver_list.NameServers([],
global_servers=check_ns,
timeout=10,
health_timeout=10,
threads=100,
num_servers=5000,
skip_cache_collusion_checks=True,
)
nameservers.min_healthy_percent = 0
sanity_checks = config.GetLocalSanityChecks()
try:
nameservers.CheckHealth(sanity_checks['primary'], sanity_checks['secondary'])
except nameserver_list.TooFewNameservers:
pass
print '-' * 80
for ns in nameservers:
try:
details = gi.record_by_addr(ns.ip)
except:
pass
if not details:
details = {}
city = details.get('city', '')
if city:
city = city.decode('latin-1')
latitude = details.get('latitude', '')
longitude = details.get('longitude', '')
country = details.get('country_name', '')
if country:
country = country.decode('latin-1')
country_code = details.get('country_code', '')
region = details.get('region_name', '')
if region:
region = region.decode('latin-1')
try:
results = check_nameserver_popularity.CheckPopularity(ns.ip)
urls = [ x['Url'] for x in results ]
except:
urls = ['(exception)']
num_urls = len(urls)
main = "%s=UNKNOWN" % ns.ip
if 'Responded with: REFUSED' in ns.warnings:
note = '_REFUSED_'
elif 'a.root-servers.net.: Timeout' in ns.warnings:
note = '_TIMEOUT_'
elif 'No answer (NOERROR): a.root-servers.net.' in ns.warnings:
note = '_NOANSWER_'
elif ns.warnings:
note = '_WARNING/%s_' % '/'.join(list(ns.warnings))
else:
note = ''
if ns.hostname != ns.ip:
domain = addr_util.GetDomainPartOfHostname(ns.hostname)
if domain:
good_urls = [x for x in urls if re.search(domain, x, re.I)]
if good_urls:
urls = good_urls
geo = '/'.join([x for x in [country_code, region, city] if x and not x.isdigit()]).encode('utf-8')
coords = ','.join(map(str, [latitude,longitude]))
asn = asn_lookup.org_by_addr(ns.ip)
row = [ns.ip, 'regional', 'UNKNOWN', '', ns.hostname, geo, coords, asn, note, num_urls, ' '.join(urls[:2]), ns.version]
print row
output.writerow(row)
| apache-2.0 | 3,839,970,656,477,663,000 | 28.960938 | 121 | 0.686832 | false |
alpinelinux/linux-stable-grsec | tools/perf/scripts/python/syscall-counts-by-pid.py | 1996 | 2105 | # system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
raw_syscalls__sys_enter(**locals())
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 | -5,189,957,477,307,039,000 | 27.445946 | 77 | 0.625178 | false |
darjus-amzn/ryu | ryu/ofproto/oxs_fields.py | 26 | 5855 | # Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# There are two representations of value which this module deals with.
#
# "user"
# the readable value which are strings.
#
# "internal"
# the on-wire bytes value.
# There are two types of OXS headers.
#
# 32-bit OXS header
# 31 16 15 9 8 7 0
# +-------------------------------+-------------+-+---------------+
# | class | field |r| length |
# +-------------------------------+-------------+-+---------------+
#
# 64-bit experimenter OXS header
# 31 16 15 9 8 7 0
# +-------------------------------+-------------+-+---------------+
# | class (OFPXSC_EXPERIMENTER) | field |r| length |
# +-------------------------------+-------------+-+---------------+
# | experimenter ID |
# +---------------------------------------------------------------+
#
# Description of OXS header fields
# +----------------------+-------+--------------------------------------------+
# | Name | Width | Usage |
# +----------+-----------+-------+--------------------------------------------+
# | oxs_type | oxs_class | 16 | Stat class: member class or reserved class |
# | +-----------+-------+--------------------------------------------+
# | | oxs_field | 7 | Stat field within the class |
# +----------+-----------+-------+--------------------------------------------+
# | reserved | 1 | Reserved for future use |
# +----------------------+-------+--------------------------------------------+
# | length | 8 | Length of OXS payload |
# +----------------------+-------+--------------------------------------------+
# Assumption: The following can be applied to OXSs too.
# OpenFlow Spec 1.5 mandates that Experimenter OXMs encode the experimenter
# type in the oxm_field field of the OXM header (EXT-380).
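# A minimal decoding sketch for the 32-bit OXS header above (illustrative
# only, not part of the original module; variable names are assumptions):
#
#   import struct
#   (hdr,) = struct.unpack('!I', four_header_bytes)
#   oxs_class = hdr >> 16          # bits 31-16: stat class
#   oxs_field = (hdr >> 9) & 0x7f  # bits 15-9:  stat field
#   reserved = (hdr >> 8) & 0x1    # bit 8:      reserved
#   length = hdr & 0xff            # bits 7-0:   payload length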
from ryu.ofproto.oxx_fields import (
_from_user,
_from_user_header,
_to_user,
_to_user_header,
_field_desc,
_parse,
_parse_header,
_serialize,
_serialize_header)
OFPXSC_OPENFLOW_BASIC = 0x8002
OFPXSC_EXPERIMENTER = 0xFFFF
OFPXSC_HEADER_PACK_STR = '!I'
OFPXSC_EXP_HEADER_PACK_STR = '!I'
class _OxsClass(object):
# _class = OFPXSC_* must be an attribute of subclass.
def __init__(self, name, num, type_):
self.name = name
self.oxs_field = num
self.oxs_type = num | (self._class << 7)
# 'num' has not corresponding field in the specification.
# This is specific to this implementation and used to retrieve
# _OxsClass subclass from 'num_to_field' dictionary.
self.num = self.oxs_type
self.type = type_
class OpenFlowBasic(_OxsClass):
_class = OFPXSC_OPENFLOW_BASIC
class _Experimenter(_OxsClass):
_class = OFPXSC_EXPERIMENTER
# experimenter_id must be an attribute of subclass.
def __init__(self, name, num, type_):
super(_Experimenter, self).__init__(name, num, type_)
self.num = (self.experimenter_id, self.oxs_type)
self.exp_type = self.oxs_field
def generate(modname):
import sys
import functools
mod = sys.modules[modname]
def add_attr(k, v):
setattr(mod, k, v)
for i in mod.oxs_types:
if isinstance(i.num, tuple):
continue
if i._class != OFPXSC_OPENFLOW_BASIC:
continue
uk = i.name.upper()
ofpxst = i.oxs_field
td = i.type
add_attr('OFPXST_OFB_' + uk, ofpxst)
add_attr('OXS_OF_' + uk, mod.oxs_tlv_header(ofpxst, td.size))
# 'oxx' indicates the OpenFlow Extensible class type.
# eg.) 'oxs' indicates that this class is OXS class.
oxx = 'oxs'
name_to_field = dict((f.name, f) for f in mod.oxs_types)
num_to_field = dict((f.num, f) for f in mod.oxs_types)
# create functions by using oxx_fields module.
add_attr('oxs_from_user',
functools.partial(_from_user, oxx, name_to_field))
add_attr('oxs_from_user_header',
functools.partial(_from_user_header, oxx, name_to_field))
add_attr('oxs_to_user',
functools.partial(_to_user, oxx, num_to_field))
add_attr('oxs_to_user_header',
functools.partial(_to_user_header, oxx, num_to_field))
add_attr('_oxs_field_desc', # oxx is not required
functools.partial(_field_desc, num_to_field))
add_attr('oxs_parse', # oxx is not required
functools.partial(_parse, mod))
add_attr('oxs_parse_header', # oxx is not required
functools.partial(_parse_header, mod))
add_attr('oxs_serialize',
functools.partial(_serialize, oxx, mod))
add_attr('oxs_serialize_header',
functools.partial(_serialize_header, oxx, mod))
add_attr('oxs_to_jsondict', _to_jsondict)
add_attr('oxs_from_jsondict', _from_jsondict)
def _to_jsondict(k, uv):
return {"OXSTlv": {"field": k, "value": uv}}
def _from_jsondict(j):
tlv = j['OXSTlv']
field = tlv['field']
value = tlv['value']
return (field, value)
| apache-2.0 | -9,174,764,506,429,617,000 | 35.141975 | 79 | 0.51392 | false |
skirsdeda/django | django/contrib/auth/tests/test_forms.py | 15 | 21532 | from __future__ import unicode_literals
import os
import re
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import (UserCreationForm, AuthenticationForm,
PasswordChangeForm, SetPasswordForm, UserChangeForm, PasswordResetForm,
ReadOnlyPasswordHashField, ReadOnlyPasswordHashWidget)
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.core import mail
from django.core.mail import EmailMultiAlternatives
from django.forms.fields import Field, CharField
from django.test import TestCase, override_settings
from django.utils.encoding import force_text
from django.utils._os import upath
from django.utils import translation
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class UserCreationFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_user_already_exists(self):
data = {
'username': 'testclient',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["username"].errors,
[force_text(User._meta.get_field('username').error_messages['unique'])])
def test_invalid_data(self):
data = {
'username': 'jsmith!',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
self.assertEqual(form["username"].errors, [force_text(validator.message)])
def test_password_verification(self):
# The verification password is incorrect.
data = {
'username': 'jsmith',
'password1': 'test123',
'password2': 'test',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["password2"].errors,
[force_text(form.error_messages['password_mismatch'])])
def test_both_passwords(self):
# One (or both) passwords weren't given
data = {'username': 'jsmith'}
form = UserCreationForm(data)
required_error = [force_text(Field.default_error_messages['required'])]
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, required_error)
data['password2'] = 'test123'
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, [])
def test_success(self):
# The success case.
data = {
'username': '[email protected]',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
u = form.save()
self.assertEqual(repr(u), '<User: [email protected]>')
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AuthenticationFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_invalid_username(self):
# The user submits an invalid username.
data = {
'username': 'jsmith_does_not_exist',
'password': 'test123',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(),
[force_text(form.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
})])
def test_inactive_user(self):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(),
[force_text(form.error_messages['inactive'])])
def test_inactive_user_i18n(self):
with self.settings(USE_I18N=True), translation.override('pt-br', deactivate=True):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(),
[force_text(form.error_messages['inactive'])])
def test_custom_login_allowed_policy(self):
# The user is inactive, but our custom form policy allows them to log in.
data = {
'username': 'inactive',
'password': 'password',
}
class AuthenticationFormWithInactiveUsersOkay(AuthenticationForm):
def confirm_login_allowed(self, user):
pass
form = AuthenticationFormWithInactiveUsersOkay(None, data)
self.assertTrue(form.is_valid())
# If we want to disallow some logins according to custom logic,
# we should raise a django.forms.ValidationError in the form.
class PickyAuthenticationForm(AuthenticationForm):
def confirm_login_allowed(self, user):
if user.username == "inactive":
raise forms.ValidationError("This user is disallowed.")
raise forms.ValidationError("Sorry, nobody's allowed in.")
form = PickyAuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), ['This user is disallowed.'])
data = {
'username': 'testclient',
'password': 'password',
}
form = PickyAuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), ["Sorry, nobody's allowed in."])
def test_success(self):
# The success case
data = {
'username': 'testclient',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.non_field_errors(), [])
def test_username_field_label(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label="Name", max_length=75)
form = CustomAuthenticationForm()
self.assertEqual(form['username'].label, "Name")
def test_username_field_label_not_set(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField()
form = CustomAuthenticationForm()
username_field = User._meta.get_field(User.USERNAME_FIELD)
self.assertEqual(form.fields['username'].label, capfirst(username_field.verbose_name))
def test_username_field_label_empty_string(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label='')
form = CustomAuthenticationForm()
self.assertEqual(form.fields['username'].label, "")
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class SetPasswordFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = SetPasswordForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["new_password2"].errors,
[force_text(form.error_messages['password_mismatch'])])
def test_success(self):
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = SetPasswordForm(user, data)
self.assertTrue(form.is_valid())
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class PasswordChangeFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_incorrect_password(self):
user = User.objects.get(username='testclient')
data = {
'old_password': 'test',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["old_password"].errors,
[force_text(form.error_messages['password_incorrect'])])
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["new_password2"].errors,
[force_text(form.error_messages['password_mismatch'])])
def test_success(self):
# The success case.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
def test_field_order(self):
# Regression test - check the order of fields:
user = User.objects.get(username='testclient')
self.assertEqual(list(PasswordChangeForm(user, {}).fields),
['old_password', 'new_password1', 'new_password2'])
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class UserChangeFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_username_validity(self):
user = User.objects.get(username='testclient')
data = {'username': 'not valid'}
form = UserChangeForm(data, instance=user)
self.assertFalse(form.is_valid())
validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
self.assertEqual(form["username"].errors, [force_text(validator.message)])
def test_bug_14242(self):
        # A regression test, introduced by adding an optimization for the
# UserChangeForm.
class MyUserForm(UserChangeForm):
def __init__(self, *args, **kwargs):
super(MyUserForm, self).__init__(*args, **kwargs)
self.fields['groups'].help_text = 'These groups give users different permissions'
class Meta(UserChangeForm.Meta):
fields = ('groups',)
# Just check we can create it
MyUserForm({})
    def test_unusable_password(self):
user = User.objects.get(username='empty_password')
user.set_unusable_password()
user.save()
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_empty_password(self):
user = User.objects.get(username='empty_password')
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_unmanageable_password(self):
user = User.objects.get(username='unmanageable_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."),
form.as_table())
def test_bug_17944_unknown_password_algorithm(self):
user = User.objects.get(username='unknown_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."),
form.as_table())
def test_bug_19133(self):
"The change form does not return the password value"
# Use the form to construct the POST data
user = User.objects.get(username='testclient')
form_for_data = UserChangeForm(instance=user)
post_data = form_for_data.initial
# The password field should be readonly, so anything
# posted here should be ignored; the form will be
# valid, and give back the 'initial' value for the
# password field.
post_data['password'] = 'new password'
form = UserChangeForm(instance=user, data=post_data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['password'], 'sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161')
def test_bug_19349_bound_password_field(self):
user = User.objects.get(username='testclient')
form = UserChangeForm(data={}, instance=user)
# When rendering the bound password field,
# ReadOnlyPasswordHashWidget needs the initial
# value to render correctly
self.assertEqual(form.initial['password'], form['password'].value())
@skipIfCustomUser
@override_settings(
PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
TEMPLATE_LOADERS=('django.template.loaders.filesystem.Loader',),
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(upath(__file__)), 'templates'),
),
USE_TZ=False,
)
class PasswordResetFormTest(TestCase):
fixtures = ['authtestdata.json']
def create_dummy_user(self):
"""
Create a user and return a tuple (user_object, username, email).
"""
username = 'jsmith'
email = '[email protected]'
user = User.objects.create_user(username, email, 'test123')
return (user, username, email)
def test_invalid_email(self):
data = {'email': 'not valid'}
form = PasswordResetForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['email'].errors, [_('Enter a valid email address.')])
def test_nonexistent_email(self):
"""
Test nonexistent email address. This should not fail because it would
expose information about registered users.
"""
data = {'email': '[email protected]'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(len(mail.outbox), 0)
def test_cleaned_data(self):
(user, username, email) = self.create_dummy_user()
data = {'email': email}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
form.save(domain_override='example.com')
self.assertEqual(form.cleaned_data['email'], email)
self.assertEqual(len(mail.outbox), 1)
def test_custom_email_subject(self):
data = {'email': '[email protected]'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
# Since we're not providing a request object, we must provide a
# domain_override to prevent the save operation from failing in the
# potential case where contrib.sites is not installed. Refs #16412.
form.save(domain_override='example.com')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Custom password reset on example.com')
def test_custom_email_constructor(self):
template_path = os.path.join(os.path.dirname(__file__), 'templates')
with self.settings(TEMPLATE_DIRS=(template_path,)):
data = {'email': '[email protected]'}
class CustomEmailPasswordResetForm(PasswordResetForm):
def send_mail(self, subject_template_name, email_template_name,
context, from_email, to_email,
html_email_template_name=None):
EmailMultiAlternatives(
"Forgot your password?",
"Sorry to hear you forgot your password.",
None, [to_email],
['[email protected]'],
headers={'Reply-To': '[email protected]'},
alternatives=[("Really sorry to hear you forgot your password.",
"text/html")]).send()
form = CustomEmailPasswordResetForm(data)
self.assertTrue(form.is_valid())
# Since we're not providing a request object, we must provide a
# domain_override to prevent the save operation from failing in the
# potential case where contrib.sites is not installed. Refs #16412.
form.save(domain_override='example.com')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Forgot your password?')
self.assertEqual(mail.outbox[0].bcc, ['[email protected]'])
self.assertEqual(mail.outbox[0].content_subtype, "plain")
def test_preserve_username_case(self):
"""
Preserve the case of the user name (before the @ in the email address)
when creating a user (#5605).
"""
user = User.objects.create_user('forms_test2', '[email protected]', 'test')
self.assertEqual(user.email, '[email protected]')
user = User.objects.create_user('forms_test3', 'tesT', 'test')
self.assertEqual(user.email, 'tesT')
def test_inactive_user(self):
"""
Test that inactive user cannot receive password reset email.
"""
(user, username, email) = self.create_dummy_user()
user.is_active = False
user.save()
form = PasswordResetForm({'email': email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
def test_unusable_password(self):
user = User.objects.create_user('testuser', '[email protected]', 'test')
data = {"email": "[email protected]"}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
user.set_unusable_password()
user.save()
form = PasswordResetForm(data)
# The form itself is valid, but no email is sent
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
def test_save_plaintext_email(self):
"""
Test the PasswordResetForm.save() method with no html_email_template_name
parameter passed in.
Test to ensure original behavior is unchanged after the parameter was added.
"""
(user, username, email) = self.create_dummy_user()
form = PasswordResetForm({"email": email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0].message()
self.assertFalse(message.is_multipart())
self.assertEqual(message.get_content_type(), 'text/plain')
self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
self.assertEqual(len(mail.outbox[0].alternatives), 0)
self.assertEqual(message.get_all('to'), [email])
        self.assertTrue(re.match(r'^http://example.com/reset/[\w/-]+', message.get_payload()))
def test_save_html_email_template_name(self):
"""
        Test the PasswordResetForm.save() method with html_email_template_name
parameter specified.
Test to ensure that a multipart email is sent with both text/plain
and text/html parts.
"""
(user, username, email) = self.create_dummy_user()
form = PasswordResetForm({"email": email})
self.assertTrue(form.is_valid())
form.save(html_email_template_name='registration/html_password_reset_email.html')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(len(mail.outbox[0].alternatives), 1)
message = mail.outbox[0].message()
self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
self.assertEqual(len(message.get_payload()), 2)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
self.assertEqual(message.get_all('to'), [email])
self.assertTrue(re.match(r'^http://example.com/reset/[\w/-]+', message.get_payload(0).get_payload()))
self.assertTrue(
re.match(r'^<html><a href="http://example.com/reset/[\w/-]+/">Link</a></html>$',
message.get_payload(1).get_payload())
)
class ReadOnlyPasswordHashTest(TestCase):
def test_bug_19349_render_with_none_value(self):
# Rendering the widget with value set to None
# mustn't raise an exception.
widget = ReadOnlyPasswordHashWidget()
html = widget.render(name='password', value=None, attrs={})
self.assertIn(_("No password set."), html)
def test_readonly_field_has_changed(self):
field = ReadOnlyPasswordHashField()
self.assertFalse(field.has_changed('aaa', 'bbb'))
| bsd-3-clause | -5,898,819,682,186,163,000 | 38.948052 | 110 | 0.620333 | false |
SnappleCap/oh-mainline | vendor/packages/whoosh/src/whoosh/qparser/common.py | 16 | 2233 | # Copyright 2010 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
"""
This module contains common utility objects/functions for the other query
parser modules.
"""
from whoosh.compat import string_type
class QueryParserError(Exception):
def __init__(self, cause, msg=None):
super(QueryParserError, self).__init__(str(cause))
self.cause = cause
def get_single_text(field, text, **kwargs):
"""Returns the first token from an analyzer's output.
"""
for t in field.process_text(text, mode="query", **kwargs):
return t
def attach(q, stxnode):
if q:
q.startchar = stxnode.startchar
q.endchar = stxnode.endchar
return q
def print_debug(level, msg):
if level:
print(" " * (level - 1), msg)
| agpl-3.0 | -2,108,783,699,514,952,200 | 36.847458 | 78 | 0.734438 | false |
gardner/youtube-dl | youtube_dl/extractor/vier.py | 69 | 4342 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class VierIE(InfoExtractor):
IE_NAME = 'vier'
_VALID_URL = r'https?://(?:www\.)?vier\.be/(?:[^/]+/videos/(?P<display_id>[^/]+)(?:/(?P<id>\d+))?|video/v3/embed/(?P<embed_id>\d+))'
_TESTS = [{
'url': 'http://www.vier.be/planb/videos/het-wordt-warm-de-moestuin/16129',
'info_dict': {
'id': '16129',
'display_id': 'het-wordt-warm-de-moestuin',
'ext': 'mp4',
'title': 'Het wordt warm in De Moestuin',
'description': 'De vele uren werk eisen hun tol. Wim droomt van assistentie...',
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'http://www.vier.be/planb/videos/mieren-herders-van-de-bladluizen',
'only_matching': True,
}, {
'url': 'http://www.vier.be/video/v3/embed/16129',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
embed_id = mobj.group('embed_id')
display_id = mobj.group('display_id') or embed_id
webpage = self._download_webpage(url, display_id)
video_id = self._search_regex(
[r'data-nid="(\d+)"', r'"nid"\s*:\s*"(\d+)"'],
webpage, 'video id')
application = self._search_regex(
[r'data-application="([^"]+)"', r'"application"\s*:\s*"([^"]+)"'],
webpage, 'application', default='vier_vod')
filename = self._search_regex(
[r'data-filename="([^"]+)"', r'"filename"\s*:\s*"([^"]+)"'],
webpage, 'filename')
playlist_url = 'http://vod.streamcloud.be/%s/mp4:_definst_/%s.mp4/playlist.m3u8' % (application, filename)
formats = self._extract_m3u8_formats(playlist_url, display_id, 'mp4')
title = self._og_search_title(webpage, default=display_id)
description = self._og_search_description(webpage, default=None)
thumbnail = self._og_search_thumbnail(webpage, default=None)
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'formats': formats,
}
class VierVideosIE(InfoExtractor):
IE_NAME = 'vier:videos'
_VALID_URL = r'https?://(?:www\.)?vier\.be/(?P<program>[^/]+)/videos(?:\?.*\bpage=(?P<page>\d+)|$)'
_TESTS = [{
'url': 'http://www.vier.be/demoestuin/videos',
'info_dict': {
'id': 'demoestuin',
},
'playlist_mincount': 153,
}, {
'url': 'http://www.vier.be/demoestuin/videos?page=6',
'info_dict': {
'id': 'demoestuin-page6',
},
'playlist_mincount': 20,
}, {
'url': 'http://www.vier.be/demoestuin/videos?page=7',
'info_dict': {
'id': 'demoestuin-page7',
},
'playlist_mincount': 13,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
program = mobj.group('program')
webpage = self._download_webpage(url, program)
page_id = mobj.group('page')
if page_id:
page_id = int(page_id)
start_page = page_id
last_page = start_page + 1
playlist_id = '%s-page%d' % (program, page_id)
else:
start_page = 0
last_page = int(self._search_regex(
r'videos\?page=(\d+)">laatste</a>',
webpage, 'last page', default=0)) + 1
playlist_id = program
entries = []
for current_page_id in range(start_page, last_page):
current_page = self._download_webpage(
'http://www.vier.be/%s/videos?page=%d' % (program, current_page_id),
program,
'Downloading page %d' % (current_page_id + 1)) if current_page_id != page_id else webpage
page_entries = [
self.url_result('http://www.vier.be' + video_url, 'Vier')
for video_url in re.findall(
r'<h3><a href="(/[^/]+/videos/[^/]+(?:/\d+)?)">', current_page)]
entries.extend(page_entries)
return self.playlist_result(entries, playlist_id)
| unlicense | -8,440,910,133,286,628,000 | 34.884298 | 136 | 0.515891 | false |
hkernbach/arangodb | 3rdParty/boost/1.62.0/tools/litre/litre.py | 67 | 1906 | from docutils import writers
from docutils import nodes
class LitreTranslator(nodes.GenericNodeVisitor):
def __init__(self, document, config):
nodes.GenericNodeVisitor.__init__(self,document)
self._config = config
def default_visit(self, node):
pass
# print '**visiting:', repr(node)
def default_departure(self, node):
pass
# print '**departing:', repr(node)
def visit_raw(self, node):
if node.has_key('format'):
key = node['format'].lower()
if key == 'litre':
# This is probably very evil ;-)
#if node.has_key('source'):
# node.file = node.attributes['source']
self._handle_code(node, node.astext())
raise nodes.SkipNode
def visit_comment(self, node):
code = node.astext()
if code[0] == '@':
self._handle_code(node, code[1:].strip())
def _handle_code(self, node, code):
start_line = node.line or 0
start_line -= code.count('\n') + 2 # docutils bug workaround?
try:
self._execute(compile( start_line*'\n' + code, str(node.source), 'exec'))
except:
print '\n------- begin offending Python source -------'
print code
print '------- end offending Python source -------'
raise
def _execute(self, code):
"""Override this to set up local variable context for code before
invoking it
"""
eval(code)
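    # Hypothetical override (an assumption, not part of the original source):
    # a subclass could expose extra names to the executed snippets by passing
    # a globals dict, e.g.
    #
    #   class ContextTranslator(LitreTranslator):
    #       def _execute(self, code):
    #           eval(code, {'document': self.document, 'config': self._config})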
class Writer(writers.Writer):
translator = LitreTranslator
_config = None
def translate(self):
visitor = self.translator(self.document, self._config)
self.document.walkabout(visitor)
self.output = visitor.astext()
| apache-2.0 | -3,705,056,537,410,984,400 | 30.245902 | 85 | 0.524134 | false |
theakholic/ThinkStats2 | code/timeseries.py | 66 | 18035 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import pandas
import numpy as np
import statsmodels.formula.api as smf
import statsmodels.tsa.stattools as smtsa
import matplotlib.pyplot as pyplot
import thinkplot
import thinkstats2
FORMATS = ['png']
def ReadData():
"""Reads data about cannabis transactions.
http://zmjones.com/static/data/mj-clean.csv
returns: DataFrame
"""
transactions = pandas.read_csv('mj-clean.csv', parse_dates=[5])
return transactions
def tmean(series):
"""Computes a trimmed mean.
series: Series
returns: float
"""
t = series.values
n = len(t)
if n <= 3:
return t.mean()
    trim = max(1, n // 10)
return np.mean(sorted(t)[trim:n-trim])
def GroupByDay(transactions, func=np.mean):
"""Groups transactions by day and compute the daily mean ppg.
transactions: DataFrame of transactions
returns: DataFrame of daily prices
"""
groups = transactions[['date', 'ppg']].groupby('date')
daily = groups.aggregate(func)
daily['date'] = daily.index
start = daily.date[0]
one_year = np.timedelta64(1, 'Y')
daily['years'] = (daily.date - start) / one_year
return daily
def GroupByQualityAndDay(transactions):
"""Divides transactions by quality and computes mean daily price.
transaction: DataFrame of transactions
returns: map from quality to time series of ppg
"""
groups = transactions.groupby('quality')
dailies = {}
for name, group in groups:
dailies[name] = GroupByDay(group)
return dailies
def PlotDailies(dailies):
"""Makes a plot with daily prices for different qualities.
dailies: map from name to DataFrame
"""
thinkplot.PrePlot(rows=3)
for i, (name, daily) in enumerate(dailies.items()):
thinkplot.SubPlot(i+1)
title = 'price per gram ($)' if i == 0 else ''
thinkplot.Config(ylim=[0, 20], title=title)
thinkplot.Scatter(daily.ppg, s=10, label=name)
if i == 2:
pyplot.xticks(rotation=30)
else:
thinkplot.Config(xticks=[])
thinkplot.Save(root='timeseries1',
formats=FORMATS)
def RunLinearModel(daily):
"""Runs a linear model of prices versus years.
daily: DataFrame of daily prices
returns: model, results
"""
model = smf.ols('ppg ~ years', data=daily)
results = model.fit()
return model, results
def PlotFittedValues(model, results, label=''):
"""Plots original data and fitted values.
model: StatsModel model object
results: StatsModel results object
"""
years = model.exog[:, 1]
values = model.endog
thinkplot.Scatter(years, values, s=15, label=label)
thinkplot.Plot(years, results.fittedvalues, label='model')
def PlotResiduals(model, results):
"""Plots the residuals of a model.
model: StatsModel model object
results: StatsModel results object
"""
years = model.exog[:, 1]
thinkplot.Plot(years, results.resid, linewidth=0.5, alpha=0.5)
def PlotResidualPercentiles(model, results, index=1, num_bins=20):
"""Plots percentiles of the residuals.
model: StatsModel model object
results: StatsModel results object
index: which exogenous variable to use
num_bins: how many bins to divide the x-axis into
"""
exog = model.exog[:, index]
resid = results.resid.values
df = pandas.DataFrame(dict(exog=exog, resid=resid))
bins = np.linspace(np.min(exog), np.max(exog), num_bins)
indices = np.digitize(exog, bins)
groups = df.groupby(indices)
means = [group.exog.mean() for _, group in groups][1:-1]
cdfs = [thinkstats2.Cdf(group.resid) for _, group in groups][1:-1]
thinkplot.PrePlot(3)
for percent in [75, 50, 25]:
percentiles = [cdf.Percentile(percent) for cdf in cdfs]
label = '%dth' % percent
thinkplot.Plot(means, percentiles, label=label)
def SimulateResults(daily, iters=101, func=RunLinearModel):
"""Run simulations based on resampling residuals.
daily: DataFrame of daily prices
iters: number of simulations
func: function that fits a model to the data
returns: list of result objects
"""
_, results = func(daily)
fake = daily.copy()
result_seq = []
for _ in range(iters):
fake.ppg = results.fittedvalues + thinkstats2.Resample(results.resid)
_, fake_results = func(fake)
result_seq.append(fake_results)
return result_seq
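# Usage sketch (illustrative; the 90% band below is an arbitrary choice):
#
#   result_seq = SimulateResults(daily)
#   slopes = [r.params['years'] for r in result_seq]
#   low, high = np.percentile(slopes, [5, 95])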
def SimulateIntervals(daily, iters=101, func=RunLinearModel):
"""Run simulations based on different subsets of the data.
daily: DataFrame of daily prices
iters: number of simulations
func: function that fits a model to the data
returns: list of result objects
"""
result_seq = []
starts = np.linspace(0, len(daily), iters).astype(int)
for start in starts[:-2]:
subset = daily[start:]
_, results = func(subset)
fake = subset.copy()
for _ in range(iters):
fake.ppg = (results.fittedvalues +
thinkstats2.Resample(results.resid))
_, fake_results = func(fake)
result_seq.append(fake_results)
return result_seq
def GeneratePredictions(result_seq, years, add_resid=False):
"""Generates an array of predicted values from a list of model results.
When add_resid is False, predictions represent sampling error only.
When add_resid is True, they also include residual error (which is
more relevant to prediction).
result_seq: list of model results
years: sequence of times (in years) to make predictions for
add_resid: boolean, whether to add in resampled residuals
returns: sequence of predictions
"""
n = len(years)
d = dict(Intercept=np.ones(n), years=years, years2=years**2)
predict_df = pandas.DataFrame(d)
predict_seq = []
for fake_results in result_seq:
predict = fake_results.predict(predict_df)
if add_resid:
predict += thinkstats2.Resample(fake_results.resid, n)
predict_seq.append(predict)
return predict_seq
def GenerateSimplePrediction(results, years):
"""Generates a simple prediction.
results: results object
years: sequence of times (in years) to make predictions for
returns: sequence of predicted values
"""
n = len(years)
inter = np.ones(n)
d = dict(Intercept=inter, years=years, years2=years**2)
predict_df = pandas.DataFrame(d)
predict = results.predict(predict_df)
return predict
def PlotPredictions(daily, years, iters=101, percent=90, func=RunLinearModel):
"""Plots predictions.
daily: DataFrame of daily prices
years: sequence of times (in years) to make predictions for
iters: number of simulations
percent: what percentile range to show
func: function that fits a model to the data
"""
result_seq = SimulateResults(daily, iters=iters, func=func)
p = (100 - percent) / 2
percents = p, 100-p
predict_seq = GeneratePredictions(result_seq, years, add_resid=True)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.3, color='gray')
predict_seq = GeneratePredictions(result_seq, years, add_resid=False)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.5, color='gray')
def PlotIntervals(daily, years, iters=101, percent=90, func=RunLinearModel):
"""Plots predictions based on different intervals.
daily: DataFrame of daily prices
years: sequence of times (in years) to make predictions for
iters: number of simulations
percent: what percentile range to show
func: function that fits a model to the data
"""
result_seq = SimulateIntervals(daily, iters=iters, func=func)
p = (100 - percent) / 2
percents = p, 100-p
predict_seq = GeneratePredictions(result_seq, years, add_resid=True)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.2, color='gray')
def Correlate(dailies):
"""Compute the correlation matrix between prices for difference qualities.
dailies: map from quality to time series of ppg
returns: correlation matrix
"""
df = pandas.DataFrame()
for name, daily in dailies.items():
df[name] = daily.ppg
return df.corr()
def CorrelateResid(dailies):
"""Compute the correlation matrix between residuals.
dailies: map from quality to time series of ppg
returns: correlation matrix
"""
df = pandas.DataFrame()
for name, daily in dailies.items():
_, results = RunLinearModel(daily)
df[name] = results.resid
return df.corr()
def TestCorrelateResid(dailies, iters=101):
"""Tests observed correlations.
dailies: map from quality to time series of ppg
iters: number of simulations
"""
t = []
names = ['high', 'medium', 'low']
for name in names:
daily = dailies[name]
t.append(SimulateResults(daily, iters=iters))
corr = CorrelateResid(dailies)
arrays = []
for result_seq in zip(*t):
df = pandas.DataFrame()
for name, results in zip(names, result_seq):
df[name] = results.resid
opp_sign = corr * df.corr() < 0
arrays.append((opp_sign.astype(int)))
print(np.sum(arrays))
def RunModels(dailies):
"""Runs linear regression for each group in dailies.
dailies: map from group name to DataFrame
"""
rows = []
for daily in dailies.values():
_, results = RunLinearModel(daily)
intercept, slope = results.params
p1, p2 = results.pvalues
r2 = results.rsquared
s = r'%0.3f (%0.2g) & %0.3f (%0.2g) & %0.3f \\'
row = s % (intercept, p1, slope, p2, r2)
rows.append(row)
# print results in a LaTeX table
print(r'\begin{tabular}{|c|c|c|}')
print(r'\hline')
print(r'intercept & slope & $R^2$ \\ \hline')
for row in rows:
print(row)
print(r'\hline')
print(r'\end{tabular}')
def FillMissing(daily, span=30):
"""Fills missing values with an exponentially weighted moving average.
Resulting DataFrame has new columns 'ewma' and 'resid'.
daily: DataFrame of daily prices
span: window size (sort of) passed to ewma
returns: new DataFrame of daily prices
"""
dates = pandas.date_range(daily.index.min(), daily.index.max())
reindexed = daily.reindex(dates)
ewma = pandas.ewma(reindexed.ppg, span=span)
resid = (reindexed.ppg - ewma).dropna()
fake_data = ewma + thinkstats2.Resample(resid, len(reindexed))
reindexed.ppg.fillna(fake_data, inplace=True)
reindexed['ewma'] = ewma
reindexed['resid'] = reindexed.ppg - ewma
return reindexed
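# Usage sketch for FillMissing (illustrative; `daily` is a frame returned by
# GroupByDay):
#
#   filled = FillMissing(daily, span=30)
#   filled.ppg   # prices with gaps filled by ewma plus resampled residuals
#   filled.ewma  # the smoothed series
#   filled.resid # deviations of ppg from the smoothed series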
def AddWeeklySeasonality(daily):
"""Adds a weekly pattern.
daily: DataFrame of daily prices
returns: new DataFrame of daily prices
"""
frisat = (daily.index.dayofweek==4) | (daily.index.dayofweek==5)
fake = daily.copy()
fake.ppg[frisat] += np.random.uniform(0, 2, frisat.sum())
return fake
def PrintSerialCorrelations(dailies):
"""Prints a table of correlations with different lags.
dailies: map from category name to DataFrame of daily prices
"""
filled_dailies = {}
for name, daily in dailies.items():
filled_dailies[name] = FillMissing(daily, span=30)
# print serial correlations for raw price data
for name, filled in filled_dailies.items():
corr = thinkstats2.SerialCorr(filled.ppg, lag=1)
print(name, corr)
rows = []
for lag in [1, 7, 30, 365]:
row = [str(lag)]
for name, filled in filled_dailies.items():
corr = thinkstats2.SerialCorr(filled.resid, lag)
row.append('%.2g' % corr)
rows.append(row)
print(r'\begin{tabular}{|c|c|c|c|}')
print(r'\hline')
print(r'lag & high & medium & low \\ \hline')
for row in rows:
print(' & '.join(row) + r' \\')
print(r'\hline')
print(r'\end{tabular}')
filled = filled_dailies['high']
acf = smtsa.acf(filled.resid, nlags=365, unbiased=True)
print('%0.3f, %0.3f, %0.3f, %0.3f, %0.3f' %
(acf[0], acf[1], acf[7], acf[30], acf[365]))
def SimulateAutocorrelation(daily, iters=1001, nlags=40):
"""Resample residuals, compute autocorrelation, and plot percentiles.
daily: DataFrame
iters: number of simulations to run
nlags: maximum lags to compute autocorrelation
"""
# run simulations
t = []
for _ in range(iters):
filled = FillMissing(daily, span=30)
resid = thinkstats2.Resample(filled.resid)
acf = smtsa.acf(resid, nlags=nlags, unbiased=True)[1:]
t.append(np.abs(acf))
high = thinkstats2.PercentileRows(t, [97.5])[0]
low = -high
lags = range(1, nlags+1)
thinkplot.FillBetween(lags, low, high, alpha=0.2, color='gray')
def PlotAutoCorrelation(dailies, nlags=40, add_weekly=False):
"""Plots autocorrelation functions.
dailies: map from category name to DataFrame of daily prices
nlags: number of lags to compute
add_weekly: boolean, whether to add a simulated weekly pattern
"""
thinkplot.PrePlot(3)
daily = dailies['high']
SimulateAutocorrelation(daily)
for name, daily in dailies.items():
if add_weekly:
daily = AddWeeklySeasonality(daily)
filled = FillMissing(daily, span=30)
acf = smtsa.acf(filled.resid, nlags=nlags, unbiased=True)
lags = np.arange(len(acf))
thinkplot.Plot(lags[1:], acf[1:], label=name)
def MakeAcfPlot(dailies):
"""Makes a figure showing autocorrelation functions.
dailies: map from category name to DataFrame of daily prices
"""
axis = [0, 41, -0.2, 0.2]
thinkplot.PrePlot(cols=2)
PlotAutoCorrelation(dailies, add_weekly=False)
thinkplot.Config(axis=axis,
loc='lower right',
ylabel='correlation',
xlabel='lag (day)')
thinkplot.SubPlot(2)
PlotAutoCorrelation(dailies, add_weekly=True)
thinkplot.Save(root='timeseries9',
axis=axis,
loc='lower right',
xlabel='lag (days)',
formats=FORMATS)
def PlotRollingMean(daily, name):
"""Plots rolling mean and EWMA.
daily: DataFrame of daily prices
"""
dates = pandas.date_range(daily.index.min(), daily.index.max())
reindexed = daily.reindex(dates)
thinkplot.PrePlot(cols=2)
thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.1, label=name)
roll_mean = pandas.rolling_mean(reindexed.ppg, 30)
thinkplot.Plot(roll_mean, label='rolling mean')
pyplot.xticks(rotation=30)
thinkplot.Config(ylabel='price per gram ($)')
thinkplot.SubPlot(2)
thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.1, label=name)
ewma = pandas.ewma(reindexed.ppg, span=30)
thinkplot.Plot(ewma, label='EWMA')
pyplot.xticks(rotation=30)
thinkplot.Save(root='timeseries10',
formats=FORMATS)
def PlotFilled(daily, name):
"""Plots the EWMA and filled data.
daily: DataFrame of daily prices
"""
filled = FillMissing(daily, span=30)
thinkplot.Scatter(filled.ppg, s=15, alpha=0.3, label=name)
thinkplot.Plot(filled.ewma, label='EWMA', alpha=0.4)
pyplot.xticks(rotation=30)
thinkplot.Save(root='timeseries8',
ylabel='price per gram ($)',
formats=FORMATS)
def PlotLinearModel(daily, name):
"""Plots a linear fit to a sequence of prices, and the residuals.
daily: DataFrame of daily prices
name: string
"""
model, results = RunLinearModel(daily)
PlotFittedValues(model, results, label=name)
thinkplot.Save(root='timeseries2',
title='fitted values',
xlabel='years',
xlim=[-0.1, 3.8],
ylabel='price per gram ($)',
formats=FORMATS)
PlotResidualPercentiles(model, results)
thinkplot.Save(root='timeseries3',
title='residuals',
xlabel='years',
ylabel='price per gram ($)',
formats=FORMATS)
#years = np.linspace(0, 5, 101)
#predict = GenerateSimplePrediction(results, years)
def main(name):
thinkstats2.RandomSeed(18)
transactions = ReadData()
dailies = GroupByQualityAndDay(transactions)
PlotDailies(dailies)
RunModels(dailies)
PrintSerialCorrelations(dailies)
MakeAcfPlot(dailies)
name = 'high'
daily = dailies[name]
PlotLinearModel(daily, name)
PlotRollingMean(daily, name)
PlotFilled(daily, name)
years = np.linspace(0, 5, 101)
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
PlotPredictions(daily, years)
xlim = years[0]-0.1, years[-1]+0.1
thinkplot.Save(root='timeseries4',
title='predictions',
xlabel='years',
xlim=xlim,
ylabel='price per gram ($)',
formats=FORMATS)
name = 'medium'
daily = dailies[name]
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
PlotIntervals(daily, years)
PlotPredictions(daily, years)
xlim = years[0]-0.1, years[-1]+0.1
thinkplot.Save(root='timeseries5',
title='predictions',
xlabel='years',
xlim=xlim,
ylabel='price per gram ($)',
formats=FORMATS)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 | 3,049,585,497,129,932,000 | 27.995177 | 78 | 0.636096 | false |
nowopen/scrapy | tests/test_http_response.py | 95 | 19527 | import unittest
import six
from w3lib.encoding import resolve_encoding
from scrapy.http import (Request, Response, TextResponse, HtmlResponse,
XmlResponse, Headers)
from scrapy.selector import Selector
from scrapy.utils.python import to_native_str
class BaseResponseTest(unittest.TestCase):
response_class = Response
def test_init(self):
        # Response requires url in the constructor
self.assertRaises(Exception, self.response_class)
self.assertTrue(isinstance(self.response_class('http://example.com/'), self.response_class))
# body can be str or None
self.assertTrue(isinstance(self.response_class('http://example.com/', body=b''), self.response_class))
self.assertTrue(isinstance(self.response_class('http://example.com/', body=b'body'), self.response_class))
# test presence of all optional parameters
self.assertTrue(isinstance(self.response_class('http://example.com/', body=b'', headers={}, status=200), self.response_class))
r = self.response_class("http://www.example.com")
assert isinstance(r.url, str)
self.assertEqual(r.url, "http://www.example.com")
self.assertEqual(r.status, 200)
assert isinstance(r.headers, Headers)
self.assertEqual(r.headers, {})
headers = {"foo": "bar"}
body = b"a body"
r = self.response_class("http://www.example.com", headers=headers, body=body)
assert r.headers is not headers
self.assertEqual(r.headers[b"foo"], b"bar")
r = self.response_class("http://www.example.com", status=301)
self.assertEqual(r.status, 301)
r = self.response_class("http://www.example.com", status='301')
self.assertEqual(r.status, 301)
self.assertRaises(ValueError, self.response_class, "http://example.com", status='lala200')
def test_copy(self):
"""Test Response copy"""
r1 = self.response_class("http://www.example.com", body=b"Some body")
r1.flags.append('cached')
r2 = r1.copy()
self.assertEqual(r1.status, r2.status)
self.assertEqual(r1.body, r2.body)
# make sure flags list is shallow copied
assert r1.flags is not r2.flags, "flags must be a shallow copy, not identical"
self.assertEqual(r1.flags, r2.flags)
# make sure headers attribute is shallow copied
assert r1.headers is not r2.headers, "headers must be a shallow copy, not identical"
self.assertEqual(r1.headers, r2.headers)
def test_copy_meta(self):
req = Request("http://www.example.com")
req.meta['foo'] = 'bar'
r1 = self.response_class("http://www.example.com", body=b"Some body", request=req)
assert r1.meta is req.meta
def test_copy_inherited_classes(self):
"""Test Response children copies preserve their class"""
class CustomResponse(self.response_class):
pass
r1 = CustomResponse('http://www.example.com')
r2 = r1.copy()
assert type(r2) is CustomResponse
def test_replace(self):
"""Test Response.replace() method"""
hdrs = Headers({"key": "value"})
r1 = self.response_class("http://www.example.com")
r2 = r1.replace(status=301, body=b"New body", headers=hdrs)
assert r1.body == b''
self.assertEqual(r1.url, r2.url)
self.assertEqual((r1.status, r2.status), (200, 301))
self.assertEqual((r1.body, r2.body), (b'', b"New body"))
self.assertEqual((r1.headers, r2.headers), ({}, hdrs))
# Empty attributes (which may fail if not compared properly)
r3 = self.response_class("http://www.example.com", flags=['cached'])
r4 = r3.replace(body=b'', flags=[])
self.assertEqual(r4.body, b'')
self.assertEqual(r4.flags, [])
def _assert_response_values(self, response, encoding, body):
if isinstance(body, six.text_type):
body_unicode = body
body_bytes = body.encode(encoding)
else:
body_unicode = body.decode(encoding)
body_bytes = body
assert isinstance(response.body, bytes)
self._assert_response_encoding(response, encoding)
self.assertEqual(response.body, body_bytes)
self.assertEqual(response.body_as_unicode(), body_unicode)
def _assert_response_encoding(self, response, encoding):
self.assertEqual(response.encoding, resolve_encoding(encoding))
def test_immutable_attributes(self):
r = self.response_class("http://example.com")
self.assertRaises(AttributeError, setattr, r, 'url', 'http://example2.com')
self.assertRaises(AttributeError, setattr, r, 'body', 'xxx')
def test_urljoin(self):
"""Test urljoin shortcut (only for existence, since behavior equals urljoin)"""
joined = self.response_class('http://www.example.com').urljoin('/test')
absolute = 'http://www.example.com/test'
self.assertEqual(joined, absolute)
class TextResponseTest(BaseResponseTest):
response_class = TextResponse
def test_replace(self):
super(TextResponseTest, self).test_replace()
r1 = self.response_class("http://www.example.com", body="hello", encoding="cp852")
r2 = r1.replace(url="http://www.example.com/other")
r3 = r1.replace(url="http://www.example.com/other", encoding="latin1")
assert isinstance(r2, self.response_class)
self.assertEqual(r2.url, "http://www.example.com/other")
self._assert_response_encoding(r2, "cp852")
self.assertEqual(r3.url, "http://www.example.com/other")
self.assertEqual(r3._declared_encoding(), "latin1")
def test_unicode_url(self):
# instantiate with unicode url without encoding (should set default encoding)
resp = self.response_class(u"http://www.example.com/")
self._assert_response_encoding(resp, self.response_class._DEFAULT_ENCODING)
# make sure urls are converted to str
resp = self.response_class(url=u"http://www.example.com/", encoding='utf-8')
assert isinstance(resp.url, str)
resp = self.response_class(url=u"http://www.example.com/price/\xa3", encoding='utf-8')
self.assertEqual(resp.url, to_native_str(b'http://www.example.com/price/\xc2\xa3'))
resp = self.response_class(url=u"http://www.example.com/price/\xa3", encoding='latin-1')
self.assertEqual(resp.url, 'http://www.example.com/price/\xa3')
resp = self.response_class(u"http://www.example.com/price/\xa3", headers={"Content-type": ["text/html; charset=utf-8"]})
self.assertEqual(resp.url, to_native_str(b'http://www.example.com/price/\xc2\xa3'))
resp = self.response_class(u"http://www.example.com/price/\xa3", headers={"Content-type": ["text/html; charset=iso-8859-1"]})
self.assertEqual(resp.url, 'http://www.example.com/price/\xa3')
def test_unicode_body(self):
unicode_string = u'\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0447\u0435\u0441\u043a\u0438\u0439 \u0442\u0435\u043a\u0441\u0442'
self.assertRaises(TypeError, self.response_class, 'http://www.example.com', body=u'unicode body')
original_string = unicode_string.encode('cp1251')
r1 = self.response_class('http://www.example.com', body=original_string, encoding='cp1251')
# check body_as_unicode
self.assertTrue(isinstance(r1.body_as_unicode(), six.text_type))
self.assertEqual(r1.body_as_unicode(), unicode_string)
def test_encoding(self):
r1 = self.response_class("http://www.example.com", headers={"Content-type": ["text/html; charset=utf-8"]}, body=b"\xc2\xa3")
r2 = self.response_class("http://www.example.com", encoding='utf-8', body=u"\xa3")
r3 = self.response_class("http://www.example.com", headers={"Content-type": ["text/html; charset=iso-8859-1"]}, body=b"\xa3")
r4 = self.response_class("http://www.example.com", body=b"\xa2\xa3")
r5 = self.response_class("http://www.example.com", headers={"Content-type": ["text/html; charset=None"]}, body=b"\xc2\xa3")
r6 = self.response_class("http://www.example.com", headers={"Content-type": ["text/html; charset=gb2312"]}, body=b"\xa8D")
r7 = self.response_class("http://www.example.com", headers={"Content-type": ["text/html; charset=gbk"]}, body=b"\xa8D")
self.assertEqual(r1._headers_encoding(), "utf-8")
self.assertEqual(r2._headers_encoding(), None)
self.assertEqual(r2._declared_encoding(), 'utf-8')
self._assert_response_encoding(r2, 'utf-8')
self.assertEqual(r3._headers_encoding(), "cp1252")
self.assertEqual(r3._declared_encoding(), "cp1252")
self.assertEqual(r4._headers_encoding(), None)
self.assertEqual(r5._headers_encoding(), None)
self._assert_response_encoding(r5, "utf-8")
assert r4._body_inferred_encoding() is not None and r4._body_inferred_encoding() != 'ascii'
self._assert_response_values(r1, 'utf-8', u"\xa3")
self._assert_response_values(r2, 'utf-8', u"\xa3")
self._assert_response_values(r3, 'iso-8859-1', u"\xa3")
self._assert_response_values(r6, 'gb18030', u"\u2015")
self._assert_response_values(r7, 'gb18030', u"\u2015")
# TextResponse (and subclasses) must be passed a encoding when instantiating with unicode bodies
self.assertRaises(TypeError, self.response_class, "http://www.example.com", body=u"\xa3")
def test_declared_encoding_invalid(self):
"""Check that unknown declared encodings are ignored"""
r = self.response_class("http://www.example.com",
headers={"Content-type": ["text/html; charset=UKNOWN"]},
body=b"\xc2\xa3")
self.assertEqual(r._declared_encoding(), None)
self._assert_response_values(r, 'utf-8', u"\xa3")
def test_utf16(self):
"""Test utf-16 because UnicodeDammit is known to have problems with"""
r = self.response_class("http://www.example.com",
body=b'\xff\xfeh\x00i\x00',
encoding='utf-16')
self._assert_response_values(r, 'utf-16', u"hi")
def test_invalid_utf8_encoded_body_with_valid_utf8_BOM(self):
r6 = self.response_class("http://www.example.com",
headers={"Content-type": ["text/html; charset=utf-8"]},
body=b"\xef\xbb\xbfWORD\xe3\xab")
self.assertEqual(r6.encoding, 'utf-8')
self.assertEqual(r6.body_as_unicode(), u'WORD\ufffd\ufffd')
def test_bom_is_removed_from_body(self):
        # Inferring the encoding from the body also caches the decoded body
        # as a side effect. This test tries to ensure that calling
        # response.encoding and response.body_as_unicode() in either order
        # doesn't affect the final values for encoding and decoded body.
url = 'http://example.com'
body = b"\xef\xbb\xbfWORD"
headers = {"Content-type": ["text/html; charset=utf-8"]}
# Test response without content-type and BOM encoding
response = self.response_class(url, body=body)
self.assertEqual(response.encoding, 'utf-8')
self.assertEqual(response.body_as_unicode(), u'WORD')
response = self.response_class(url, body=body)
self.assertEqual(response.body_as_unicode(), u'WORD')
self.assertEqual(response.encoding, 'utf-8')
        # The body-caching side effect isn't triggered when the encoding is
        # declared in the Content-Type header, but the BOM still needs to be
        # removed from the decoded body.
response = self.response_class(url, headers=headers, body=body)
self.assertEqual(response.encoding, 'utf-8')
self.assertEqual(response.body_as_unicode(), u'WORD')
response = self.response_class(url, headers=headers, body=body)
self.assertEqual(response.body_as_unicode(), u'WORD')
self.assertEqual(response.encoding, 'utf-8')
def test_replace_wrong_encoding(self):
"""Test invalid chars are replaced properly"""
r = self.response_class("http://www.example.com", encoding='utf-8', body=b'PREFIX\xe3\xabSUFFIX')
# XXX: Policy for replacing invalid chars may suffer minor variations
# but it should always contain the unicode replacement char (u'\ufffd')
assert u'\ufffd' in r.body_as_unicode(), repr(r.body_as_unicode())
assert u'PREFIX' in r.body_as_unicode(), repr(r.body_as_unicode())
assert u'SUFFIX' in r.body_as_unicode(), repr(r.body_as_unicode())
# Do not destroy html tags due to encoding bugs
r = self.response_class("http://example.com", encoding='utf-8', \
body=b'\xf0<span>value</span>')
assert u'<span>value</span>' in r.body_as_unicode(), repr(r.body_as_unicode())
# FIXME: This test should pass once we stop using BeautifulSoup's UnicodeDammit in TextResponse
#r = self.response_class("http://www.example.com", body='PREFIX\xe3\xabSUFFIX')
#assert u'\ufffd' in r.body_as_unicode(), repr(r.body_as_unicode())
def test_selector(self):
body = b"<html><head><title>Some page</title><body></body></html>"
response = self.response_class("http://www.example.com", body=body)
self.assertIsInstance(response.selector, Selector)
self.assertEqual(response.selector.type, 'html')
self.assertIs(response.selector, response.selector) # property is cached
self.assertIs(response.selector.response, response)
self.assertEqual(
response.selector.xpath("//title/text()").extract(),
[u'Some page']
)
self.assertEqual(
response.selector.css("title::text").extract(),
[u'Some page']
)
self.assertEqual(
response.selector.re("Some (.*)</title>"),
[u'page']
)
def test_selector_shortcuts(self):
body = b"<html><head><title>Some page</title><body></body></html>"
response = self.response_class("http://www.example.com", body=body)
self.assertEqual(
response.xpath("//title/text()").extract(),
response.selector.xpath("//title/text()").extract(),
)
self.assertEqual(
response.css("title::text").extract(),
response.selector.css("title::text").extract(),
)
def test_urljoin_with_base_url(self):
"""Test urljoin shortcut which also evaluates base-url through get_base_url()."""
body = b'<html><body><base href="https://example.net"></body></html>'
joined = self.response_class('http://www.example.com', body=body).urljoin('/test')
absolute = 'https://example.net/test'
self.assertEqual(joined, absolute)
body = b'<html><body><base href="/elsewhere"></body></html>'
joined = self.response_class('http://www.example.com', body=body).urljoin('test')
absolute = 'http://www.example.com/test'
self.assertEqual(joined, absolute)
body = b'<html><body><base href="/elsewhere/"></body></html>'
joined = self.response_class('http://www.example.com', body=body).urljoin('test')
absolute = 'http://www.example.com/elsewhere/test'
self.assertEqual(joined, absolute)
class HtmlResponseTest(TextResponseTest):
response_class = HtmlResponse
def test_html_encoding(self):
body = b"""<html><head><title>Some page</title><meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
</head><body>Price: \xa3100</body></html>'
"""
r1 = self.response_class("http://www.example.com", body=body)
self._assert_response_values(r1, 'iso-8859-1', body)
body = b"""<?xml version="1.0" encoding="iso-8859-1"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
Price: \xa3100
"""
r2 = self.response_class("http://www.example.com", body=body)
self._assert_response_values(r2, 'iso-8859-1', body)
# for conflicting declarations headers must take precedence
body = b"""<html><head><title>Some page</title><meta http-equiv="Content-Type" content="text/html; charset=utf-8">
</head><body>Price: \xa3100</body></html>'
"""
r3 = self.response_class("http://www.example.com", headers={"Content-type": ["text/html; charset=iso-8859-1"]}, body=body)
self._assert_response_values(r3, 'iso-8859-1', body)
# make sure replace() preserves the encoding of the original response
body = b"New body \xa3"
r4 = r3.replace(body=body)
self._assert_response_values(r4, 'iso-8859-1', body)
def test_html5_meta_charset(self):
body = b"""<html><head><meta charset="gb2312" /><title>Some page</title><body>bla bla</body>"""
r1 = self.response_class("http://www.example.com", body=body)
self._assert_response_values(r1, 'gb2312', body)
class XmlResponseTest(TextResponseTest):
response_class = XmlResponse
def test_xml_encoding(self):
body = b"<xml></xml>"
r1 = self.response_class("http://www.example.com", body=body)
self._assert_response_values(r1, self.response_class._DEFAULT_ENCODING, body)
body = b"""<?xml version="1.0" encoding="iso-8859-1"?><xml></xml>"""
r2 = self.response_class("http://www.example.com", body=body)
self._assert_response_values(r2, 'iso-8859-1', body)
# make sure replace() preserves the explicit encoding passed in the constructor
body = b"""<?xml version="1.0" encoding="iso-8859-1"?><xml></xml>"""
r3 = self.response_class("http://www.example.com", body=body, encoding='utf-8')
body2 = b"New body"
r4 = r3.replace(body=body2)
self._assert_response_values(r4, 'utf-8', body2)
def test_replace_encoding(self):
# make sure replace() keeps the previous encoding unless overridden explicitly
body = b"""<?xml version="1.0" encoding="iso-8859-1"?><xml></xml>"""
body2 = b"""<?xml version="1.0" encoding="utf-8"?><xml></xml>"""
r5 = self.response_class("http://www.example.com", body=body)
r6 = r5.replace(body=body2)
r7 = r5.replace(body=body2, encoding='utf-8')
self._assert_response_values(r5, 'iso-8859-1', body)
self._assert_response_values(r6, 'iso-8859-1', body2)
self._assert_response_values(r7, 'utf-8', body2)
def test_selector(self):
body = b'<?xml version="1.0" encoding="utf-8"?><xml><elem>value</elem></xml>'
response = self.response_class("http://www.example.com", body=body)
self.assertIsInstance(response.selector, Selector)
self.assertEqual(response.selector.type, 'xml')
self.assertIs(response.selector, response.selector) # property is cached
self.assertIs(response.selector.response, response)
self.assertEqual(
response.selector.xpath("//elem/text()").extract(),
[u'value']
)
def test_selector_shortcuts(self):
body = b'<?xml version="1.0" encoding="utf-8"?><xml><elem>value</elem></xml>'
response = self.response_class("http://www.example.com", body=body)
self.assertEqual(
response.xpath("//elem/text()").extract(),
response.selector.xpath("//elem/text()").extract(),
)
| bsd-3-clause | 1,001,461,029,639,952,000 | 46.743276 | 137 | 0.629283 | false |
commtrack/commtrack-core | apps/maps/views.py | 1 | 2488 | import datetime
#import simplejson as json
from django.utils import simplejson as json
from django.db.models.query_utils import Q
from django.shortcuts import get_object_or_404
from django.template.loader import render_to_string
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.http import HttpResponseRedirect, Http404
from django.core.exceptions import *
from domain.decorators import login_and_domain_required
from rapidsms.webui.utils import render_to_response, paginated
from facilities.models import Facility
from maps.utils import encode_myway
from maps.forms import FilterChoiceForm
from resources.models import Resource
@login_and_domain_required
def index(req):
facilities = Facility.objects.all().order_by('name')
if req.method == 'POST': # If the form has been submitted...
form = FilterChoiceForm(req.POST) # A form bound to the POST data
if form.is_valid(): # All validation rules pass
            # the form is not saved; its cleaned data is only used for filtering
            # form.save()
start_date = form.cleaned_data["start_date"]
end_date = form.cleaned_data['end_date']
status = form.cleaned_data['resource_status']
resources = Resource.objects.filter(facility__in = facilities)
resources = resources.filter(status__in = status)
facilities = []
for resource in resources:
facilities.append(resource.facility)
else:
form = FilterChoiceForm() # An unbound form
return render_to_response(req,
'mapindex.html',
{
'facilities': facilities,
'form': form,
},
)
@login_and_domain_required
def map_resource(req,pk):
resource = get_object_or_404(Resource, pk=pk)
if resource:
# get a coordinate for the resource
# currently depends on the assigned facility
# TODO: allow resource to have independent coordinates
point = resource.facility
return render_to_response(req,
'generic_map.html',
{
'point': point,
'resource': resource,
}
) | bsd-3-clause | -7,265,024,699,764,267,000 | 37.292308 | 74 | 0.57717 | false |
emgreen33/easy_bake | pin_lights.py | 1 | 1041 | # import RPi.GPIO as gpio
# import time
# #use board numbering on the pi
# gpio.setmode(gpio.BOARD)
# # output_pins = [40, 38]
# output_pins = 16
# gpio.setup(output_pins, gpio.OUT)
# #true and 1 are the same
# # gpio.output(40, True)
# # gpio.output(38, 1)
# while True:
# gpio.output(output_pins, (True, False))
# # # gpio.output(40, True)
# # # gpio.output(38, False)
# time.sleep(1)
# # # gpio.output(40, False)
# # # gpio.output(38, True)
# gpio.output(output_pins, (False, True))
# time.sleep(1)
# gpio.cleanup()
import RPi.GPIO as gpio
import time
#use board numbering on the pi
gpio.setmode(gpio.BOARD)
output_pins = [40, 38, 10]
gpio.setup(output_pins, gpio.OUT)
#true and 1 are the same
# gpio.output(40, True)
# gpio.output(38, 1)
try:
    while True:
        # a tuple passed to gpio.output() sets each pin in output_pins at once
        gpio.output(output_pins, (True, False, False))
        # gpio.output(40, True)
        # gpio.output(38, False)
        time.sleep(2)
        # gpio.output(40, False)
        # gpio.output(38, True)
        gpio.output(output_pins, (False, True, True))
        time.sleep(2)
except KeyboardInterrupt:
    pass
finally:
    # the cleanup call was unreachable after the infinite loop; run it on exit
    gpio.cleanup()
| mit | 2,288,275,546,881,519,000 | 16.948276 | 48 | 0.644573 | false |
dparshin/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/watchlist/amountchangedpattern.py | 134 | 3279 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class AmountChangedPattern:
def __init__(self, compile_regex, index_for_zero_value):
self._regex = compile_regex
self._index_for_zero_value = index_for_zero_value
def match(self, path, diff_file):
examined_strings = set()
for diff_line in diff_file:
if diff_line[self._index_for_zero_value]:
continue
match = self._regex.search(diff_line[2])
if not match:
continue
matching_string = match.group(0)
if matching_string in examined_strings:
continue
if self._instance_difference(diff_file, matching_string) > 0:
return True
# Avoid reprocessing this same string.
examined_strings.add(matching_string)
return False
def _instance_difference(self, diff_file, matching_string):
        '''Returns the difference between the number of string occurrences in
the added lines and deleted lines (which one is subtracted from the
other depends on _index_for_zero_value).'''
count = 0
for diff_line in diff_file:
# If the line is unchanged, then don't examine it.
if diff_line[self._index_for_zero_value] and diff_line[1 - self._index_for_zero_value]:
continue
location_found = -len(matching_string)
while True:
location_found = diff_line[2].find(matching_string, location_found + len(matching_string))
if location_found == -1:
break
if not diff_line[self._index_for_zero_value]:
count += 1
else:
count -= 1
return count
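# Illustrative usage (the regex and variable names below are assumptions for
# demonstration, not part of this module):
#
#     import re
#     pattern = AmountChangedPattern(re.compile(r'\bLOG\('), 0)
#     matched = pattern.match('Source/WebCore/foo.cpp', diff_file)
#
# Each diff_file entry is indexed as diff_line[0]/diff_line[1] (which side of
# the diff the line belongs to) and diff_line[2] (the line text), matching the
# accesses in match() and _instance_difference() above.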
| bsd-3-clause | 1,042,983,532,930,626,700 | 45.842857 | 106 | 0.665752 | false |
TalShafir/ansible | lib/ansible/modules/network/cnos/cnos_image.py | 14 | 8925 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to download new image to Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_image
author: "Anil Kumar Muraleedharan (@amuraleedhar)"
short_description: Perform firmware upgrade/download from a remote server on
devices running Lenovo CNOS
description:
- This module allows you to work with switch firmware images. It provides a
way to download a firmware image to a network device from a remote server
using FTP, SFTP, TFTP, or SCP. The first step is to create a directory
from where the remote server can be reached. The next step is to provide
the full file path of the image's location. Authentication details
required by the remote server must be provided as well. By default, this
method makes the newly downloaded firmware image the active image, which
will be used by the switch during the next restart.
This module uses SSH to manage network device configuration.
The results of the operation will be placed in a directory named 'results'
that must be created by the user in their local directory to where the
playbook is run.
version_added: "2.3"
extends_documentation_fragment: cnos
options:
protocol:
description:
- This refers to the protocol used by the network device to
interact with the remote server from where to download the
firmware image. The choices are FTP, SFTP, TFTP, or SCP. Any other
protocols will result in error. If this parameter is not specified
there is no default value to be used.
required: true
choices: [SFTP, SCP, FTP, TFTP]
serverip:
description:
- This specifies the IP Address of the remote server from where the
software image will be downloaded.
required: true
imgpath:
description:
- This specifies the full file path of the image located on the
remote server. In case the relative path is used as the variable
value, the root folder for the user of the server needs to be
specified.
required: true
imgtype:
description:
- This specifies the firmware image type to be downloaded
required: true
choices: [all, boot, os, onie]
serverusername:
description:
- Specify the username for the server relating to the protocol used
required: true
serverpassword:
description:
- Specify the password for the server relating to the protocol used
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_image. These are
written in the main.yml file of the tasks directory.
---
- name: Test Image transfer
cnos_image:
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
outputfile: "./results/test_image_{{ inventory_hostname }}_output.txt"
protocol: "sftp"
serverip: "10.241.106.118"
imgpath: "/root/cnos_images/G8272-10.1.0.112.img"
imgtype: "os"
serverusername: "root"
serverpassword: "root123"
- name: Test Image tftp
cnos_image:
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
outputfile: "./results/test_image_{{ inventory_hostname }}_output.txt"
protocol: "tftp"
serverip: "10.241.106.118"
imgpath: "/anil/G8272-10.2.0.34.img"
imgtype: "os"
serverusername: "root"
serverpassword: "root123"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "Image file tranferred to device"
'''
import sys
import time
import socket
import array
import json
import re
import os
try:
from ansible.module_utils.network.cnos import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def doImageDownload(module, prompt, answer):
protocol = module.params['protocol'].lower()
server = module.params['serverip']
imgPath = module.params['imgpath']
imgType = module.params['imgtype']
username = module.params['serverusername']
password = module.params['serverpassword']
retVal = ''
command = "copy " + protocol + " " + protocol + "://" + username + "@"
command = command + server + "/" + imgPath + " system-image "
command = command + imgType + " vrf management"
cmd = []
if(protocol == "scp"):
prompt = ['timeout', 'Confirm download operation', 'Password',
'Do you want to change that to the standby image']
answer = ['240', 'y', password, 'y']
scp_cmd = [{'command': command, 'prompt': prompt, 'answer': answer,
'check_all': True}]
cmd.extend(scp_cmd)
retVal = retVal + str(cnos.run_cnos_commands(module, cmd))
elif(protocol == "sftp"):
prompt = ['Confirm download operation', 'Password',
'Do you want to change that to the standby image']
answer = ['y', password, 'y']
sftp_cmd = [{'command': command, 'prompt': prompt, 'answer': answer,
'check_all': True}]
cmd.extend(sftp_cmd)
retVal = retVal + str(cnos.run_cnos_commands(module, cmd))
elif(protocol == "ftp"):
prompt = ['Confirm download operation', 'Password',
'Do you want to change that to the standby image']
answer = ['y', password, 'y']
ftp_cmd = [{'command': command, 'prompt': prompt, 'answer': answer,
'check_all': True}]
cmd.extend(ftp_cmd)
retVal = retVal + str(cnos.run_cnos_commands(module, cmd))
elif(protocol == "tftp"):
command = "copy " + protocol + " " + protocol + "://" + server
command = command + "/" + imgPath + " system-image " + imgType
command = command + " vrf management"
prompt = ['Confirm download operation',
'Do you want to change that to the standby image']
answer = ['y', 'y']
tftp_cmd = [{'command': command, 'prompt': prompt, 'answer': answer,
'check_all': True}]
cmd.extend(tftp_cmd)
retVal = retVal + str(cnos.run_cnos_commands(module, cmd))
else:
return "Error-110"
return retVal
# EOM
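# For illustration only (all values below are placeholders): with
# protocol=sftp, serverip=10.241.106.118, imgpath=/root/img.img, imgtype=os
# and serverusername=root, the command assembled above and sent to the switch
# would be:
#
#   copy sftp sftp://[email protected]//root/img.img system-image os vrf management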
def main():
module = AnsibleModule(
argument_spec=dict(
outputfile=dict(required=True),
host=dict(required=False),
username=dict(required=False),
password=dict(required=False, no_log=True),
enablePassword=dict(required=False, no_log=True),
deviceType=dict(required=True),
protocol=dict(required=True),
serverip=dict(required=True),
imgpath=dict(required=True),
imgtype=dict(required=True),
serverusername=dict(required=False),
serverpassword=dict(required=False, no_log=True),),
supports_check_mode=False)
outputfile = module.params['outputfile']
protocol = module.params['protocol'].lower()
output = ''
# Invoke method for image transfer from server
if(protocol == "tftp" or protocol == "ftp" or protocol == "sftp" or
protocol == "scp"):
transfer_status = doImageDownload(module, None, None)
else:
transfer_status = "Invalid Protocol option"
output = output + "\n Image Transfer status \n" + transfer_status
# Save it into the file
path = outputfile.rsplit('/', 1)
if not os.path.exists(path[0]):
os.makedirs(path[0])
file = open(outputfile, "a")
file.write(output)
file.close()
# Logic to check when changes occur or not
errorMsg = cnos.checkOutputForError(output)
if(errorMsg is None):
module.exit_json(changed=True, msg="Image file tranferred to device")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
| gpl-3.0 | 5,461,305,516,231,691,000 | 36.1875 | 79 | 0.63619 | false |
AMOboxTV/AMOBox.LegoBuild | script.module.youtube.dl/lib/youtube_dl/extractor/camdemy.py | 124 | 5425 | # coding: utf-8
from __future__ import unicode_literals
import datetime
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urlparse,
)
from ..utils import (
parse_iso8601,
str_to_int,
)
class CamdemyIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?camdemy\.com/media/(?P<id>\d+)'
_TESTS = [{
# single file
'url': 'http://www.camdemy.com/media/5181/',
'md5': '5a5562b6a98b37873119102e052e311b',
'info_dict': {
'id': '5181',
'ext': 'mp4',
'title': 'Ch1-1 Introduction, Signals (02-23-2012)',
'thumbnail': 're:^https?://.*\.jpg$',
'description': '',
'creator': 'ss11spring',
'upload_date': '20130114',
'timestamp': 1358154556,
'view_count': int,
}
}, {
# With non-empty description
'url': 'http://www.camdemy.com/media/13885',
'md5': '4576a3bb2581f86c61044822adbd1249',
'info_dict': {
'id': '13885',
'ext': 'mp4',
'title': 'EverCam + Camdemy QuickStart',
'thumbnail': 're:^https?://.*\.jpg$',
'description': 'md5:050b62f71ed62928f8a35f1a41e186c9',
'creator': 'evercam',
'upload_date': '20140620',
'timestamp': 1403271569,
}
}, {
# External source
'url': 'http://www.camdemy.com/media/14842',
'md5': '50e1c3c3aa233d3d7b7daa2fa10b1cf7',
'info_dict': {
'id': '2vsYQzNIsJo',
'ext': 'mp4',
'upload_date': '20130211',
'uploader': 'Hun Kim',
'description': 'Excel 2013 Tutorial for Beginners - How to add Password Protection',
'uploader_id': 'hunkimtutorials',
'title': 'Excel 2013 Tutorial - How to add Password Protection',
}
}]
def _real_extract(self, url):
video_id = self._match_id(url)
page = self._download_webpage(url, video_id)
src_from = self._html_search_regex(
r"<div class='srcFrom'>Source: <a title='([^']+)'", page,
'external source', default=None)
if src_from:
return self.url_result(src_from)
oembed_obj = self._download_json(
'http://www.camdemy.com/oembed/?format=json&url=' + url, video_id)
thumb_url = oembed_obj['thumbnail_url']
video_folder = compat_urlparse.urljoin(thumb_url, 'video/')
file_list_doc = self._download_xml(
compat_urlparse.urljoin(video_folder, 'fileList.xml'),
video_id, 'Filelist XML')
file_name = file_list_doc.find('./video/item/fileName').text
video_url = compat_urlparse.urljoin(video_folder, file_name)
timestamp = parse_iso8601(self._html_search_regex(
r"<div class='title'>Posted\s*:</div>\s*<div class='value'>([^<>]+)<",
page, 'creation time', fatal=False),
delimiter=' ', timezone=datetime.timedelta(hours=8))
view_count = str_to_int(self._html_search_regex(
r"<div class='title'>Views\s*:</div>\s*<div class='value'>([^<>]+)<",
page, 'view count', fatal=False))
return {
'id': video_id,
'url': video_url,
'title': oembed_obj['title'],
'thumbnail': thumb_url,
'description': self._html_search_meta('description', page),
'creator': oembed_obj['author_name'],
'duration': oembed_obj['duration'],
'timestamp': timestamp,
'view_count': view_count,
}
class CamdemyFolderIE(InfoExtractor):
_VALID_URL = r'http://www.camdemy.com/folder/(?P<id>\d+)'
_TESTS = [{
# links with trailing slash
'url': 'http://www.camdemy.com/folder/450',
'info_dict': {
'id': '450',
'title': '信號與系統 2012 & 2011 (Signals and Systems)',
},
'playlist_mincount': 145
}, {
# links without trailing slash
# and multi-page
'url': 'http://www.camdemy.com/folder/853',
'info_dict': {
'id': '853',
'title': '科學計算 - 使用 Matlab'
},
'playlist_mincount': 20
}, {
# with displayMode parameter. For testing the codes to add parameters
'url': 'http://www.camdemy.com/folder/853/?displayMode=defaultOrderByOrg',
'info_dict': {
'id': '853',
'title': '科學計算 - 使用 Matlab'
},
'playlist_mincount': 20
}]
def _real_extract(self, url):
folder_id = self._match_id(url)
# Add displayMode=list so that all links are displayed in a single page
parsed_url = list(compat_urlparse.urlparse(url))
query = dict(compat_urlparse.parse_qsl(parsed_url[4]))
query.update({'displayMode': 'list'})
parsed_url[4] = compat_urllib_parse.urlencode(query)
final_url = compat_urlparse.urlunparse(parsed_url)
page = self._download_webpage(final_url, folder_id)
matches = re.findall(r"href='(/media/\d+/?)'", page)
entries = [self.url_result('http://www.camdemy.com' + media_path)
for media_path in matches]
folder_title = self._html_search_meta('keywords', page)
return self.playlist_result(entries, folder_id, folder_title)
| gpl-2.0 | -2,679,662,175,431,187,000 | 34.235294 | 96 | 0.543684 | false |
rtucker-mozilla/mozpackager | vendor-local/lib/python/jinja2/debug.py | 112 | 11028 | # -*- coding: utf-8 -*-
"""
jinja2.debug
~~~~~~~~~~~~
Implements the debug interface for Jinja. This module does some pretty
ugly stuff with the Python traceback system in order to achieve tracebacks
with correct line numbers, locals and contents.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import sys
import traceback
from types import TracebackType
from jinja2.utils import CodeType, missing, internal_code
from jinja2.exceptions import TemplateSyntaxError
# on pypy we can take advantage of transparent proxies
try:
from __pypy__ import tproxy
except ImportError:
tproxy = None
# how does the raise helper look like?
try:
exec "raise TypeError, 'foo'"
except SyntaxError:
raise_helper = 'raise __jinja_exception__[1]'
except TypeError:
raise_helper = 'raise __jinja_exception__[0], __jinja_exception__[1]'
class TracebackFrameProxy(object):
"""Proxies a traceback frame."""
def __init__(self, tb):
self.tb = tb
self._tb_next = None
@property
def tb_next(self):
return self._tb_next
def set_next(self, next):
if tb_set_next is not None:
try:
tb_set_next(self.tb, next and next.tb or None)
except Exception:
# this function can fail due to all the hackery it does
                # on various python implementations. We just catch the errors
                # here and ignore them if necessary.
pass
self._tb_next = next
@property
def is_jinja_frame(self):
return '__jinja_template__' in self.tb.tb_frame.f_globals
def __getattr__(self, name):
return getattr(self.tb, name)
def make_frame_proxy(frame):
proxy = TracebackFrameProxy(frame)
if tproxy is None:
return proxy
def operation_handler(operation, *args, **kwargs):
if operation in ('__getattribute__', '__getattr__'):
return getattr(proxy, args[0])
elif operation == '__setattr__':
proxy.__setattr__(*args, **kwargs)
else:
return getattr(proxy, operation)(*args, **kwargs)
return tproxy(TracebackType, operation_handler)
class ProcessedTraceback(object):
"""Holds a Jinja preprocessed traceback for priting or reraising."""
def __init__(self, exc_type, exc_value, frames):
assert frames, 'no frames for this traceback?'
self.exc_type = exc_type
self.exc_value = exc_value
self.frames = frames
# newly concatenate the frames (which are proxies)
prev_tb = None
for tb in self.frames:
if prev_tb is not None:
prev_tb.set_next(tb)
prev_tb = tb
prev_tb.set_next(None)
def render_as_text(self, limit=None):
"""Return a string with the traceback."""
lines = traceback.format_exception(self.exc_type, self.exc_value,
self.frames[0], limit=limit)
return ''.join(lines).rstrip()
def render_as_html(self, full=False):
"""Return a unicode string with the traceback as rendered HTML."""
from jinja2.debugrenderer import render_traceback
return u'%s\n\n<!--\n%s\n-->' % (
render_traceback(self, full=full),
self.render_as_text().decode('utf-8', 'replace')
)
@property
def is_template_syntax_error(self):
"""`True` if this is a template syntax error."""
return isinstance(self.exc_value, TemplateSyntaxError)
@property
def exc_info(self):
"""Exception info tuple with a proxy around the frame objects."""
return self.exc_type, self.exc_value, self.frames[0]
@property
def standard_exc_info(self):
"""Standard python exc_info for re-raising"""
tb = self.frames[0]
# the frame will be an actual traceback (or transparent proxy) if
# we are on pypy or a python implementation with support for tproxy
if type(tb) is not TracebackType:
tb = tb.tb
return self.exc_type, self.exc_value, tb
def make_traceback(exc_info, source_hint=None):
"""Creates a processed traceback object from the exc_info."""
exc_type, exc_value, tb = exc_info
if isinstance(exc_value, TemplateSyntaxError):
exc_info = translate_syntax_error(exc_value, source_hint)
initial_skip = 0
else:
initial_skip = 1
return translate_exception(exc_info, initial_skip)
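# A minimal usage sketch (the template variable is an assumption; this mirrors
# how jinja2's Environment drives this module when an exception escapes a
# render):
#
#     try:
#         template.render()
#     except Exception:
#         processed = make_traceback(sys.exc_info())
#         print processed.render_as_text()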
def translate_syntax_error(error, source=None):
"""Rewrites a syntax error to please traceback systems."""
error.source = source
error.translated = True
exc_info = (error.__class__, error, None)
filename = error.filename
if filename is None:
filename = '<unknown>'
return fake_exc_info(exc_info, filename, error.lineno)
def translate_exception(exc_info, initial_skip=0):
"""If passed an exc_info it will automatically rewrite the exceptions
all the way down to the correct line numbers and frames.
"""
tb = exc_info[2]
frames = []
# skip some internal frames if wanted
for x in xrange(initial_skip):
if tb is not None:
tb = tb.tb_next
initial_tb = tb
while tb is not None:
# skip frames decorated with @internalcode. These are internal
# calls we can't avoid and that are useless in template debugging
# output.
if tb.tb_frame.f_code in internal_code:
tb = tb.tb_next
continue
# save a reference to the next frame if we override the current
# one with a faked one.
next = tb.tb_next
# fake template exceptions
template = tb.tb_frame.f_globals.get('__jinja_template__')
if template is not None:
lineno = template.get_corresponding_lineno(tb.tb_lineno)
tb = fake_exc_info(exc_info[:2] + (tb,), template.filename,
lineno)[2]
frames.append(make_frame_proxy(tb))
tb = next
# if we don't have any exceptions in the frames left, we have to
# reraise it unchanged.
# XXX: can we backup here? when could this happen?
if not frames:
raise exc_info[0], exc_info[1], exc_info[2]
return ProcessedTraceback(exc_info[0], exc_info[1], frames)
def fake_exc_info(exc_info, filename, lineno):
"""Helper for `translate_exception`."""
exc_type, exc_value, tb = exc_info
# figure the real context out
if tb is not None:
real_locals = tb.tb_frame.f_locals.copy()
ctx = real_locals.get('context')
if ctx:
locals = ctx.get_all()
else:
locals = {}
for name, value in real_locals.iteritems():
if name.startswith('l_') and value is not missing:
locals[name[2:]] = value
# if there is a local called __jinja_exception__, we get
# rid of it to not break the debug functionality.
locals.pop('__jinja_exception__', None)
else:
locals = {}
    # assemble fake globals we need
globals = {
'__name__': filename,
'__file__': filename,
'__jinja_exception__': exc_info[:2],
# we don't want to keep the reference to the template around
# to not cause circular dependencies, but we mark it as Jinja
# frame for the ProcessedTraceback
'__jinja_template__': None
}
# and fake the exception
code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec')
# if it's possible, change the name of the code. This won't work
# on some python environments such as google appengine
try:
if tb is None:
location = 'template'
else:
function = tb.tb_frame.f_code.co_name
if function == 'root':
location = 'top-level template code'
elif function.startswith('block_'):
location = 'block "%s"' % function[6:]
else:
location = 'template'
code = CodeType(0, code.co_nlocals, code.co_stacksize,
code.co_flags, code.co_code, code.co_consts,
code.co_names, code.co_varnames, filename,
location, code.co_firstlineno,
code.co_lnotab, (), ())
except:
pass
# execute the code and catch the new traceback
try:
exec code in globals, locals
except:
exc_info = sys.exc_info()
new_tb = exc_info[2].tb_next
# return without this frame
return exc_info[:2] + (new_tb,)
def _init_ugly_crap():
"""This function implements a few ugly things so that we can patch the
traceback objects. The function returned allows resetting `tb_next` on
    any python traceback object. Do not attempt to use this on non-CPython
    interpreters.
"""
import ctypes
from types import TracebackType
    # figure out size of _Py_ssize_t
if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'):
_Py_ssize_t = ctypes.c_int64
else:
_Py_ssize_t = ctypes.c_int
# regular python
class _PyObject(ctypes.Structure):
pass
_PyObject._fields_ = [
('ob_refcnt', _Py_ssize_t),
('ob_type', ctypes.POINTER(_PyObject))
]
# python with trace
if hasattr(sys, 'getobjects'):
class _PyObject(ctypes.Structure):
pass
_PyObject._fields_ = [
('_ob_next', ctypes.POINTER(_PyObject)),
('_ob_prev', ctypes.POINTER(_PyObject)),
('ob_refcnt', _Py_ssize_t),
('ob_type', ctypes.POINTER(_PyObject))
]
class _Traceback(_PyObject):
pass
_Traceback._fields_ = [
('tb_next', ctypes.POINTER(_Traceback)),
('tb_frame', ctypes.POINTER(_PyObject)),
('tb_lasti', ctypes.c_int),
('tb_lineno', ctypes.c_int)
]
def tb_set_next(tb, next):
"""Set the tb_next attribute of a traceback object."""
if not (isinstance(tb, TracebackType) and
(next is None or isinstance(next, TracebackType))):
raise TypeError('tb_set_next arguments must be traceback objects')
obj = _Traceback.from_address(id(tb))
if tb.tb_next is not None:
old = _Traceback.from_address(id(tb.tb_next))
old.ob_refcnt -= 1
if next is None:
obj.tb_next = ctypes.POINTER(_Traceback)()
else:
next = _Traceback.from_address(id(next))
next.ob_refcnt += 1
obj.tb_next = ctypes.pointer(next)
return tb_set_next
# try to get a tb_set_next implementation if we don't have transparent
# proxies.
tb_set_next = None
if tproxy is None:
try:
from jinja2._debugsupport import tb_set_next
except ImportError:
try:
tb_set_next = _init_ugly_crap()
except:
pass
del _init_ugly_crap
| bsd-3-clause | 4,639,038,660,091,515,000 | 31.530973 | 78 | 0.593036 | false |
IndonesiaX/edx-platform | common/lib/xmodule/xmodule/modulestore/tests/test_split_w_old_mongo.py | 46 | 6683 | import datetime
import random
import unittest
import uuid
from nose.plugins.attrib import attr
import mock
from opaque_keys.edx.locator import CourseLocator, BlockUsageLocator
from xmodule.modulestore import ModuleStoreEnum
from xmodule.x_module import XModuleMixin
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.modulestore.mongo import DraftMongoModuleStore
from xmodule.modulestore.split_mongo.split import SplitMongoModuleStore
from xmodule.modulestore.tests.mongo_connection import MONGO_PORT_NUM, MONGO_HOST
from xmodule.modulestore.tests.utils import MemoryCache
@attr('mongo')
class SplitWMongoCourseBootstrapper(unittest.TestCase):
"""
Helper for tests which need to construct split mongo & old mongo based courses to get interesting internal structure.
Override _create_course and after invoking the super() _create_course, have it call _create_item for
each xblock you want in the course.
This class ensures the db gets created, opened, and cleaned up in addition to creating the course
Defines the following attrs on self:
* user_id: a random non-registered mock user id
* split_mongo: a pointer to the split mongo instance
* draft_mongo: a pointer to the old draft instance
* split_course_key (CourseLocator): of the new course
* old_course_key: the SlashSpecifiedCourseKey for the course
"""
# Snippet of what would be in the django settings envs file
db_config = {
'host': MONGO_HOST,
'port': MONGO_PORT_NUM,
'db': 'test_xmodule',
}
modulestore_options = {
'default_class': 'xmodule.raw_module.RawDescriptor',
'fs_root': '',
'render_template': mock.Mock(return_value=""),
'xblock_mixins': (InheritanceMixin, XModuleMixin)
}
split_course_key = CourseLocator('test_org', 'test_course', 'runid', branch=ModuleStoreEnum.BranchName.draft)
def setUp(self):
self.db_config['collection'] = 'modulestore{0}'.format(uuid.uuid4().hex[:5])
self.user_id = random.getrandbits(32)
super(SplitWMongoCourseBootstrapper, self).setUp()
self.split_mongo = SplitMongoModuleStore(
None,
self.db_config,
**self.modulestore_options
)
self.addCleanup(self.split_mongo.db.connection.close)
self.addCleanup(self.tear_down_split)
self.draft_mongo = DraftMongoModuleStore(
None, self.db_config, branch_setting_func=lambda: ModuleStoreEnum.Branch.draft_preferred,
metadata_inheritance_cache_subsystem=MemoryCache(),
**self.modulestore_options
)
self.addCleanup(self.tear_down_mongo)
self.old_course_key = None
self.runtime = None
self._create_course()
def tear_down_split(self):
"""
Remove the test collections, close the db connection
"""
split_db = self.split_mongo.db
split_db.drop_collection(split_db.course_index.proxied_object)
split_db.drop_collection(split_db.structures.proxied_object)
split_db.drop_collection(split_db.definitions.proxied_object)
def tear_down_mongo(self):
"""
Remove the test collections, close the db connection
"""
split_db = self.split_mongo.db
# old_mongo doesn't give a db attr, but all of the dbs are the same
split_db.drop_collection(self.draft_mongo.collection.proxied_object)
def _create_item(self, category, name, data, metadata, parent_category, parent_name, draft=True, split=True):
"""
Create the item of the given category and block id in split and old mongo, add it to the optional
parent. The parent category is only needed because old mongo requires it for the id.
Note: if draft = False, it will create the draft and then publish it; so, it will overwrite any
existing draft for both the new item and the parent
"""
location = self.old_course_key.make_usage_key(category, name)
self.draft_mongo.create_item(
self.user_id,
location.course_key,
location.block_type,
block_id=location.block_id,
definition_data=data,
metadata=metadata,
runtime=self.runtime
)
if not draft:
self.draft_mongo.publish(location, self.user_id)
if isinstance(data, basestring):
fields = {'data': data}
else:
fields = data.copy()
fields.update(metadata)
if parent_name:
# add child to parent in mongo
parent_location = self.old_course_key.make_usage_key(parent_category, parent_name)
parent = self.draft_mongo.get_item(parent_location)
parent.children.append(location)
self.draft_mongo.update_item(parent, self.user_id)
if not draft:
self.draft_mongo.publish(parent_location, self.user_id)
# create child for split
if split:
self.split_mongo.create_child(
self.user_id,
BlockUsageLocator(
course_key=self.split_course_key,
block_type=parent_category,
block_id=parent_name
),
category,
block_id=name,
fields=fields
)
else:
if split:
self.split_mongo.create_item(
self.user_id,
self.split_course_key,
category,
block_id=name,
fields=fields
)
def _create_course(self, split=True):
"""
* some detached items
* some attached children
* some orphans
"""
metadata = {
'start': datetime.datetime(2000, 3, 13, 4),
'display_name': 'Migration test course',
}
data = {
'wiki_slug': 'test_course_slug'
}
fields = metadata.copy()
fields.update(data)
if split:
# split requires the course to be created separately from creating items
self.split_mongo.create_course(
self.split_course_key.org, self.split_course_key.course, self.split_course_key.run, self.user_id, fields=fields, root_block_id='runid'
)
old_course = self.draft_mongo.create_course(self.split_course_key.org, 'test_course', 'runid', self.user_id, fields=fields)
self.old_course_key = old_course.id
self.runtime = old_course.runtime
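    # Illustrative subclass (hypothetical, not part of this helper): concrete
    # tests override _create_course(), invoke the super() version, then add
    # the blocks they need via _create_item():
    #
    #     class SampleBootstrapTest(SplitWMongoCourseBootstrapper):
    #         def _create_course(self):
    #             super(SampleBootstrapTest, self)._create_course()
    #             self._create_item(
    #                 'chapter', 'chapter1', {}, {'display_name': 'Chapter 1'},
    #                 'course', 'runid', draft=False)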
| agpl-3.0 | 7,442,936,480,802,987,000 | 39.017964 | 150 | 0.619482 | false |
napkindrawing/ansible | lib/ansible/modules/system/openwrt_init.py | 58 | 6912 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Andrew Gaffney <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: openwrt_init
author:
- "Andrew Gaffney (@agaffney)"
version_added: "2.3"
short_description: Manage services on OpenWrt.
description:
- Controls OpenWrt services on remote hosts.
options:
name:
required: true
description:
- Name of the service.
aliases: ['service']
state:
required: false
default: null
choices: [ 'started', 'stopped', 'restarted', 'reloaded' ]
description:
- C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
C(restarted) will always bounce the service. C(reloaded) will always reload.
enabled:
required: false
choices: [ "yes", "no" ]
default: null
description:
      - Whether the service should start on boot. B(At least one of state and enabled is required.)
pattern:
required: false
description:
- If the service does not respond to the 'running' command, name a
substring to look for as would be found in the output of the I(ps)
command as a stand-in for a 'running' result. If the string is found,
the service will be assumed to be running.
notes:
- One option other than name is required.
requirements:
- An OpenWrt system
'''
EXAMPLES = '''
# Example action to start service httpd, if not running
- openwrt_init:
state: started
name: httpd
# Example action to stop service cron, if running
- openwrt_init:
name: cron
state: stopped
# Example action to reload service httpd, in all cases
- openwrt_init:
name: httpd
state: reloaded
# Example action to enable service httpd
- openwrt_init:
name: httpd
enabled: yes
'''
RETURN = '''
'''
import os
import glob
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_native
module = None
init_script = None
# ===============================
# Check if service is enabled
def is_enabled():
(rc, out, err) = module.run_command("%s enabled" % init_script)
if rc == 0:
return True
return False
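# The check above is equivalent to running the following on the device
# (httpd is an illustrative service name):
#
#   /etc/init.d/httpd enabled && echo enabled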
# ===========================================
# Main control flow
def main():
global module, init_script
# init
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True, type='str', aliases=['service']),
state = dict(choices=['started', 'stopped', 'restarted', 'reloaded'], type='str'),
enabled = dict(type='bool'),
pattern = dict(required=False, default=None),
),
supports_check_mode=True,
required_one_of=[['state', 'enabled']],
)
# initialize
service = module.params['name']
init_script = '/etc/init.d/' + service
rc = 0
out = err = ''
result = {
'name': service,
'changed': False,
}
# check if service exists
if not os.path.exists(init_script):
module.fail_json(msg='service %s does not exist' % service)
# Enable/disable service startup at boot if requested
if module.params['enabled'] is not None:
# do we need to enable the service?
enabled = is_enabled()
# default to current state
result['enabled'] = enabled
# Change enable/disable if needed
if enabled != module.params['enabled']:
result['changed'] = True
if module.params['enabled']:
action = 'enable'
else:
action = 'disable'
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s" % (init_script, action))
# openwrt init scripts can return a non-zero exit code on a successful 'enable'
# command if the init script doesn't contain a STOP value, so we ignore the exit
# code and explicitly check if the service is now in the desired state
if is_enabled() != module.params['enabled']:
module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err))
result['enabled'] = not enabled
if module.params['state'] is not None:
running = False
# check if service is currently running
if module.params['pattern']:
# Find ps binary
psbin = module.get_bin_path('ps', True)
# this should be busybox ps, so we only want/need to the 'w' option
(rc, psout, pserr) = module.run_command('%s w' % psbin)
# If rc is 0, set running as appropriate
if rc == 0:
lines = psout.split("\n")
for line in lines:
if module.params['pattern'] in line and not "pattern=" in line:
# so as to not confuse ./hacking/test-module
running = True
break
else:
(rc, out, err) = module.run_command("%s running" % init_script)
if rc == 0:
running = True
# default to desired state
result['state'] = module.params['state']
# determine action, if any
action = None
if module.params['state'] == 'started':
if not running:
action = 'start'
result['changed'] = True
elif module.params['state'] == 'stopped':
if running:
action = 'stop'
result['changed'] = True
else:
action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded
result['state'] = 'started'
result['changed'] = True
if action:
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s" % (init_script, action))
if rc != 0:
module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err))
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 2,623,725,772,899,651,600 | 31.299065 | 106 | 0.576823 | false |
lulandco/SickRage | lib/mako/_ast_util.py | 39 | 25690 | # mako/_ast_util.py
# Copyright (C) 2006-2015 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
ast
~~~
The `ast` module helps Python applications to process trees of the Python
abstract syntax grammar. The abstract syntax itself might change with
each Python release; this module helps to find out programmatically what
the current grammar looks like and allows modifications of it.
An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
a flag to the `compile()` builtin function or by using the `parse()`
function from this module. The result will be a tree of objects whose
classes all inherit from `ast.AST`.
A modified abstract syntax tree can be compiled into a Python code object
using the built-in `compile()` function.
Additionally various helper functions are provided that make working with
the trees simpler. The main intention of the helper functions and this
module in general is to provide an easy to use interface for libraries
that work tightly with the python syntax (template engines for example).
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from _ast import * # noqa
from mako.compat import arg_stringname
BOOLOP_SYMBOLS = {
And: 'and',
Or: 'or'
}
BINOP_SYMBOLS = {
Add: '+',
Sub: '-',
Mult: '*',
Div: '/',
FloorDiv: '//',
Mod: '%',
LShift: '<<',
RShift: '>>',
BitOr: '|',
BitAnd: '&',
BitXor: '^'
}
CMPOP_SYMBOLS = {
Eq: '==',
Gt: '>',
GtE: '>=',
In: 'in',
Is: 'is',
IsNot: 'is not',
Lt: '<',
LtE: '<=',
NotEq: '!=',
NotIn: 'not in'
}
UNARYOP_SYMBOLS = {
Invert: '~',
Not: 'not',
UAdd: '+',
USub: '-'
}
ALL_SYMBOLS = {}
ALL_SYMBOLS.update(BOOLOP_SYMBOLS)
ALL_SYMBOLS.update(BINOP_SYMBOLS)
ALL_SYMBOLS.update(CMPOP_SYMBOLS)
ALL_SYMBOLS.update(UNARYOP_SYMBOLS)
def parse(expr, filename='<unknown>', mode='exec'):
"""Parse an expression into an AST node."""
return compile(expr, filename, mode, PyCF_ONLY_AST)
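# A short sketch of the parse()/compile() round trip mentioned in the module
# docstring (the expression is an arbitrary example):
#
#     node = parse('a + 42', mode='eval')
#     code = compile(node, '<ast>', 'eval')
#     print eval(code, {'a': 1})  # -> 43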
def to_source(node, indent_with=' ' * 4):
"""
This function can convert a node tree back into python sourcecode. This
is useful for debugging purposes, especially if you're dealing with custom
asts not generated by python itself.
It could be that the sourcecode is evaluable when the AST itself is not
compilable / evaluable. The reason for this is that the AST contains some
more data than regular sourcecode does, which is dropped during
conversion.
Each level of indentation is replaced with `indent_with`. Per default this
parameter is equal to four spaces as suggested by PEP 8, but it might be
adjusted to match the application's styleguide.
"""
generator = SourceGenerator(indent_with)
generator.visit(node)
return ''.join(generator.result)
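# Round-trip sketch (relies on the statement and expression visitors defined
# further down in this module; the snippet itself is an arbitrary example):
#
#     src = to_source(parse('if x:\n    y = x - 1'))
#     # src holds equivalent source text, re-indented with indent_with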
def dump(node):
"""
A very verbose representation of the node passed. This is useful for
debugging purposes.
"""
def _format(node):
if isinstance(node, AST):
return '%s(%s)' % (node.__class__.__name__,
', '.join('%s=%s' % (a, _format(b))
for a, b in iter_fields(node)))
elif isinstance(node, list):
return '[%s]' % ', '.join(_format(x) for x in node)
return repr(node)
if not isinstance(node, AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
return _format(node)
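# For example (a hypothetical one-liner; output shown abridged):
#
#     print dump(parse('x', mode='eval'))
#     # Expression(body=Name(id='x', ctx=Load()))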
def copy_location(new_node, old_node):
"""
Copy the source location hint (`lineno` and `col_offset`) from the
old to the new node if possible and return the new one.
"""
for attr in 'lineno', 'col_offset':
if attr in old_node._attributes and attr in new_node._attributes \
and hasattr(old_node, attr):
setattr(new_node, attr, getattr(old_node, attr))
return new_node
def fix_missing_locations(node):
"""
Some nodes require a line number and the column offset. Without that
information the compiler will abort the compilation. Because it can be
a dull task to add appropriate line numbers and column offsets when
adding new nodes this function can help. It copies the line number and
column offset of the parent node to the child nodes without this
information.
Unlike `copy_location` this works recursive and won't touch nodes that
already have a location information.
"""
def _fix(node, lineno, col_offset):
if 'lineno' in node._attributes:
if not hasattr(node, 'lineno'):
node.lineno = lineno
else:
lineno = node.lineno
if 'col_offset' in node._attributes:
if not hasattr(node, 'col_offset'):
node.col_offset = col_offset
else:
col_offset = node.col_offset
for child in iter_child_nodes(node):
_fix(child, lineno, col_offset)
_fix(node, 1, 0)
return node
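# Sketch of the intended workflow (Expression and Num come from the star
# import of _ast above; the literal is an arbitrary example):
#
#     node = Expression(body=Num(n=42))
#     fix_missing_locations(node)
#     print eval(compile(node, '<ast>', 'eval'))  # -> 42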
def increment_lineno(node, n=1):
"""
Increment the line numbers of all nodes by `n` if they have line number
attributes. This is useful to "move code" to a different location in a
file.
"""
    # walk() yields the root node first, so this covers the node itself
    # as well as all of its children
    for node in walk(node):
        if 'lineno' in node._attributes:
            node.lineno = getattr(node, 'lineno', 0) + n
def iter_fields(node):
"""Iterate over all fields of a node, only yielding existing fields."""
# CPython 2.5 compat
if not hasattr(node, '_fields') or not node._fields:
return
for field in node._fields:
try:
yield field, getattr(node, field)
except AttributeError:
pass
def get_fields(node):
"""Like `iter_fiels` but returns a dict."""
return dict(iter_fields(node))
def iter_child_nodes(node):
"""Iterate over all child nodes or a node."""
for name, field in iter_fields(node):
if isinstance(field, AST):
yield field
elif isinstance(field, list):
for item in field:
if isinstance(item, AST):
yield item
def get_child_nodes(node):
"""Like `iter_child_nodes` but returns a list."""
return list(iter_child_nodes(node))
def get_compile_mode(node):
"""
Get the mode for `compile` of a given node. If the node is not a `mod`
node (`Expression`, `Module` etc.) a `TypeError` is thrown.
"""
if not isinstance(node, mod):
raise TypeError('expected mod node, got %r' % node.__class__.__name__)
return {
Expression: 'eval',
Interactive: 'single'
}.get(node.__class__, 'expr')
def get_docstring(node):
"""
Return the docstring for the given node or `None` if no docstring can be
found. If the node provided does not accept docstrings a `TypeError`
will be raised.
"""
if not isinstance(node, (FunctionDef, ClassDef, Module)):
raise TypeError("%r can't have docstrings" % node.__class__.__name__)
if node.body and isinstance(node.body[0], Str):
return node.body[0].s
def walk(node):
"""
Iterate over all nodes. This is useful if you only want to modify nodes in
    place and don't care about the context or the order in which the nodes are returned.
"""
from collections import deque
todo = deque([node])
while todo:
node = todo.popleft()
todo.extend(iter_child_nodes(node))
yield node
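# For example, collecting every Name node regardless of nesting:
#
#     names = [n.id for n in walk(parse('a + b * a')) if isinstance(n, Name)]
#     # -> ['a', 'b', 'a'], in breadth-first order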
class NodeVisitor(object):
"""
Walks the abstract syntax tree and call visitor functions for every node
found. The visitor functions may return values which will be forwarded
by the `visit` method.
Per default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `get_visitor` function. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
Don't use the `NodeVisitor` if you want to apply changes to nodes during
traversing. For this a special visitor exists (`NodeTransformer`) that
allows modifications.
"""
def get_visitor(self, node):
"""
Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
method = 'visit_' + node.__class__.__name__
return getattr(self, method, None)
def visit(self, node):
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node)
return self.generic_visit(node)
def generic_visit(self, node):
"""Called if no explicit visitor function exists for a node."""
for field, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item)
elif isinstance(value, AST):
self.visit(value)
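# A minimal visitor sketch (a hypothetical subclass, shown for illustration):
#
#     class FunctionDefCounter(NodeVisitor):
#         def __init__(self):
#             self.count = 0
#         def visit_FunctionDef(self, node):
#             self.count += 1
#             self.generic_visit(node)
#
#     counter = FunctionDefCounter()
#     counter.visit(parse(source_code))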
class NodeTransformer(NodeVisitor):
"""
Walks the abstract syntax tree and allows modifications of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor functions to replace or remove the old node. If the return
value of the visitor function is `None` the node will be removed
from the previous location otherwise it's replaced with the return
value. The return value may be the original node in which case no
replacement takes place.
Here an example transformer that rewrites all `foo` to `data['foo']`::
class RewriteName(NodeTransformer):
def visit_Name(self, node):
return copy_location(Subscript(
value=Name(id='data', ctx=Load()),
slice=Index(value=Str(s=node.id)),
ctx=node.ctx
), node)
Keep in mind that if the node you're operating on has child nodes
you must either transform the child nodes yourself or call the generic
visit function for the node first.
Nodes that were part of a collection of statements (that applies to
all statement nodes) may also return a list of nodes rather than just
a single node.
Usually you use the transformer like this::
node = YourTransformer().visit(node)
"""
def generic_visit(self, node):
for field, old_value in iter_fields(node):
old_value = getattr(node, field, None)
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, AST):
value = self.visit(value)
if value is None:
continue
elif not isinstance(value, AST):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, AST):
new_node = self.visit(old_value)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
class SourceGenerator(NodeVisitor):
"""
This visitor is able to transform a well formed syntax tree into python
sourcecode. For more details have a look at the docstring of the
`node_to_source` function.
"""
def __init__(self, indent_with):
self.result = []
self.indent_with = indent_with
self.indentation = 0
self.new_lines = 0
def write(self, x):
if self.new_lines:
if self.result:
self.result.append('\n' * self.new_lines)
self.result.append(self.indent_with * self.indentation)
self.new_lines = 0
self.result.append(x)
def newline(self, n=1):
self.new_lines = max(self.new_lines, n)
def body(self, statements):
        self.newline()
self.indentation += 1
for stmt in statements:
self.visit(stmt)
self.indentation -= 1
def body_or_else(self, node):
self.body(node.body)
if node.orelse:
self.newline()
self.write('else:')
self.body(node.orelse)
def signature(self, node):
want_comma = []
def write_comma():
if want_comma:
self.write(', ')
else:
want_comma.append(True)
padding = [None] * (len(node.args) - len(node.defaults))
for arg, default in zip(node.args, padding + node.defaults):
write_comma()
self.visit(arg)
if default is not None:
self.write('=')
self.visit(default)
if node.vararg is not None:
write_comma()
self.write('*' + arg_stringname(node.vararg))
if node.kwarg is not None:
write_comma()
self.write('**' + arg_stringname(node.kwarg))
def decorators(self, node):
for decorator in node.decorator_list:
self.newline()
self.write('@')
self.visit(decorator)
# Statements
def visit_Assign(self, node):
self.newline()
for idx, target in enumerate(node.targets):
if idx:
self.write(', ')
self.visit(target)
self.write(' = ')
self.visit(node.value)
def visit_AugAssign(self, node):
self.newline()
self.visit(node.target)
self.write(BINOP_SYMBOLS[type(node.op)] + '=')
self.visit(node.value)
def visit_ImportFrom(self, node):
self.newline()
self.write('from %s%s import ' % ('.' * node.level, node.module))
for idx, item in enumerate(node.names):
if idx:
self.write(', ')
self.write(item)
def visit_Import(self, node):
self.newline()
for item in node.names:
self.write('import ')
self.visit(item)
def visit_Expr(self, node):
self.newline()
self.generic_visit(node)
def visit_FunctionDef(self, node):
self.newline(n=2)
self.decorators(node)
self.newline()
self.write('def %s(' % node.name)
self.signature(node.args)
self.write('):')
self.body(node.body)
def visit_ClassDef(self, node):
have_args = []
def paren_or_comma():
if have_args:
self.write(', ')
else:
have_args.append(True)
self.write('(')
self.newline(n=3)
self.decorators(node)
self.newline()
self.write('class %s' % node.name)
for base in node.bases:
paren_or_comma()
self.visit(base)
# XXX: the if here is used to keep this module compatible
# with python 2.6.
if hasattr(node, 'keywords'):
for keyword in node.keywords:
paren_or_comma()
self.write(keyword.arg + '=')
self.visit(keyword.value)
if getattr(node, "starargs", None):
paren_or_comma()
self.write('*')
self.visit(node.starargs)
if getattr(node, "kwargs", None):
paren_or_comma()
self.write('**')
self.visit(node.kwargs)
self.write(have_args and '):' or ':')
self.body(node.body)
def visit_If(self, node):
self.newline()
self.write('if ')
self.visit(node.test)
self.write(':')
self.body(node.body)
        while True:
            else_ = node.orelse
            if len(else_) == 1 and isinstance(else_[0], If):
                # a single nested `if` in the else-branch becomes an `elif`
                node = else_[0]
                self.newline()
                self.write('elif ')
                self.visit(node.test)
                self.write(':')
                self.body(node.body)
            else:
                # only emit `else:` when there actually is an else-branch
                if else_:
                    self.newline()
                    self.write('else:')
                    self.body(else_)
                break
def visit_For(self, node):
self.newline()
self.write('for ')
self.visit(node.target)
self.write(' in ')
self.visit(node.iter)
self.write(':')
self.body_or_else(node)
def visit_While(self, node):
self.newline()
self.write('while ')
self.visit(node.test)
self.write(':')
self.body_or_else(node)
def visit_With(self, node):
self.newline()
self.write('with ')
self.visit(node.context_expr)
if node.optional_vars is not None:
self.write(' as ')
self.visit(node.optional_vars)
self.write(':')
self.body(node.body)
def visit_Pass(self, node):
self.newline()
self.write('pass')
def visit_Print(self, node):
# XXX: python 2.6 only
self.newline()
self.write('print ')
want_comma = False
if node.dest is not None:
self.write(' >> ')
self.visit(node.dest)
want_comma = True
for value in node.values:
if want_comma:
self.write(', ')
self.visit(value)
want_comma = True
if not node.nl:
self.write(',')
def visit_Delete(self, node):
self.newline()
self.write('del ')
        for idx, target in enumerate(node.targets):
if idx:
self.write(', ')
self.visit(target)
def visit_TryExcept(self, node):
self.newline()
self.write('try:')
self.body(node.body)
for handler in node.handlers:
self.visit(handler)
def visit_TryFinally(self, node):
self.newline()
self.write('try:')
self.body(node.body)
self.newline()
self.write('finally:')
self.body(node.finalbody)
def visit_Global(self, node):
self.newline()
self.write('global ' + ', '.join(node.names))
def visit_Nonlocal(self, node):
self.newline()
self.write('nonlocal ' + ', '.join(node.names))
    def visit_Return(self, node):
        self.newline()
        # node.value is None for a bare `return`
        if node.value is None:
            self.write('return')
        else:
            self.write('return ')
            self.visit(node.value)
def visit_Break(self, node):
self.newline()
self.write('break')
def visit_Continue(self, node):
self.newline()
self.write('continue')
def visit_Raise(self, node):
# XXX: Python 2.6 / 3.0 compatibility
self.newline()
self.write('raise')
if hasattr(node, 'exc') and node.exc is not None:
self.write(' ')
self.visit(node.exc)
if node.cause is not None:
self.write(' from ')
self.visit(node.cause)
elif hasattr(node, 'type') and node.type is not None:
self.visit(node.type)
if node.inst is not None:
self.write(', ')
self.visit(node.inst)
if node.tback is not None:
self.write(', ')
self.visit(node.tback)
# Expressions
def visit_Attribute(self, node):
self.visit(node.value)
self.write('.' + node.attr)
def visit_Call(self, node):
want_comma = []
def write_comma():
if want_comma:
self.write(', ')
else:
want_comma.append(True)
self.visit(node.func)
self.write('(')
for arg in node.args:
write_comma()
self.visit(arg)
for keyword in node.keywords:
write_comma()
self.write(keyword.arg + '=')
self.visit(keyword.value)
if getattr(node, "starargs", None):
write_comma()
self.write('*')
self.visit(node.starargs)
if getattr(node, "kwargs", None):
write_comma()
self.write('**')
self.visit(node.kwargs)
self.write(')')
def visit_Name(self, node):
self.write(node.id)
def visit_NameConstant(self, node):
self.write(str(node.value))
def visit_arg(self, node):
self.write(node.arg)
def visit_Str(self, node):
self.write(repr(node.s))
def visit_Bytes(self, node):
self.write(repr(node.s))
def visit_Num(self, node):
self.write(repr(node.n))
def visit_Tuple(self, node):
self.write('(')
idx = -1
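        # idx is a sentinel: it stays -1 for an empty tuple and ends at 0 for
        # a one-element tuple, which needs the trailing comma written below.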
for idx, item in enumerate(node.elts):
if idx:
self.write(', ')
self.visit(item)
self.write(idx and ')' or ',)')
def sequence_visit(left, right):
def visit(self, node):
self.write(left)
for idx, item in enumerate(node.elts):
if idx:
self.write(', ')
self.visit(item)
self.write(right)
return visit
visit_List = sequence_visit('[', ']')
visit_Set = sequence_visit('{', '}')
del sequence_visit
def visit_Dict(self, node):
self.write('{')
for idx, (key, value) in enumerate(zip(node.keys, node.values)):
if idx:
self.write(', ')
self.visit(key)
self.write(': ')
self.visit(value)
self.write('}')
def visit_BinOp(self, node):
self.write('(')
self.visit(node.left)
self.write(' %s ' % BINOP_SYMBOLS[type(node.op)])
self.visit(node.right)
self.write(')')
def visit_BoolOp(self, node):
self.write('(')
for idx, value in enumerate(node.values):
if idx:
self.write(' %s ' % BOOLOP_SYMBOLS[type(node.op)])
self.visit(value)
self.write(')')
def visit_Compare(self, node):
self.write('(')
self.visit(node.left)
for op, right in zip(node.ops, node.comparators):
self.write(' %s ' % CMPOP_SYMBOLS[type(op)])
self.visit(right)
self.write(')')
def visit_UnaryOp(self, node):
self.write('(')
op = UNARYOP_SYMBOLS[type(node.op)]
self.write(op)
if op == 'not':
self.write(' ')
self.visit(node.operand)
self.write(')')
def visit_Subscript(self, node):
self.visit(node.value)
self.write('[')
self.visit(node.slice)
self.write(']')
def visit_Slice(self, node):
if node.lower is not None:
self.visit(node.lower)
self.write(':')
if node.upper is not None:
self.visit(node.upper)
if node.step is not None:
self.write(':')
if not (isinstance(node.step, Name) and node.step.id == 'None'):
self.visit(node.step)
def visit_ExtSlice(self, node):
        for idx, item in enumerate(node.dims):
if idx:
self.write(', ')
self.visit(item)
    def visit_Yield(self, node):
        self.write('yield')
        # a bare `yield` has node.value == None
        if node.value is not None:
            self.write(' ')
            self.visit(node.value)
def visit_Lambda(self, node):
self.write('lambda ')
self.signature(node.args)
self.write(': ')
self.visit(node.body)
def visit_Ellipsis(self, node):
self.write('Ellipsis')
def generator_visit(left, right):
def visit(self, node):
self.write(left)
self.visit(node.elt)
for comprehension in node.generators:
self.visit(comprehension)
self.write(right)
return visit
visit_ListComp = generator_visit('[', ']')
visit_GeneratorExp = generator_visit('(', ')')
visit_SetComp = generator_visit('{', '}')
del generator_visit
def visit_DictComp(self, node):
self.write('{')
self.visit(node.key)
self.write(': ')
self.visit(node.value)
for comprehension in node.generators:
self.visit(comprehension)
self.write('}')
def visit_IfExp(self, node):
self.visit(node.body)
self.write(' if ')
self.visit(node.test)
self.write(' else ')
self.visit(node.orelse)
def visit_Starred(self, node):
self.write('*')
self.visit(node.value)
def visit_Repr(self, node):
# XXX: python 2.6 only
self.write('`')
self.visit(node.value)
self.write('`')
# Helper Nodes
def visit_alias(self, node):
self.write(node.name)
if node.asname is not None:
self.write(' as ' + node.asname)
def visit_comprehension(self, node):
self.write(' for ')
self.visit(node.target)
self.write(' in ')
self.visit(node.iter)
if node.ifs:
for if_ in node.ifs:
self.write(' if ')
self.visit(if_)
def visit_excepthandler(self, node):
self.newline()
self.write('except')
if node.type is not None:
self.write(' ')
self.visit(node.type)
if node.name is not None:
self.write(' as ')
self.visit(node.name)
self.write(':')
self.body(node.body)
| gpl-3.0 | -8,453,796,403,953,522,000 | 29.188014 | 79 | 0.555313 | false |
way2heavy/youtube-dl-1 | youtube_dl/extractor/jpopsukitv.py | 170 | 2584 | # coding=utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
unified_strdate,
)
class JpopsukiIE(InfoExtractor):
IE_NAME = 'jpopsuki.tv'
_VALID_URL = r'https?://(?:www\.)?jpopsuki\.tv/(?:category/)?video/[^/]+/(?P<id>\S+)'
_TEST = {
'url': 'http://www.jpopsuki.tv/video/ayumi-hamasaki---evolution/00be659d23b0b40508169cdee4545771',
'md5': '88018c0c1a9b1387940e90ec9e7e198e',
'info_dict': {
'id': '00be659d23b0b40508169cdee4545771',
'ext': 'mp4',
'title': 'ayumi hamasaki - evolution',
'description': 'Release date: 2001.01.31\r\n浜崎あゆみ - evolution',
'thumbnail': 'http://www.jpopsuki.tv/cache/89722c74d2a2ebe58bcac65321c115b2.jpg',
'uploader': 'plama_chan',
'uploader_id': '404',
'upload_date': '20121101'
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_url = 'http://www.jpopsuki.tv' + self._html_search_regex(
r'<source src="(.*?)" type', webpage, 'video url')
video_title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
uploader = self._html_search_regex(
r'<li>from: <a href="/user/view/user/(.*?)/uid/',
webpage, 'video uploader', fatal=False)
uploader_id = self._html_search_regex(
r'<li>from: <a href="/user/view/user/\S*?/uid/(\d*)',
webpage, 'video uploader_id', fatal=False)
upload_date = unified_strdate(self._html_search_regex(
r'<li>uploaded: (.*?)</li>', webpage, 'video upload_date',
fatal=False))
view_count_str = self._html_search_regex(
r'<li>Hits: ([0-9]+?)</li>', webpage, 'video view_count',
fatal=False)
comment_count_str = self._html_search_regex(
r'<h2>([0-9]+?) comments</h2>', webpage, 'video comment_count',
fatal=False)
return {
'id': video_id,
'url': video_url,
'title': video_title,
'description': description,
'thumbnail': thumbnail,
'uploader': uploader,
'uploader_id': uploader_id,
'upload_date': upload_date,
'view_count': int_or_none(view_count_str),
'comment_count': int_or_none(comment_count_str),
}
| unlicense | 4,610,952,722,750,756,000 | 36.852941 | 106 | 0.556721 | false |
ashang/calibre | src/calibre/utils/opensearch/__init__.py | 24 | 1335 | '''
Based on the OpenSearch Python module by Ed Summers <[email protected]> from
https://github.com/edsu/opensearch .
This module is heavily modified and does not implement all the features from
the original. The ability for the the module to perform a search and retrieve
search results has been removed. The original module used a modified version
of the Universal feed parser from http://feedparser.org/. The use of
FeedParser made getting search results very slow. There is also a bug in the
modified FeedParser that causes the system to run out of file descriptors.
Instead of fixing the modified feed parser it was decided to remove it and
manually parse the feeds in a set of type specific classes. This is much
faster and as we know in advance the feed format is simpler than using
FeedParser. Also, replacing the modified FeedParser with the newest version
of FeedParser caused some feeds to be parsed incorrectly and result in a loss
of data.
The module was also rewritten to use lxml instead of MiniDom.
Usage:
description = Description(open_search_url)
url_template = description.get_best_template()
if not url_template:
return
query = Query(url_template)
# set up initial values.
query.searchTerms = search_terms
# Note the count is ignored by some feeds.
query.count = max_results
    search_url = query.url()
'''
| gpl-3.0 | -7,356,611,286,848,842,000 | 35.081081 | 77 | 0.785019 | false |
asreimer/davitpy_asr | gme/ind/dst.py | 3 | 10491 | # Copyright (C) 2012 VT SuperDARN Lab
# Full license can be found in LICENSE.txt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
.. module:: dst
:synopsis: A module for reading, writing, and storing dst Data
.. moduleauthor:: AJ, 20130131
*********************
**Module**: gme.ind.dst
*********************
**Classes**:
* :class:`gme.ind.dst.dstRec`
**Functions**:
* :func:`gme.ind.dst.readDst`
* :func:`gme.ind.dst.readDstWeb`
* :func:`gme.ind.dst.mapDstMongo`
"""
import gme
class dstRec(gme.base.gmeBase.gmeData):
"""a class to represent a record of dst data. Extends :class:`gme.base.gmeBase.gmeData`. Note that Dst data is available from 1980-present day (or whatever the latest WDC has uploaded is). **The data are 1-hour values**. Information about dst can be found `here <http://wdc.kugi.kyoto-u.ac.jp/dstdir/dst2/onDstindex.html>`_
**Members**:
* **time** (`datetime <http://tinyurl.com/bl352yx>`_): an object identifying which time these data are for
* **dataSet** (str): a string dicating the dataset this is from
* **info** (str): information about where the data come from. *Please be courteous and give credit to data providers when credit is due.*
* **dst** (float): the actual dst value
.. note::
If any of the members have a value of None, this means that they could not be read for that specific time
**Methods**:
* :func:`parseWeb`
**Example**:
::
emptyDstObj = gme.ind.dstRec()
written by AJ, 20130131
"""
def parseWeb(self,line):
"""This method is used to convert a line of dst data from the WDC to a dstRec object
.. note::
In general, users will not need to worry about this.
**Belongs to**: :class:`gme.ind.dst.dstRec`
**Args**:
* **line** (str): the ASCII line from the WDC data file
**Returns**:
* Nothing.
**Example**:
::
myDstObj.parseWeb(webLine)
written by AJ, 20130131
"""
import datetime as dt
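		# Layout inferred from the indexing below (not from a WDC spec):
		# the line is whitespace-separated with cols[0] = 'YYYY-MM-DD',
		# cols[1] = 'HH:MM:SS', and cols[3] the Dst value, where 99999.0
		# marks a missing record.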
cols = line.split()
self.time = dt.datetime(int(cols[0][0:4]),int(cols[0][5:7]),int(cols[0][8:10]), \
int(cols[1][0:2]),int(cols[1][3:5]),int(cols[1][6:8]))
if(float(cols[3]) != 99999.0): self.dst = float(cols[3])
def __init__(self, webLine=None, dbDict=None):
"""the intialization fucntion for a :class:`gme.ind.dst.dstRec` object.
.. note::
In general, users will not need to worry about this.
**Belongs to**: :class:`gme.ind.dst.dstRec`
**Args**:
* [**webLine**] (str): an ASCII line from the datafile from WDC. if this is provided, the object is initialized from it. default=None
* [**dbDict**] (dict): a dictionary read from the mongodb. if this is provided, the object is initialized from it. default = None
**Returns**:
* Nothing.
**Example**:
::
myDstObj = dstRec(webLine=awebLine)
written by AJ, 20130131
"""
#note about where data came from
self.dataSet = 'Dst'
self.time = None
self.info = 'These data were downloaded from WDC For Geomagnetism, Kyoto. *Please be courteous and give credit to data providers when credit is due.*'
self.dst = None
#if we're initializing from an object, do it!
if(webLine != None): self.parseWeb(webLine)
if(dbDict != None): self.parseDb(dbDict)
def readDst(sTime=None,eTime=None,dst=None):
"""This function reads dst data from the mongodb.
**Args**:
* [**sTime**] (`datetime <http://tinyurl.com/bl352yx>`_ or None): the earliest time you want data for, default=None
* [**eTime**] (`datetime <http://tinyurl.com/bl352yx>`_ or None): the latest time you want data for. if this is None, end Time will be 1 day after sTime. default = None
* [**dst**] (list or None): if this is not None, it must be a 2-element list of numbers, [a,b]. In this case, only data with dst values in the range [a,b] will be returned. default = None
**Returns**:
* **dstList** (list or None): if data is found, a list of :class:`gme.ind.dst.dstRec` objects matching the input parameters is returned. If no data is found, None is returned.
**Example**:
::
import datetime as dt
dstList = gme.ind.readDst(sTime=dt.datetime(2011,1,1),eTime=dt.datetime(2011,6,1),dst=[-50,50])
written by AJ, 20130131
"""
import datetime as dt
import pydarn.sdio.dbUtils as db
#check all the inputs for validity
assert(sTime == None or isinstance(sTime,dt.datetime)), \
'error, sTime must be a datetime object'
assert(eTime == None or isinstance(eTime,dt.datetime)), \
'error, eTime must be either None or a datetime object'
assert(dst == None or (isinstance(dst,list) and \
isinstance(dst[0],(int,float)) and isinstance(dst[1],(int,float)))), \
		'error, dst must be None or a list of 2 numbers'
if(eTime == None and sTime != None): eTime = sTime+dt.timedelta(days=1)
qryList = []
#if arguments are provided, query for those
if(sTime != None): qryList.append({'time':{'$gte':sTime}})
if(eTime != None): qryList.append({'time':{'$lte':eTime}})
if(dst != None):
qryList.append({'dst':{'$gte':min(dst)}})
qryList.append({'dst':{'$lte':max(dst)}})
#construct the final query definition
qryDict = {'$and': qryList}
#connect to the database
dstData = db.getDataConn(dbName='gme',collName='dst')
#do the query
if(qryList != []): qry = dstData.find(qryDict)
else: qry = dstData.find()
if(qry.count() > 0):
dstList = []
for rec in qry.sort('time'):
dstList.append(dstRec(dbDict=rec))
print '\nreturning a list with',len(dstList),'records of dst data'
return dstList
#if we didn't find anything on the mongodb
else:
print '\ncould not find requested data in the mongodb'
return None
def readDstWeb(sTime,eTime=None):
"""This function reads dst data from the WDC kyoto website
.. warning::
You should not use this. Use the general function :func:`readDst` instead.
**Args**:
* **sTime** (`datetime <http://tinyurl.com/bl352yx>`_): the earliest time you want data for
* [**eTime**] (`datetime <http://tinyurl.com/bl352yx>`_ or None): the latest time you want data for. if this is None, eTime will be equal to sTime. default = None
**Example**:
::
import datetime as dt
dstList = gme.ind.readDstWeb(dt.datetime(2011,1,1,1,50),eTime=dt.datetime(2011,1,1,10,0))
written by AJ, 20130131
"""
import datetime as dt
import mechanize
assert(isinstance(sTime,dt.datetime)),'error, sTime must be a datetime object'
if(eTime == None): eTime = sTime
assert(isinstance(eTime,dt.datetime)),'error, eTime must be a datetime object'
	assert(eTime >= sTime), 'error, eTime < sTime'
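	# the Kyoto form takes the year split into century/decade/unit digits,
	# e.g. 2011 -> Cent=20, Tens=1, Year=1 (Python 2 integer division)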
sCent = sTime.year/100
sTens = (sTime.year - sCent*100)/10
sYear = sTime.year-sCent*100-sTens*10
sMonth = sTime.strftime("%m")
eCent = eTime.year/100
eTens = (eTime.year - eCent*100)/10
eYear = eTime.year-eCent*100-eTens*10
eMonth = eTime.strftime("%m")
br = mechanize.Browser()
br.set_handle_robots(False) # no robots
br.set_handle_refresh(False) # can sometimes hang without this
br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
br.open('http://wdc.kugi.kyoto-u.ac.jp/dstae/index.html')
br.form = list(br.forms())[0]
#fill out the page fields
br.form.find_control('SCent').value = [str(sCent)]
br.form.find_control('STens').value = [str(sTens)]
br.form.find_control('SYear').value = [str(sYear)]
br.form.find_control('SMonth').value = [sMonth]
br.form.find_control('ECent').value = [str(eCent)]
br.form.find_control('ETens').value = [str(eTens)]
br.form.find_control('EYear').value = [str(eYear)]
br.form.find_control('EMonth').value = [eMonth]
br.form.find_control('Output').value = ['DST']
br.form.find_control('Out format').value = ['IAGA2002']
br.form.find_control('Email').value = "[email protected]"
response = br.submit()
#get the data
lines = response.readlines()
dstList = []
for l in lines:
#check for headers
if(l[0] == ' ' or l[0:4] == 'DATE'): continue
cols=l.split()
try: dstList.append(dstRec(webLine=l))
except Exception,e:
print e
			print 'problem initializing dst object'
if(dstList != []): return dstList
else: return None
def mapDstMongo(sYear,eYear=None):
"""This function reads dst data from wdc and puts it in mongodb
.. warning::
In general, nobody except the database admins will need to use this function
**Args**:
* **sYear** (int): the year to begin mapping data
* [**eYear**] (int or None): the end year for mapping data. if this is None, eYear will be sYear
**Returns**:
* Nothing.
**Example**:
::
gme.ind.mapDstMongo(1997)
written by AJ, 20130123
"""
import pydarn.sdio.dbUtils as db
import os, datetime as dt
#check inputs
assert(isinstance(sYear,int)),'error, sYear must be int'
if(eYear == None): eYear=sYear
assert(isinstance(eYear,int)),'error, sYear must be None or int'
	assert(eYear >= sYear), 'error, end year less than start year'
#get data connection
mongoData = db.getDataConn(username=os.environ['DBWRITEUSER'],password=os.environ['DBWRITEPASS'],\
dbAddress=os.environ['SDDB'],dbName='gme',collName='dst')
#set up all of the indices
mongoData.ensure_index('time')
mongoData.ensure_index('dst')
for yr in range(sYear,eYear+1):
#1 year at a time, to not fill up RAM
templist = readDstWeb(dt.datetime(yr,1,1),dt.datetime(yr,12,31))
for rec in templist:
#check if a duplicate record exists
qry = mongoData.find({'time':rec.time})
print rec.time
tempRec = rec.toDbDict()
cnt = qry.count()
#if this is a new record, insert it
if(cnt == 0): mongoData.insert(tempRec)
#if this is an existing record, update it
elif(cnt == 1):
print 'foundone!!'
dbDict = qry.next()
temp = dbDict['_id']
dbDict = tempRec
dbDict['_id'] = temp
mongoData.save(dbDict)
else:
print 'strange, there is more than 1 DST record for',rec.time
del templist
| gpl-3.0 | 7,036,213,003,003,469,000 | 33.741722 | 327 | 0.6721 | false |
primiano/depot_tools | third_party/boto/s3/key.py | 51 | 73164 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Nexenta Systems Inc.
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import mimetypes
import os
import re
import rfc822
import StringIO
import base64
import binascii
import math
import urllib
import boto.utils
from boto.exception import BotoClientError
from boto.provider import Provider
from boto.s3.keyfile import KeyFile
from boto.s3.user import User
from boto import UserAgent
from boto.utils import compute_md5
try:
from hashlib import md5
except ImportError:
from md5 import md5
class Key(object):
"""
Represents a key (object) in an S3 bucket.
:ivar bucket: The parent :class:`boto.s3.bucket.Bucket`.
:ivar name: The name of this Key object.
:ivar metadata: A dictionary containing user metadata that you
wish to store with the object or that has been retrieved from
an existing object.
:ivar cache_control: The value of the `Cache-Control` HTTP header.
:ivar content_type: The value of the `Content-Type` HTTP header.
:ivar content_encoding: The value of the `Content-Encoding` HTTP header.
:ivar content_disposition: The value of the `Content-Disposition` HTTP
header.
:ivar content_language: The value of the `Content-Language` HTTP header.
:ivar etag: The `etag` associated with this object.
:ivar last_modified: The string timestamp representing the last
time this object was modified in S3.
:ivar owner: The ID of the owner of this object.
:ivar storage_class: The storage class of the object. Currently, one of:
STANDARD | REDUCED_REDUNDANCY | GLACIER
:ivar md5: The MD5 hash of the contents of the object.
:ivar size: The size, in bytes, of the object.
:ivar version_id: The version ID of this object, if it is a versioned
object.
:ivar encrypted: Whether the object is encrypted while at rest on
the server.
"""
DefaultContentType = 'application/octet-stream'
RestoreBody = """<?xml version="1.0" encoding="UTF-8"?>
<RestoreRequest xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Days>%s</Days>
</RestoreRequest>"""
BufferSize = 8192
# The object metadata fields a user can set, other than custom metadata
# fields (i.e., those beginning with a provider-specific prefix like
# x-amz-meta).
base_user_settable_fields = set(["cache-control", "content-disposition",
"content-encoding", "content-language",
"content-md5", "content-type"])
_underscore_base_user_settable_fields = set()
for f in base_user_settable_fields:
_underscore_base_user_settable_fields.add(f.replace('-', '_'))
def __init__(self, bucket=None, name=None):
self.bucket = bucket
self.name = name
self.metadata = {}
self.cache_control = None
self.content_type = self.DefaultContentType
self.content_encoding = None
self.content_disposition = None
self.content_language = None
self.filename = None
self.etag = None
self.is_latest = False
self.last_modified = None
self.owner = None
self.storage_class = 'STANDARD'
self.md5 = None
self.base64md5 = None
self.path = None
self.resp = None
self.mode = None
self.size = None
self.version_id = None
self.source_version_id = None
self.delete_marker = False
self.encrypted = None
# If the object is being restored, this attribute will be set to True.
# If the object is restored, it will be set to False. Otherwise this
# value will be None. If the restore is completed (ongoing_restore =
# False), the expiry_date will be populated with the expiry date of the
# restored object.
self.ongoing_restore = None
self.expiry_date = None
def __repr__(self):
if self.bucket:
return '<Key: %s,%s>' % (self.bucket.name, self.name)
else:
return '<Key: None,%s>' % self.name
def __getattr__(self, name):
if name == 'key':
return self.name
else:
raise AttributeError
def __setattr__(self, name, value):
if name == 'key':
self.__dict__['name'] = value
else:
self.__dict__[name] = value
def __iter__(self):
return self
@property
def provider(self):
provider = None
if self.bucket and self.bucket.connection:
provider = self.bucket.connection.provider
return provider
def get_md5_from_hexdigest(self, md5_hexdigest):
"""
A utility function to create the 2-tuple (md5hexdigest, base64md5)
from just having a precalculated md5_hexdigest.
"""
digest = binascii.unhexlify(md5_hexdigest)
base64md5 = base64.encodestring(digest)
if base64md5[-1] == '\n':
base64md5 = base64md5[0:-1]
return (md5_hexdigest, base64md5)
def handle_encryption_headers(self, resp):
provider = self.bucket.connection.provider
if provider.server_side_encryption_header:
self.encrypted = resp.getheader(provider.server_side_encryption_header, None)
else:
self.encrypted = None
def handle_version_headers(self, resp, force=False):
provider = self.bucket.connection.provider
# If the Key object already has a version_id attribute value, it
# means that it represents an explicit version and the user is
# doing a get_contents_*(version_id=<foo>) to retrieve another
# version of the Key. In that case, we don't really want to
# overwrite the version_id in this Key object. Comprende?
if self.version_id is None or force:
self.version_id = resp.getheader(provider.version_id, None)
self.source_version_id = resp.getheader(provider.copy_source_version_id,
None)
if resp.getheader(provider.delete_marker, 'false') == 'true':
self.delete_marker = True
else:
self.delete_marker = False
def handle_restore_headers(self, response):
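        # Example header value (per the S3 REST API docs):
        #   ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 GMT"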
header = response.getheader('x-amz-restore')
if header is None:
return
parts = header.split(',', 1)
for part in parts:
key, val = [i.strip() for i in part.split('=')]
val = val.replace('"', '')
if key == 'ongoing-request':
self.ongoing_restore = True if val.lower() == 'true' else False
elif key == 'expiry-date':
self.expiry_date = val
def open_read(self, headers=None, query_args='',
override_num_retries=None, response_headers=None):
"""
Open this key for reading
:type headers: dict
:param headers: Headers to pass in the web request
:type query_args: string
:param query_args: Arguments to pass in the query string
(ie, 'torrent')
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
"""
if self.resp == None:
self.mode = 'r'
provider = self.bucket.connection.provider
self.resp = self.bucket.connection.make_request(
'GET', self.bucket.name, self.name, headers,
query_args=query_args,
override_num_retries=override_num_retries)
if self.resp.status < 199 or self.resp.status > 299:
body = self.resp.read()
raise provider.storage_response_error(self.resp.status,
self.resp.reason, body)
response_headers = self.resp.msg
self.metadata = boto.utils.get_aws_metadata(response_headers,
provider)
for name, value in response_headers.items():
# To get correct size for Range GETs, use Content-Range
# header if one was returned. If not, use Content-Length
# header.
if (name.lower() == 'content-length' and
'Content-Range' not in response_headers):
self.size = int(value)
elif name.lower() == 'content-range':
end_range = re.sub('.*/(.*)', '\\1', value)
self.size = int(end_range)
elif name.lower() == 'etag':
self.etag = value
elif name.lower() == 'content-type':
self.content_type = value
elif name.lower() == 'content-encoding':
self.content_encoding = value
elif name.lower() == 'content-language':
self.content_language = value
elif name.lower() == 'last-modified':
self.last_modified = value
elif name.lower() == 'cache-control':
self.cache_control = value
elif name.lower() == 'content-disposition':
self.content_disposition = value
self.handle_version_headers(self.resp)
self.handle_encryption_headers(self.resp)
def open_write(self, headers=None, override_num_retries=None):
"""
Open this key for writing.
Not yet implemented
:type headers: dict
:param headers: Headers to pass in the write request
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying PUT.
"""
raise BotoClientError('Not Implemented')
def open(self, mode='r', headers=None, query_args=None,
override_num_retries=None):
if mode == 'r':
self.mode = 'r'
self.open_read(headers=headers, query_args=query_args,
override_num_retries=override_num_retries)
elif mode == 'w':
self.mode = 'w'
self.open_write(headers=headers,
override_num_retries=override_num_retries)
else:
raise BotoClientError('Invalid mode: %s' % mode)
closed = False
def close(self, fast=False):
"""
Close this key.
:type fast: bool
:param fast: True if you want the connection to be closed without first
reading the content. This should only be used in cases where subsequent
calls don't need to return the content from the open HTTP connection.
Note: As explained at
http://docs.python.org/2/library/httplib.html#httplib.HTTPConnection.getresponse,
callers must read the whole response before sending a new request to the
server. Calling Key.close(fast=True) and making a subsequent request to
the server will work because boto will get an httplib exception and
close/reopen the connection.
"""
if self.resp and not fast:
self.resp.read()
self.resp = None
self.mode = None
self.closed = True
def next(self):
"""
By providing a next method, the key object supports use as an iterator.
For example, you can now say:
for bytes in key:
write bytes to a file or whatever
All of the HTTP connection stuff is handled for you.
"""
self.open_read()
data = self.resp.read(self.BufferSize)
if not data:
self.close()
raise StopIteration
return data
def read(self, size=0):
self.open_read()
if size == 0:
data = self.resp.read()
else:
data = self.resp.read(size)
if not data:
self.close()
return data
def change_storage_class(self, new_storage_class, dst_bucket=None,
validate_dst_bucket=True):
"""
Change the storage class of an existing key.
Depending on whether a different destination bucket is supplied
or not, this will either move the item within the bucket, preserving
all metadata and ACL info bucket changing the storage class or it
will copy the item to the provided destination bucket, also
preserving metadata and ACL info.
:type new_storage_class: string
:param new_storage_class: The new storage class for the Key.
Possible values are:
* STANDARD
* REDUCED_REDUNDANCY
:type dst_bucket: string
:param dst_bucket: The name of a destination bucket. If not
provided the current bucket of the key will be used.
:type validate_dst_bucket: bool
:param validate_dst_bucket: If True, will validate the dst_bucket
by using an extra list request.
"""
if new_storage_class == 'STANDARD':
return self.copy(self.bucket.name, self.name,
reduced_redundancy=False, preserve_acl=True,
validate_dst_bucket=validate_dst_bucket)
elif new_storage_class == 'REDUCED_REDUNDANCY':
return self.copy(self.bucket.name, self.name,
reduced_redundancy=True, preserve_acl=True,
validate_dst_bucket=validate_dst_bucket)
else:
raise BotoClientError('Invalid storage class: %s' %
new_storage_class)
def copy(self, dst_bucket, dst_key, metadata=None,
reduced_redundancy=False, preserve_acl=False,
encrypt_key=False, validate_dst_bucket=True):
"""
Copy this Key to another bucket.
:type dst_bucket: string
:param dst_bucket: The name of the destination bucket
:type dst_key: string
:param dst_key: The name of the destination key
:type metadata: dict
:param metadata: Metadata to be associated with new key. If
metadata is supplied, it will replace the metadata of the
source key being copied. If no metadata is supplied, the
source key's metadata will be copied to the new key.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will force the
storage class of the new Key to be REDUCED_REDUNDANCY
regardless of the storage class of the key being copied.
            The Reduced Redundancy Storage (RRS) feature of S3
provides lower redundancy at lower storage cost.
:type preserve_acl: bool
:param preserve_acl: If True, the ACL from the source key will
be copied to the destination key. If False, the
destination key will have the default ACL. Note that
preserving the ACL in the new key object will require two
additional API calls to S3, one to retrieve the current
ACL and one to set that ACL on the new object. If you
don't care about the ACL, a value of False will be
significantly more efficient.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:type validate_dst_bucket: bool
:param validate_dst_bucket: If True, will validate the dst_bucket
by using an extra list request.
:rtype: :class:`boto.s3.key.Key` or subclass
:returns: An instance of the newly created key object
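
        Example (a sketch; ``key`` is assumed to be an existing Key)::

            new_key = key.copy('my-other-bucket', key.name,
                               preserve_acl=True)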
"""
dst_bucket = self.bucket.connection.lookup(dst_bucket,
validate_dst_bucket)
if reduced_redundancy:
storage_class = 'REDUCED_REDUNDANCY'
else:
storage_class = self.storage_class
return dst_bucket.copy_key(dst_key, self.bucket.name,
self.name, metadata,
storage_class=storage_class,
preserve_acl=preserve_acl,
encrypt_key=encrypt_key)
def startElement(self, name, attrs, connection):
if name == 'Owner':
self.owner = User(self)
return self.owner
else:
return None
def endElement(self, name, value, connection):
if name == 'Key':
self.name = value
elif name == 'ETag':
self.etag = value
elif name == 'IsLatest':
if value == 'true':
self.is_latest = True
else:
self.is_latest = False
elif name == 'LastModified':
self.last_modified = value
elif name == 'Size':
self.size = int(value)
elif name == 'StorageClass':
self.storage_class = value
elif name == 'Owner':
pass
elif name == 'VersionId':
self.version_id = value
else:
setattr(self, name, value)
def exists(self):
"""
Returns True if the key exists
:rtype: bool
:return: Whether the key exists on S3
"""
return bool(self.bucket.lookup(self.name))
def delete(self):
"""
Delete this key from S3
"""
return self.bucket.delete_key(self.name, version_id=self.version_id)
def get_metadata(self, name):
return self.metadata.get(name)
def set_metadata(self, name, value):
self.metadata[name] = value
def update_metadata(self, d):
self.metadata.update(d)
# convenience methods for setting/getting ACL
def set_acl(self, acl_str, headers=None):
if self.bucket != None:
self.bucket.set_acl(acl_str, self.name, headers=headers)
def get_acl(self, headers=None):
if self.bucket != None:
return self.bucket.get_acl(self.name, headers=headers)
def get_xml_acl(self, headers=None):
if self.bucket != None:
return self.bucket.get_xml_acl(self.name, headers=headers)
def set_xml_acl(self, acl_str, headers=None):
if self.bucket != None:
return self.bucket.set_xml_acl(acl_str, self.name, headers=headers)
def set_canned_acl(self, acl_str, headers=None):
return self.bucket.set_canned_acl(acl_str, self.name, headers)
def get_redirect(self):
"""Return the redirect location configured for this key.
If no redirect is configured (via set_redirect), then None
will be returned.
"""
response = self.bucket.connection.make_request(
'HEAD', self.bucket.name, self.name)
if response.status == 200:
return response.getheader('x-amz-website-redirect-location')
else:
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
def set_redirect(self, redirect_location):
"""Configure this key to redirect to another location.
When the bucket associated with this key is accessed from the website
endpoint, a 301 redirect will be issued to the specified
`redirect_location`.
:type redirect_location: string
:param redirect_location: The location to redirect.
"""
headers = {'x-amz-website-redirect-location': redirect_location}
response = self.bucket.connection.make_request('PUT', self.bucket.name,
self.name, headers)
if response.status == 200:
return True
else:
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
def make_public(self, headers=None):
return self.bucket.set_canned_acl('public-read', self.name, headers)
def generate_url(self, expires_in, method='GET', headers=None,
query_auth=True, force_http=False, response_headers=None,
expires_in_absolute=False, version_id=None,
policy=None, reduced_redundancy=False, encrypt_key=False):
"""
Generate a URL to access this key.
:type expires_in: int
:param expires_in: How long the url is valid for, in seconds
:type method: string
:param method: The method to use for retrieving the file
(default is GET)
:type headers: dict
:param headers: Any headers to pass along in the request
:type query_auth: bool
:param query_auth:
:type force_http: bool
:param force_http: If True, http will be used instead of https.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type expires_in_absolute: bool
:param expires_in_absolute:
:type version_id: string
:param version_id: The version_id of the object to GET. If specified
this overrides any value in the key.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3 provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:rtype: string
:return: The URL to access the key
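
        Example (a sketch; ``key`` is assumed to be an existing Key)::

            # signed GET URL that expires in one hour
            url = key.generate_url(3600)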
"""
provider = self.bucket.connection.provider
version_id = version_id or self.version_id
if headers is None:
headers = {}
else:
headers = headers.copy()
# add headers accordingly (usually PUT case)
if policy:
headers[provider.acl_header] = policy
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
return self.bucket.connection.generate_url(expires_in, method,
self.bucket.name, self.name,
headers, query_auth,
force_http,
response_headers,
expires_in_absolute,
version_id)
def send_file(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None):
"""
Upload a file to a key into a bucket on S3.
:type fp: file
:param fp: The file pointer to upload. The file pointer must
            point at the offset from which you wish to upload,
            i.e. if uploading the full file, it should point at the
start of the file. Normally when a file is opened for
reading, the fp will point at the first byte. See the
bytes parameter below for more info.
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file
transfer. Providing a negative integer will cause your
callback to be called with each buffer read.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
"""
provider = self.bucket.connection.provider
try:
spos = fp.tell()
except IOError:
spos = None
self.read_from_stream = False
def sender(http_conn, method, path, data, headers):
# This function is called repeatedly for temporary retries
# so we must be sure the file pointer is pointing at the
# start of the data.
if spos is not None and spos != fp.tell():
fp.seek(spos)
elif spos is None and self.read_from_stream:
# if seek is not supported, and we've read from this
# stream already, then we need to abort retries to
# avoid setting bad data.
raise provider.storage_data_error(
'Cannot retry failed request. fp does not support seeking.')
http_conn.putrequest(method, path)
for key in headers:
http_conn.putheader(key, headers[key])
http_conn.endheaders()
# Calculate all MD5 checksums on the fly, if not already computed
if not self.base64md5:
m = md5()
else:
m = None
save_debug = self.bucket.connection.debug
self.bucket.connection.debug = 0
# If the debuglevel < 4 we don't want to show connection
# payload, so turn off HTTP connection-level debug output (to
# be restored below).
# Use the getattr approach to allow this to work in AppEngine.
if getattr(http_conn, 'debuglevel', 0) < 4:
http_conn.set_debuglevel(0)
data_len = 0
if cb:
if size:
cb_size = size
elif self.size:
cb_size = self.size
else:
cb_size = 0
if chunked_transfer and cb_size == 0:
# For chunked Transfer, we call the cb for every 1MB
# of data transferred, except when we know size.
cb_count = (1024 * 1024) / self.BufferSize
elif num_cb > 1:
cb_count = int(math.ceil(cb_size / self.BufferSize / (num_cb - 1.0)))
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(data_len, cb_size)
bytes_togo = size
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
if spos is None:
# read at least something from a non-seekable fp.
self.read_from_stream = True
while chunk:
chunk_len = len(chunk)
data_len += chunk_len
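                # HTTP/1.1 chunked framing: a hex chunk-size line (the ';'
                # starts an empty chunk extension), the chunk bytes, then
                # CRLF; the zero-length chunk sent after this loop ends the
                # body.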
if chunked_transfer:
http_conn.send('%x;\r\n' % chunk_len)
http_conn.send(chunk)
http_conn.send('\r\n')
else:
http_conn.send(chunk)
if m:
m.update(chunk)
if bytes_togo:
bytes_togo -= chunk_len
if bytes_togo <= 0:
break
if cb:
i += 1
if i == cb_count or cb_count == -1:
cb(data_len, cb_size)
i = 0
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
self.size = data_len
if m:
# Use the chunked trailer for the digest
hd = m.hexdigest()
self.md5, self.base64md5 = self.get_md5_from_hexdigest(hd)
if chunked_transfer:
http_conn.send('0\r\n')
# http_conn.send("Content-MD5: %s\r\n" % self.base64md5)
http_conn.send('\r\n')
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
http_conn.set_debuglevel(save_debug)
self.bucket.connection.debug = save_debug
response = http_conn.getresponse()
body = response.read()
if ((response.status == 500 or response.status == 503 or
response.getheader('location')) and not chunked_transfer):
# we'll try again.
return response
elif response.status >= 200 and response.status <= 299:
self.etag = response.getheader('etag')
if self.etag != '"%s"' % self.md5:
raise provider.storage_data_error(
'ETag from S3 did not match computed MD5')
return response
else:
raise provider.storage_response_error(
response.status, response.reason, body)
if not headers:
headers = {}
else:
headers = headers.copy()
headers['User-Agent'] = UserAgent
if self.storage_class != 'STANDARD':
headers[provider.storage_class_header] = self.storage_class
if 'Content-Encoding' in headers:
self.content_encoding = headers['Content-Encoding']
        if 'Content-Language' in headers:
            self.content_language = headers['Content-Language']
if 'Content-Type' in headers:
# Some use cases need to suppress sending of the Content-Type
# header and depend on the receiving server to set the content
# type. This can be achieved by setting headers['Content-Type']
# to None when calling this method.
if headers['Content-Type'] is None:
# Delete null Content-Type value to skip sending that header.
del headers['Content-Type']
else:
self.content_type = headers['Content-Type']
elif self.path:
self.content_type = mimetypes.guess_type(self.path)[0]
if self.content_type == None:
self.content_type = self.DefaultContentType
headers['Content-Type'] = self.content_type
else:
headers['Content-Type'] = self.content_type
if self.base64md5:
headers['Content-MD5'] = self.base64md5
if chunked_transfer:
headers['Transfer-Encoding'] = 'chunked'
#if not self.base64md5:
# headers['Trailer'] = "Content-MD5"
else:
headers['Content-Length'] = str(self.size)
headers['Expect'] = '100-Continue'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
resp = self.bucket.connection.make_request('PUT', self.bucket.name,
self.name, headers,
sender=sender,
query_args=query_args)
self.handle_version_headers(resp, force=True)
def compute_md5(self, fp, size=None):
"""
:type fp: file
:param fp: File pointer to the file to MD5 hash. The file
pointer will be reset to the same position before the
method returns.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where the file is being split
            in place into different parts. Fewer bytes may be available.
:rtype: tuple
:return: A tuple containing the hex digest version of the MD5
hash as the first element and the base64 encoded version
of the plain digest as the second element.
"""
tup = compute_md5(fp, size=size)
# Returned values are MD5 hash, base64 encoded MD5 hash, and data size.
# The internal implementation of compute_md5() needs to return the
# data size but we don't want to return that value to the external
# caller because it changes the class interface (i.e. it might
# break some code) so we consume the third tuple value here and
# return the remainder of the tuple to the caller, thereby preserving
# the existing interface.
self.size = tup[2]
return tup[0:2]
def set_contents_from_stream(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None,
reduced_redundancy=False, query_args=None,
size=None):
"""
Store an object using the name of the Key object as the key in
cloud and the contents of the data stream pointed to by 'fp' as
the contents.
The stream object is not seekable and total size is not known.
This has the implication that we can't specify the
Content-Size and Content-MD5 in the header. So for huge
uploads, the delay in calculating MD5 is avoided but with a
penalty of inability to verify the integrity of the uploaded
data.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the
PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter, this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3 provides lower
redundancy at lower storage cost.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading a
file in multiple parts where you are splitting the file up
into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
"""
provider = self.bucket.connection.provider
if not provider.supports_chunked_transfer():
raise BotoClientError('%s does not support chunked transfer'
% provider.get_provider_name())
# Name of the Object should be specified explicitly for Streams.
if not self.name or self.name == '':
raise BotoClientError('Cannot determine the destination '
'object name for the given stream')
if headers is None:
headers = {}
if policy:
headers[provider.acl_header] = policy
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if self.bucket != None:
if not replace:
if self.bucket.lookup(self.name):
return
self.send_file(fp, headers, cb, num_cb, query_args,
chunked_transfer=True, size=size)
def set_contents_from_file(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False, query_args=None,
encrypt_key=False, size=None, rewind=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file pointed to by 'fp' as the
contents. The data is read from 'fp' from its current position until
'size' bytes have been read or EOF.
:type fp: file
:param fp: the file whose contents to upload
:type headers: dict
:param headers: Additional HTTP headers that will be sent with
the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will
first check to see if an object exists in the bucket with
the same key. If it does, it won't overwrite it. The
default value is True which will overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3 provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
:type rewind: bool
:param rewind: (optional) If True, the file pointer (fp) will
be rewound to the start before any bytes are read from
it. The default behaviour is False which reads from the
current position of the file pointer (fp).
:rtype: int
:return: The number of bytes written to the key.
"""
provider = self.bucket.connection.provider
headers = headers or {}
if policy:
headers[provider.acl_header] = policy
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
if rewind:
# caller requests reading from beginning of fp.
fp.seek(0, os.SEEK_SET)
else:
# The following seek/tell/seek logic is intended
# to detect applications using the older interface to
# set_contents_from_file(), which automatically rewound the
# file each time the Key was reused. This changed with commit
# 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads
# split into multiple parts and uploaded in parallel, and at
# the time of that commit this check was added because otherwise
# older programs would get a success status and upload an empty
            # object. Unfortunately, it's very inefficient for fp's implemented
# by KeyFile (used, for example, by gsutil when copying between
# providers). So, we skip the check for the KeyFile case.
# TODO: At some point consider removing this seek/tell/seek
# logic, after enough time has passed that it's unlikely any
# programs remain that assume the older auto-rewind interface.
if not isinstance(fp, KeyFile):
spos = fp.tell()
fp.seek(0, os.SEEK_END)
if fp.tell() == spos:
fp.seek(0, os.SEEK_SET)
if fp.tell() != spos:
# Raise an exception as this is likely a programming
# error whereby there is data before the fp but nothing
# after it.
fp.seek(spos)
raise AttributeError('fp is at EOF. Use rewind option '
'or seek() to data start.')
# seek back to the correct position.
fp.seek(spos)
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
            # TODO - What if provider doesn't support reduced redundancy?
# What if different providers provide different classes?
if hasattr(fp, 'name'):
self.path = fp.name
if self.bucket != None:
if not md5 and provider.supports_chunked_transfer():
# defer md5 calculation to on the fly and
# we don't know anything about size yet.
chunked_transfer = True
self.size = None
else:
chunked_transfer = False
if isinstance(fp, KeyFile):
# Avoid EOF seek for KeyFile case as it's very inefficient.
key = fp.getkey()
size = key.size - fp.tell()
self.size = size
# At present both GCS and S3 use MD5 for the etag for
# non-multipart-uploaded objects. If the etag is 32 hex
# chars use it as an MD5, to avoid having to read the file
# twice while transferring.
if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)):
etag = key.etag.strip('"')
md5 = (etag, base64.b64encode(binascii.unhexlify(etag)))
if not md5:
                    # compute_md5() will also set self.size to the actual
                    # number of bytes read while computing the md5.
md5 = self.compute_md5(fp, size)
# adjust size if required
size = self.size
elif size:
self.size = size
else:
                # If md5 is provided, we still need the size, so
                # calculate it based on the bytes to the end of the content.
spos = fp.tell()
fp.seek(0, os.SEEK_END)
self.size = fp.tell() - spos
fp.seek(spos)
size = self.size
self.md5 = md5[0]
self.base64md5 = md5[1]
            if self.name is None:
self.name = self.md5
if not replace:
if self.bucket.lookup(self.name):
return
self.send_file(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size)
# return number of bytes written.
return self.size
def set_contents_from_filename(self, filename, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file named by 'filename'.
See set_contents_from_file method for details about the
parameters.
:type filename: string
:param filename: The name of the file that you want to put onto S3
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file
if it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
            the second representing the total size of the object
            being transmitted.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
        :type md5: tuple
        :param md5: A tuple containing the hexdigest version of the MD5
            checksum of the file as the first element and the
            Base64-encoded version of the plain checksum as the second
            element. This is the same format returned by the
            compute_md5 method. If you need to compute the MD5 for any
            reason prior to upload, it's silly to have to do it twice,
            so this param, if present, will be used as the MD5 values
            of the file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3 provides lower
            redundancy at a lower storage cost.
        :type encrypt_key: bool
        :param encrypt_key: If True, the new copy of the object
            will be encrypted on the server-side by S3 and will be
            stored in an encrypted form while at rest in S3.
"""
fp = open(filename, 'rb')
try:
self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, reduced_redundancy,
encrypt_key=encrypt_key)
finally:
fp.close()
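    # Usage sketch (illustrative only, not part of boto): upload a local
    # file, assuming a configured connection and an existing bucket
    # named 'my-bucket'.
    #
    #   import boto
    #   conn = boto.connect_s3()
    #   bucket = conn.get_bucket('my-bucket')
    #   key = bucket.new_key('docs/report.txt')
    #   key.set_contents_from_filename('/tmp/report.txt',
    #                                  reduced_redundancy=True)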
def set_contents_from_string(self, s, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the string 's' as the contents.
See set_contents_from_file method for details about the
parameters.
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file if
it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
            the second representing the total size of the object
            being transmitted.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
        :type md5: tuple
        :param md5: A tuple containing the hexdigest version of the MD5
            checksum of the file as the first element and the
            Base64-encoded version of the plain checksum as the second
            element. This is the same format returned by the
            compute_md5 method. If you need to compute the MD5 for any
            reason prior to upload, it's silly to have to do it twice,
            so this param, if present, will be used as the MD5 values
            of the file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3 provides lower
            redundancy at a lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
"""
if isinstance(s, unicode):
s = s.encode("utf-8")
fp = StringIO.StringIO(s)
r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, reduced_redundancy,
encrypt_key=encrypt_key)
fp.close()
return r
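    # Usage sketch (illustrative only): store a small in-memory payload,
    # assuming `bucket` is an existing boto Bucket.
    #
    #   key = bucket.new_key('notes/hello.txt')
    #   key.set_contents_from_string(u'hello world', encrypt_key=True)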
def get_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None):
"""
Retrieves a file from an S3 Key
:type fp: file
:param fp: File pointer to put the data into
        :type headers: dict
        :param headers: headers to send when retrieving the file
:type cb: function
        :param cb: a callback function that will be called to report
            progress on the download. The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully received from S3 and
            the second representing the total size of the object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: Flag for whether to get a torrent for the file
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
"""
self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
torrent=torrent, version_id=version_id,
override_num_retries=override_num_retries,
response_headers=response_headers,
query_args=None)
def _get_file_internal(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None, query_args=None):
if headers is None:
headers = {}
save_debug = self.bucket.connection.debug
if self.bucket.connection.debug == 1:
self.bucket.connection.debug = 0
query_args = query_args or []
if torrent:
query_args.append('torrent')
m = None
else:
m = md5()
# If a version_id is passed in, use that. If not, check to see
# if the Key object has an explicit version_id and, if so, use that.
# Otherwise, don't pass a version_id query param.
if version_id is None:
version_id = self.version_id
if version_id:
query_args.append('versionId=%s' % version_id)
if response_headers:
for key in response_headers:
query_args.append('%s=%s' % (key, urllib.quote(response_headers[key])))
query_args = '&'.join(query_args)
self.open('r', headers, query_args=query_args,
override_num_retries=override_num_retries)
data_len = 0
if cb:
if self.size is None:
cb_size = 0
else:
cb_size = self.size
if self.size is None and num_cb != -1:
# If size is not available due to chunked transfer for example,
# we'll call the cb for every 1MB of data transferred.
cb_count = (1024 * 1024) / self.BufferSize
elif num_cb > 1:
cb_count = int(math.ceil(cb_size/self.BufferSize/(num_cb-1.0)))
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(data_len, cb_size)
for bytes in self:
fp.write(bytes)
data_len += len(bytes)
if m:
m.update(bytes)
if cb:
if cb_size > 0 and data_len >= cb_size:
break
i += 1
if i == cb_count or cb_count == -1:
cb(data_len, cb_size)
i = 0
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
if m:
self.md5 = m.hexdigest()
if self.size is None and not torrent and "Range" not in headers:
self.size = data_len
self.close()
self.bucket.connection.debug = save_debug
def get_torrent_file(self, fp, headers=None, cb=None, num_cb=10):
"""
        Get a torrent file (see get_file)
:type fp: file
:param fp: The file pointer of where to put the torrent
:type headers: dict
:param headers: Headers to be passed
:type cb: function
        :param cb: a callback function that will be called to report
            progress on the download. The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully received from S3 and
            the second representing the total size of the object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
"""
return self.get_file(fp, headers, cb, num_cb, torrent=True)
def get_contents_to_file(self, fp, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Write the contents of the object to the file pointed
to by 'fp'.
        :type fp: file-like object
        :param fp: the file pointer to which the object's contents
            will be written
:type headers: dict
:param headers: additional HTTP headers that will be sent with
the GET request.
:type cb: function
        :param cb: a callback function that will be called to report
            progress on the download. The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully received from S3 and
            the second representing the total size of the object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
        :param torrent: If True, retrieves a torrent file for the key
            rather than the key's contents.
        :type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
"""
        if self.bucket is not None:
if res_download_handler:
res_download_handler.get_file(self, fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id)
else:
self.get_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers)
def get_contents_to_filename(self, filename, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Store contents of the object to a file named by 'filename'.
See get_contents_to_file method for details about the
parameters.
:type filename: string
:param filename: The filename of where to put the file contents
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
        :param cb: a callback function that will be called to report
            progress on the download. The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully received from S3 and
            the second representing the total size of the object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
        :param torrent: If True, retrieves a torrent file for the key
            rather than the key's contents.
        :type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
"""
fp = open(filename, 'wb')
try:
self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
res_download_handler=res_download_handler,
response_headers=response_headers)
except Exception:
os.remove(filename)
raise
finally:
fp.close()
# if last_modified date was sent from s3, try to set file's timestamp
        if self.last_modified is not None:
try:
modified_tuple = rfc822.parsedate_tz(self.last_modified)
modified_stamp = int(rfc822.mktime_tz(modified_tuple))
os.utime(fp.name, (modified_stamp, modified_stamp))
except Exception:
pass
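    # Usage sketch (illustrative only): download a key to a local file,
    # assuming `bucket` is an existing boto Bucket.
    #
    #   key = bucket.get_key('docs/report.txt')
    #   key.get_contents_to_filename('/tmp/report-copy.txt')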
def get_contents_as_string(self, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Return the contents of the object as a string.
See get_contents_to_file method for details about the
parameters.
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
        :param cb: a callback function that will be called to report
            progress on the download. The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully received from S3 and
            the second representing the total size of the object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
as a string.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:rtype: string
:returns: The contents of the file as a string
"""
fp = StringIO.StringIO()
self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers)
return fp.getvalue()
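    # Usage sketch (illustrative only): read a small object straight into
    # memory; large objects are better streamed to a file instead.
    #
    #   body = key.get_contents_as_string()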
def add_email_grant(self, permission, email_address, headers=None):
"""
Convenience method that provides a quick way to add an email grant
to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL
and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
        :type email_address: string
        :param email_address: The email address associated with the AWS
            account you are granting the permission to.
        :type headers: dict
        :param headers: Any additional headers to send in the request.
"""
policy = self.get_acl(headers=headers)
policy.acl.add_email_grant(permission, email_address)
self.set_acl(policy, headers=headers)
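    # Usage sketch (illustrative only; the address is hypothetical):
    #
    #   key.add_email_grant('READ', 'someone@example.com')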
def add_user_grant(self, permission, user_id, headers=None,
display_name=None):
"""
Convenience method that provides a quick way to add a canonical
user grant to a key. This method retrieves the current ACL,
creates a new grant based on the parameters passed in, adds that
grant to the ACL and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type user_id: string
        :param user_id: The canonical user id associated with the AWS
            account you are granting the permission to.
:type display_name: string
        :param display_name: An optional string containing the user's
Display Name. Only required on Walrus.
"""
policy = self.get_acl(headers=headers)
policy.acl.add_user_grant(permission, user_id,
display_name=display_name)
self.set_acl(policy, headers=headers)
def _normalize_metadata(self, metadata):
        if isinstance(metadata, set):
norm_metadata = set()
for k in metadata:
norm_metadata.add(k.lower())
else:
norm_metadata = {}
for k in metadata:
norm_metadata[k.lower()] = metadata[k]
return norm_metadata
def _get_remote_metadata(self, headers=None):
"""
Extracts metadata from existing URI into a dict, so we can
overwrite/delete from it to form the new set of metadata to apply to a
key.
"""
metadata = {}
for underscore_name in self._underscore_base_user_settable_fields:
if hasattr(self, underscore_name):
value = getattr(self, underscore_name)
if value:
# Generate HTTP field name corresponding to "_" named field.
field_name = underscore_name.replace('_', '-')
metadata[field_name.lower()] = value
# self.metadata contains custom metadata, which are all user-settable.
prefix = self.provider.metadata_prefix
for underscore_name in self.metadata:
field_name = underscore_name.replace('_', '-')
metadata['%s%s' % (prefix, field_name.lower())] = (
self.metadata[underscore_name])
return metadata
def set_remote_metadata(self, metadata_plus, metadata_minus, preserve_acl,
headers=None):
metadata_plus = self._normalize_metadata(metadata_plus)
metadata_minus = self._normalize_metadata(metadata_minus)
metadata = self._get_remote_metadata()
metadata.update(metadata_plus)
for h in metadata_minus:
if h in metadata:
del metadata[h]
src_bucket = self.bucket
# Boto prepends the meta prefix when adding headers, so strip prefix in
# metadata before sending back in to copy_key() call.
rewritten_metadata = {}
for h in metadata:
if (h.startswith('x-goog-meta-') or h.startswith('x-amz-meta-')):
rewritten_h = (h.replace('x-goog-meta-', '')
.replace('x-amz-meta-', ''))
else:
rewritten_h = h
rewritten_metadata[rewritten_h] = metadata[h]
metadata = rewritten_metadata
src_bucket.copy_key(self.name, self.bucket.name, self.name,
metadata=metadata, preserve_acl=preserve_acl,
headers=headers)
def restore(self, days, headers=None):
"""Restore an object from an archive.
:type days: int
:param days: The lifetime of the restored object (must
be at least 1 day). If the object is already restored
then this parameter can be used to readjust the lifetime
of the restored object. In this case, the days
param is with respect to the initial time of the request.
If the object has not been restored, this param is with
respect to the completion time of the request.
"""
response = self.bucket.connection.make_request(
'POST', self.bucket.name, self.name,
data=self.RestoreBody % days,
headers=headers, query_args='restore')
if response.status not in (200, 202):
provider = self.bucket.connection.provider
raise provider.storage_response_error(response.status,
response.reason,
response.read())
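    # Usage sketch (illustrative only): ask S3 to restore an archived
    # object for two days before downloading it.
    #
    #   key.restore(days=2)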
| bsd-3-clause | 2,289,313,997,783,049,000 | 41.735981 | 89 | 0.578331 | false |
madphysicist/numpy | numpy/matrixlib/tests/test_matrix_linalg.py | 17 | 2059 | """ Test functions for linalg module using the matrix class."""
import numpy as np
from numpy.linalg.tests.test_linalg import (
LinalgCase, apply_tag, TestQR as _TestQR, LinalgTestCase,
_TestNorm2D, _TestNormDoubleBase, _TestNormSingleBase, _TestNormInt64Base,
SolveCases, InvCases, EigvalsCases, EigCases, SVDCases, CondCases,
PinvCases, DetCases, LstsqCases)
CASES = []
# square test cases
CASES += apply_tag('square', [
LinalgCase("0x0_matrix",
np.empty((0, 0), dtype=np.double).view(np.matrix),
np.empty((0, 1), dtype=np.double).view(np.matrix),
tags={'size-0'}),
LinalgCase("matrix_b_only",
np.array([[1., 2.], [3., 4.]]),
np.matrix([2., 1.]).T),
LinalgCase("matrix_a_and_b",
np.matrix([[1., 2.], [3., 4.]]),
np.matrix([2., 1.]).T),
])
# hermitian test-cases
CASES += apply_tag('hermitian', [
LinalgCase("hmatrix_a_and_b",
np.matrix([[1., 2.], [2., 1.]]),
None),
])
# No need to make generalized or strided cases for matrices.
class MatrixTestCase(LinalgTestCase):
TEST_CASES = CASES
class TestSolveMatrix(SolveCases, MatrixTestCase):
pass
class TestInvMatrix(InvCases, MatrixTestCase):
pass
class TestEigvalsMatrix(EigvalsCases, MatrixTestCase):
pass
class TestEigMatrix(EigCases, MatrixTestCase):
pass
class TestSVDMatrix(SVDCases, MatrixTestCase):
pass
class TestCondMatrix(CondCases, MatrixTestCase):
pass
class TestPinvMatrix(PinvCases, MatrixTestCase):
pass
class TestDetMatrix(DetCases, MatrixTestCase):
pass
class TestLstsqMatrix(LstsqCases, MatrixTestCase):
pass
class _TestNorm2DMatrix(_TestNorm2D):
array = np.matrix
class TestNormDoubleMatrix(_TestNorm2DMatrix, _TestNormDoubleBase):
pass
class TestNormSingleMatrix(_TestNorm2DMatrix, _TestNormSingleBase):
pass
class TestNormInt64Matrix(_TestNorm2DMatrix, _TestNormInt64Base):
pass
class TestQRMatrix(_TestQR):
array = np.matrix
| bsd-3-clause | 2,752,855,765,455,599,000 | 21.139785 | 78 | 0.663915 | false |
houzhenggang/hiwifi-openwrt-HC5661-HC5761 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/test/test_signal.py | 39 | 18682 | import unittest
from test import test_support
from contextlib import closing
import gc
import pickle
import select
import signal
import subprocess
import traceback
import sys, os, time, errno
if sys.platform in ('os2', 'riscos'):
raise unittest.SkipTest("Can't test signal on %s" % sys.platform)
class HandlerBCalled(Exception):
pass
def exit_subprocess():
"""Use os._exit(0) to exit the current subprocess.
Otherwise, the test catches the SystemExit and continues executing
in parallel with the original test, so you wind up with an
exponential number of tests running concurrently.
"""
os._exit(0)
def ignoring_eintr(__func, *args, **kwargs):
try:
return __func(*args, **kwargs)
except EnvironmentError as e:
if e.errno != errno.EINTR:
raise
return None
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class InterProcessSignalTests(unittest.TestCase):
MAX_DURATION = 20 # Entire test should last at most 20 sec.
def setUp(self):
self.using_gc = gc.isenabled()
gc.disable()
def tearDown(self):
if self.using_gc:
gc.enable()
def format_frame(self, frame, limit=None):
return ''.join(traceback.format_stack(frame, limit=limit))
def handlerA(self, signum, frame):
self.a_called = True
if test_support.verbose:
print "handlerA invoked from signal %s at:\n%s" % (
signum, self.format_frame(frame, limit=1))
def handlerB(self, signum, frame):
self.b_called = True
if test_support.verbose:
print "handlerB invoked from signal %s at:\n%s" % (
signum, self.format_frame(frame, limit=1))
raise HandlerBCalled(signum, self.format_frame(frame))
def wait(self, child):
"""Wait for child to finish, ignoring EINTR."""
while True:
try:
child.wait()
return
except OSError as e:
if e.errno != errno.EINTR:
raise
def run_test(self):
# Install handlers. This function runs in a sub-process, so we
# don't worry about re-setting the default handlers.
signal.signal(signal.SIGHUP, self.handlerA)
signal.signal(signal.SIGUSR1, self.handlerB)
signal.signal(signal.SIGUSR2, signal.SIG_IGN)
signal.signal(signal.SIGALRM, signal.default_int_handler)
# Variables the signals will modify:
self.a_called = False
self.b_called = False
# Let the sub-processes know who to send signals to.
pid = os.getpid()
if test_support.verbose:
print "test runner's pid is", pid
child = ignoring_eintr(subprocess.Popen, ['kill', '-HUP', str(pid)])
if child:
self.wait(child)
if not self.a_called:
time.sleep(1) # Give the signal time to be delivered.
self.assertTrue(self.a_called)
self.assertFalse(self.b_called)
self.a_called = False
# Make sure the signal isn't delivered while the previous
# Popen object is being destroyed, because __del__ swallows
# exceptions.
del child
try:
child = subprocess.Popen(['kill', '-USR1', str(pid)])
# This wait should be interrupted by the signal's exception.
self.wait(child)
time.sleep(1) # Give the signal time to be delivered.
self.fail('HandlerBCalled exception not thrown')
except HandlerBCalled:
self.assertTrue(self.b_called)
self.assertFalse(self.a_called)
if test_support.verbose:
print "HandlerBCalled exception caught"
child = ignoring_eintr(subprocess.Popen, ['kill', '-USR2', str(pid)])
if child:
self.wait(child) # Nothing should happen.
try:
signal.alarm(1)
# The race condition in pause doesn't matter in this case,
            # since alarm is going to raise a KeyboardInterrupt, which
# will skip the call.
signal.pause()
# But if another signal arrives before the alarm, pause
# may return early.
time.sleep(1)
except KeyboardInterrupt:
if test_support.verbose:
print "KeyboardInterrupt (the alarm() went off)"
except:
self.fail("Some other exception woke us from pause: %s" %
traceback.format_exc())
else:
self.fail("pause returned of its own accord, and the signal"
" didn't arrive after another second.")
# Issue 3864. Unknown if this affects earlier versions of freebsd also.
@unittest.skipIf(sys.platform=='freebsd6',
'inter process signals not reliable (do not mix well with threading) '
'on freebsd6')
def test_main(self):
# This function spawns a child process to insulate the main
# test-running process from all the signals. It then
# communicates with that child process over a pipe and
# re-raises information about any exceptions the child
# throws. The real work happens in self.run_test().
os_done_r, os_done_w = os.pipe()
with closing(os.fdopen(os_done_r)) as done_r, \
closing(os.fdopen(os_done_w, 'w')) as done_w:
child = os.fork()
if child == 0:
# In the child process; run the test and report results
# through the pipe.
try:
done_r.close()
# Have to close done_w again here because
# exit_subprocess() will skip the enclosing with block.
with closing(done_w):
try:
self.run_test()
except:
pickle.dump(traceback.format_exc(), done_w)
else:
pickle.dump(None, done_w)
except:
print 'Uh oh, raised from pickle.'
traceback.print_exc()
finally:
exit_subprocess()
done_w.close()
# Block for up to MAX_DURATION seconds for the test to finish.
r, w, x = select.select([done_r], [], [], self.MAX_DURATION)
if done_r in r:
tb = pickle.load(done_r)
if tb:
self.fail(tb)
else:
os.kill(child, signal.SIGKILL)
self.fail('Test deadlocked after %d seconds.' %
self.MAX_DURATION)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class BasicSignalTests(unittest.TestCase):
def trivial_signal_handler(self, *args):
pass
def test_out_of_range_signal_number_raises_error(self):
self.assertRaises(ValueError, signal.getsignal, 4242)
self.assertRaises(ValueError, signal.signal, 4242,
self.trivial_signal_handler)
def test_setting_signal_handler_to_none_raises_error(self):
self.assertRaises(TypeError, signal.signal,
signal.SIGUSR1, None)
def test_getsignal(self):
hup = signal.signal(signal.SIGHUP, self.trivial_signal_handler)
self.assertEqual(signal.getsignal(signal.SIGHUP),
self.trivial_signal_handler)
signal.signal(signal.SIGHUP, hup)
self.assertEqual(signal.getsignal(signal.SIGHUP), hup)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
class WindowsSignalTests(unittest.TestCase):
def test_issue9324(self):
# Updated for issue #10003, adding SIGBREAK
handler = lambda x, y: None
for sig in (signal.SIGABRT, signal.SIGBREAK, signal.SIGFPE,
signal.SIGILL, signal.SIGINT, signal.SIGSEGV,
signal.SIGTERM):
# Set and then reset a handler for signals that work on windows
signal.signal(sig, signal.signal(sig, handler))
with self.assertRaises(ValueError):
signal.signal(-1, handler)
with self.assertRaises(ValueError):
signal.signal(7, handler)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class WakeupSignalTests(unittest.TestCase):
TIMEOUT_FULL = 10
TIMEOUT_HALF = 5
def test_wakeup_fd_early(self):
import select
signal.alarm(1)
before_time = time.time()
# We attempt to get a signal during the sleep,
# before select is called
time.sleep(self.TIMEOUT_FULL)
mid_time = time.time()
self.assertTrue(mid_time - before_time < self.TIMEOUT_HALF)
select.select([self.read], [], [], self.TIMEOUT_FULL)
after_time = time.time()
self.assertTrue(after_time - mid_time < self.TIMEOUT_HALF)
def test_wakeup_fd_during(self):
import select
signal.alarm(1)
before_time = time.time()
# We attempt to get a signal during the select call
self.assertRaises(select.error, select.select,
[self.read], [], [], self.TIMEOUT_FULL)
after_time = time.time()
self.assertTrue(after_time - before_time < self.TIMEOUT_HALF)
def setUp(self):
import fcntl
self.alrm = signal.signal(signal.SIGALRM, lambda x,y:None)
self.read, self.write = os.pipe()
flags = fcntl.fcntl(self.write, fcntl.F_GETFL, 0)
flags = flags | os.O_NONBLOCK
fcntl.fcntl(self.write, fcntl.F_SETFL, flags)
self.old_wakeup = signal.set_wakeup_fd(self.write)
def tearDown(self):
signal.set_wakeup_fd(self.old_wakeup)
os.close(self.read)
os.close(self.write)
signal.signal(signal.SIGALRM, self.alrm)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class SiginterruptTest(unittest.TestCase):
def setUp(self):
"""Install a no-op signal handler that can be set to allow
interrupts or not, and arrange for the original signal handler to be
re-installed when the test is finished.
"""
self.signum = signal.SIGUSR1
oldhandler = signal.signal(self.signum, lambda x,y: None)
self.addCleanup(signal.signal, self.signum, oldhandler)
def readpipe_interrupted(self):
"""Perform a read during which a signal will arrive. Return True if the
read is interrupted by the signal and raises an exception. Return False
if it returns normally.
"""
# Create a pipe that can be used for the read. Also clean it up
# when the test is over, since nothing else will (but see below for
# the write end).
r, w = os.pipe()
self.addCleanup(os.close, r)
# Create another process which can send a signal to this one to try
# to interrupt the read.
ppid = os.getpid()
pid = os.fork()
if pid == 0:
# Child code: sleep to give the parent enough time to enter the
# read() call (there's a race here, but it's really tricky to
# eliminate it); then signal the parent process. Also, sleep
# again to make it likely that the signal is delivered to the
# parent process before the child exits. If the child exits
# first, the write end of the pipe will be closed and the test
# is invalid.
try:
time.sleep(0.2)
os.kill(ppid, self.signum)
time.sleep(0.2)
finally:
# No matter what, just exit as fast as possible now.
exit_subprocess()
else:
# Parent code.
# Make sure the child is eventually reaped, else it'll be a
# zombie for the rest of the test suite run.
self.addCleanup(os.waitpid, pid, 0)
# Close the write end of the pipe. The child has a copy, so
# it's not really closed until the child exits. We need it to
# close when the child exits so that in the non-interrupt case
# the read eventually completes, otherwise we could just close
# it *after* the test.
os.close(w)
# Try the read and report whether it is interrupted or not to
# the caller.
try:
d = os.read(r, 1)
return False
except OSError, err:
if err.errno != errno.EINTR:
raise
return True
def test_without_siginterrupt(self):
"""If a signal handler is installed and siginterrupt is not called
at all, when that signal arrives, it interrupts a syscall that's in
progress.
"""
i = self.readpipe_interrupted()
self.assertTrue(i)
# Arrival of the signal shouldn't have changed anything.
i = self.readpipe_interrupted()
self.assertTrue(i)
def test_siginterrupt_on(self):
"""If a signal handler is installed and siginterrupt is called with
a true value for the second argument, when that signal arrives, it
interrupts a syscall that's in progress.
"""
signal.siginterrupt(self.signum, 1)
i = self.readpipe_interrupted()
self.assertTrue(i)
# Arrival of the signal shouldn't have changed anything.
i = self.readpipe_interrupted()
self.assertTrue(i)
def test_siginterrupt_off(self):
"""If a signal handler is installed and siginterrupt is called with
a false value for the second argument, when that signal arrives, it
does not interrupt a syscall that's in progress.
"""
signal.siginterrupt(self.signum, 0)
i = self.readpipe_interrupted()
self.assertFalse(i)
# Arrival of the signal shouldn't have changed anything.
i = self.readpipe_interrupted()
self.assertFalse(i)
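# Minimal sketch (not part of the test suite) of the behaviour exercised
# above, assuming a POSIX platform:
#
#   signal.signal(signal.SIGUSR1, lambda signum, frame: None)
#   # Restart interrupted syscalls instead of raising EINTR:
#   signal.siginterrupt(signal.SIGUSR1, False)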
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class ItimerTest(unittest.TestCase):
def setUp(self):
self.hndl_called = False
self.hndl_count = 0
self.itimer = None
self.old_alarm = signal.signal(signal.SIGALRM, self.sig_alrm)
def tearDown(self):
signal.signal(signal.SIGALRM, self.old_alarm)
if self.itimer is not None: # test_itimer_exc doesn't change this attr
# just ensure that itimer is stopped
signal.setitimer(self.itimer, 0)
def sig_alrm(self, *args):
self.hndl_called = True
if test_support.verbose:
print("SIGALRM handler invoked", args)
def sig_vtalrm(self, *args):
self.hndl_called = True
if self.hndl_count > 3:
# it shouldn't be here, because it should have been disabled.
raise signal.ItimerError("setitimer didn't disable ITIMER_VIRTUAL "
"timer.")
elif self.hndl_count == 3:
# disable ITIMER_VIRTUAL, this function shouldn't be called anymore
signal.setitimer(signal.ITIMER_VIRTUAL, 0)
if test_support.verbose:
print("last SIGVTALRM handler call")
self.hndl_count += 1
if test_support.verbose:
print("SIGVTALRM handler invoked", args)
def sig_prof(self, *args):
self.hndl_called = True
signal.setitimer(signal.ITIMER_PROF, 0)
if test_support.verbose:
print("SIGPROF handler invoked", args)
def test_itimer_exc(self):
# XXX I'm assuming -1 is an invalid itimer, but maybe some platform
# defines it ?
self.assertRaises(signal.ItimerError, signal.setitimer, -1, 0)
# Negative times are treated as zero on some platforms.
if 0:
self.assertRaises(signal.ItimerError,
signal.setitimer, signal.ITIMER_REAL, -1)
def test_itimer_real(self):
self.itimer = signal.ITIMER_REAL
signal.setitimer(self.itimer, 1.0)
if test_support.verbose:
print("\ncall pause()...")
signal.pause()
self.assertEqual(self.hndl_called, True)
# Issue 3864. Unknown if this affects earlier versions of freebsd also.
@unittest.skipIf(sys.platform in ('freebsd6', 'netbsd5'),
'itimer not reliable (does not mix well with threading) on some BSDs.')
def test_itimer_virtual(self):
self.itimer = signal.ITIMER_VIRTUAL
signal.signal(signal.SIGVTALRM, self.sig_vtalrm)
signal.setitimer(self.itimer, 0.3, 0.2)
start_time = time.time()
while time.time() - start_time < 60.0:
# use up some virtual time by doing real work
_ = pow(12345, 67890, 10000019)
if signal.getitimer(self.itimer) == (0.0, 0.0):
break # sig_vtalrm handler stopped this itimer
else: # Issue 8424
self.skipTest("timeout: likely cause: machine too slow or load too "
"high")
# virtual itimer should be (0.0, 0.0) now
self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
# and the handler should have been called
self.assertEqual(self.hndl_called, True)
# Issue 3864. Unknown if this affects earlier versions of freebsd also.
@unittest.skipIf(sys.platform=='freebsd6',
'itimer not reliable (does not mix well with threading) on freebsd6')
def test_itimer_prof(self):
self.itimer = signal.ITIMER_PROF
signal.signal(signal.SIGPROF, self.sig_prof)
signal.setitimer(self.itimer, 0.2, 0.2)
start_time = time.time()
while time.time() - start_time < 60.0:
# do some work
_ = pow(12345, 67890, 10000019)
if signal.getitimer(self.itimer) == (0.0, 0.0):
break # sig_prof handler stopped this itimer
else: # Issue 8424
self.skipTest("timeout: likely cause: machine too slow or load too "
"high")
# profiling itimer should be (0.0, 0.0) now
self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
# and the handler should have been called
self.assertEqual(self.hndl_called, True)
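# Minimal sketch (not part of the tests): fire SIGALRM once after 0.5s and
# then every 0.25s, assuming a SIGALRM handler is already installed:
#
#   signal.setitimer(signal.ITIMER_REAL, 0.5, 0.25)
#   signal.setitimer(signal.ITIMER_REAL, 0)  # disable the timer again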
def test_main():
test_support.run_unittest(BasicSignalTests, InterProcessSignalTests,
WakeupSignalTests, SiginterruptTest,
ItimerTest, WindowsSignalTests)
if __name__ == "__main__":
test_main()
| gpl-2.0 | -7,468,738,900,629,216,000 | 36.894523 | 80 | 0.592763 | false |
asgard-lab/neutron | neutron/api/versions.py | 23 | 1958 | # Copyright 2011 Citrix Systems.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_i18n
from oslo_log import log as logging
import webob.dec
from neutron.api.views import versions as versions_view
from neutron import wsgi
LOG = logging.getLogger(__name__)
class Versions(object):
@classmethod
def factory(cls, global_config, **local_config):
return cls()
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
"""Respond to a request for all Neutron API versions."""
version_objs = [
{
"id": "v2.0",
"status": "CURRENT",
},
]
if req.path != '/':
language = req.best_match_language()
msg = _('Unknown API version specified')
msg = oslo_i18n.translate(msg, language)
return webob.exc.HTTPNotFound(explanation=msg)
builder = versions_view.get_view_builder(req)
versions = [builder.build(version) for version in version_objs]
response = dict(versions=versions)
metadata = {}
content_type = req.best_match_content_type()
body = (wsgi.Serializer(metadata=metadata).
serialize(response, content_type))
response = webob.Response()
response.content_type = content_type
response.body = wsgi.encode_body(body)
return response
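# Deployment sketch (illustrative only; the section name is an assumption):
# this WSGI app is typically wired up through a paste deploy config such as
#
#   [app:neutronversions]
#   paste.app_factory = neutron.api.versions:Versions.factory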
| apache-2.0 | 1,112,696,156,696,895,100 | 30.580645 | 78 | 0.639939 | false |
ArnossArnossi/django | django/contrib/staticfiles/views.py | 581 | 1329 | """
Views and functions for serving static files. These are only to be used during
development, and SHOULD NOT be used in a production setting.
"""
import os
import posixpath
from django.conf import settings
from django.contrib.staticfiles import finders
from django.http import Http404
from django.utils.six.moves.urllib.parse import unquote
from django.views import static
def serve(request, path, insecure=False, **kwargs):
"""
Serve static files below a given point in the directory structure or
from locations inferred from the staticfiles finders.
To use, put a URL pattern such as::
from django.contrib.staticfiles import views
url(r'^(?P<path>.*)$', views.serve)
in your URLconf.
It uses the django.views.static.serve() view to serve the found files.
"""
if not settings.DEBUG and not insecure:
raise Http404
normalized_path = posixpath.normpath(unquote(path)).lstrip('/')
absolute_path = finders.find(normalized_path)
if not absolute_path:
if path.endswith('/') or path == '':
raise Http404("Directory indexes are not allowed here.")
raise Http404("'%s' could not be found" % path)
document_root, path = os.path.split(absolute_path)
return static.serve(request, path, document_root=document_root, **kwargs)
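# Usage sketch (illustrative only): serving even when DEBUG is False
# requires opting in explicitly via the `insecure` flag.
#
#   url(r'^static/(?P<path>.*)$', serve, {'insecure': True})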
| bsd-3-clause | -524,331,963,554,572,300 | 32.225 | 78 | 0.705794 | false |
educloudalliance/eca-auth-connector | selector/migrations/0001_initial.py | 1 | 3810 | # -*- encoding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2015 Haltu Oy, http://haltu.fi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(help_text='Required. 2048 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, max_length=2048, verbose_name='username', validators=[django.core.validators.RegexValidator(b'^[\\w.@+-]+$', 'Enter a valid username.', b'invalid')])),
('first_name', models.CharField(max_length=2048, verbose_name='first name', blank=True)),
('last_name', models.CharField(max_length=2048, verbose_name='last name', blank=True)),
('email', models.EmailField(max_length=75, verbose_name='email address', blank=True)),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of his/her group.', verbose_name='groups')),
('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
bases=(models.Model,),
),
]
| mit | 8,547,131,683,336,520,000 | 59.47619 | 295 | 0.681102 | false |
hatchery/Genepool2 | genes/ubuntu/traits.py | 2 | 1762 | #!/usr/bin/env python
from functools import wraps
from typing import Callable, Dict, List, Optional, Tuple, TypeVar
from genes.debian.traits import is_debian
from genes.lib.logging import log_error, log_warn
from genes.lib.traits import ErrorLevel
T = TypeVar('T')
def is_ubuntu(versions: Optional[List[str]] = None) -> bool:
"""
An alias for is_debian that uses Ubuntu as the distro.
This function returns True if the OS is Ubuntu and the version is in
the list of versions
:param versions: a list of acceptable versions for Ubuntu.
:return: bool, True if platform is Ubuntu.
"""
return is_debian(versions=versions, distro_name='ubuntu')
def only_ubuntu(error_level: ErrorLevel = ErrorLevel.warn, versions: Optional[List[str]] = None):
"""
Wrap a function and only execute it if the system is ubuntu of the version specified
:param error_level: how to handle execution for systems that aren't ubuntu
:param versions: versions of ubuntu which are allowable
:return: a wrapper function that wraps functions in conditional execution
"""
msg = "This function can only be run on Ubuntu: "
def wrapper(func: Callable[[Tuple, Dict], T]) -> Callable:
@wraps(func)
def run_if_ubuntu(*args: Tuple, **kwargs: Dict) -> Optional[T]:
if is_ubuntu(versions=versions):
return func(*args, **kwargs)
elif error_level == ErrorLevel.warn:
log_warn(msg, func.__name__)
return None
elif error_level == ErrorLevel.error:
log_error(msg, func.__name__)
raise OSError(msg, func.__name__)
else:
return None
return run_if_ubuntu
return wrapper
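# Usage sketch (illustrative only; the function and version list are
# hypothetical):
#
#   @only_ubuntu(error_level=ErrorLevel.error, versions=['16.04', '18.04'])
#   def install_packages():
#       ...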
| mit | 594,642,152,192,244,600 | 35.708333 | 97 | 0.64983 | false |
dschopf/potfit | util/kim/kim_compare_lammps/potfit.py | 3 | 4937 | ################################################################
#
# kim_compare_lammps
#
################################################################
#
# Copyright 2018 the potfit development team
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the “Software”), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall
# be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE
# AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# https://www.potfit.net/
#
#################################################################
import logging
import math
import os
import random
import sys
from subprocess import run
logger = logging.getLogger('kim_compare_lammps')
class potfit_run(object):
def __init__(self, binary, model, config, directory):
self.binary = binary
self.config = config
self.directory = directory
self.model = model
self.energy = None
self.forces = []
self.__write_parameter_file()
self.__write_config_file()
self.__write_potential_file()
def __write_parameter_file(self):
filename = os.path.join(self.directory, 'param')
with open(filename, 'w') as f:
f.write('ntypes {}\n'.format(self.config.num_atom_types))
f.write('config config\n')
f.write('startpot pot\n')
f.write('endpot pot.end\n')
f.write('tempfile pot.temp\n')
f.write('output_prefix out\n')
f.write('eng_weight 1\n')
f.write('kim_model_name {}\n'.format(self.model['NAME']))
f.write('kim_model_params use_default\n')
def __write_config_file(self):
filename = os.path.join(self.directory, 'config')
with open(filename, 'w') as f:
f.write('#N {} 1\n'.format(self.config.num_atoms()))
f.write('#C {}\n'.format(' '.join(self.model['SPECIES']) if self.config.num_atom_types > 1 else self.model['SPECIES']))
f.write('#X {:.08f} {:.08f} {:.08f}\n'.format(*[x * self.config.scale[0] for x in self.config.box[0]]))
f.write('#Y {:.08f} {:.08f} {:.08f}\n'.format(*[x * self.config.scale[1] for x in self.config.box[1]]))
f.write('#Z {:.08f} {:.08f} {:.08f}\n'.format(*[x * self.config.scale[2] for x in self.config.box[2]]))
f.write('#E 0.0\n')
f.write('#F\n')
for i in range(len(self.config.atoms)):
f.write('{:5}\t{:2.8f}\t{:2.8f}\t{:2.8f}'.format(self.config.atom_types[i] - 1, self.config.atoms[i][0], self.config.atoms[i][1], self.config.atoms[i][2]))
f.write('\t0.0\t0.0\t0.0\n')
def __write_potential_file(self):
filename = os.path.join(self.directory, 'pot')
with open(filename, 'w') as f:
f.write('#F 5 1\n')
f.write('#C {}\n'.format(' '.join(self.model['SPECIES']) if self.config.num_atom_types > 1 else self.model['SPECIES']))
f.write('#E\n\n')
def run(self):
res = run([self.binary, 'param'], capture_output=True, cwd=self.directory)
if res.returncode:
logger.error('Error running potfit: {}'.format(res.returncode))
print(self.directory)
print(res.args)
print(res.stdout.decode())
print(res.stderr.decode())
raise Exception('Error running potfit')
filename = os.path.join(self.directory, 'out.energy')
with open(filename, 'r') as f:
for line in f:
if line[0] == '#':
continue
items = line.split()
if items[0] == '0':
self.energy = float(items[3])
break
filename = os.path.join(self.directory, 'out.force')
with open(filename, 'r') as f:
for line in f:
if line[0] == '#':
continue
items = line.split()
atom = items[1].split(':')
atom_idx = int(atom[0])
if atom_idx >= len(self.forces):
self.forces.append([999, 999, 999])
if atom[1] == 'x':
atom_coord = 0
elif atom[1] == 'y':
atom_coord = 1
else:
atom_coord = 2
self.forces[atom_idx][atom_coord] = float(items[4])
return self.energy, self.forces
def cleanup(self):
pass
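  # Usage sketch (illustrative only; the binary path, model dict and
  # config object are hypothetical):
  #
  #   model = {'NAME': 'ex_model_Ar_P_Morse', 'SPECIES': ['Ar']}
  #   runner = potfit_run('/usr/local/bin/potfit_kim', model, config, '/tmp/run0')
  #   energy, forces = runner.run()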
if __name__ == '__main__':
print('Please do not run this script directly, use kim_compare_potfit.py instead!')
sys.exit(-1)
| gpl-2.0 | 3,595,333,296,902,550,500 | 36.340909 | 163 | 0.604991 | false |
FlintHill/SUAS-Competition | env/lib/python3.7/site-packages/pip/_vendor/chardet/__init__.py | 270 | 1559 | ######################## BEGIN LICENSE BLOCK ########################
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .compat import PY2, PY3
from .universaldetector import UniversalDetector
from .version import __version__, VERSION
def detect(byte_str):
"""
Detect the encoding of the given byte string.
:param byte_str: The byte sequence to examine.
:type byte_str: ``bytes`` or ``bytearray``
"""
if not isinstance(byte_str, bytearray):
if not isinstance(byte_str, bytes):
raise TypeError('Expected object of type bytes or bytearray, got: '
'{0}'.format(type(byte_str)))
else:
byte_str = bytearray(byte_str)
detector = UniversalDetector()
detector.feed(byte_str)
return detector.close()
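# Usage sketch (illustrative only): the result is a dict with 'encoding'
# and 'confidence' keys, e.g.
#
#   result = detect(u'\u4f60\u597d'.encode('utf-8'))
#   # -> something like {'encoding': 'utf-8', 'confidence': 0.99, ...}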
| mit | -7,928,416,710,489,227,000 | 38.974359 | 79 | 0.655548 | false |
antoviaque/edx-platform | common/djangoapps/dark_lang/migrations/0001_initial.py | 86 | 1203 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='DarkLangConfig',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
('released_languages', models.TextField(help_text=b'A comma-separated list of language codes to release to the public.', blank=True)),
('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),
],
options={
'ordering': ('-change_date',),
'abstract': False,
},
),
]
| agpl-3.0 | 2,883,550,345,770,450,400 | 39.1 | 178 | 0.61596 | false |
beezee/GAE-Django-site | djangotoolbox/db/base.py | 46 | 3591 | import datetime
from django.db.backends import BaseDatabaseFeatures, BaseDatabaseOperations, \
BaseDatabaseWrapper, BaseDatabaseClient, BaseDatabaseValidation, \
BaseDatabaseIntrospection
from .creation import NonrelDatabaseCreation
class NonrelDatabaseFeatures(BaseDatabaseFeatures):
can_return_id_from_insert = True
supports_unspecified_pk = False
supports_regex_backreferencing = True
supports_date_lookup_using_string = False
supports_timezones = False
supports_joins = False
distinguishes_insert_from_update = False
supports_select_related = False
supports_deleting_related_objects = False
string_based_auto_field = False
supports_dicts = False
def _supports_transactions(self):
return False
class NonrelDatabaseOperations(BaseDatabaseOperations):
def __init__(self, connection):
self.connection = connection
super(NonrelDatabaseOperations, self).__init__()
def quote_name(self, name):
return name
def value_to_db_date(self, value):
# value is a date here, no need to check it
return value
def value_to_db_datetime(self, value):
# value is a datetime here, no need to check it
return value
def value_to_db_time(self, value):
# value is a time here, no need to check it
return value
def prep_for_like_query(self, value):
return value
def prep_for_iexact_query(self, value):
return value
def check_aggregate_support(self, aggregate):
from django.db.models.sql.aggregates import Count
if not isinstance(aggregate, Count):
raise NotImplementedError("This database does not support %r "
"aggregates" % type(aggregate))
def year_lookup_bounds(self, value):
return [datetime.datetime(value, 1, 1, 0, 0, 0, 0),
datetime.datetime(value+1, 1, 1, 0, 0, 0, 0)]
def pk_default_value(self):
return None
def value_to_db_auto(self, value):
"""
Transform a value to an object compatible with the AutoField required
by the backend driver for auto columns.
"""
if self.connection.features.string_based_auto_field:
if value is None:
return None
return unicode(value)
return super(NonrelDatabaseOperations, self).value_to_db_auto(value)
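    # Illustrative sketch (not part of this module): a backend whose
    # datastore issues string keys would opt in via its features class,
    # e.g.
    #
    #   class MyBackendFeatures(NonrelDatabaseFeatures):
    #       string_based_auto_field = True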
class NonrelDatabaseClient(BaseDatabaseClient):
pass
class NonrelDatabaseValidation(BaseDatabaseValidation):
pass
class NonrelDatabaseIntrospection(BaseDatabaseIntrospection):
def table_names(self):
"""Returns a list of names of all tables that exist in the database."""
return self.django_table_names()
class FakeCursor(object):
def __getattribute__(self, name):
raise NotImplementedError('Cursors not supported')
def __setattr__(self, name, value):
raise NotImplementedError('Cursors not supported')
class NonrelDatabaseWrapper(BaseDatabaseWrapper):
# These fake operators are required for SQLQuery.as_sql() support.
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': 'LIKE %s',
'icontains': 'LIKE UPPER(%s)',
'regex': '~ %s',
'iregex': '~* %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE %s',
'endswith': 'LIKE %s',
'istartswith': 'LIKE UPPER(%s)',
'iendswith': 'LIKE UPPER(%s)',
}
def _cursor(self):
return FakeCursor()
| bsd-3-clause | -2,836,708,845,408,063,000 | 30.778761 | 79 | 0.636313 | false |
nikste/tensorflow | tensorflow/tensorboard/scripts/generate_testdata.py | 39 | 7626 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate some standard test data for debugging TensorBoard.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import bisect
import math
import os
import os.path
import random
import shutil
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import summary_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from tensorflow.python.summary.writer import writer as writer_lib
tf.flags.DEFINE_string("target", None, """The directoy where serialized data
will be written""")
flags.DEFINE_boolean("overwrite", False, """Whether to remove and overwrite
TARGET if it already exists.""")
FLAGS = flags.FLAGS
# Hardcode a start time and reseed so script always generates the same data.
_start_time = 0
random.seed(0)
def _MakeHistogramBuckets():
v = 1E-12
buckets = []
neg_buckets = []
while v < 1E20:
buckets.append(v)
neg_buckets.append(-v)
v *= 1.1
# Should include DBL_MAX, but won't bother for test data.
return neg_buckets[::-1] + [0] + buckets
def _MakeHistogram(values):
"""Convert values into a histogram proto using logic from histogram.cc."""
limits = _MakeHistogramBuckets()
counts = [0] * len(limits)
for v in values:
idx = bisect.bisect_left(limits, v)
counts[idx] += 1
limit_counts = [(limits[i], counts[i]) for i in xrange(len(limits))
if counts[i]]
bucket_limit = [lc[0] for lc in limit_counts]
bucket = [lc[1] for lc in limit_counts]
sum_sq = sum(v * v for v in values)
return summary_pb2.HistogramProto(
min=min(values),
max=max(values),
num=len(values),
sum=sum(values),
sum_squares=sum_sq,
bucket_limit=bucket_limit,
bucket=bucket)
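# Illustrative sketch, added for clarity and not part of the original script:
# shows how _MakeHistogram condenses raw samples. The sample values below are
# arbitrary assumptions.
def _DemoMakeHistogram():
  """Return a HistogramProto built from a few hand-picked values."""
  values = [0.5, 1.0, 1.0, 2.5]
  histo = _MakeHistogram(values)
  # histo.num == 4, histo.min == 0.5, histo.max == 2.5; only buckets that
  # received at least one sample appear in bucket_limit/bucket.
  return histo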
def WriteScalarSeries(writer, tag, f, n=5):
"""Write a series of scalar events to writer, using f to create values."""
step = 0
wall_time = _start_time
for i in xrange(n):
v = f(i)
value = summary_pb2.Summary.Value(tag=tag, simple_value=v)
summary = summary_pb2.Summary(value=[value])
event = event_pb2.Event(wall_time=wall_time, step=step, summary=summary)
writer.add_event(event)
step += 1
wall_time += 10
def WriteHistogramSeries(writer, tag, mu_sigma_tuples, n=20):
"""Write a sequence of normally distributed histograms to writer."""
step = 0
wall_time = _start_time
for [mean, stddev] in mu_sigma_tuples:
data = [random.normalvariate(mean, stddev) for _ in xrange(n)]
histo = _MakeHistogram(data)
summary = summary_pb2.Summary(
value=[summary_pb2.Summary.Value(
tag=tag, histo=histo)])
event = event_pb2.Event(wall_time=wall_time, step=step, summary=summary)
writer.add_event(event)
step += 10
wall_time += 100
def WriteImageSeries(writer, tag, n_images=1):
"""Write a few dummy images to writer."""
step = 0
session = session_lib.Session()
p = array_ops.placeholder("uint8", (1, 4, 4, 3))
s = logging_ops.image_summary(tag, p)
for _ in xrange(n_images):
im = np.random.random_integers(0, 255, (1, 4, 4, 3))
summ = session.run(s, feed_dict={p: im})
writer.add_summary(summ, step)
step += 20
session.close()
def WriteAudioSeries(writer, tag, n_audio=1):
"""Write a few dummy audio clips to writer."""
step = 0
session = session_lib.Session()
min_frequency_hz = 440
max_frequency_hz = 880
sample_rate = 4000
  duration_frames = int(sample_rate * 0.5)  # 0.5 seconds, as an integer frame count.
frequencies_per_run = 1
num_channels = 2
p = array_ops.placeholder("float32", (frequencies_per_run, duration_frames,
num_channels))
s = logging_ops.audio_summary(tag, p, sample_rate)
for _ in xrange(n_audio):
# Generate a different frequency for each channel to show stereo works.
frequencies = np.random.random_integers(
min_frequency_hz,
max_frequency_hz,
size=(frequencies_per_run, num_channels))
tiled_frequencies = np.tile(frequencies, (1, duration_frames))
tiled_increments = np.tile(
np.arange(0, duration_frames),
(num_channels, 1)).T.reshape(1, duration_frames * num_channels)
tones = np.sin(2.0 * np.pi * tiled_frequencies * tiled_increments /
sample_rate)
tones = tones.reshape(frequencies_per_run, duration_frames, num_channels)
summ = session.run(s, feed_dict={p: tones})
writer.add_summary(summ, step)
step += 20
session.close()
def GenerateTestData(path):
"""Generates the test data directory."""
run1_path = os.path.join(path, "run1")
os.makedirs(run1_path)
writer1 = writer_lib.FileWriter(run1_path)
WriteScalarSeries(writer1, "foo/square", lambda x: x * x)
WriteScalarSeries(writer1, "bar/square", lambda x: x * x)
WriteScalarSeries(writer1, "foo/sin", math.sin)
WriteScalarSeries(writer1, "foo/cos", math.cos)
WriteHistogramSeries(writer1, "histo1", [[0, 1], [0.3, 1], [0.5, 1], [0.7, 1],
[1, 1]])
WriteImageSeries(writer1, "im1")
WriteImageSeries(writer1, "im2")
WriteAudioSeries(writer1, "au1")
run2_path = os.path.join(path, "run2")
os.makedirs(run2_path)
writer2 = writer_lib.FileWriter(run2_path)
WriteScalarSeries(writer2, "foo/square", lambda x: x * x * 2)
WriteScalarSeries(writer2, "bar/square", lambda x: x * x * 3)
WriteScalarSeries(writer2, "foo/cos", lambda x: math.cos(x) * 2)
WriteHistogramSeries(writer2, "histo1", [[0, 2], [0.3, 2], [0.5, 2], [0.7, 2],
[1, 2]])
WriteHistogramSeries(writer2, "histo2", [[0, 1], [0.3, 1], [0.5, 1], [0.7, 1],
[1, 1]])
WriteImageSeries(writer2, "im1")
WriteAudioSeries(writer2, "au2")
graph_def = graph_pb2.GraphDef()
node1 = graph_def.node.add()
node1.name = "a"
node1.op = "matmul"
node2 = graph_def.node.add()
node2.name = "b"
node2.op = "matmul"
node2.input.extend(["a:0"])
writer1.add_graph(graph_def)
node3 = graph_def.node.add()
node3.name = "c"
node3.op = "matmul"
node3.input.extend(["a:0", "b:0"])
writer2.add_graph(graph_def)
writer1.close()
writer2.close()
def main(unused_argv=None):
target = FLAGS.target
if not target:
print("The --target flag is required.")
return -1
if os.path.exists(target):
if FLAGS.overwrite:
if os.path.isdir(target):
shutil.rmtree(target)
else:
os.remove(target)
else:
print("Refusing to overwrite target %s without --overwrite" % target)
return -2
GenerateTestData(target)
if __name__ == "__main__":
app.run()
| apache-2.0 | 6,928,227,767,710,201,000 | 31.589744 | 80 | 0.656176 | false |
Chemcy/vnpy | vn.api/vn.ksotp/vnksotptd/test/tdtest.py | 21 | 4659 | # encoding: UTF-8
import sys
from time import sleep
from PyQt4 import QtGui
from vnksotptd import *
#----------------------------------------------------------------------
def print_dict(d):
"""按照键值打印一个字典"""
for key,value in d.items():
print key + ':' + str(value)
#----------------------------------------------------------------------
def simple_log(func):
"""简单装饰器用于输出函数名"""
def wrapper(*args, **kw):
print ""
print str(func.__name__)
return func(*args, **kw)
return wrapper
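# Illustrative output sketch (an assumption, not asserted anywhere below):
# calling a decorated callback such as onFrontConnected() first prints an
# empty line and then "onFrontConnected" before the wrapped body runs.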
########################################################################
class TestTdApi(TdApi):
"""测试用实例"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
super(TestTdApi, self).__init__()
#----------------------------------------------------------------------
@simple_log
def onFrontConnected(self):
"""服务器连接"""
pass
#----------------------------------------------------------------------
@simple_log
def onFrontDisconnected(self, n):
"""服务器断开"""
print n
#----------------------------------------------------------------------
@simple_log
def onRspError(self, error, n, last):
"""错误"""
print_dict(error)
#----------------------------------------------------------------------
@simple_log
def onRspUserLogin(self, data, error, n, last):
"""登陆回报"""
print_dict(data)
print_dict(error)
self.brokerID = data['BrokerID']
self.userID = data['UserID']
self.frontID = data['FrontID']
self.sessionID = data['SessionID']
#----------------------------------------------------------------------
@simple_log
def onRspUserLogout(self, data, error, n, last):
"""登出回报"""
print_dict(data)
print_dict(error)
#----------------------------------------------------------------------
@simple_log
def onRspQrySettlementInfo(self, data, error, n, last):
"""查询结算信息回报"""
print_dict(data)
print_dict(error)
#----------------------------------------------------------------------
@simple_log
def onRspSettlementInfoConfirm(self, data, error, n, last):
"""确认结算信息回报"""
print_dict(data)
print_dict(error)
#----------------------------------------------------------------------
@simple_log
def onRspQryInstrument(self, data, error, n, last):
"""查询合约回报"""
print_dict(data)
print_dict(error)
print n
print last
#----------------------------------------------------------------------
def main():
"""主测试函数,出现堵塞时可以考虑使用sleep"""
reqid = 0
    # Create the Qt application object, used for the event loop
app = QtGui.QApplication(sys.argv)
    # Create the API object (tested OK)
api = TestTdApi()
    # Create the trader API object in the C++ environment; the argument is the directory for saving .con files (tested OK)
api.createOTPTraderApi('')
    # Set the data-stream retransmission mode (tested OK)
api.subscribePrivateTopic(1)
api.subscribePublicTopic(1)
    # Register the front server address (tested OK)
api.registerFront("tcp://115.238.106.253:18993")
    # Initialize the API and connect to the front server
api.init()
sleep(8)
    # Log in
    loginReq = {}              # create an empty dict
    loginReq['UserID'] = ''    # parameters are passed as dict key/value pairs
    loginReq['Password'] = ''  # key names match the C++ struct member names
loginReq['BrokerID'] = ''
    reqid = reqid + 1          # request ids must stay unique
    i = api.reqUserLogin(loginReq, reqid)  # pass the unique request id
sleep(3)
    # Query instruments (tested OK)
reqid = reqid + 1
i = api.reqQryInstrument({}, reqid)
    ## Query settlement info (tested OK)
#req = {}
#req['BrokerID'] = api.brokerID
#req['InvestorID'] = api.userID
#reqid = reqid + 1
#i = api.reqQrySettlementInfo(req, reqid)
#sleep(0.5)
    ## Confirm settlement info (tested OK)
#req = {}
#req['BrokerID'] = api.brokerID
#req['InvestorID'] = api.userID
#reqid = reqid + 1
#i = api.reqSettlementInfoConfirm(req, reqid)
#sleep(0.5)
    # Run continuously
app.exec_()
if __name__ == '__main__':
main()
| mit | 5,315,591,118,558,592,000 | 25.541401 | 75 | 0.400768 | false |
abhinavp13/IITBX-edx-platform-dev | common/lib/xmodule/xmodule/x_module.py | 2 | 31496 | import logging
import copy
import yaml
import os
from lxml import etree
from collections import namedtuple
from pkg_resources import resource_listdir, resource_string, resource_isdir
from xmodule.modulestore import Location
from xmodule.modulestore.exceptions import ItemNotFoundError
from xblock.core import XBlock, Scope, String, Integer, Float, ModelType
log = logging.getLogger(__name__)
def dummy_track(_event_type, _event):
pass
class LocationField(ModelType):
"""
XBlock field for storing Location values
"""
def from_json(self, value):
"""
Parse the json value as a Location
"""
return Location(value)
def to_json(self, value):
"""
Store the Location as a url string in json
"""
return value.url()
class HTMLSnippet(object):
"""
A base class defining an interface for an object that is able to present an
html snippet, along with associated javascript and css
"""
js = {}
js_module_name = None
css = {}
@classmethod
def get_javascript(cls):
"""
Return a dictionary containing some of the following keys:
coffee: A list of coffeescript fragments that should be compiled and
placed on the page
js: A list of javascript fragments that should be included on the
page
All of these will be loaded onto the page in the CMS
"""
# cdodge: We've moved the xmodule.coffee script from an outside directory into the xmodule area of common
# this means we need to make sure that all xmodules include this dependency which had been previously implicitly
# fulfilled in a different area of code
coffee = cls.js.setdefault('coffee', [])
fragment = resource_string(__name__, 'js/src/xmodule.coffee')
if fragment not in coffee:
coffee.insert(0, fragment)
return cls.js
@classmethod
def get_css(cls):
"""
Return a dictionary containing some of the following keys:
css: A list of css fragments that should be applied to the html
contents of the snippet
sass: A list of sass fragments that should be applied to the html
contents of the snippet
scss: A list of scss fragments that should be applied to the html
contents of the snippet
"""
return cls.css
def get_html(self):
"""
Return the html used to display this snippet
"""
raise NotImplementedError(
"get_html() must be provided by specific modules - not present in {0}"
.format(self.__class__))
class XModuleFields(object):
display_name = String(
display_name="Display Name",
help="This name appears in the horizontal navigation at the top of the page.",
scope=Scope.settings,
default=None
)
# Please note that in order to be compatible with XBlocks more generally,
# the LMS and CMS shouldn't be using this field. It's only for internal
# consumption by the XModules themselves
location = LocationField(
display_name="Location",
help="This is the location id for the XModule.",
scope=Scope.content,
default=Location(None),
)
class XModule(XModuleFields, HTMLSnippet, XBlock):
''' Implements a generic learning module.
Subclasses must at a minimum provide a definition for get_html in order
to be displayed to users.
See the HTML module for a simple example.
'''
# The default implementation of get_icon_class returns the icon_class
# attribute of the class
#
# This attribute can be overridden by subclasses, and
# the function can also be overridden if the icon class depends on the data
# in the module
icon_class = 'other'
def __init__(self, runtime, descriptor, model_data):
'''
Construct a new xmodule
runtime: An XBlock runtime allowing access to external resources
descriptor: the XModuleDescriptor that this module is an instance of.
model_data: A dictionary-like object that maps field names to values
for those fields.
'''
super(XModule, self).__init__(runtime, model_data)
self._model_data = model_data
self.system = runtime
self.descriptor = descriptor
self.url_name = self.location.name
self.category = self.location.category
self._loaded_children = None
@property
def id(self):
return self.location.url()
@property
def display_name_with_default(self):
'''
Return a display name for the module: use display_name if defined in
metadata, otherwise convert the url name.
'''
name = self.display_name
if name is None:
name = self.url_name.replace('_', ' ')
return name
def get_children(self):
'''
Return module instances for all the children of this module.
'''
if self._loaded_children is None:
child_descriptors = self.get_child_descriptors()
children = [self.system.get_module(descriptor) for descriptor in child_descriptors]
# get_module returns None if the current user doesn't have access
# to the location.
self._loaded_children = [c for c in children if c is not None]
return self._loaded_children
def __unicode__(self):
return '<x_module(id={0})>'.format(self.id)
def get_child_descriptors(self):
'''
Returns the descriptors of the child modules
Overriding this changes the behavior of get_children and
anything that uses get_children, such as get_display_items.
This method will not instantiate the modules of the children
unless absolutely necessary, so it is cheaper to call than get_children
These children will be the same children returned by the
descriptor unless descriptor.has_dynamic_children() is true.
'''
return self.descriptor.get_children()
def get_child_by(self, selector):
"""
Return a child XModuleDescriptor with the specified url_name, if it exists, and None otherwise.
"""
for child in self.get_children():
if selector(child):
return child
return None
def get_display_items(self):
'''
Returns a list of descendent module instances that will display
immediately inside this module.
'''
items = []
for child in self.get_children():
items.extend(child.displayable_items())
return items
def displayable_items(self):
'''
Returns list of displayable modules contained by this module. If this
module is visible, should return [self].
'''
return [self]
def get_icon_class(self):
'''
Return a css class identifying this module in the context of an icon
'''
return self.icon_class
# Functions used in the LMS
def get_score(self):
"""
Score the student received on the problem, or None if there is no
score.
Returns:
dictionary
{'score': integer, from 0 to get_max_score(),
'total': get_max_score()}
NOTE (vshnayder): not sure if this was the intended return value, but
that's what it's doing now. I suspect that we really want it to just
return a number. Would need to change (at least) capa and
modx_dispatch to match if we did that.
"""
return None
def max_score(self):
''' Maximum score. Two notes:
* This is generic; in abstract, a problem could be 3/5 points on one
randomization, and 5/7 on another
* In practice, this is a Very Bad Idea, and (a) will break some code
in place (although that code should get fixed), and (b) break some
analytics we plan to put in place.
'''
return None
def get_progress(self):
''' Return a progress.Progress object that represents how far the
student has gone in this module. Must be implemented to get correct
progress tracking behavior in nesting modules like sequence and
vertical.
If this module has no notion of progress, return None.
'''
return None
def handle_ajax(self, _dispatch, _data):
''' dispatch is last part of the URL.
data is a dictionary-like object with the content of the request'''
return ""
def policy_key(location):
"""
Get the key for a location in a policy file. (Since the policy file is
specific to a course, it doesn't need the full location url).
"""
return '{cat}/{name}'.format(cat=location.category, name=location.name)
Template = namedtuple("Template", "metadata data children")
class ResourceTemplates(object):
@classmethod
def templates(cls):
"""
Returns a list of Template objects that describe possible templates that can be used
to create a module of this type.
If no templates are provided, there will be no way to create a module of
this type
Expects a class attribute template_dir_name that defines the directory
inside the 'templates' resource directory to pull templates from
"""
templates = []
dirname = os.path.join('templates', cls.template_dir_name)
if not resource_isdir(__name__, dirname):
log.warning("No resource directory {dir} found when loading {cls_name} templates".format(
dir=dirname,
cls_name=cls.__name__,
))
return []
for template_file in resource_listdir(__name__, dirname):
if not template_file.endswith('.yaml'):
log.warning("Skipping unknown template file %s" % template_file)
continue
template_content = resource_string(__name__, os.path.join(dirname, template_file))
template = yaml.safe_load(template_content)
templates.append(Template(**template))
return templates
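    # Illustrative template file (hypothetical, not shipped with this class):
    # a templates/<template_dir_name>/empty.yaml containing
    #
    #   metadata:
    #       display_name: Empty
    #   data: ''
    #   children: []
    #
    # would be read by yaml.safe_load above and wrapped into
    # Template(metadata={...}, data='', children=[]).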
class XModuleDescriptor(XModuleFields, HTMLSnippet, ResourceTemplates, XBlock):
"""
An XModuleDescriptor is a specification for an element of a course. This
could be a problem, an organizational element (a group of content), or a
segment of video, for example.
XModuleDescriptors are independent and agnostic to the current student state
on a problem. They handle the editing interface used by instructors to
create a problem, and can generate XModules (which do know about student
state).
"""
entry_point = "xmodule.v1"
module_class = XModule
# Attributes for inspection of the descriptor
# This indicates whether the xmodule is a problem-type.
# It should respond to max_score() and grade(). It can be graded or ungraded
# (like a practice problem).
has_score = False
# A list of descriptor attributes that must be equal for the descriptors to
# be equal
equality_attributes = ('_model_data', 'location')
# Name of resource directory to load templates from
template_dir_name = "default"
# Class level variable
# True if this descriptor always requires recalculation of grades, for
    # example if the score can change via an external service, not just when the
# student interacts with the module on the page. A specific example is
# FoldIt, which posts grade-changing updates through a separate API.
always_recalculate_grades = False
# VS[compat]. Backwards compatibility code that can go away after
# importing 2012 courses.
# A set of metadata key conversions that we want to make
metadata_translations = {
'slug': 'url_name',
'name': 'display_name',
}
# ============================= STRUCTURAL MANIPULATION ===================
def __init__(self, *args, **kwargs):
"""
Construct a new XModuleDescriptor. The only required arguments are the
system, used for interaction with external resources, and the
definition, which specifies all the data needed to edit and display the
problem (but none of the associated metadata that handles recordkeeping
around the problem).
This allows for maximal flexibility to add to the interface while
preserving backwards compatibility.
runtime: A DescriptorSystem for interacting with external resources
model_data: A dictionary-like object that maps field names to values
for those fields.
XModuleDescriptor.__init__ takes the same arguments as xblock.core:XBlock.__init__
"""
super(XModuleDescriptor, self).__init__(*args, **kwargs)
self.system = self.runtime
self.url_name = self.location.name
self.category = self.location.category
self._child_instances = None
@property
def id(self):
return self.location.url()
@property
def display_name_with_default(self):
'''
Return a display name for the module: use display_name if defined in
metadata, otherwise convert the url name.
'''
name = self.display_name
if name is None:
name = self.url_name.replace('_', ' ')
return name
def get_required_module_descriptors(self):
"""Returns a list of XModuleDescritpor instances upon which this module depends, but are
not children of this module"""
return []
def get_children(self):
"""Returns a list of XModuleDescriptor instances for the children of
this module"""
if not self.has_children:
return []
if self._child_instances is None:
self._child_instances = []
for child_loc in self.children:
try:
child = self.system.load_item(child_loc)
except ItemNotFoundError:
log.exception('Unable to load item {loc}, skipping'.format(loc=child_loc))
continue
self._child_instances.append(child)
return self._child_instances
def get_child_by(self, selector):
"""
Return a child XModuleDescriptor with the specified url_name, if it exists, and None otherwise.
"""
for child in self.get_children():
if selector(child):
return child
return None
def xmodule(self, system):
"""
Returns an XModule.
system: Module system
"""
return self.module_class(
system,
self,
system.xblock_model_data(self),
)
def has_dynamic_children(self):
"""
Returns True if this descriptor has dynamic children for a given
student when the module is created.
Returns False if the children of this descriptor are the same
children that the module will return for any student.
"""
return False
# ================================= JSON PARSING ===========================
@staticmethod
def load_from_json(json_data, system, default_class=None):
"""
This method instantiates the correct subclass of XModuleDescriptor based
on the contents of json_data.
json_data must contain a 'location' element, and must be suitable to be
passed into the subclasses `from_json` method as model_data
"""
class_ = XModuleDescriptor.load_class(
json_data['location']['category'],
default_class
)
return class_.from_json(json_data, system)
@classmethod
def from_json(cls, json_data, system):
"""
Creates an instance of this descriptor from the supplied json_data.
This may be overridden by subclasses
json_data: A json object with the keys 'definition' and 'metadata',
definition: A json object with the keys 'data' and 'children'
data: A json value
children: A list of edX Location urls
metadata: A json object with any keys
This json_data is transformed to model_data using the following rules:
1) The model data contains all of the fields from metadata
2) The model data contains the 'children' array
3) If 'definition.data' is a json object, model data contains all of its fields
Otherwise, it contains the single field 'data'
4) Any value later in this list overrides a value earlier in this list
system: A DescriptorSystem for interacting with external resources
"""
model_data = {}
for key, value in json_data.get('metadata', {}).items():
model_data[cls._translate(key)] = value
model_data.update(json_data.get('metadata', {}))
definition = json_data.get('definition', {})
if 'children' in definition:
model_data['children'] = definition['children']
if 'data' in definition:
if isinstance(definition['data'], dict):
model_data.update(definition['data'])
else:
model_data['data'] = definition['data']
model_data['location'] = json_data['location']
return cls(system, model_data)
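    # Sketch of the transformation above on hypothetical input:
    #   json_data = {'location': <url>,
    #                'metadata': {'display_name': 'Week 1'},
    #                'definition': {'data': '<p/>', 'children': []}}
    # yields model_data with the keys 'display_name', 'data', 'children' and
    # 'location', which is then passed to cls(system, model_data).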
@classmethod
def _translate(cls, key):
'VS[compat]'
return cls.metadata_translations.get(key, key)
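    # For example, _translate('slug') -> 'url_name' and _translate('name') ->
    # 'display_name'; keys absent from metadata_translations pass through
    # unchanged.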
# ================================= XML PARSING ============================
@staticmethod
def load_from_xml(xml_data,
system,
org=None,
course=None,
default_class=None):
"""
This method instantiates the correct subclass of XModuleDescriptor based
on the contents of xml_data.
xml_data must be a string containing valid xml
system is an XMLParsingSystem
org and course are optional strings that will be used in the generated
module's url identifiers
"""
class_ = XModuleDescriptor.load_class(
etree.fromstring(xml_data).tag,
default_class
)
# leave next line, commented out - useful for low-level debugging
# log.debug('[XModuleDescriptor.load_from_xml] tag=%s, class_=%s' % (
# etree.fromstring(xml_data).tag,class_))
return class_.from_xml(xml_data, system, org, course)
@classmethod
def from_xml(cls, xml_data, system, org=None, course=None):
"""
Creates an instance of this descriptor from the supplied xml_data.
This may be overridden by subclasses
xml_data: A string of xml that will be translated into data and children
for this module
system is an XMLParsingSystem
org and course are optional strings that will be used in the generated
module's url identifiers
"""
raise NotImplementedError(
'Modules must implement from_xml to be parsable from xml')
def export_to_xml(self, resource_fs):
"""
Returns an xml string representing this module, and all modules
underneath it. May also write required resources out to resource_fs
Assumes that modules have single parentage (that no module appears twice
in the same course), and that it is thus safe to nest modules as xml
children as appropriate.
The returned XML should be able to be parsed back into an identical
XModuleDescriptor using the from_xml method with the same system, org,
and course
"""
raise NotImplementedError(
'Modules must implement export_to_xml to enable xml export')
# =============================== Testing ==================================
def get_sample_state(self):
"""
Return a list of tuples of instance_state, shared_state. Each tuple
defines a sample case for this module
"""
return [('{}', '{}')]
# =============================== BUILTIN METHODS ==========================
def __eq__(self, other):
eq = (self.__class__ == other.__class__ and
all(getattr(self, attr, None) == getattr(other, attr, None)
for attr in self.equality_attributes))
return eq
def __repr__(self):
return ("{class_}({system!r}, location={location!r},"
" model_data={model_data!r})".format(
class_=self.__class__.__name__,
system=self.system,
location=self.location,
model_data=self._model_data,
))
@property
def non_editable_metadata_fields(self):
"""
Return the list of fields that should not be editable in Studio.
When overriding, be sure to append to the superclasses' list.
"""
# We are not allowing editing of xblock tag and name fields at this time (for any component).
return [XBlock.tags, XBlock.name]
@property
def editable_metadata_fields(self):
"""
Returns the metadata fields to be edited in Studio. These are fields with scope `Scope.settings`.
Can be limited by extending `non_editable_metadata_fields`.
"""
inherited_metadata = getattr(self, '_inherited_metadata', {})
inheritable_metadata = getattr(self, '_inheritable_metadata', {})
metadata_fields = {}
for field in self.fields:
if field.scope != Scope.settings or field in self.non_editable_metadata_fields:
continue
inheritable = False
value = getattr(self, field.name)
default_value = field.default
explicitly_set = field.name in self._model_data
if field.name in inheritable_metadata:
inheritable = True
default_value = field.from_json(inheritable_metadata.get(field.name))
if field.name in inherited_metadata:
explicitly_set = False
# We support the following editors:
# 1. A select editor for fields with a list of possible values (includes Booleans).
# 2. Number editors for integers and floats.
# 3. A generic string editor for anything else (editing JSON representation of the value).
editor_type = "Generic"
values = [] if field.values is None else copy.deepcopy(field.values)
if isinstance(values, tuple):
values = list(values)
if isinstance(values, list):
if len(values) > 0:
editor_type = "Select"
for index, choice in enumerate(values):
json_choice = copy.deepcopy(choice)
if isinstance(json_choice, dict) and 'value' in json_choice:
json_choice['value'] = field.to_json(json_choice['value'])
else:
json_choice = field.to_json(json_choice)
values[index] = json_choice
elif isinstance(field, Integer):
editor_type = "Integer"
elif isinstance(field, Float):
editor_type = "Float"
metadata_fields[field.name] = {'field_name': field.name,
'type': editor_type,
'display_name': field.display_name,
'value': field.to_json(value),
'options': values,
'default_value': field.to_json(default_value),
'inheritable': inheritable,
'explicitly_set': explicitly_set,
'help': field.help}
return metadata_fields
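    # Shape of a single returned entry (illustrative values only):
    #   {'field_name': 'display_name', 'type': 'Generic', 'display_name': ...,
    #    'value': ..., 'options': [], 'default_value': None,
    #    'inheritable': False, 'explicitly_set': False, 'help': ...}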
class DescriptorSystem(object):
def __init__(self, load_item, resources_fs, error_tracker, **kwargs):
"""
load_item: Takes a Location and returns an XModuleDescriptor
resources_fs: A Filesystem object that contains all of the
resources needed for the course
error_tracker: A hook for tracking errors in loading the descriptor.
Used for example to get a list of all non-fatal problems on course
load, and display them to the user.
A function of (error_msg). errortracker.py provides a
handy make_error_tracker() function.
Patterns for using the error handler:
try:
x = access_some_resource()
check_some_format(x)
except SomeProblem as err:
msg = 'Grommet {0} is broken: {1}'.format(x, str(err))
log.warning(msg) # don't rely on tracker to log
# NOTE: we generally don't want content errors logged as errors
self.system.error_tracker(msg)
# work around
                return "Oops, couldn't load grommet"
OR, if not in an exception context:
if not check_something(thingy):
msg = "thingy {0} is broken".format(thingy)
log.critical(msg)
self.system.error_tracker(msg)
NOTE: To avoid duplication, do not call the tracker on errors
that you're about to re-raise---let the caller track them.
"""
self.load_item = load_item
self.resources_fs = resources_fs
self.error_tracker = error_tracker
class XMLParsingSystem(DescriptorSystem):
def __init__(self, load_item, resources_fs, error_tracker, process_xml, policy, **kwargs):
"""
load_item, resources_fs, error_tracker: see DescriptorSystem
policy: a policy dictionary for overriding xml metadata
process_xml: Takes an xml string, and returns a XModuleDescriptor
created from that xml
"""
DescriptorSystem.__init__(self, load_item, resources_fs, error_tracker,
**kwargs)
self.process_xml = process_xml
self.policy = policy
class ModuleSystem(object):
'''
This is an abstraction such that x_modules can function independent
of the courseware (e.g. import into other types of courseware, LMS,
or if we want to have a sandbox server for user-contributed content)
ModuleSystem objects are passed to x_modules to provide access to system
functionality.
Note that these functions can be closures over e.g. a django request
and user, or other environment-specific info.
'''
def __init__(self,
ajax_url,
track_function,
get_module,
render_template,
replace_urls,
xblock_model_data,
user=None,
filestore=None,
debug=False,
xqueue=None,
publish=None,
node_path="",
anonymous_student_id='',
course_id=None,
open_ended_grading_interface=None,
s3_interface=None,
cache=None,
can_execute_unsafe_code=None,
):
'''
Create a closure around the system environment.
ajax_url - the url where ajax calls to the encapsulating module go.
track_function - function of (event_type, event), intended for logging
or otherwise tracking the event.
TODO: Not used, and has inconsistent args in different
files. Update or remove.
get_module - function that takes a descriptor and returns a corresponding
module instance object. If the current user does not have
access to that location, returns None.
render_template - a function that takes (template_file, context), and
returns rendered html.
user - The user to base the random number generator seed off of for this
request
filestore - A filestore ojbect. Defaults to an instance of OSFS based
at settings.DATA_DIR.
xqueue - Dict containing XqueueInterface object, as well as parameters
for the specific StudentModule:
xqueue = {'interface': XQueueInterface object,
'callback_url': Callback into the LMS,
'queue_name': Target queuename in Xqueue}
replace_urls - TEMPORARY - A function like static_replace.replace_urls
that capa_module can use to fix up the static urls in
ajax results.
anonymous_student_id - Used for tracking modules with student id
course_id - the course_id containing this module
publish(event) - A function that allows XModules to publish events (such as grade changes)
xblock_model_data - A dict-like object containing the all data available to this
xblock
cache - A cache object with two methods:
.get(key) returns an object from the cache or None.
.set(key, value, timeout_secs=None) stores a value in the cache with a timeout.
can_execute_unsafe_code - A function returning a boolean, whether or
not to allow the execution of unsafe, unsandboxed code.
'''
self.ajax_url = ajax_url
self.xqueue = xqueue
self.track_function = track_function
self.filestore = filestore
self.get_module = get_module
self.render_template = render_template
self.DEBUG = self.debug = debug
self.seed = user.id if user is not None else 0
self.replace_urls = replace_urls
self.node_path = node_path
self.anonymous_student_id = anonymous_student_id
self.course_id = course_id
self.user_is_staff = user is not None and user.is_staff
self.xblock_model_data = xblock_model_data
if publish is None:
publish = lambda e: None
self.publish = publish
self.open_ended_grading_interface = open_ended_grading_interface
self.s3_interface = s3_interface
self.cache = cache or DoNothingCache()
self.can_execute_unsafe_code = can_execute_unsafe_code or (lambda: False)
def get(self, attr):
''' provide uniform access to attributes (like etree).'''
return self.__dict__.get(attr)
def set(self, attr, val):
'''provide uniform access to attributes (like etree)'''
self.__dict__[attr] = val
def __repr__(self):
return repr(self.__dict__)
def __str__(self):
return str(self.__dict__)
class DoNothingCache(object):
"""A duck-compatible object to use in ModuleSystem when there's no cache."""
def get(self, _key):
return None
def set(self, key, value, timeout=None):
pass
| agpl-3.0 | -8,704,171,430,949,502,000 | 35.243959 | 120 | 0.596171 | false |
akosyakov/intellij-community | plugins/hg4idea/testData/bin/mercurial/osutil.py | 90 | 5363 | # osutil.py - pure Python version of osutil.c
#
# Copyright 2009 Matt Mackall <[email protected]> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import os
import stat as statmod
def _mode_to_kind(mode):
if statmod.S_ISREG(mode):
return statmod.S_IFREG
if statmod.S_ISDIR(mode):
return statmod.S_IFDIR
if statmod.S_ISLNK(mode):
return statmod.S_IFLNK
if statmod.S_ISBLK(mode):
return statmod.S_IFBLK
if statmod.S_ISCHR(mode):
return statmod.S_IFCHR
if statmod.S_ISFIFO(mode):
return statmod.S_IFIFO
if statmod.S_ISSOCK(mode):
return statmod.S_IFSOCK
return mode
def listdir(path, stat=False, skip=None):
'''listdir(path, stat=False) -> list_of_tuples
Return a sorted list containing information about the entries
in the directory.
If stat is True, each element is a 3-tuple:
(name, type, stat object)
Otherwise, each element is a 2-tuple:
(name, type)
'''
result = []
prefix = path
if not prefix.endswith(os.sep):
prefix += os.sep
names = os.listdir(path)
names.sort()
for fn in names:
st = os.lstat(prefix + fn)
if fn == skip and statmod.S_ISDIR(st.st_mode):
return []
if stat:
result.append((fn, _mode_to_kind(st.st_mode), st))
else:
result.append((fn, _mode_to_kind(st.st_mode)))
return result
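# Illustrative return shapes (assuming a directory holding one regular file
# "a.txt"):
#   listdir(path)            -> [('a.txt', statmod.S_IFREG)]
#   listdir(path, stat=True) -> [('a.txt', statmod.S_IFREG, <os.stat_result>)]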
if os.name != 'nt':
posixfile = open
else:
import ctypes, msvcrt
_kernel32 = ctypes.windll.kernel32
_DWORD = ctypes.c_ulong
_LPCSTR = _LPSTR = ctypes.c_char_p
_HANDLE = ctypes.c_void_p
_INVALID_HANDLE_VALUE = _HANDLE(-1).value
# CreateFile
_FILE_SHARE_READ = 0x00000001
_FILE_SHARE_WRITE = 0x00000002
_FILE_SHARE_DELETE = 0x00000004
_CREATE_ALWAYS = 2
_OPEN_EXISTING = 3
_OPEN_ALWAYS = 4
_GENERIC_READ = 0x80000000
_GENERIC_WRITE = 0x40000000
_FILE_ATTRIBUTE_NORMAL = 0x80
# open_osfhandle flags
_O_RDONLY = 0x0000
_O_RDWR = 0x0002
_O_APPEND = 0x0008
_O_TEXT = 0x4000
_O_BINARY = 0x8000
# types of parameters of C functions used (required by pypy)
_kernel32.CreateFileA.argtypes = [_LPCSTR, _DWORD, _DWORD, ctypes.c_void_p,
_DWORD, _DWORD, _HANDLE]
_kernel32.CreateFileA.restype = _HANDLE
def _raiseioerror(name):
err = ctypes.WinError()
raise IOError(err.errno, '%s: %s' % (name, err.strerror))
class posixfile(object):
'''a file object aiming for POSIX-like semantics
CPython's open() returns a file that was opened *without* setting the
_FILE_SHARE_DELETE flag, which causes rename and unlink to abort.
This even happens if any hardlinked copy of the file is in open state.
We set _FILE_SHARE_DELETE here, so files opened with posixfile can be
renamed and deleted while they are held open.
Note that if a file opened with posixfile is unlinked, the file
remains but cannot be opened again or be recreated under the same name,
until all reading processes have closed the file.'''
def __init__(self, name, mode='r', bufsize=-1):
if 'b' in mode:
flags = _O_BINARY
else:
flags = _O_TEXT
m0 = mode[0]
if m0 == 'r' and '+' not in mode:
flags |= _O_RDONLY
access = _GENERIC_READ
else:
# work around http://support.microsoft.com/kb/899149 and
# set _O_RDWR for 'w' and 'a', even if mode has no '+'
flags |= _O_RDWR
access = _GENERIC_READ | _GENERIC_WRITE
if m0 == 'r':
creation = _OPEN_EXISTING
elif m0 == 'w':
creation = _CREATE_ALWAYS
elif m0 == 'a':
creation = _OPEN_ALWAYS
flags |= _O_APPEND
else:
raise ValueError("invalid mode: %s" % mode)
fh = _kernel32.CreateFileA(name, access,
_FILE_SHARE_READ | _FILE_SHARE_WRITE | _FILE_SHARE_DELETE,
None, creation, _FILE_ATTRIBUTE_NORMAL, None)
if fh == _INVALID_HANDLE_VALUE:
_raiseioerror(name)
fd = msvcrt.open_osfhandle(fh, flags)
if fd == -1:
_kernel32.CloseHandle(fh)
_raiseioerror(name)
f = os.fdopen(fd, mode, bufsize)
# unfortunately, f.name is '<fdopen>' at this point -- so we store
# the name on this wrapper. We cannot just assign to f.name,
# because that attribute is read-only.
object.__setattr__(self, 'name', name)
object.__setattr__(self, '_file', f)
def __iter__(self):
return self._file
def __getattr__(self, name):
return getattr(self._file, name)
def __setattr__(self, name, value):
'''mimics the read-only attributes of Python file objects
by raising 'TypeError: readonly attribute' if someone tries:
f = posixfile('foo.txt')
f.name = 'bla' '''
return self._file.__setattr__(name, value)
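        # Minimal usage sketch (assumption: running on Windows, where this
        # class is selected):
        #   f = posixfile('spam.txt', 'w')
        #   os.rename('spam.txt', 'eggs.txt')  # succeeds even while f is open
        #   f.close()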
| apache-2.0 | 3,775,535,050,962,136,600 | 30.547059 | 79 | 0.573187 | false |
nathanielvarona/airflow | tests/always/test_example_dags.py | 3 | 2575 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import unittest
from glob import glob
from airflow.models import DagBag
from tests.test_utils.asserts import assert_queries_count
ROOT_FOLDER = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
)
NO_DB_QUERY_EXCEPTION = ["/airflow/example_dags/example_subdag_operator.py"]
class TestExampleDags(unittest.TestCase):
def test_should_be_importable(self):
example_dags = list(glob(f"{ROOT_FOLDER}/airflow/**/example_dags/example_*.py", recursive=True))
assert 0 != len(example_dags)
for filepath in example_dags:
relative_filepath = os.path.relpath(filepath, ROOT_FOLDER)
with self.subTest(f"File {relative_filepath} should contain dags"):
dagbag = DagBag(
dag_folder=filepath,
include_examples=False,
)
assert 0 == len(dagbag.import_errors), f"import_errors={str(dagbag.import_errors)}"
assert len(dagbag.dag_ids) >= 1
def test_should_not_do_database_queries(self):
example_dags = glob(f"{ROOT_FOLDER}/airflow/**/example_dags/example_*.py", recursive=True)
example_dags = [
dag_file
for dag_file in example_dags
            if all(not dag_file.endswith(e) for e in NO_DB_QUERY_EXCEPTION)
]
assert 0 != len(example_dags)
for filepath in example_dags:
relative_filepath = os.path.relpath(filepath, ROOT_FOLDER)
with self.subTest(f"File {relative_filepath} shouldn't do database queries"):
with assert_queries_count(0):
DagBag(
dag_folder=filepath,
include_examples=False,
)
| apache-2.0 | 7,480,050,119,579,135,000 | 41.213115 | 104 | 0.655922 | false |
teamfx/openjfx-9-dev-rt | modules/javafx.web/src/main/native/Tools/Scripts/webkitpy/tool/commands/perfalizer.py | 1 | 8619 | # Copyright (c) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.tool.bot.irc_command import IRCCommand
from webkitpy.tool.bot.irc_command import Help
from webkitpy.tool.bot.irc_command import Hi
from webkitpy.tool.bot.irc_command import Restart
from webkitpy.tool.bot.ircbot import IRCBot
from webkitpy.tool.bot.patchanalysistask import PatchAnalysisTask, PatchAnalysisTaskDelegate, UnableToApplyPatch
from webkitpy.tool.bot.sheriff import Sheriff
from webkitpy.tool.commands.queues import AbstractQueue
from webkitpy.tool.commands.stepsequence import StepSequenceErrorHandler
_log = logging.getLogger(__name__)
class PerfalizerTask(PatchAnalysisTask):
def __init__(self, tool, patch, logger):
PatchAnalysisTask.__init__(self, self, patch)
self._port = tool.port_factory.get()
self._tool = tool
self._logger = logger
def _copy_build_product_without_patch(self):
filesystem = self._tool.filesystem
configuration = filesystem.basename(self._port._build_path())
self._build_directory = filesystem.dirname(self._port._build_path())
self._build_directory_without_patch = self._build_directory + 'WithoutPatch'
try:
filesystem.rmtree(self._build_directory_without_patch)
filesystem.copytree(filesystem.join(self._build_directory, configuration),
filesystem.join(self._build_directory_without_patch, configuration))
return True
        except Exception:
return False
def run(self):
if not self._patch.committer() and not self._patch.attacher().can_commit:
            self._logger('The patch %d is not authorized by a committer' % self._patch.id())
return False
self._logger('Preparing to run performance tests for the attachment %d...' % self._patch.id())
if not self._clean() or not self._update():
return False
head_revision = self._tool.scm().head_svn_revision()
self._logger('Building WebKit at r%s without the patch' % head_revision)
if not self._build_without_patch():
return False
if not self._port.check_build(needs_http=False):
self._logger('Failed to build DumpRenderTree.')
return False
if not self._copy_build_product_without_patch():
self._logger('Failed to copy the build product from %s to %s' % (self._build_directory, self._build_directory_without_patch))
return False
self._logger('Building WebKit at r%s with the patch' % head_revision)
if not self._apply() or not self._build():
return False
if not self._port.check_build(needs_http=False):
self._logger('Failed to build DumpRenderTree.')
return False
filesystem = self._tool.filesystem
if filesystem.exists(self._json_path()):
filesystem.remove(self._json_path())
self._logger("Running performance tests...")
if self._run_perf_test(self._build_directory_without_patch, 'without %d' % self._patch.id()) < 0:
self._logger('Failed to run performance tests without the patch.')
return False
if self._run_perf_test(self._build_directory, 'with %d' % self._patch.id()) < 0:
self._logger('Failed to run performance tests with the patch.')
return False
if not filesystem.exists(self._results_page_path()):
self._logger('Failed to generate the results page.')
return False
results_page = filesystem.read_text_file(self._results_page_path())
self._tool.bugs.add_attachment_to_bug(self._patch.bug_id(), results_page,
description="Performance tests results for %d" % self._patch.id(), mimetype='text/html')
self._logger("Uploaded the results on the bug %d" % self._patch.bug_id())
return True
def parent_command(self):
return "perfalizer"
def run_webkit_patch(self, args):
webkit_patch_args = [self._tool.path()]
webkit_patch_args.extend(args)
return self._tool.executive.run_and_throw_if_fail(webkit_patch_args, cwd=self._tool.scm().checkout_root)
def _json_path(self):
return self._tool.filesystem.join(self._build_directory, 'PerformanceTestResults.json')
def _results_page_path(self):
return self._tool.filesystem.join(self._build_directory, 'PerformanceTestResults.html')
def _run_perf_test(self, build_path, description):
filesystem = self._tool.filesystem
script_path = filesystem.join(filesystem.dirname(self._tool.path()), 'run-perf-tests')
perf_test_runner_args = [script_path, '--no-build', '--no-show-results', '--build-directory', build_path,
'--output-json-path', self._json_path(), '--description', description]
return self._tool.executive.run_and_throw_if_fail(perf_test_runner_args, cwd=self._tool.scm().checkout_root)
def run_command(self, command):
self.run_webkit_patch(command)
def command_passed(self, message, patch):
pass
def command_failed(self, message, script_error, patch):
self._logger(message)
def refetch_patch(self, patch):
return self._tool.bugs.fetch_attachment(patch.id())
def build_style(self):
return "release"
class PerfTest(IRCCommand):
def execute(self, nick, args, tool, sheriff):
if not args:
tool.irc().post(nick + ": Please specify an attachment/patch id")
return
patch_id = args[0]
patch = tool.bugs.fetch_attachment(patch_id)
if not patch:
tool.irc().post(nick + ": Could not fetch the patch")
return
task = PerfalizerTask(tool, patch, lambda message: tool.irc().post('%s: %s' % (nick, message)))
task.run()
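    # Illustrative IRC interaction (hypothetical attachment id): a message
    # like "perfalizer: test 12345" reaches this command through the "test"
    # entry in Perfalizer._commands below, with args == ['12345'].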
class Perfalizer(AbstractQueue, StepSequenceErrorHandler):
name = "perfalizer"
watchers = AbstractQueue.watchers + ["[email protected]"]
_commands = {
"help": Help,
"hi": Hi,
"restart": Restart,
"test": PerfTest,
}
# AbstractQueue methods
def begin_work_queue(self):
AbstractQueue.begin_work_queue(self)
self._sheriff = Sheriff(self._tool, self)
self._irc_bot = IRCBot("perfalizer", self._tool, self._sheriff, self._commands)
self._tool.ensure_irc_connected(self._irc_bot.irc_delegate())
def work_item_log_path(self, failure_map):
return None
def _is_old_failure(self, revision):
return self._tool.status_server.svn_revision(revision)
def next_work_item(self):
self._irc_bot.process_pending_messages()
return
def process_work_item(self, failure_map):
return True
def handle_unexpected_error(self, failure_map, message):
_log.error(message)
# StepSequenceErrorHandler methods
@classmethod
def handle_script_error(cls, tool, state, script_error):
# Ideally we would post some information to IRC about what went wrong
# here, but we don't have the IRC password in the child process.
pass
| gpl-2.0 | -1,788,775,883,608,845,300 | 39.275701 | 137 | 0.670611 | false |
le9i0nx/ansible | lib/ansible/utils/module_docs_fragments/k8s_name_options.py | 80 | 2450 | #
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Options for selecting or identifying a specific K8s object
class ModuleDocFragment(object):
DOCUMENTATION = '''
options:
api_version:
description:
- Use to specify the API version. Use to create, delete, or discover an object without providing a full
resource definition. Use in conjunction with I(kind), I(name), and I(namespace) to identify a
specific object. If I(resource definition) is provided, the I(apiVersion) from the I(resource_definition)
will override this option.
default: v1
aliases:
- api
- version
kind:
description:
- Use to specify an object model. Use to create, delete, or discover an object without providing a full
resource definition. Use in conjunction with I(api_version), I(name), and I(namespace) to identify a
specific object. If I(resource definition) is provided, the I(kind) from the I(resource_definition)
will override this option.
name:
description:
- Use to specify an object name. Use to create, delete, or discover an object without providing a full
resource definition. Use in conjunction with I(api_version), I(kind) and I(namespace) to identify a
specific object. If I(resource definition) is provided, the I(metadata.name) value from the
I(resource_definition) will override this option.
namespace:
description:
- Use to specify an object namespace. Useful when creating, deleting, or discovering an object without
providing a full resource definition. Use in conjunction with I(api_version), I(kind), and I(name)
      to identify a specific object. If I(resource definition) is provided, the I(metadata.namespace) value
from the I(resource_definition) will override this option.
'''
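# Illustrative use of these options (hypothetical snippet for a module that
# includes this fragment, e.g. the k8s module):
#
#   - name: Ensure the web Service exists
#     k8s:
#       api_version: v1
#       kind: Service
#       name: web
#       namespace: default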
| gpl-3.0 | -4,009,714,034,053,895,700 | 44.37037 | 111 | 0.733878 | false |
akhileshpillai/treeherder | tests/webapp/api/test_resultset_api.py | 2 | 12700 | import copy
from django.core.urlresolvers import reverse
from rest_framework.test import APIClient
from tests import test_utils
from treeherder.client import TreeherderResultSetCollection
from treeherder.model.models import (FailureClassification,
Job,
JobNote)
from treeherder.webapp.api import utils
def test_resultset_list(webapp, eleven_jobs_stored, jm, test_project):
"""
test retrieving a list of ten json blobs from the jobs-list
endpoint. ``full`` set to false, so it doesn't return revisions.
"""
resp = webapp.get(
reverse("resultset-list", kwargs={"project": jm.project}))
results = resp.json['results']
meta = resp.json['meta']
assert resp.status_int == 200
assert isinstance(results, list)
assert len(results) == 10
exp_keys = set([
u'id',
u'repository_id',
u'author',
u'comments',
u'revision_hash',
u'revision',
u'revisions',
u'revision_count',
u'revisions_uri',
u'push_timestamp',
])
for rs in results:
assert set(rs.keys()) == exp_keys
assert(meta == {
u'count': 10,
u'filter_params': {},
        u'repository': test_project
})
def test_resultset_list_bad_project(webapp, jm):
"""
test retrieving a list of ten json blobs from the jobs-list
endpoint.
"""
resp = webapp.get(
reverse("resultset-list", kwargs={"project": "foo"}),
expect_errors=True
)
assert resp.status_int == 404
assert resp.json == {"detail": "No project with name foo"}
def test_resultset_list_empty_rs_still_show(webapp, test_repository,
sample_resultset, jm):
"""
test retrieving a resultset list, when the resultset has no jobs.
should show.
"""
jm.store_result_set_data(sample_resultset)
resp = webapp.get(
reverse("resultset-list", kwargs={"project": jm.project}),
)
assert resp.status_int == 200
assert len(resp.json['results']) == 10
def test_resultset_list_single_short_revision(webapp, eleven_jobs_stored, jm, test_project):
"""
test retrieving a resultset list, filtered by single short revision
"""
resp = webapp.get(
reverse("resultset-list", kwargs={"project": jm.project}),
{"revision": "45f8637cb9f7"}
)
assert resp.status_int == 200
results = resp.json['results']
meta = resp.json['meta']
assert len(results) == 1
assert set([rs["revision"] for rs in results]) == {"45f8637cb9f78f19cb8463ff174e81756805d8cf"}
assert(meta == {
u'count': 1,
u'revision': u'45f8637cb9f7',
u'filter_params': {
u'revisions_short_revision': "45f8637cb9f7"
},
u'repository': test_project}
)
def test_resultset_list_single_long_revision(webapp, eleven_jobs_stored, jm, test_project):
"""
test retrieving a resultset list, filtered by a single long revision
"""
resp = webapp.get(
reverse("resultset-list", kwargs={"project": jm.project}),
{"revision": "45f8637cb9f78f19cb8463ff174e81756805d8cf"}
)
assert resp.status_int == 200
results = resp.json['results']
meta = resp.json['meta']
assert len(results) == 1
assert set([rs["revision"] for rs in results]) == {"45f8637cb9f78f19cb8463ff174e81756805d8cf"}
assert(meta == {
u'count': 1,
u'revision': u'45f8637cb9f78f19cb8463ff174e81756805d8cf',
u'filter_params': {
u'revisions_long_revision': u'45f8637cb9f78f19cb8463ff174e81756805d8cf'
},
u'repository': test_project}
)
def test_resultset_list_single_long_revision_stored_long(webapp, test_repository,
sample_resultset, jm,
test_project):
"""
test retrieving a resultset list with store long revision, filtered by a single long revision
"""
# store a resultset with long revision
resultset = copy.deepcopy(sample_resultset[0])
resultset["revisions"][0]["revision"] = "21fb3eed1b5f3456789012345678901234567890"
jm.store_result_set_data([resultset])
resp = webapp.get(
reverse("resultset-list", kwargs={"project": jm.project}),
{"revision": "21fb3eed1b5f3456789012345678901234567890"}
)
assert resp.status_int == 200
results = resp.json['results']
meta = resp.json['meta']
assert len(results) == 1
assert set([rs["revision"] for rs in results]) == {"21fb3eed1b5f3456789012345678901234567890"}
assert(meta == {
u'count': 1,
u'revision': u'21fb3eed1b5f3456789012345678901234567890',
u'filter_params': {
u'revisions_long_revision': u'21fb3eed1b5f3456789012345678901234567890'
},
u'repository': test_project}
)
def test_resultset_list_filter_by_revision(webapp, eleven_jobs_stored, jm, test_project):
"""
test retrieving a resultset list, filtered by a revision range
"""
resp = webapp.get(
reverse("resultset-list", kwargs={"project": jm.project}),
{"fromchange": "130965d3df6c", "tochange": "f361dcb60bbe"}
)
assert resp.status_int == 200
results = resp.json['results']
meta = resp.json['meta']
assert len(results) == 4
assert set([rs["revision"] for rs in results]) == {
u'130965d3df6c9a1093b4725f3b877eaef80d72bc',
u'7f417c3505e3d2599ac9540f02e3dbee307a3963',
u'a69390334818373e2d7e6e9c8d626a328ed37d47',
u'f361dcb60bbedaa01257fbca211452972f7a74b2'
}
assert(meta == {
u'count': 4,
u'fromchange': u'130965d3df6c',
u'filter_params': {
u'push_timestamp__gte': 1384363842,
u'push_timestamp__lte': 1384365942
},
u'repository': test_project,
u'tochange': u'f361dcb60bbe'}
)
def test_resultset_list_filter_by_date(webapp, test_repository,
sample_resultset, jm, test_project):
"""
test retrieving a resultset list, filtered by a date range
"""
sample_resultset[3]["push_timestamp"] = utils.to_timestamp("2013-08-09")
sample_resultset[4]["push_timestamp"] = utils.to_timestamp("2013-08-10")
sample_resultset[5]["push_timestamp"] = utils.to_timestamp("2013-08-11")
sample_resultset[6]["push_timestamp"] = utils.to_timestamp("2013-08-12")
sample_resultset[7]["push_timestamp"] = utils.to_timestamp("2013-08-13")
jm.store_result_set_data(sample_resultset)
resp = webapp.get(
reverse("resultset-list", kwargs={"project": jm.project}),
{"startdate": "2013-08-10", "enddate": "2013-08-13"}
)
assert resp.status_int == 200
results = resp.json['results']
meta = resp.json['meta']
assert len(results) == 4
assert set([rs["revision"] for rs in results]) == {
u'ce17cad5d554cfffddee13d1d8421ae9ec5aad82',
u'7f417c3505e3d2599ac9540f02e3dbee307a3963',
u'a69390334818373e2d7e6e9c8d626a328ed37d47',
u'f361dcb60bbedaa01257fbca211452972f7a74b2'
}
assert(meta == {
u'count': 4,
u'enddate': u'2013-08-13',
u'filter_params': {
u'push_timestamp__gte': 1376118000.0,
u'push_timestamp__lt': 1376463600.0
},
u'repository': test_project,
u'startdate': u'2013-08-10'}
)
def test_resultset_list_without_jobs(webapp, test_repository,
sample_resultset, jm, test_project):
"""
test retrieving a resultset list without jobs
"""
jm.store_result_set_data(sample_resultset)
resp = webapp.get(
reverse("resultset-list", kwargs={"project": jm.project})
)
assert resp.status_int == 200
results = resp.json['results']
assert len(results) == 10
assert all([('platforms' not in result) for result in results])
meta = resp.json['meta']
assert meta == {
u'count': len(results),
u'filter_params': {},
u'repository': test_project
}
def test_resultset_detail(webapp, eleven_jobs_stored, jm):
"""
test retrieving a resultset from the resultset-detail
endpoint.
"""
rs_list = jm.get_result_set_list(0, 10)
rs = rs_list[0]
resp = webapp.get(
reverse("resultset-detail",
kwargs={"project": jm.project, "pk": int(rs["id"])})
)
assert resp.status_int == 200
assert isinstance(resp.json, dict)
assert resp.json["id"] == rs["id"]
def test_result_set_detail_not_found(webapp, jm):
"""
test retrieving a HTTP 404 from the resultset-detail
endpoint.
"""
resp = webapp.get(
reverse("resultset-detail",
kwargs={"project": jm.project, "pk": -32767}),
expect_errors=True
)
assert resp.status_int == 404
def test_result_set_detail_bad_project(webapp, jm):
"""
test retrieving a HTTP 404 from the resultset-detail
endpoint.
"""
resp = webapp.get(
reverse("resultset-detail",
kwargs={"project": "foo", "pk": -32767}),
expect_errors=True
)
assert resp.status_int == 404
assert resp.json == {"detail": "No project with name foo"}
def test_resultset_create(jm, test_repository, sample_resultset,
mock_post_json):
"""
test posting data to the resultset endpoint via webtest.
    expected results are:
- return code 200
- return message successful
- 1 resultset stored in the jobs schema
"""
    # store the first two, so that when we submit all of them below,
    # the first two are properly not re-added.
jm.store_result_set_data(sample_resultset[:2])
trsc = TreeherderResultSetCollection()
exp_revision_hashes = set()
for rs in sample_resultset:
rs.update({'author': 'John Doe'})
result_set = trsc.get_resultset(rs)
trsc.add(result_set)
exp_revision_hashes.add(rs["revision"])
resp = test_utils.post_collection(jm.project, trsc)
act_revision_hashes = {x["long_revision"] for x in resp.json["resultsets"]}
assert exp_revision_hashes == act_revision_hashes
stored_objs = jm.get_dhub().execute(
proc="jobs_test.selects.resultset_by_long_revision",
placeholders=[sample_resultset[0]['revision']]
)
assert len(stored_objs) == 1
assert stored_objs[0]['long_revision'] == sample_resultset[0]['revision']
def test_resultset_cancel_all(jm, resultset_with_three_jobs,
pulse_action_consumer, test_user):
"""
Issue cancellation of a resultset with three unfinished jobs.
"""
client = APIClient()
client.force_authenticate(user=test_user)
    # Ensure all jobs are pending.
jobs = jm.get_job_list(0, 3)
for job in jobs:
assert job['state'] == 'pending'
url = reverse("resultset-cancel-all",
kwargs={"project": jm.project, "pk": resultset_with_three_jobs})
client.post(url)
    # Ensure all jobs are cancelled.
jobs = jm.get_job_list(0, 3)
for job in jobs:
assert job['state'] == 'completed'
assert job['result'] == 'usercancel'
for _ in range(0, 3):
message = pulse_action_consumer.get(block=True, timeout=2)
content = message.payload
assert content['action'] == 'cancel'
assert content['project'] == jm.project
def test_resultset_status(jm, webapp, eleven_jobs_stored, test_user):
"""
test retrieving the status of a resultset
"""
# create a failure classification corresponding to "not successful"
failure_classification = FailureClassification.objects.create(
id=2, name="fixed by commit")
rs_list = jm.get_result_set_list(0, 10)
rs = rs_list[0]
resp = webapp.get(
reverse("resultset-status",
kwargs={"project": jm.project, "pk": int(rs["id"])})
)
assert resp.status_int == 200
assert isinstance(resp.json, dict)
assert resp.json == {'success': 1}
# the first ten resultsets have one job each, so resultset.id == job.id
JobNote.objects.create(job=Job.objects.get(project_specific_id=rs["id"]),
failure_classification=failure_classification,
user=test_user,
text='A random note')
resp = webapp.get(
reverse("resultset-status",
kwargs={"project": jm.project, "pk": int(rs["id"])})
)
assert resp.status_int == 200
assert isinstance(resp.json, dict)
assert resp.json == {}
| mpl-2.0 | -5,041,567,084,023,381,000 | 30.829574 | 98 | 0.61063 | false |
mescobal/geined | consolidado.py | 1 | 3865 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Listado y creación del consolidado anual"""
import cgi
import cgitb
cgitb.enable()
import funciones
import datos
import pagina
import htm
import csv
import StringIO
def listado():
"""Listado de consolidado"""
pag = pagina.Pagina("Consolidado anual", 4)
print(htm.encabezado("Consolidado anual", "Administración financiera",
"geined.py?accion=financiero"))
print("<table class='tabla_barra'><tr><td>")
print(htm.button("Exportar a Planilla", "consolidado.py?accion=exportar"))
print("</td><td>")
htm.formulario("calccon.py")
print("Seleccione año:")
print(htm.hidden("accion", "calcular"))
htm.seleccionar_ano()
print('<input type="submit" value="Recalcular" />')
htm.fin_formulario()
print("</td>")
print("</tr></table>")
htm.encabezado_tabla(["Rubro", "Nombre", "Enero", "Febrero", "Marzo",
"Abril", "Mayo", "Junio", "Julio", "Agosto", "Setiembre", "Octubre",
"Noviembre", "Diciembre", "Total"])
consolidado = datos.Tabla("consolidado")
cuentas = datos.Tabla("cuentas")
consolidado.orden = "rubro"
consolidado.filtrar()
meses = ["enero", "febrero", "marzo", "abril", "mayo", "junio", "julio",
"agosto", "setiembre", "octubre", "noviembre", "diciembre"]
for fila in consolidado.resultado:
print("<tr class='fila_datos'>")
print(htm.td(fila['rubro']))
cuentas.buscar("rubro", fila["rubro"])
print(htm.td(cuentas.registro["nombre"]))
total = 0
for item in meses:
print(htm.td(funciones.moneda(fila[item]), "right"))
total = total + fila[item]
print(htm.td(funciones.moneda(total), "right"))
print('</tr>')
htm.fin_tabla()
pag.fin()
def exportar():
"""Exportar consolidado en formato CSV"""
salida = StringIO.StringIO()
expcsv = csv.DictWriter(salida, ["Rubro", "Nombre", "Enero", "Febrero",
"Marzo", "Abril", "Mayo", "Junio",
"Julio",
"Agosto", "Setiembre", "Octubre",
"Noviembre", "Diciembre", "Total"])
consolidado = datos.Tabla("consolidado")
consolidado.orden = "rubro"
consolidado.filtrar()
filename = "consolidado.csv"
meses = ["enero", "febrero", "marzo", "abril", "mayo", "junio", "julio",
"agosto", "setiembre", "octubre", "noviembre", "diciembre"]
expcsv.writeheader()
for fila in consolidado.resultado:
        # Retrieve the row values
total = 0
for item in meses:
total = total + fila[item]
expcsv.writerow({"Rubro":str(fila["rubro"]), "Nombre":fila["nombre"],
"Enero":fila["enero"], "Febrero":fila["febrero"],
"Marzo":fila["marzo"], "Abril":fila["abril"], "Mayo":fila["mayo"],
"Junio":fila["junio"], "Julio":fila["julio"],
"Agosto":fila["agosto"],
"Setiembre":fila["setiembre"], "Octubre":fila["octubre"],
"Noviembre":fila["noviembre"], "Diciembre":fila["diciembre"],
"Total":total})
headers = '\r\n'.join([
"Content-type: %s;",
"Content-Disposition: attachment; filename=%s",
"Content-Title: %s",
"Content-Length: %i",
"\r\n", # empty line to end headers
])
length = len(salida.getvalue())
print(headers % ('text/csv', filename, filename, length))
print(salida.getvalue())
salida.close()
listado()
def main():
"""Principal"""
form = cgi.FieldStorage()
accion = form.getvalue("accion", "listado")
if accion == "listado":
listado()
elif accion == "exportar":
exportar()
if __name__ == "__main__":
main()
| gpl-3.0 | -1,405,250,731,700,855,600 | 37.62 | 78 | 0.559037 | false |
hvnsweeting/Diamond | src/collectors/nagios/test/testnagios.py | 35 | 2557 | #!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from nagios import NagiosStatsCollector
################################################################################
class TestNagiosStatsCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('NagiosStatsCollector', {
'interval': 10,
'bin': 'true',
'use_sudo': False
})
self.collector = NagiosStatsCollector(config, None)
def test_import(self):
self.assertTrue(NagiosStatsCollector)
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
patch_communicate = patch(
'subprocess.Popen.communicate',
Mock(return_value=(
self.getFixture('nagiostat').getvalue(),
'')))
patch_communicate.start()
self.collector.collect()
patch_communicate.stop()
metrics = {
'AVGACTHSTLAT': 196,
'AVGACTSVCLAT': 242,
'AVGACTHSTEXT': 4037,
'AVGACTSVCEXT': 340,
'NUMHSTUP': 63,
'NUMHSTDOWN': 0,
'NUMHSTUNR': 0,
'NUMSVCOK': 1409,
'NUMSVCWARN': 3,
'NUMSVCUNKN': 0,
'NUMSVCCRIT': 7,
'NUMHSTACTCHK5M': 56,
'NUMHSTPSVCHK5M': 0,
'NUMSVCACTCHK5M': 541,
'NUMSVCPSVCHK5M': 0,
'NUMACTHSTCHECKS5M': 56,
'NUMOACTHSTCHECKS5M': 1,
'NUMCACHEDHSTCHECKS5M': 1,
'NUMSACTHSTCHECKS5M': 55,
'NUMPARHSTCHECKS5M': 55,
'NUMSERHSTCHECKS5M': 0,
'NUMPSVHSTCHECKS5M': 0,
'NUMACTSVCCHECKS5M': 1101,
'NUMOACTSVCCHECKS5M': 0,
'NUMCACHEDSVCCHECKS5M': 0,
'NUMSACTSVCCHECKS5M': 1101,
'NUMPSVSVCCHECKS5M': 0,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
| mit | -597,908,211,141,537,700 | 30.9625 | 80 | 0.507626 | false |
NewpTone/stacklab-nova | debian/tmp/usr/lib/python2.7/dist-packages/nova/api/openstack/__init__.py | 6 | 6628 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
WSGI middleware for OpenStack API controllers.
"""
import routes
import webob.dec
import webob.exc
from nova.api.openstack import wsgi
from nova.openstack.common import log as logging
from nova import utils
from nova import wsgi as base_wsgi
LOG = logging.getLogger(__name__)
class FaultWrapper(base_wsgi.Middleware):
"""Calls down the middleware stack, making exceptions into faults."""
_status_to_type = {}
@staticmethod
def status_to_type(status):
if not FaultWrapper._status_to_type:
for clazz in utils.walk_class_hierarchy(webob.exc.HTTPError):
FaultWrapper._status_to_type[clazz.code] = clazz
return FaultWrapper._status_to_type.get(
status, webob.exc.HTTPInternalServerError)()
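    # Illustrative usage (hypothetical, relying on webob's standard error classes):
    #   FaultWrapper.status_to_type(404)  # -> webob.exc.HTTPNotFound instance
    #   FaultWrapper.status_to_type(599)  # -> webob.exc.HTTPInternalServerError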
def _error(self, inner, req):
LOG.exception(_("Caught error: %s"), unicode(inner))
safe = getattr(inner, 'safe', False)
headers = getattr(inner, 'headers', None)
status = getattr(inner, 'code', 500)
if status is None:
status = 500
msg_dict = dict(url=req.url, status=status)
LOG.info(_("%(url)s returned with HTTP %(status)d") % msg_dict)
outer = self.status_to_type(status)
if headers:
outer.headers = headers
# NOTE(johannes): We leave the explanation empty here on
# purpose. It could possibly have sensitive information
# that should not be returned back to the user. See
# bugs 868360 and 874472
# NOTE(eglynn): However, it would be over-conservative and
# inconsistent with the EC2 API to hide every exception,
# including those that are safe to expose, see bug 1021373
if safe:
outer.explanation = '%s: %s' % (inner.__class__.__name__,
unicode(inner))
return wsgi.Fault(outer)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
try:
return req.get_response(self.application)
except Exception as ex:
return self._error(ex, req)
class APIMapper(routes.Mapper):
def routematch(self, url=None, environ=None):
if url == "":
result = self._match("", environ)
return result[0], result[1]
return routes.Mapper.routematch(self, url, environ)
class ProjectMapper(APIMapper):
def resource(self, member_name, collection_name, **kwargs):
if not ('parent_resource' in kwargs):
kwargs['path_prefix'] = '{project_id}/'
else:
parent_resource = kwargs['parent_resource']
p_collection = parent_resource['collection_name']
p_member = parent_resource['member_name']
kwargs['path_prefix'] = '{project_id}/%s/:%s_id' % (p_collection,
p_member)
routes.Mapper.resource(self, member_name,
collection_name,
**kwargs)
class APIRouter(base_wsgi.Router):
"""
Routes requests on the OpenStack API to the appropriate controller
and method.
"""
ExtensionManager = None # override in subclasses
@classmethod
def factory(cls, global_config, **local_config):
"""Simple paste factory, :class:`nova.wsgi.Router` doesn't have one"""
return cls()
def __init__(self, ext_mgr=None):
if ext_mgr is None:
if self.ExtensionManager:
ext_mgr = self.ExtensionManager()
else:
raise Exception(_("Must specify an ExtensionManager class"))
mapper = ProjectMapper()
self.resources = {}
self._setup_routes(mapper, ext_mgr)
self._setup_ext_routes(mapper, ext_mgr)
self._setup_extensions(ext_mgr)
super(APIRouter, self).__init__(mapper)
def _setup_ext_routes(self, mapper, ext_mgr):
for resource in ext_mgr.get_resources():
LOG.debug(_('Extended resource: %s'),
resource.collection)
inherits = None
if resource.inherits:
inherits = self.resources.get(resource.inherits)
if not resource.controller:
resource.controller = inherits.controller
wsgi_resource = wsgi.Resource(resource.controller,
inherits=inherits)
self.resources[resource.collection] = wsgi_resource
kargs = dict(
controller=wsgi_resource,
collection=resource.collection_actions,
member=resource.member_actions)
if resource.parent:
kargs['parent_resource'] = resource.parent
mapper.resource(resource.collection, resource.collection, **kargs)
if resource.custom_routes_fn:
resource.custom_routes_fn(mapper, wsgi_resource)
def _setup_extensions(self, ext_mgr):
for extension in ext_mgr.get_controller_extensions():
ext_name = extension.extension.name
collection = extension.collection
controller = extension.controller
if collection not in self.resources:
LOG.warning(_('Extension %(ext_name)s: Cannot extend '
'resource %(collection)s: No such resource') %
locals())
continue
LOG.debug(_('Extension %(ext_name)s extending resource: '
'%(collection)s') % locals())
resource = self.resources[collection]
resource.register_actions(controller)
resource.register_extensions(controller)
def _setup_routes(self, mapper, ext_mgr):
raise NotImplementedError
| apache-2.0 | 8,907,776,850,187,555,000 | 36.235955 | 78 | 0.599578 | false |
hyperized/ansible | lib/ansible/module_utils/facts/network/openbsd.py | 232 | 1600 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.facts.network.base import NetworkCollector
from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork
class OpenBSDNetwork(GenericBsdIfconfigNetwork):
"""
This is the OpenBSD Network Class.
It uses the GenericBsdIfconfigNetwork.
"""
platform = 'OpenBSD'
# OpenBSD 'ifconfig -a' does not have information about aliases
def get_interfaces_info(self, ifconfig_path, ifconfig_options='-aA'):
return super(OpenBSDNetwork, self).get_interfaces_info(ifconfig_path, ifconfig_options)
# Return macaddress instead of lladdr
def parse_lladdr_line(self, words, current_if, ips):
current_if['macaddress'] = words[1]
current_if['type'] = 'ether'
class OpenBSDNetworkCollector(NetworkCollector):
_fact_class = OpenBSDNetwork
_platform = 'OpenBSD'
| gpl-3.0 | -4,103,136,341,010,956,300 | 37.095238 | 95 | 0.7425 | false |
yujikato/DIRAC | src/DIRAC/DataManagementSystem/DB/FileCatalogComponents/DirectoryManager/DirectoryTreeBase.py | 2 | 41293 | """ DIRAC DirectoryTree base class """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import six
import time
import threading
import os
import stat
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.Utilities import getIDSelectString
DEBUG = 0
#############################################################################
class DirectoryTreeBase(object):
def __init__(self, database=None):
self.db = database
self.lock = threading.Lock()
self.treeTable = ''
############################################################################
#
  # THE FOLLOWING METHODS NEED TO BE IMPLEMENTED IN THE DERIVED CLASS
#
############################################################################
def findDir(self, path, connection=False):
""" Find directory ID for the given path
"""
return S_ERROR("To be implemented on derived class")
def findDirs(self, paths, connection=False):
""" Find DirIDs for the given path list
"""
return S_ERROR("To be implemented on derived class")
def makeDir(self, path):
return S_ERROR("To be implemented on derived class")
def removeDir(self, path):
return S_ERROR("To be implemented on derived class")
def getChildren(self, path, connection=False):
return S_ERROR("To be implemented on derived class")
def getDirectoryPath(self, dirID):
""" Get directory name by directory ID
"""
return S_ERROR("To be implemented on derived class")
def countSubdirectories(self, dirId, includeParent=True):
return S_ERROR("To be implemented on derived class")
def getSubdirectoriesByID(self, dirID, requestString=False, includeParent=False):
""" Get all the subdirectories of the given directory at a given level
"""
return S_ERROR("To be implemented on derived class")
##########################################################################
def _getConnection(self, connection):
if connection:
return connection
res = self.db._getConnection()
if res['OK']:
return res['Value']
gLogger.warn("Failed to get MySQL connection", res['Message'])
return connection
def getTreeTable(self):
""" Get the string of the Directory Tree type
"""
return self.treeTable
def setDatabase(self, database):
self.db = database
def makeDirectory(self, path, credDict, status=0):
"""Create a new directory. The return value is the dictionary
containing all the parameters of the newly created directory
"""
    if not path or path[0] != '/':
return S_ERROR('Not an absolute path')
# Strip off the trailing slash if necessary
if len(path) > 1 and path[-1] == '/':
path = path[:-1]
if path == '/':
# Create the root directory
l_uid = 0
l_gid = 0
else:
result = self.db.ugManager.getUserAndGroupID(credDict)
if not result['OK']:
return result
(l_uid, l_gid) = result['Value']
dirDict = {}
result = self.makeDir(path)
if not result['OK']:
return result
dirID = result['Value']
if result['NewDirectory']:
req = "INSERT INTO FC_DirectoryInfo (DirID,UID,GID,CreationDate,ModificationDate,Mode,Status) Values "
req = req + "(%d,%d,%d,UTC_TIMESTAMP(),UTC_TIMESTAMP(),%d,%d)" % (dirID, l_uid, l_gid, self.db.umask, status)
result = self.db._update(req)
if result['OK']:
resGet = self.getDirectoryParameters(dirID)
if resGet['OK']:
dirDict = resGet['Value']
else:
return S_OK(dirID)
if not dirDict:
self.removeDir(path)
return S_ERROR('Failed to create directory %s' % path)
return S_OK(dirID)
#####################################################################
def makeDirectories(self, path, credDict):
"""Make all the directories recursively in the path. The return value
    is the ID of the innermost created directory
"""
if not path or path[0] != '/':
return S_ERROR('Not an absolute path')
result = self.existsDir(path)
if not result['OK']:
return result
result = result['Value']
if result['Exists']:
return S_OK(result['DirID'])
if path == '/':
result = self.makeDirectory(path, credDict)
return result
parentDir = os.path.dirname(path)
result = self.existsDir(parentDir)
if not result['OK']:
return result
result = result['Value']
if result['Exists']:
result = self.makeDirectory(path, credDict)
else:
result = self.makeDirectories(parentDir, credDict)
if not result['OK']:
return result
result = self.makeDirectory(path, credDict)
return result
#####################################################################
def exists(self, lfns):
successful = {}
failed = {}
for lfn in lfns:
res = self.findDir(lfn)
      if not res['OK']:
        failed[lfn] = res['Message']
        continue
if not res['Value']:
successful[lfn] = False
else:
successful[lfn] = lfn
return S_OK({'Successful': successful, 'Failed': failed})
def existsDir(self, path):
""" Check the existence of the directory path
"""
result = self.findDir(path)
if not result['OK']:
return result
if result['Value']:
result = S_OK(int(result['Value']))
result['Exists'] = True
result['DirID'] = result['Value']
else:
result = S_OK(0)
result['Exists'] = False
return result
#####################################################################
def isDirectory(self, paths):
""" Checking for existence of directories
"""
successful = {}
failed = {}
for dir in paths:
result = self.existsDir(dir)
if not result['OK']:
failed[dir] = result['Message']
elif result['Value']['Exists']:
successful[dir] = True
else:
successful[dir] = False
return S_OK({'Successful': successful, 'Failed': failed})
#####################################################################
def createDirectory(self, dirs, credDict):
""" Checking for existence of directories
"""
successful = {}
failed = {}
for dir in dirs:
result = self.makeDirectories(dir, credDict)
if not result['OK']:
failed[dir] = result['Message']
else:
successful[dir] = True
return S_OK({'Successful': successful, 'Failed': failed})
#####################################################################
def isEmpty(self, path):
""" Find out if the given directory is empty
"""
# Check if there are subdirectories
result = self.getChildren(path)
if not result['OK']:
return result
childIDs = result['Value']
if childIDs:
return S_OK(False)
# Check if there are files
result = self.__getDirID(path)
if not result['OK']:
return result
dirID = result['Value']
result = self.db.fileManager.getFilesInDirectory(dirID)
if not result['OK']:
return result
files = result['Value']
if files:
return S_OK(False)
return S_OK(True)
#####################################################################
def removeDirectory(self, dirs, force=False):
"""Remove an empty directory from the catalog """
successful = {}
failed = {}
# Check if requested directories exist in the catalog
result = self.findDirs(dirs)
if not result['OK']:
return result
dirDict = result['Value']
for d in dirs:
if d not in dirDict:
successful[d] = "Directory does not exist"
for dir in dirDict:
result = self.isEmpty(dir)
if not result['OK']:
return result
if not result['Value']:
failed[dir] = 'Failed to remove non-empty directory'
continue
result = self.removeDir(dir)
if not result['OK']:
failed[dir] = result['Message']
else:
successful[dir] = result
return S_OK({'Successful': successful, 'Failed': failed})
#####################################################################
def __getDirID(self, path):
""" Get directory ID from the given path or already evaluated ID
"""
if isinstance(path, six.string_types):
result = self.findDir(path)
if not result['OK']:
return result
dirID = result['Value']
if not dirID:
return S_ERROR('%s: not found' % str(path))
return S_OK(dirID)
else:
return S_OK(path)
#####################################################################
def getDirectoryParameters(self, path):
""" Get the given directory parameters
"""
result = self.__getDirID(path)
if not result['OK']:
return result
dirID = result['Value']
query = "SELECT DirID,UID,GID,Status,Mode,CreationDate,ModificationDate from FC_DirectoryInfo"
query = query + " WHERE DirID=%d" % dirID
resQuery = self.db._query(query)
if not resQuery['OK']:
return resQuery
if not resQuery['Value']:
return S_ERROR('Directory not found')
dirDict = {}
dirDict['DirID'] = int(resQuery['Value'][0][0])
uid = int(resQuery['Value'][0][1])
dirDict['UID'] = uid
owner = 'unknown'
result = self.db.ugManager.getUserName(uid)
if result['OK']:
owner = result['Value']
dirDict['Owner'] = owner
gid = int(resQuery['Value'][0][2])
    dirDict['GID'] = gid
group = 'unknown'
result = self.db.ugManager.getGroupName(gid)
if result['OK']:
group = result['Value']
dirDict['OwnerGroup'] = group
dirDict['Status'] = int(resQuery['Value'][0][3])
dirDict['Mode'] = int(resQuery['Value'][0][4])
dirDict['CreationDate'] = resQuery['Value'][0][5]
dirDict['ModificationDate'] = resQuery['Value'][0][6]
return S_OK(dirDict)
#####################################################################
def _setDirectoryParameter(self, path, pname, pvalue):
""" Set a numerical directory parameter
:param mixed path: Directory path or paths as a string or directory ID as int,
list/tuple of ints or a string to select directory IDs
:param str pname: parameter name
:param int pvalue: parameter value
"""
result = getIDSelectString(path)
if not result['OK'] and isinstance(path, six.string_types):
result = self.__getDirID(path)
if not result['OK']:
return result
dirID = result['Value']
result = getIDSelectString(dirID)
if not result['OK']:
return result
dirIDString = result['Value']
req = "UPDATE FC_DirectoryInfo SET %s=%d, " \
"ModificationDate=UTC_TIMESTAMP() WHERE DirID IN ( %s )" % \
(pname, pvalue, dirIDString)
result = self.db._update(req)
return result
#####################################################################
def _setDirectoryGroup(self, path, gname):
""" Set the directory group
:param mixed path: directory path as a string or int or list of ints or select statement
    :param mixed gname: new group as a string or int gid
"""
result = self.db.ugManager.findGroup(gname)
if not result['OK']:
return result
gid = result['Value']
return self._setDirectoryParameter(path, 'GID', gid)
#####################################################################
def _setDirectoryOwner(self, path, owner):
""" Set the directory owner
:param mixed path: directory path as a string or int or list of ints or select statement
    :param mixed owner: new user as a string or int uid
"""
result = self.db.ugManager.findUser(owner)
if not result['OK']:
return result
uid = result['Value']
return self._setDirectoryParameter(path, 'UID', uid)
#####################################################################
def changeDirectoryOwner(self, paths, recursive=False):
""" Bulk setting of the directory owner
:param dictionary paths: dictionary < lfn : owner >
"""
return self._changeDirectoryParameter(paths,
self._setDirectoryOwner,
self.db.fileManager.setFileOwner,
recursive=recursive)
#####################################################################
def changeDirectoryGroup(self, paths, recursive=False):
""" Bulk setting of the directory group
:param dictionary paths: dictionary < lfn : group >
"""
return self._changeDirectoryParameter(paths,
self._setDirectoryGroup,
self.db.fileManager.setFileGroup,
recursive=recursive)
#####################################################################
def _setDirectoryMode(self, path, mode):
""" set the directory mode
:param mixed path: directory path as a string or int or list of ints or select statement
:param int mode: new mode
"""
return self._setDirectoryParameter(path, 'Mode', mode)
#####################################################################
def changeDirectoryMode(self, paths, recursive=False):
""" Bulk setting of the directory mode
:param dictionary paths: dictionary < lfn : mode >
"""
return self._changeDirectoryParameter(paths,
self._setDirectoryMode,
self.db.fileManager.setFileMode,
recursive=recursive)
#####################################################################
def _changeDirectoryParameter(self, paths,
directoryFunction,
fileFunction,
recursive=False):
""" Bulk setting of the directory parameter with recursion for all the subdirectories and files
:param dictionary paths: dictionary < lfn : value >, where value is the value of parameter to be set
:param function directoryFunction: function to change directory(ies) parameter
:param function fileFunction: function to change file(s) parameter
:param bool recursive: flag to apply the operation recursively
"""
arguments = paths
successful = {}
failed = {}
for path, attribute in arguments.items():
result = directoryFunction(path, attribute)
if not result['OK']:
failed[path] = result['Message']
continue
if recursive:
result = self.__getDirID(path)
if not result['OK']:
failed[path] = result['Message']
continue
dirID = result['Value']
result = self.getSubdirectoriesByID(dirID, requestString=True, includeParent=True)
if not result['OK']:
failed[path] = result['Message']
continue
subDirQuery = result['Value']
result = self.db.fileManager.getFileIDsInDirectory(subDirQuery, requestString=True)
if not result['OK']:
failed[path] = result['Message']
continue
fileQuery = result['Value']
result = directoryFunction(subDirQuery, attribute)
if not result['OK']:
failed[path] = result['Message']
continue
result = fileFunction(fileQuery, attribute)
if not result['OK']:
failed[path] = result['Message']
else:
successful[path] = True
else:
successful[path] = True
return S_OK({'Successful': successful, 'Failed': failed})
#####################################################################
def setDirectoryStatus(self, path, status):
""" set the directory status
"""
return self._setDirectoryParameter(path, 'Status', status)
def getPathPermissions(self, lfns, credDict):
""" Get permissions for the given user/group to manipulate the given lfns
"""
successful = {}
failed = {}
for path in lfns:
result = self.getDirectoryPermissions(path, credDict)
if not result['OK']:
failed[path] = result['Message']
else:
successful[path] = result['Value']
return S_OK({'Successful': successful, 'Failed': failed})
#####################################################################
def getDirectoryPermissions(self, path, credDict):
""" Get permissions for the given user/group to manipulate the given directory
"""
result = self.db.ugManager.getUserAndGroupID(credDict)
if not result['OK']:
return result
uid, gid = result['Value']
result = self.getDirectoryParameters(path)
if not result['OK']:
if "not found" in result['Message'] or "not exist" in result['Message']:
# If the directory does not exist, check the nearest parent for the permissions
if path == '/':
# Nothing yet exists, starting from the scratch
resultDict = {}
resultDict['Write'] = True
resultDict['Read'] = True
resultDict['Execute'] = True
return S_OK(resultDict)
else:
pDir = os.path.dirname(path)
if not pDir:
return S_ERROR('Illegal Path')
if pDir == path:
# If pDir == path, then we're stuck in a loop
# There is probably a "//" in the path
return S_ERROR('Bad Path (double /?)')
result = self.getDirectoryPermissions(pDir, credDict)
return result
else:
return result
dUid = result['Value']['UID']
dGid = result['Value']['GID']
mode = result['Value']['Mode']
owner = uid == dUid
group = gid == dGid
resultDict = {}
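    # The checks below follow the POSIX permission bits (stat.S_I[RWX]{USR,GRP,OTH}):
    # e.g. with mode 0o750 the owner gets rwx, the group r-x and others nothing.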
if self.db.globalReadAccess:
resultDict['Read'] = True
else:
resultDict['Read'] = (owner and mode & stat.S_IRUSR > 0)\
or (group and mode & stat.S_IRGRP > 0)\
or mode & stat.S_IROTH > 0
resultDict['Write'] = (owner and mode & stat.S_IWUSR > 0)\
or (group and mode & stat.S_IWGRP > 0)\
or mode & stat.S_IWOTH > 0
resultDict['Execute'] = (owner and mode & stat.S_IXUSR > 0)\
or (group and mode & stat.S_IXGRP > 0)\
or mode & stat.S_IXOTH > 0
return S_OK(resultDict)
def getFileIDsInDirectoryWithLimits(self, dirID, credDict, startItem=1, maxItems=25):
""" Get file IDs for the given directory
"""
dirs = dirID
if not isinstance(dirID, list):
dirs = [dirID]
if not dirs:
dirs = [-1]
dirListString = ','.join([str(dir) for dir in dirs])
req = "SELECT COUNT( DirID ) FROM FC_Files USE INDEX (DirID) WHERE DirID IN ( %s )" % dirListString
result = self.db._query(req)
if not result['OK']:
return result
totalRecords = result['Value'][0][0]
if not totalRecords:
result = S_OK([])
result['TotalRecords'] = totalRecords
return result
req = "SELECT FileID FROM FC_Files WHERE DirID IN ( %s ) LIMIT %s, %s " % (dirListString, startItem, maxItems)
result = self.db._query(req)
if not result['OK']:
return result
result = S_OK([fileId[0] for fileId in result['Value']])
result['TotalRecords'] = totalRecords
return result
def getFileLFNsInDirectory(self, dirID, credDict):
""" Get file lfns for the given directory or directory list
"""
dirs = dirID
if not isinstance(dirID, list):
dirs = [dirID]
dirListString = ','.join([str(dir) for dir in dirs])
treeTable = self.getTreeTable()
req = "SELECT CONCAT(D.DirName,'/',F.FileName) FROM FC_Files as F, %s as D WHERE D.DirID IN ( %s ) and D.DirID=F.DirID"
req = req % (treeTable, dirListString)
result = self.db._query(req)
if not result['OK']:
return result
lfnList = [x[0] for x in result['Value']]
return S_OK(lfnList)
def getFileLFNsInDirectoryByDirectory(self, dirIDList, credDict):
""" Get file LFNs and IDs for the given directory or directory list
:param list dirIDList: List of directory IDs
:param dict credDict: dictionary of user credentials
:return: S_OK/S_ERROR with Value dictionary {"DirLFNDict": dirLfnDict, "IDLFNDict": idLfnDict}
where dirLfnDict has the structure <directory_name>:<list of contained file names>,
idLfnDict has structure <fileID>:<LFN>
"""
dirs = dirIDList
if not isinstance(dirIDList, list):
dirs = [dirIDList]
dirListString = ','.join([str(dir_) for dir_ in dirs])
treeTable = self.getTreeTable()
req = "SELECT D.DirName,F.FileName,F.FileID FROM FC_Files as F, %s as D WHERE D.DirID IN ( %s ) and D.DirID=F.DirID"
req = req % (treeTable, dirListString)
result = self.db._query(req)
if not result['OK']:
return result
dirLfnDict = {}
idLfnDict = {}
for dir_, fname, fileID in result['Value']:
dirLfnDict.setdefault(dir_, []).append(fname)
idLfnDict[fileID] = dir_ + '/' + fname
return S_OK({"DirLFNDict": dirLfnDict,
"IDLFNDict": idLfnDict})
def _getDirectoryContents(self, path, details=False):
""" Get contents of a given directory
"""
result = self.findDir(path)
if not result['OK']:
return result
directoryID = result['Value']
directories = {}
files = {}
links = {}
result = self.getChildren(path)
if not result['OK']:
return result
# Get subdirectories
dirIDList = result['Value']
for dirID in dirIDList:
result = self.getDirectoryPath(dirID)
if not result['OK']:
return result
dirName = result['Value']
if details:
result = self.getDirectoryParameters(dirID)
if not result['OK']:
directories[dirName] = False
else:
directories[dirName] = result['Value']
else:
directories[dirName] = True
result = self.db.fileManager.getFilesInDirectory(directoryID, verbose=details)
if not result['OK']:
return result
files = result['Value']
result = self.db.datasetManager.getDatasetsInDirectory(directoryID, verbose=details)
if not result['OK']:
return result
datasets = result['Value']
pathDict = {'Files': files, 'SubDirs': directories, 'Links': links, 'Datasets': datasets}
return S_OK(pathDict)
def listDirectory(self, lfns, verbose=False):
""" Get the directory listing
"""
successful = {}
failed = {}
for path in lfns:
result = self._getDirectoryContents(path, details=verbose)
if not result['OK']:
failed[path] = result['Message']
else:
successful[path] = result['Value']
return S_OK({'Successful': successful, 'Failed': failed})
def getDirectoryReplicas(self, lfns, allStatus=False):
""" Get replicas for files in the given directories
"""
successful = {}
failed = {}
for path in lfns:
result = self.findDir(path)
if not result['OK']:
failed[path] = result['Message']
continue
directoryID = result['Value']
result = self.db.fileManager.getDirectoryReplicas(directoryID, path, allStatus)
if not result['OK']:
failed[path] = result['Message']
continue
fileDict = result['Value']
successful[path] = {}
for fileName in fileDict:
successful[path][fileName] = fileDict[fileName]
return S_OK({'Successful': successful, 'Failed': failed})
def getDirectorySize(self, lfns, longOutput=False, rawFileTables=False, recursiveSum=True):
"""
Get the total size of the requested directories. If longOutput flag
is True, get also physical size per Storage Element
:param bool longOutput: if True, also fetches the physical size per SE
:param bool rawFileTables: if True, uses the File table instead of the pre-computed values
:param bool recursiveSum: if True (default), takes into account subdirectories
"""
start = time.time()
result = self.db._getConnection()
if not result['OK']:
return result
connection = result['Value']
if rawFileTables:
resultLogical = self._getDirectoryLogicalSize(lfns, recursiveSum=recursiveSum, connection=connection)
else:
resultLogical = self._getDirectoryLogicalSizeFromUsage(lfns, recursiveSum=recursiveSum, connection=connection)
if not resultLogical['OK']:
connection.close()
return resultLogical
resultDict = resultLogical['Value']
if not resultDict['Successful']:
connection.close()
return resultLogical
if longOutput:
# Continue with only successful directories
if rawFileTables:
resultPhysical = self._getDirectoryPhysicalSize(
resultDict['Successful'], recursiveSum=recursiveSum, connection=connection)
else:
resultPhysical = self._getDirectoryPhysicalSizeFromUsage(
resultDict['Successful'], recursiveSum=recursiveSum, connection=connection)
if not resultPhysical['OK']:
resultDict['QueryTime'] = time.time() - start
result = S_OK(resultDict)
result['Message'] = "Failed to get the physical size on storage"
connection.close()
return result
for lfn in resultPhysical['Value']['Successful']:
resultDict['Successful'][lfn]['PhysicalSize'] = resultPhysical['Value']['Successful'][lfn]
connection.close()
resultDict['QueryTime'] = time.time() - start
return S_OK(resultDict)
def _getDirectoryLogicalSizeFromUsage(self, lfns, recursiveSum=True, connection=None):
""" Get the total "logical" size of the requested directories
:param recursiveSum: If false, don't take subdir into account
"""
if not recursiveSum:
return S_ERROR("Not implemented")
successful = {}
failed = {}
for path in lfns:
result = self.findDir(path)
if not result['OK']:
failed[path] = "Directory not found"
continue
if not result['Value']:
failed[path] = "Directory not found"
continue
dirID = result['Value']
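      # By convention the SEID=0 row of FC_DirectoryUsage holds the logical
      # (storage-independent) size and file count of the directory.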
req = "SELECT SESize, SEFiles FROM FC_DirectoryUsage WHERE SEID=0 AND DirID=%d" % dirID
result = self.db._query(req, connection)
if not result['OK']:
failed[path] = result['Message']
elif not result['Value']:
successful[path] = {"LogicalSize": 0, "LogicalFiles": 0, 'LogicalDirectories': 0}
elif result['Value'][0][0]:
successful[path] = {"LogicalSize": int(result['Value'][0][0]),
"LogicalFiles": int(result['Value'][0][1])}
result = self.countSubdirectories(dirID, includeParent=False)
if result['OK']:
successful[path]['LogicalDirectories'] = result['Value']
else:
successful[path]['LogicalDirectories'] = -1
else:
successful[path] = {"LogicalSize": 0, "LogicalFiles": 0, 'LogicalDirectories': 0}
return S_OK({'Successful': successful, 'Failed': failed})
def _getDirectoryLogicalSize(self, lfns, recursiveSum=True, connection=None):
""" Get the total "logical" size of the requested directories
:param bool recursiveSum: If false, don't take subdir into account
"""
if not recursiveSum:
return S_ERROR("Not implemented")
successful = {}
failed = {}
treeTable = self.getTreeTable()
for path in lfns:
if path == "/":
req = "SELECT SUM(Size),COUNT(*) FROM FC_Files"
reqDir = "SELECT count(*) FROM %s" % treeTable
else:
result = self.findDir(path)
if not result['OK']:
failed[path] = "Directory not found"
continue
if not result['Value']:
failed[path] = "Directory not found"
continue
dirID = result['Value']
result = self.getSubdirectoriesByID(dirID, requestString=True, includeParent=True)
if not result['OK']:
failed[path] = result['Message']
continue
else:
dirString = result['Value']
req = "SELECT SUM(F.Size),COUNT(*) FROM FC_Files as F JOIN (%s) as T WHERE F.DirID=T.DirID" % dirString
reqDir = dirString.replace('SELECT DirID FROM', 'SELECT count(*) FROM')
result = self.db._query(req, connection)
if not result['OK']:
failed[path] = result['Message']
elif not result['Value']:
successful[path] = {"LogicalSize": 0, "LogicalFiles": 0, 'LogicalDirectories': 0}
elif result['Value'][0][0]:
successful[path] = {"LogicalSize": int(result['Value'][0][0]),
"LogicalFiles": int(result['Value'][0][1])}
result = self.db._query(reqDir, connection)
if result['OK'] and result['Value']:
successful[path]['LogicalDirectories'] = result['Value'][0][0] - 1
else:
successful[path]['LogicalDirectories'] = -1
else:
successful[path] = {"LogicalSize": 0, "LogicalFiles": 0, 'LogicalDirectories': 0}
return S_OK({'Successful': successful, 'Failed': failed})
def _getDirectoryPhysicalSizeFromUsage(self, lfns, recursiveSum=True, connection=None):
""" Get the total size of the requested directories
:param recursiveSum: If false, don't take subdir into account
"""
if not recursiveSum:
return S_ERROR("Not implemented")
successful = {}
failed = {}
for path in lfns:
result = self.findDir(path)
if not result['OK']:
failed[path] = "Directory not found"
continue
if not result['Value']:
failed[path] = "Directory not found"
continue
dirID = result['Value']
req = "SELECT S.SEID, S.SEName, D.SESize, D.SEFiles FROM FC_DirectoryUsage as D, FC_StorageElements as S"
req += " WHERE S.SEID=D.SEID AND D.DirID=%d" % dirID
result = self.db._query(req, connection)
if not result['OK']:
failed[path] = result['Message']
elif not result['Value']:
successful[path] = {}
elif result['Value'][0][0]:
seDict = {}
totalSize = 0
totalFiles = 0
for seID, seName, seSize, seFiles in result['Value']:
if seSize or seFiles:
seDict[seName] = {'Size': seSize, 'Files': seFiles}
totalSize += seSize
totalFiles += seFiles
else:
req = 'DELETE FROM FC_DirectoryUsage WHERE SEID=%d AND DirID=%d' % (seID, dirID)
result = self.db._update(req)
if not result['OK']:
gLogger.error('Failed to delete entry from FC_DirectoryUsage', result['Message'])
seDict['TotalSize'] = int(totalSize)
seDict['TotalFiles'] = int(totalFiles)
successful[path] = seDict
else:
successful[path] = {}
return S_OK({'Successful': successful, 'Failed': failed})
def _getDirectoryPhysicalSizeFromUsage_old(self, lfns, connection):
""" Get the total size of the requested directories
"""
successful = {}
failed = {}
for path in lfns:
if path == '/':
req = "SELECT S.SEName, D.SESize, D.SEFiles FROM FC_DirectoryUsage as D, FC_StorageElements as S"
req += " WHERE S.SEID=D.SEID"
else:
result = self.findDir(path)
if not result['OK']:
failed[path] = "Directory not found"
continue
if not result['Value']:
failed[path] = "Directory not found"
continue
dirID = result['Value']
result = self.getSubdirectoriesByID(dirID, requestString=True, includeParent=True)
if not result['OK']:
return result
subDirString = result['Value']
req = "SELECT S.SEName, D.SESize, D.SEFiles FROM FC_DirectoryUsage as D, FC_StorageElements as S"
req += " JOIN (%s) AS F" % subDirString
req += " WHERE S.SEID=D.SEID AND D.DirID=F.DirID"
result = self.db._query(req, connection)
if not result['OK']:
failed[path] = result['Message']
elif not result['Value']:
successful[path] = {}
elif result['Value'][0][0]:
seDict = {}
totalSize = 0
totalFiles = 0
for seName, seSize, seFiles in result['Value']:
sfDict = seDict.get(seName, {'Size': 0, 'Files': 0})
sfDict['Size'] += seSize
sfDict['Files'] += seFiles
seDict[seName] = sfDict
totalSize += seSize
totalFiles += seFiles
seDict['TotalSize'] = int(totalSize)
seDict['TotalFiles'] = int(totalFiles)
successful[path] = seDict
else:
successful[path] = {}
return S_OK({'Successful': successful, 'Failed': failed})
def _getDirectoryPhysicalSize(self, lfns, recursiveSum=True, connection=None):
""" Get the total size of the requested directories
:param recursiveSum: If false, don't take subdir into account
"""
if not recursiveSum:
return S_ERROR("Not implemented")
successful = {}
failed = {}
for path in lfns:
if path == '/':
req = "SELECT SUM(F.Size),COUNT(F.Size),S.SEName from FC_Files as F, FC_Replicas as R, FC_StorageElements as S "
req += "WHERE R.SEID=S.SEID AND F.FileID=R.FileID "
req += "GROUP BY S.SEID"
else:
result = self.findDir(path)
if not result['OK']:
failed[path] = "Directory not found"
continue
if not result['Value']:
failed[path] = "Directory not found"
continue
dirID = result['Value']
result = self.getSubdirectoriesByID(dirID, requestString=True, includeParent=True)
if not result['OK']:
failed[path] = result['Message']
continue
else:
dirString = result['Value']
req = "SELECT SUM(F.Size),COUNT(F.Size),S.SEName from FC_Files as F, FC_Replicas as R, FC_StorageElements as S JOIN (%s) as T " % dirString
req += "WHERE R.SEID=S.SEID AND F.FileID=R.FileID AND F.DirID=T.DirID "
req += "GROUP BY S.SEID"
result = self.db._query(req, connection)
if not result['OK']:
failed[path] = result['Message']
elif not result['Value']:
successful[path] = {}
elif result['Value'][0][0]:
seDict = {}
totalSize = 0
totalFiles = 0
for size, files, seName in result['Value']:
seDict[seName] = {"Size": int(size), "Files": int(files)}
totalSize += size
totalFiles += files
seDict['TotalSize'] = int(totalSize)
seDict['TotalFiles'] = int(totalFiles)
successful[path] = seDict
else:
successful[path] = {}
return S_OK({'Successful': successful, 'Failed': failed})
def _rebuildDirectoryUsage(self):
""" Recreate and replenish the Storage Usage tables
"""
req = "DROP TABLE IF EXISTS FC_DirectoryUsage_backup"
result = self.db._update(req)
req = "RENAME TABLE FC_DirectoryUsage TO FC_DirectoryUsage_backup"
result = self.db._update(req)
req = "CREATE TABLE `FC_DirectoryUsage` LIKE `FC_DirectoryUsage_backup`"
result = self.db._update(req)
if not result['OK']:
return result
result = self.__rebuildDirectoryUsageLeaves()
if not result['OK']:
return result
result = self.db.dtree.findDir('/')
if not result['OK']:
return result
if not result['Value']:
return S_ERROR('Directory / not found')
dirID = result['Value']
result = self.__rebuildDirectoryUsage(dirID)
gLogger.verbose('Finished rebuilding Directory Usage')
return result
def __rebuildDirectoryUsageLeaves(self):
""" Rebuild DirectoryUsage entries for directories having files
"""
req = 'SELECT DISTINCT(DirID) FROM FC_Files'
result = self.db._query(req)
if not result['OK']:
return result
dirIDs = [x[0] for x in result['Value']]
gLogger.verbose('Starting rebuilding Directory Usage, number of visible directories %d' % len(dirIDs))
insertFields = ['DirID', 'SEID', 'SESize', 'SEFiles', 'LastUpdate']
insertCount = 0
insertValues = []
count = 0
empty = 0
for dirID in dirIDs:
count += 1
# Get the physical size
req = "SELECT SUM(F.Size),COUNT(F.Size),R.SEID from FC_Files as F, FC_Replicas as R "
req += "WHERE F.FileID=R.FileID AND F.DirID=%d GROUP BY R.SEID" % int(dirID)
result = self.db._query(req)
if not result['OK']:
return result
if not result['Value']:
empty += 1
for seSize, seFiles, seID in result['Value']:
insertValues = [dirID, seID, seSize, seFiles, 'UTC_TIMESTAMP()']
result = self.db.insertFields('FC_DirectoryUsage', insertFields, insertValues)
if not result['OK']:
if "Duplicate" in result['Message']:
req = "UPDATE FC_DirectoryUsage SET SESize=%d, SEFiles=%d, LastUpdate=UTC_TIMESTAMP()" % (seSize, seFiles)
req += " WHERE DirID=%s AND SEID=%s" % (dirID, seID)
result = self.db._update(req)
if not result['OK']:
return result
          else:
            return result
# Get the logical size
req = "SELECT SUM(Size),COUNT(Size) from FC_Files WHERE DirID=%d " % int(dirID)
result = self.db._query(req)
if not result['OK']:
return result
if not result['Value']:
return S_ERROR('Empty directory')
seSize, seFiles = result['Value'][0]
insertValues = [dirID, 0, seSize, seFiles, 'UTC_TIMESTAMP()']
result = self.db.insertFields('FC_DirectoryUsage', insertFields, insertValues)
if not result['OK']:
if "Duplicate" in result['Message']:
req = "UPDATE FC_DirectoryUsage SET SESize=%d, SEFiles=%d, LastUpdate=UTC_TIMESTAMP()" % (seSize, seFiles)
req += " WHERE DirID=%s AND SEID=0" % dirID
result = self.db._update(req)
if not result['OK']:
return result
else:
return result
gLogger.verbose("Processed %d directories, %d empty " % (count, empty))
return S_OK()
def __rebuildDirectoryUsage(self, directoryID):
""" Rebuild DirectoryUsage entries recursively for the given path
"""
result = self.getChildren(directoryID)
if not result['OK']:
return result
dirIDs = result['Value']
resultDict = {}
for dirID in dirIDs:
result = self.__rebuildDirectoryUsage(dirID)
if not result['OK']:
return result
dirDict = result['Value']
for seID in dirDict:
resultDict.setdefault(seID, {'Size': 0, 'Files': 0})
resultDict[seID]['Size'] += dirDict[seID]['Size']
resultDict[seID]['Files'] += dirDict[seID]['Files']
insertFields = ['DirID', 'SEID', 'SESize', 'SEFiles', 'LastUpdate']
insertValues = []
for seID in resultDict:
size = resultDict[seID]['Size']
files = resultDict[seID]['Files']
req = "UPDATE FC_DirectoryUsage SET SESize=SESize+%d, SEFiles=SEFiles+%d WHERE DirID=%d AND SEID=%d"
req = req % (size, files, directoryID, seID)
result = self.db._update(req)
if not result['OK']:
return result
if not result['Value']:
insertValues = [directoryID, seID, size, files, 'UTC_TIMESTAMP()']
result = self.db.insertFields('FC_DirectoryUsage', insertFields, insertValues)
if not result['OK']:
return result
req = "SELECT SEID,SESize,SEFiles from FC_DirectoryUsage WHERE DirID=%d" % directoryID
result = self.db._query(req)
if not result['OK']:
return result
resultDict = {}
for seid, size, files in result['Value']:
resultDict[seid] = {'Size': size, 'Files': files}
return S_OK(resultDict)
def getDirectoryCounters(self, connection=False):
""" Get the total number of directories
"""
    connection = self._getConnection(connection)
resultDict = {}
req = "SELECT COUNT(*) from FC_DirectoryInfo"
res = self.db._query(req, connection)
if not res['OK']:
return res
resultDict['Directories'] = res['Value'][0][0]
treeTable = self.getTreeTable()
req = "SELECT COUNT(DirID) FROM %s WHERE Parent NOT IN ( SELECT DirID from %s )" % (treeTable, treeTable)
req += " AND DirID <> 1"
res = self.db._query(req, connection)
if not res['OK']:
return res
resultDict['Orphan Directories'] = res['Value'][0][0]
req = "SELECT COUNT(DirID) FROM %s WHERE DirID NOT IN ( SELECT Parent from %s )" % (treeTable, treeTable)
req += " AND DirID NOT IN ( SELECT DirID from FC_Files ) "
res = self.db._query(req, connection)
if not res['OK']:
return res
resultDict['Empty Directories'] = res['Value'][0][0]
req = "SELECT COUNT(DirID) FROM %s WHERE DirID NOT IN ( SELECT DirID FROM FC_DirectoryInfo )" % treeTable
res = self.db._query(req, connection)
if not res['OK']:
return res
resultDict['DirTree w/o DirInfo'] = res['Value'][0][0]
req = "SELECT COUNT(DirID) FROM FC_DirectoryInfo WHERE DirID NOT IN ( SELECT DirID FROM %s )" % treeTable
res = self.db._query(req, connection)
if not res['OK']:
return res
resultDict['DirInfo w/o DirTree'] = res['Value'][0][0]
return S_OK(resultDict)
| gpl-3.0 | -1,730,127,434,162,081,000 | 33.211268 | 149 | 0.588187 | false |
Dev-Cloud-Platform/Dev-Cloud | dev_cloud/cc1/src/wi/tests/main_test.py | 2 | 5683 | # -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
# -*- coding: utf-8 -*-
"""@package src.wi.tests.main_test
@author Piotr Wójcik
@author Krzysztof Danielowski
@date 11.10.2012
"""
from wi.tests import WiTestCase
import unittest
class MainTests(WiTestCase, unittest.TestCase):
def _test_news_create(self):
driver = self.driver
self.base_url = self.TEST_SERVER
self.login_testuser(self.TEST_admin_cm)
driver.get(self.base_url + "/news/")
self.wait_for_text("//a[@id='main_create_news']", ["Create a news entry"])
driver.find_element_by_id("main_create_news").click()
self.wait_for_text("//div[@id='dialog-div']/form/div/fieldset/div/span", ["Topic"])
driver.find_element_by_id("id_topic").clear()
driver.find_element_by_id("id_topic").send_keys("witest")
driver.find_element_by_id("id_content").clear()
driver.find_element_by_id("id_content").send_keys("test")
driver.find_element_by_css_selector("button.ok-button.mid_button").click()
self.wait_for_message(["News entry added."])
driver.find_element_by_link_text("Logout").click()
def _test_news_create_fail_required(self):
driver = self.driver
self.base_url = self.TEST_SERVER
self.login_testuser(self.TEST_admin_cm)
driver.get(self.base_url + "/news/")
self.wait_for_text("//a[@id='main_create_news']", ["Create a news entry"])
driver.find_element_by_id("main_create_news").click()
self.wait_for_text("//div[@id='dialog-div']/form/div/fieldset/div/span", ["Topic"])
driver.find_element_by_id("id_topic").clear()
driver.find_element_by_id("id_content").clear()
driver.find_element_by_css_selector("button.ok-button.mid_button").click()
self.wait_for_text("//div[@id='dialog-div']/form/div/fieldset/div[1]/ul/li", ["This field is required."])
self.wait_for_text("//div[@id='dialog-div']/form/div/fieldset/div[2]/ul/li", ["This field is required."])
driver.find_element_by_link_text("Logout").click()
def _test_news_create_sticky(self):
driver = self.driver
self.base_url = self.TEST_SERVER
self.login_testuser(self.TEST_admin_cm)
driver.get(self.base_url + "/news/")
self.wait_for_text("//a[@id='main_create_news']", ["Create a news entry"])
driver.find_element_by_id("main_create_news").click()
self.wait_for_text("//div[@id='dialog-div']/form/div/fieldset/div/span", ["Topic"])
driver.find_element_by_id("id_topic").clear()
driver.find_element_by_id("id_topic").send_keys("witest")
driver.find_element_by_id("id_content").clear()
driver.find_element_by_id("id_content").send_keys("test")
driver.find_element_by_id("id_sticky").click()
driver.find_element_by_css_selector("button.ok-button.mid_button").click()
self.wait_for_message(["News entry added."])
driver.find_element_by_link_text("Logout").click()
def _test_news_edit(self, topic):
driver = self.driver
self.base_url = self.TEST_SERVER
self.login_testuser(self.TEST_admin_cm)
driver.get(self.base_url + "/news/")
self.wait_for_text("//div[@id='item-list']/div/div[2]", ["witest"])
driver.find_element_by_id("main_edit_news").click()
self.wait_for_text("//div[@id='dialog-div']/form/div/fieldset/div/span", ["Topic"])
driver.find_element_by_id("id_topic").clear()
driver.find_element_by_id("id_topic").send_keys(topic)
driver.find_element_by_id("id_content").clear()
driver.find_element_by_id("id_content").send_keys("test2")
driver.find_element_by_id("id_sticky").click()
driver.find_element_by_css_selector("button.ok-button.mid_button").click()
self.wait_for_message(["News entry edited."])
driver.find_element_by_link_text("Logout").click()
def _test_news_remove(self, topic):
driver = self.driver
self.base_url = self.TEST_SERVER
self.login_testuser(self.TEST_admin_cm)
driver.get(self.base_url + "/news/")
self.wait_for_text("//div[@id='item-list']/div/div[2]", [topic])
driver.find_element_by_id("main_remove_news").click()
self.wait_for_text("//div[@id='dialog-div']/p", ["Do you want to delete news entry"])
driver.find_element_by_css_selector("button.ok-button.mid_button").click()
self.wait_for_message(["You have successfully removed news entry"])
driver.find_element_by_link_text("Logout").click()
def test_1_simple(self):
self._test_news_create()
topic = 'witest'
self._test_news_edit(topic)
self._test_news_remove(topic)
def test_2_fails(self):
self._test_news_create_fail_required()
def test_3_utf8_edit(self):
self._test_news_create()
topic = u'ąśłęąĄŁŁ'
self._test_news_edit(topic)
self._test_news_remove(topic)
| apache-2.0 | -475,982,100,918,563,500 | 34.685535 | 113 | 0.633944 | false |
yanlend/scikit-learn | examples/datasets/plot_iris_dataset.py | 283 | 1928 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows are the samples and the columns are:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause | 8,982,145,473,431,464,000 | 27.761194 | 72 | 0.655423 | false |
jetty-project/FrameworkBenchmarks | toolset/benchmark/test_types/plaintext_type.py | 8 | 2358 | from toolset.benchmark.test_types.framework_test_type import FrameworkTestType
from toolset.benchmark.test_types.verifications import basic_body_verification, verify_headers
from time import sleep
class PlaintextTestType(FrameworkTestType):
def __init__(self, config):
self.plaintext_url = ""
kwargs = {
'name': 'plaintext',
'requires_db': False,
'accept_header': self.accept('plaintext'),
'args': ['plaintext_url']
}
FrameworkTestType.__init__(self, config, **kwargs)
def verify(self, base_url):
url = base_url + self.plaintext_url
headers, body = self.request_headers_and_body(url)
_, problems = basic_body_verification(body, url, is_json_check=False)
if len(problems) > 0:
return problems
# Case insensitive
body = body.lower()
expected = "hello, world!"
extra_bytes = len(body) - len(expected)
if expected not in body:
return [('fail', "Could not find 'Hello, World!' in response.",
url)]
if extra_bytes > 0:
problems.append(
('warn',
("Server is returning %s more bytes than are required. "
"This may negatively affect benchmark performance." %
extra_bytes), url))
problems += verify_headers(self.request_headers_and_body, headers, url, should_be='plaintext')
if len(problems) == 0:
return [('pass', '', url)]
else:
return problems
def get_url(self):
return self.plaintext_url
def get_script_name(self):
return 'pipeline.sh'
def get_script_variables(self, name, url):
return {
'max_concurrency':
max(self.config.concurrency_levels),
'name':
name,
'duration':
self.config.duration,
'levels':
" ".join("{}".format(item)
for item in self.config.pipeline_concurrency_levels),
'server_host':
self.config.server_host,
'url':
url,
'pipeline':
16,
'accept':
"text/plain,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
}
| bsd-3-clause | -7,511,875,494,952,499,000 | 30.864865 | 102 | 0.540712 | false |
girving/tensorflow | tensorflow/python/ops/cond_v2.py | 1 | 19004 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""cond_v2 and gradient.
This is a version of cond that emits a single If op, as well as the gradient
function for If ops produced by cond_v2. This will eventually replace the
current tf.cond implementation once it reaches feature and performance parity.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import function
from tensorflow.python.framework import function_def_to_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import gen_functional_ops
from tensorflow.python.ops import gradients_impl
# NOTE(skyewm): TensorFlow uses protected class methods and fields to signify
# that they aren't part of the official public API. These protected members
# often need to be used by implementation code however. Rather than litter the
# code with pylint comments, we ignore protected access violations for
# readability.
# pylint: disable=protected-access
def cond_v2(pred, true_fn, false_fn, name="cond"):
"""Like tf.cond, except emits a single If op."""
if isinstance(pred, bool):
raise TypeError("pred must not be a Python bool", pred)
if not name:
name = "cond"
with ops.name_scope(name) as scope:
with ops.name_scope(None):
# Find the outer most graph for uniquing function names.
# TODO(jpienaar): Make this work in eager mode.
graph = ops.get_default_graph()
while isinstance(graph, function.FuncGraph):
graph = graph.outer_graph
true_name = graph.unique_name(("%strue" % scope).replace("/", "_"))
false_name = graph.unique_name(("%sfalse" % scope).replace("/", "_"))
true_graph = function.func_graph_from_py_func(
true_name, true_fn, [], {})
false_graph = function.func_graph_from_py_func(
false_name, false_fn, [], {})
_check_same_outputs(true_graph, false_graph)
# Add inputs to true_graph and false_graph to make them match. Note that
# this modifies true_graph and false_graph.
cond_inputs = _make_inputs_match(true_graph, false_graph,
true_graph.external_captures,
false_graph.external_captures)
# Add all intermediate tensors as function outputs so they're available for
# the gradient computation.
true_intermediates = _get_intermediates(true_graph)
false_intermediates = _get_intermediates(false_graph)
# Save the original number of outputs to return to the caller.
num_cond_outputs = len(true_graph.outputs)
# Make the number/type of new intermediate outputs match.
extra_true_outputs, extra_false_outputs = _pad_params(
true_graph, false_graph, true_intermediates, false_intermediates)
true_graph.outputs.extend(extra_true_outputs)
false_graph.outputs.extend(extra_false_outputs)
# Create the If op.
tensors = gen_functional_ops._if( # pylint: disable=protected-access
pred,
cond_inputs, [t.dtype for t in true_graph.outputs],
_create_new_tf_function(true_graph),
_create_new_tf_function(false_graph),
output_shapes=_get_output_shapes(true_graph.outputs,
false_graph.outputs),
name=scope)
# Set the flag to enable lowering on the `if` op if necessary
# Lowering allows cond_v2 to avoid some of the limitations of Functions,
# allowing users to specify devices & colocation inside of cond_v2 branches,
# and enabling non-strict evaluation & partial pruning of cond_v2 branches.
# This brings cond_v2 closer to feature parity with tf.cond.
#
# However, we do not lower `If` in the XLA context because it is easier for
# XLA to apply its own optimizations when dealing with un-lowered `If`
# operators than with lowered switch/merge control flow.
#
# TODO(b/110167197) this approach requires cond_v2 to have at least 1 output
if_op = tensors[0].op
if not control_flow_util.IsInXLAContext(if_op):
# pylint: disable=protected-access
if_op._set_attr("_lower_using_switch_merge",
attr_value_pb2.AttrValue(b=True))
# pylint: enable=protected-access
result = tuple(tensors[:num_cond_outputs])
if len(result) == 1:
return result[0]
else:
return result
@ops.RegisterGradient("If")
def _IfGrad(op, *grads): # pylint: disable=invalid-name
"""The gradient of an If op produced by cond_v2."""
true_graph, false_graph = _get_func_graphs(op)
# Note: op.graph != ops.get_default_graph() when we are computing the gradient
# of a nested cond.
assert true_graph.outer_graph == op.graph
assert false_graph.outer_graph == op.graph
# Create grad functions that compute the gradient of the true/false forward
# graphs. These functions will capture tensors from the forward pass
# functions.
true_grad_graph = _create_grad_func(
true_graph, grads, _get_grad_fn_name(true_graph))
false_grad_graph = _create_grad_func(
false_graph, grads, _get_grad_fn_name(false_graph))
assert ([t.dtype for t in true_grad_graph.outputs] ==
[t.dtype for t in false_grad_graph.outputs])
# Resolve references to forward graph tensors in grad graphs and ensure
# they are in-scope, i.e., belong to one of outer graphs of the grad graph.
true_grad_inputs = _resolve_grad_inputs(true_graph, true_grad_graph)
false_grad_inputs = _resolve_grad_inputs(false_graph, false_grad_graph)
# Make the inputs to true_grad_graph and false_grad_graph match. Note that
# this modifies true_grad_graph and false_grad_graph.
grad_inputs = _make_inputs_match(true_grad_graph, false_grad_graph,
true_grad_inputs, false_grad_inputs)
# Add all intermediate tensors as function outputs so they're available for
# higher-order gradient computations.
true_grad_intermediates = _get_intermediates(true_grad_graph)
false_grad_intermediates = _get_intermediates(false_grad_graph)
# Save the original number of gradient outputs to return.
num_grad_outputs = len(true_grad_graph.outputs)
# Make the number/type of new intermediate outputs match.
extra_true_grad_outputs, extra_false_grad_outputs = _pad_params(
true_grad_graph, false_grad_graph,
true_grad_intermediates, false_grad_intermediates)
true_grad_graph.outputs.extend(extra_true_grad_outputs)
false_grad_graph.outputs.extend(extra_false_grad_outputs)
# Create the gradient If op.
tensors = gen_functional_ops._if(
op.inputs[0],
grad_inputs, [t.dtype for t in true_grad_graph.outputs],
_create_new_tf_function(true_grad_graph),
_create_new_tf_function(false_grad_graph),
output_shapes=_get_output_shapes(true_grad_graph.outputs,
false_grad_graph.outputs))
# The predicate has no gradient.
return [None] + tensors[:num_grad_outputs]
def _get_func_graphs(if_op):
"""Returns `FuncGraph`s for the input op branches.
Args:
if_op: The _If Operation.
Returns:
A 2-tuple of the `FuncGraph`s of the then_branch and else_branch.
"""
def _get_func_graph_for_branch(branch_name):
"""Generates and returns a FuncGraph for the given branch."""
inputs = if_op.inputs[1:] # First input is pred.
input_shapes = [t.shape for t in inputs]
func_name = if_op.get_attr(branch_name).name
fdef = if_op.graph._get_function(func_name).definition
# `if_op.graph` may not be the same as `ops.get_default_graph()` e.g.
# in the case of nested if ops or when the gradient is being computed
# from inside a Defun. We build the `func_graph` with `if_op.graph` as its
# `outer_graph`. This resembles how the `FuncGraph` was built in the
# forward pass. We need this so that we can resolve references to tensors
# in `func_graph` from its gradient graph in `_resolve_grad_inputs`.
with if_op.graph.as_default():
func_graph = function_def_to_graph.function_def_to_graph(
fdef, input_shapes)
func_graph.captures = collections.OrderedDict(zip(inputs,
func_graph.inputs))
# Set the if op so that the gradient code can use it.
func_graph._if = if_op
return func_graph
return (_get_func_graph_for_branch("then_branch"),
_get_func_graph_for_branch("else_branch"))
def _grad_fn(func_graph, grads):
"""The gradient function for each conditional branch.
This function builds the gradient graph of the corresponding forward-pass
conditional branch in `func_graph`. This is done by differentiating
func_graph's outputs w.r.t. its inputs.
Args:
func_graph: function.FuncGraph. The corresponding forward-pass function.
grads: The list of input gradient Tensors.
Returns:
The output gradient Tensors.
"""
# Filter out untrainable function outputs.
# NOTE(skyewm): If we don't do this, the untrainable tensors can sometimes
# cause _GradientsHelper to raise an exception (e.g. the implementation
# doesn't expect 'ys' to contain boolean tensors).
assert len(func_graph.outputs) == len(grads)
ys = []
grad_ys = []
for y, grad_y in zip(func_graph.outputs, grads):
if not gradients_impl._IsTrainable(y):
continue
ys.append(y)
grad_ys.append(grad_y)
# Build the gradient graph. Note that this builds the gradient computation of
# func_graph in the current graph, which requires capturing tensors from
# func_graph. The captured func_graph tensors are resolved to external tensors
# in _resolve_grad_inputs.
result = gradients_impl._GradientsHelper(
ys, func_graph.inputs, grad_ys=grad_ys,
src_graph=func_graph)
# Functions can't return None; replace Nones with zero tensors.
# TODO(b/80444525): don't return anything here and make _IfGrad return None if
# both branches have zero gradient.
for i in range(len(result)):
if result[i] is None:
result[i] = array_ops.zeros_like(func_graph.inputs[i])
return result
def _create_grad_func(func_graph, grads, name):
"""Returns the FuncGraph representation of _grad_fn."""
return function.func_graph_from_py_func(
name, lambda: _grad_fn(func_graph, grads), [], {})
def _resolve_grad_inputs(cond_graph, grad_graph):
"""Returns the tensors to pass as inputs to `grad_graph`.
The `grad_graph` may have external references to
1. Its outer graph containing the input gradients. These references are kept
as is.
2. Tensors in the forward pass graph. These tensors may not be "live"
when the gradient is being computed. We replace such references by their
corresponding tensor in `cond_graph.outer_graph`. In the case of nested
control flow or functions, the gradient logic handling
`grad_graph.outer_graph` will make sure the tensor from
`cond_graph.outer_graph` is also correctly captured.
Args:
cond_graph: function.FuncGraph. The forward-pass function.
grad_graph: function.FuncGraph. The gradients function.
Returns:
    A list of input tensors to be passed to grad_graph.
"""
new_inputs = []
for t in grad_graph.external_captures:
# `t` must either be in `grad_graph.outer_graph` or in the forward
# `cond_graph`.
if t.graph != grad_graph.outer_graph:
assert t.graph == cond_graph
# `internal_captures` are not treated as intermediates and hence not added
# to If op outputs. So we get the outer tensor corresponding to those
# from the list of `external_captures`.
try:
t = t.graph._if.outputs[t.graph.outputs.index(t)]
except ValueError:
index = t.graph.internal_captures.index(t)
t = t.graph.external_captures[index]
# Note: We rely on the capturing logic of the gradient If op graph to
# correctly capture the tensors in `cond_graph.outer_graph`. Both cond_v2
# and while_v2 handle this while building their gradient functions.
assert t.graph == cond_graph.outer_graph
new_inputs.append(t)
return new_inputs
def _create_new_tf_function(func_graph):
"""Converts func_graph to a TF_Function and adds it to the current graph.
Args:
func_graph: function.FuncGraph
Returns:
The name of the new TF_Function.
"""
func = function._EagerDefinedFunction(
func_graph.name, func_graph, func_graph.inputs, func_graph.outputs, {})
func.add_to_graph(func_graph.outer_graph)
return func_graph.name
def _get_intermediates(func_graph):
"""Returns all tensors in `func_graph` that aren't inputs or outputs."""
intermediates = []
for op in func_graph.get_operations():
for t in op.outputs:
if t in func_graph.inputs: continue
if t in func_graph.outputs: continue
intermediates.append(t)
return intermediates
def _separate_unique_inputs(true_inputs, false_inputs):
"""Separates tensors appearing only in true_inputs or false_inputs, or both.
Args:
true_inputs: list of Tensors
false_inputs: list of Tensors
Returns:
Three lists of Tensors:
1. The tensors that appear in both true_inputs and false_inputs
2. The tensors that only appear in true_inputs
3. The tensors that only appear in false_inputs
"""
true_inputs = set(true_inputs)
false_inputs = set(false_inputs)
shared_inputs = true_inputs.intersection(false_inputs)
true_only_inputs = true_inputs - false_inputs
false_only_inputs = false_inputs - true_inputs
return list(shared_inputs), list(true_only_inputs), list(false_only_inputs)
def _pad_params(true_graph, false_graph, true_params, false_params):
"""Returns new param lists that have matching signatures.
This is done by mirroring each param list in the other using dummy params.
There is no merging of params.
Args:
true_graph: function.FuncGraph
false_graph: function.FuncGraph
true_params: a list of Tensors from true_graph
false_params: a list of Tensors from false_graph
Returns:
A new list of Tensors in true_graph and a new list of Tensors in
false_graph. The two lists have the same number of Tensors, with matching
types and shapes across the lists.
"""
  new_true_params = (true_params +
                     _create_dummy_params(true_graph, false_params))
  new_false_params = (_create_dummy_params(false_graph, true_params)
                      + false_params)
  return new_true_params, new_false_params
def _make_inputs_match(true_graph, false_graph, true_inputs, false_inputs):
"""Modifies true_graph and false_graph so they have the same input signature.
This method reorders and/or adds parameters to true_graph and false_graph so
they have the same input signature, and updates the 'inputs' and 'captured'
fields of both graphs accordingly. It uses the input tensors from the outer
graph to avoid duplicating shared arguments.
Args:
true_graph: function.FuncGraph
false_graph: function.FuncGraph
true_inputs: a list of Tensors in the outer graph. The inputs for
true_graph.
false_inputs: a list of Tensors in the outer graph. The inputs for
false_graph.
Returns:
A new list of Tensors from the outer graph that are the new inputs for both
true_graph and false_graph. This is a deduped version of true_inputs +
false_inputs.
"""
shared_inputs, true_only_inputs, false_only_inputs = _separate_unique_inputs(
true_inputs, false_inputs)
new_inputs = shared_inputs + true_only_inputs + false_only_inputs
true_input_to_param = dict(zip(true_inputs, true_graph.inputs))
false_input_to_param = dict(zip(false_inputs, false_graph.inputs))
true_graph.inputs = (
[true_input_to_param[t] for t in shared_inputs] +
[true_input_to_param[t] for t in true_only_inputs] +
_create_dummy_params(true_graph, false_only_inputs))
false_graph.inputs = (
[false_input_to_param[t] for t in shared_inputs] +
_create_dummy_params(false_graph, true_only_inputs) +
[false_input_to_param[t] for t in false_only_inputs])
# Rewrite the FuncGraphs' state to reflect the new inputs.
true_graph.captures = collections.OrderedDict(zip(new_inputs,
true_graph.inputs))
false_graph.captures = collections.OrderedDict(zip(new_inputs,
false_graph.inputs))
return new_inputs
def _create_dummy_params(func_graph, template_tensors):
"""Creates tensors in func_graph to represent template_tensors.
Args:
func_graph: function.FuncGraph.
template_tensors: a list of tensors in the outer graph.
Returns:
A list of tensors in func_graph.
"""
with func_graph.as_default():
return [gen_functional_ops.fake_param(dtype=t.dtype, shape=t.shape)
for t in template_tensors]
def _get_grad_fn_name(func_graph):
"""Returns a unique name to use for the grad function of `func_graph`.
Ensures this name is unique in the entire hierarchy.
Args:
func_graph: The FuncGraph.
Returns:
A string, the name to use for the gradient function.
"""
name = "%s_grad" % func_graph.name
outer_most_graph = func_graph
while isinstance(outer_most_graph, function.FuncGraph):
outer_most_graph = outer_most_graph.outer_graph
return outer_most_graph.unique_name(name)
def _check_same_outputs(true_graph, false_graph):
"""Raises an error if true_graph and false_graph have different outputs."""
true_output_types = [t.dtype for t in true_graph.outputs]
false_output_types = [t.dtype for t in false_graph.outputs]
if (len(true_graph.outputs) != len(false_graph.outputs) or
true_output_types != false_output_types):
raise ValueError(
"true_fn() and false_fn() must return the same number and type of "
"arguments, got:\n"
" true_fn: %s\n"
" false_fn: %s" % (true_output_types, false_output_types))
def _get_output_shapes(true_graph_outputs, false_graph_outputs):
output_shapes = [
t_out.shape.most_specific_compatible_shape(f_out.shape)
for t_out, f_out in zip(true_graph_outputs, false_graph_outputs)
]
return output_shapes
| apache-2.0 | -1,024,098,045,345,669,900 | 37.862986 | 80 | 0.691696 | false |
onelogin/python-saml | src/onelogin/saml2/metadata.py | 1 | 12340 | # -*- coding: utf-8 -*-
""" OneLogin_Saml2_Metadata class
Copyright (c) 2010-2021 OneLogin, Inc.
MIT License
Metadata class of OneLogin's Python Toolkit.
"""
from time import gmtime, strftime, time
from datetime import datetime
from defusedxml.minidom import parseString
from onelogin.saml2.constants import OneLogin_Saml2_Constants
from onelogin.saml2.utils import OneLogin_Saml2_Utils
class OneLogin_Saml2_Metadata(object):
"""
A class that contains methods related to the metadata of the SP
"""
TIME_VALID = 172800 # 2 days
TIME_CACHED = 604800 # 1 week
@staticmethod
def builder(sp, authnsign=False, wsign=False, valid_until=None, cache_duration=None, contacts=None, organization=None):
"""
Builds the metadata of the SP
:param sp: The SP data
:type sp: string
:param authnsign: authnRequestsSigned attribute
:type authnsign: string
:param wsign: wantAssertionsSigned attribute
:type wsign: string
:param valid_until: Metadata's expiry date
:type valid_until: string|DateTime|Timestamp
:param cache_duration: Duration of the cache in seconds
:type cache_duration: int|string
:param contacts: Contacts info
:type contacts: dict
:param organization: Organization info
:type organization: dict
"""
if valid_until is None:
valid_until = int(time()) + OneLogin_Saml2_Metadata.TIME_VALID
if not isinstance(valid_until, basestring):
if isinstance(valid_until, datetime):
valid_until_time = valid_until.timetuple()
else:
valid_until_time = gmtime(valid_until)
valid_until_str = strftime(r'%Y-%m-%dT%H:%M:%SZ', valid_until_time)
else:
valid_until_str = valid_until
if cache_duration is None:
cache_duration = OneLogin_Saml2_Metadata.TIME_CACHED
if not isinstance(cache_duration, basestring):
cache_duration_str = 'PT%sS' % cache_duration # 'P'eriod of 'T'ime x 'S'econds
else:
cache_duration_str = cache_duration
if contacts is None:
contacts = {}
if organization is None:
organization = {}
str_attribute_consuming_service = ''
if 'attributeConsumingService' in sp and len(sp['attributeConsumingService']):
attr_cs_desc_str = ''
if "serviceDescription" in sp['attributeConsumingService']:
attr_cs_desc_str = """ <md:ServiceDescription xml:lang="en">%s</md:ServiceDescription>
""" % sp['attributeConsumingService']['serviceDescription']
requested_attribute_data = []
for req_attribs in sp['attributeConsumingService']['requestedAttributes']:
req_attr_nameformat_str = req_attr_friendlyname_str = req_attr_isrequired_str = ''
req_attr_aux_str = ' />'
if 'nameFormat' in req_attribs.keys() and req_attribs['nameFormat']:
req_attr_nameformat_str = " NameFormat=\"%s\"" % req_attribs['nameFormat']
if 'friendlyName' in req_attribs.keys() and req_attribs['friendlyName']:
req_attr_friendlyname_str = " FriendlyName=\"%s\"" % req_attribs['friendlyName']
if 'isRequired' in req_attribs.keys() and req_attribs['isRequired']:
req_attr_isrequired_str = " isRequired=\"%s\"" % 'true' if req_attribs['isRequired'] else 'false'
if 'attributeValue' in req_attribs.keys() and req_attribs['attributeValue']:
if isinstance(req_attribs['attributeValue'], basestring):
req_attribs['attributeValue'] = [req_attribs['attributeValue']]
req_attr_aux_str = ">"
for attrValue in req_attribs['attributeValue']:
req_attr_aux_str += """
<saml:AttributeValue xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion">%(attributeValue)s</saml:AttributeValue>""" % \
{
'attributeValue': attrValue
}
req_attr_aux_str += """
</md:RequestedAttribute>"""
requested_attribute = """ <md:RequestedAttribute Name="%(req_attr_name)s"%(req_attr_nameformat_str)s%(req_attr_friendlyname_str)s%(req_attr_isrequired_str)s%(req_attr_aux_str)s""" % \
{
'req_attr_name': req_attribs['name'],
'req_attr_nameformat_str': req_attr_nameformat_str,
'req_attr_friendlyname_str': req_attr_friendlyname_str,
'req_attr_isrequired_str': req_attr_isrequired_str,
'req_attr_aux_str': req_attr_aux_str
}
requested_attribute_data.append(requested_attribute)
str_attribute_consuming_service = """ <md:AttributeConsumingService index="1">
<md:ServiceName xml:lang="en">%(service_name)s</md:ServiceName>
%(attr_cs_desc)s%(requested_attribute_str)s
</md:AttributeConsumingService>
""" % \
{
'service_name': sp['attributeConsumingService']['serviceName'],
'attr_cs_desc': attr_cs_desc_str,
'requested_attribute_str': '\n'.join(requested_attribute_data)
}
sls = ''
if 'singleLogoutService' in sp and 'url' in sp['singleLogoutService']:
sls = """ <md:SingleLogoutService Binding="%(binding)s"
Location="%(location)s" />\n""" % \
{
'binding': sp['singleLogoutService']['binding'],
'location': sp['singleLogoutService']['url'],
}
str_authnsign = 'true' if authnsign else 'false'
str_wsign = 'true' if wsign else 'false'
str_organization = ''
if len(organization) > 0:
organization_names = []
organization_displaynames = []
organization_urls = []
for (lang, info) in organization.items():
organization_names.append(""" <md:OrganizationName xml:lang="%s">%s</md:OrganizationName>""" % (lang, info['name']))
organization_displaynames.append(""" <md:OrganizationDisplayName xml:lang="%s">%s</md:OrganizationDisplayName>""" % (lang, info['displayname']))
organization_urls.append(""" <md:OrganizationURL xml:lang="%s">%s</md:OrganizationURL>""" % (lang, info['url']))
org_data = '\n'.join(organization_names) + '\n' + '\n'.join(organization_displaynames) + '\n' + '\n'.join(organization_urls)
str_organization = """ <md:Organization>
%(org)s
</md:Organization>\n""" % {'org': org_data}
str_contacts = ''
if len(contacts) > 0:
contacts_info = []
for (ctype, info) in contacts.items():
contact = """ <md:ContactPerson contactType="%(type)s">
<md:GivenName>%(name)s</md:GivenName>
<md:EmailAddress>%(email)s</md:EmailAddress>
</md:ContactPerson>""" % \
{
'type': ctype,
'name': info['givenName'],
'email': info['emailAddress'],
}
contacts_info.append(contact)
str_contacts = '\n'.join(contacts_info) + '\n'
metadata = u"""<?xml version="1.0"?>
<md:EntityDescriptor xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata"
%(valid)s
%(cache)s
entityID="%(entity_id)s">
<md:SPSSODescriptor AuthnRequestsSigned="%(authnsign)s" WantAssertionsSigned="%(wsign)s" protocolSupportEnumeration="urn:oasis:names:tc:SAML:2.0:protocol">
%(sls)s <md:NameIDFormat>%(name_id_format)s</md:NameIDFormat>
<md:AssertionConsumerService Binding="%(binding)s"
Location="%(location)s"
index="1" />
%(attribute_consuming_service)s </md:SPSSODescriptor>
%(organization)s%(contacts)s</md:EntityDescriptor>""" % \
{
'valid': ('validUntil="%s"' % valid_until_str) if valid_until_str else '',
'cache': ('cacheDuration="%s"' % cache_duration_str) if cache_duration_str else '',
'entity_id': sp['entityId'],
'authnsign': str_authnsign,
'wsign': str_wsign,
'name_id_format': sp['NameIDFormat'],
'binding': sp['assertionConsumerService']['binding'],
'location': sp['assertionConsumerService']['url'],
'sls': sls,
'organization': str_organization,
'contacts': str_contacts,
'attribute_consuming_service': str_attribute_consuming_service
}
return metadata
@staticmethod
def sign_metadata(metadata, key, cert, sign_algorithm=OneLogin_Saml2_Constants.RSA_SHA1, digest_algorithm=OneLogin_Saml2_Constants.SHA1):
"""
Signs the metadata with the key/cert provided
:param metadata: SAML Metadata XML
:type metadata: string
:param key: x509 key
:type key: string
:param cert: x509 cert
:type cert: string
:param sign_algorithm: Signature algorithm method
:type sign_algorithm: string
:param digest_algorithm: Digest algorithm method
:type digest_algorithm: string
:returns: Signed Metadata
:rtype: string
"""
return OneLogin_Saml2_Utils.add_sign(metadata, key, cert, False, sign_algorithm, digest_algorithm)
@staticmethod
def add_x509_key_descriptors(metadata, cert=None, add_encryption=True):
"""
Adds the x509 descriptors (sign/encryption) to the metadata
The same cert will be used for sign/encrypt
:param metadata: SAML Metadata XML
:type metadata: string
:param cert: x509 cert
:type cert: string
:param add_encryption: Determines if the KeyDescriptor[use="encryption"] should be added.
:type add_encryption: boolean
:returns: Metadata with KeyDescriptors
:rtype: string
"""
if cert is None or cert == '':
return metadata
try:
xml = parseString(metadata.encode('utf-8'), forbid_dtd=True, forbid_entities=True, forbid_external=True)
except Exception as e:
raise Exception('Error parsing metadata. ' + e.message)
        formatted_cert = OneLogin_Saml2_Utils.format_cert(cert, False)
        x509_certificate = xml.createElementNS(OneLogin_Saml2_Constants.NS_DS, 'ds:X509Certificate')
        content = xml.createTextNode(formatted_cert)
x509_certificate.appendChild(content)
key_data = xml.createElementNS(OneLogin_Saml2_Constants.NS_DS, 'ds:X509Data')
key_data.appendChild(x509_certificate)
key_info = xml.createElementNS(OneLogin_Saml2_Constants.NS_DS, 'ds:KeyInfo')
key_info.appendChild(key_data)
key_descriptor = xml.createElementNS(OneLogin_Saml2_Constants.NS_DS, 'md:KeyDescriptor')
entity_descriptor = xml.getElementsByTagName('md:EntityDescriptor')[0]
sp_sso_descriptor = entity_descriptor.getElementsByTagName('md:SPSSODescriptor')[0]
sp_sso_descriptor.insertBefore(key_descriptor.cloneNode(True), sp_sso_descriptor.firstChild)
if add_encryption:
sp_sso_descriptor.insertBefore(key_descriptor.cloneNode(True), sp_sso_descriptor.firstChild)
signing = xml.getElementsByTagName('md:KeyDescriptor')[0]
signing.setAttribute('use', 'signing')
signing.appendChild(key_info)
signing.setAttribute('xmlns:ds', OneLogin_Saml2_Constants.NS_DS)
if add_encryption:
encryption = xml.getElementsByTagName('md:KeyDescriptor')[1]
encryption.setAttribute('use', 'encryption')
encryption.appendChild(key_info.cloneNode(True))
encryption.setAttribute('xmlns:ds', OneLogin_Saml2_Constants.NS_DS)
return xml.toxml()
| mit | 3,680,094,676,359,582,700 | 42.298246 | 210 | 0.58517 | false |
czhengsci/pymatgen | pymatgen/io/lammps/tests/test_output.py | 2 | 5248 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
import os
import unittest
import numpy as np
from pymatgen.io.lammps.output import LammpsRun, LammpsLog, LammpsDump
__author__ = 'Kiran Mathew'
__email__ = '[email protected]'
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
"test_files", "lammps")
class TestLammpsDump(unittest.TestCase):
def setUp(self):
dump_file_1 = os.path.join(test_dir, "dump_1")
dump_file_2 = os.path.join(test_dir, "dump_2")
self.dump1 = LammpsDump.from_file(dump_file_1)
self.dump2 = LammpsDump.from_file(dump_file_2)
dump_file_nvt = os.path.join(test_dir, "nvt.dump")
self.dump_nvt = LammpsDump.from_file(dump_file_nvt)
def test_non_atoms_data(self):
self.assertEqual(self.dump1.box_bounds, [[0.0, 25.0],
[0.0, 25.0],
[0.0, 25.0]])
self.assertEqual(self.dump1.natoms, 123)
self.assertEqual(self.dump1.timesteps, [0.0])
self.assertEqual(self.dump1.box_bounds, self.dump2.box_bounds)
self.assertEqual(self.dump1.natoms, self.dump2.natoms)
self.assertEqual(self.dump1.timesteps, self.dump2.timesteps)
def test_atoms_data(self):
self.assertEqual(self.dump1.natoms, len(self.dump1.atoms_data))
self.assertEqual(self.dump2.natoms, len(self.dump2.atoms_data))
ans1 = [float(x) for x in "1 2 1 3 3 2 4 1 3 5 4 3 5 2 4 6 1 3 5 7 5 4 " \
"6 3 5 7 2 4 6 1 3 5 7 6 5 7 4 6 3 5 7 2 4 6 " \
"1 3 5 7 7 6 5 7 4 6 3 5 7 2 4 6 1 3 5 7".split()]
np.testing.assert_almost_equal(self.dump1.atoms_data[115], ans1,
decimal=6)
np.testing.assert_almost_equal(self.dump2.atoms_data[57],
[99, 2, 0.909816, 0.883438, 0.314853],
decimal=6)
def test_timesteps_and_atoms_data(self):
self.assertEqual(self.dump_nvt.natoms * len(self.dump_nvt.timesteps),
len(self.dump_nvt.atoms_data))
class TestLammpsRun(unittest.TestCase):
@classmethod
def setUpClass(cls):
data_file = os.path.join(test_dir, "nvt.data")
traj_file = os.path.join(test_dir, "nvt.dump")
log_file = os.path.join(test_dir, "nvt.log")
cls.lmps_log = LammpsLog(log_file=log_file)
cls.lammpsrun = LammpsRun(data_file, traj_file, log_file)
def test_lammps_log(self):
fields = "step vol temp press ke pe etotal enthalpy evdwl ecoul epair " \
"ebond eangle edihed eimp " \
"emol elong etail lx ly lz xy xz yz density"
fields = fields.split()
thermo_data_ans = np.loadtxt(
os.path.join(test_dir, "thermo_data.txt"))
thermo_data = self.lammpsrun.log.thermo_data
self.assertEqual(sorted(list(thermo_data.keys())), sorted(fields))
self.assertEqual(self.lammpsrun.log.nmdsteps + 1,
len(thermo_data['step']))
data = [thermo_data[k] for k in fields]
np.testing.assert_almost_equal(data, np.transpose(thermo_data_ans), decimal=10)
def test_lammps_trajectory(self):
fields = "Atoms_id atom_type x y z vx vy vz mol mass"
fields = fields.split()
timestep_ans = 82
trajectory_ans = np.loadtxt(os.path.join(test_dir,
"trajectory_timestep_82_sorted.txt"))
begin = int(timestep_ans / 2) * self.lammpsrun.natoms
end = (int(timestep_ans / 2) + 1) * self.lammpsrun.natoms
trajectory = self.lammpsrun.trajectory[begin:end]
# atom ids in the trajectory starts from 0
np.testing.assert_almost_equal(trajectory[:][fields[0]],
trajectory_ans[:, 0] - 1, decimal=10)
for i, fld in enumerate(fields[1:]):
np.testing.assert_almost_equal(trajectory[:][fld],
trajectory_ans[:, i + 1],
decimal=10)
def test_get_structures_from_trajectory(self):
structures = self.lammpsrun.get_structures_from_trajectory()
self.assertEqual(len(structures), len(self.lammpsrun.timesteps))
def test_get_displacements(self):
structure, disp = self.lammpsrun.get_displacements()
self.assertEqual(disp.shape[0], len(structure))
self.assertEqual(disp.shape[1], len(self.lammpsrun.timesteps) - 1)
self.assertEqual(disp.shape[2], 3)
self.assertAlmostEqual(disp[-1, -1, -1], 0.077079999999999788)
def test_serialization(self):
d = self.lammpsrun.as_dict()
lmps_run = LammpsRun.from_dict(d)
self.assertDictEqual(d, lmps_run.as_dict())
d2 = self.lmps_log.as_dict()
lmps_log = LammpsLog.from_dict(d2)
self.assertDictEqual(d2, lmps_log.as_dict())
if __name__ == "__main__":
unittest.main()
| mit | 8,858,521,344,486,607,000 | 42.371901 | 87 | 0.577553 | false |
albertomurillo/ansible | lib/ansible/modules/cloud/azure/azure_rm_webappslot.py | 9 | 40667 | #!/usr/bin/python
#
# Copyright (c) 2018 Yunge Zhu, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_webappslot
version_added: "2.8"
short_description: Manage Azure Web App slot.
description:
- Create, update and delete Azure Web App slot.
options:
resource_group:
description:
- Name of the resource group to which the resource belongs.
required: True
name:
description:
- Unique name of the deployment slot to create or update.
required: True
webapp_name:
description:
- Web app name which this deployment slot belongs to.
required: True
location:
description:
- Resource location. If not set, location from the resource group will be used as default.
configuration_source:
description:
            - Source slot to clone configurations from when creating the slot. Use the webapp's name to refer to the production slot.
auto_swap_slot_name:
description:
- Used to configure target slot name to auto swap, or disable auto swap.
            - Set it to the target slot name to enable auto swap.
- Set it to False to disable auto slot swap.
swap:
description:
- Swap deployment slots of a web app.
suboptions:
action:
description:
                    - Swap types.
                    - preview applies the target slot's settings on the source slot first.
                    - swap completes the swapping.
                    - reset resets the swap.
choices:
- preview
- swap
- reset
default: preview
target_slot:
description:
- Name of target slot to swap. If set to None, then swap with production slot.
preserve_vnet:
description:
- True to preserve virtual network to the slot during swap. Otherwise False.
type: bool
default: True
frameworks:
description:
- Set of run time framework settings. Each setting is a dictionary.
- See U(https://docs.microsoft.com/en-us/azure/app-service/app-service-web-overview) for more info.
suboptions:
name:
description:
- Name of the framework.
                    - The lists of supported frameworks differ between Windows and Linux web apps.
                    - For Windows web apps, supported names (June 2018): java, net_framework, php, python, node. Multiple frameworks can be set at the same time.
                    - For Linux web apps, supported names (June 2018): java, ruby, php, dotnetcore, node. Only one framework can be set.
- Java framework is mutually exclusive with others.
choices:
- java
- net_framework
- php
- python
- ruby
- dotnetcore
- node
version:
description:
                    - Version of the framework. For supported values on Linux web apps, see U(https://aka.ms/linux-stacks) for more info.
- net_framework supported value sample, 'v4.0' for .NET 4.6 and 'v3.0' for .NET 3.5.
- php supported value sample, 5.5, 5.6, 7.0.
- python supported value sample, e.g., 5.5, 5.6, 7.0.
- node supported value sample, 6.6, 6.9.
- dotnetcore supported value sample, 1.0, 1,1, 1.2.
- ruby supported value sample, 2.3.
- java supported value sample, 1.8, 1.9 for windows web app. 8 for linux web app.
settings:
description:
- List of settings of the framework.
suboptions:
java_container:
                        description: Name of the Java container, e.g. Tomcat, Jetty. This is supported by the C(java) framework only.
java_container_version:
description:
                            - Version of the Java container. This is supported by the C(java) framework only.
- For Tomcat, e.g. 8.0, 8.5, 9.0. For Jetty, e.g. 9.1, 9.3.
container_settings:
description: Web app slot container settings.
suboptions:
name:
                description: Name of the container, e.g. "imagename:tag".
registry_server_url:
                description: Container registry server URL, e.g. mydockerregistry.io.
registry_server_user:
description: The container registry server user name.
registry_server_password:
description:
- The container registry server password.
startup_file:
description:
- The slot startup file.
            - This only applies to Linux web app slots.
app_settings:
description:
- Configure web app slot application settings. Suboptions are in key value pair format.
purge_app_settings:
description:
- Purge any existing application settings. Replace slot application settings with app_settings.
type: bool
deployment_source:
description:
- Deployment source for git
suboptions:
url:
description:
- Repository url of deployment source.
branch:
description:
- The branch name of the repository.
app_state:
description:
- Start/Stop/Restart the slot.
type: str
choices:
- started
- stopped
- restarted
default: started
state:
description:
- Assert the state of the Web App deployment slot.
- Use C(present) to create or update a slot and C(absent) to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Yunge Zhu(@yungezz)"
'''
EXAMPLES = '''
- name: Create a webapp slot
azure_rm_webappslot:
resource_group: myResourceGroup
webapp_name: myJavaWebApp
name: stage
configuration_source: myJavaWebApp
app_settings:
testkey: testvalue
- name: swap the slot with production slot
azure_rm_webappslot:
resource_group: myResourceGroup
webapp_name: myJavaWebApp
name: stage
swap:
action: swap
- name: stop the slot
azure_rm_webappslot:
resource_group: myResourceGroup
webapp_name: myJavaWebApp
name: stage
app_state: stopped
- name: update a webapp slot app settings
azure_rm_webappslot:
resource_group: myResourceGroup
webapp_name: myJavaWebApp
name: stage
app_settings:
testkey: testvalue2
- name: update a webapp slot frameworks
azure_rm_webappslot:
resource_group: myResourceGroup
webapp_name: myJavaWebApp
name: stage
frameworks:
- name: "node"
version: "10.1"
'''
RETURN = '''
id:
description: Id of current slot.
returned: always
type: str
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/testapp/slots/stage1
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller
from msrest.serialization import Model
from azure.mgmt.web.models import (
site_config, app_service_plan, Site,
AppServicePlan, SkuDescription, NameValuePair
)
except ImportError:
# This is handled in azure_rm_common
pass
swap_spec = dict(
action=dict(
type='str',
choices=[
'preview',
'swap',
'reset'
],
default='preview'
),
target_slot=dict(
type='str'
),
preserve_vnet=dict(
type='bool',
default=True
)
)
container_settings_spec = dict(
name=dict(type='str', required=True),
registry_server_url=dict(type='str'),
registry_server_user=dict(type='str'),
registry_server_password=dict(type='str', no_log=True)
)
deployment_source_spec = dict(
url=dict(type='str'),
branch=dict(type='str')
)
framework_settings_spec = dict(
java_container=dict(type='str', required=True),
java_container_version=dict(type='str', required=True)
)
framework_spec = dict(
name=dict(
type='str',
required=True,
choices=['net_framework', 'java', 'php', 'node', 'python', 'dotnetcore', 'ruby']),
version=dict(type='str', required=True),
settings=dict(type='dict', options=framework_settings_spec)
)
def webapp_to_dict(webapp):
return dict(
id=webapp.id,
name=webapp.name,
location=webapp.location,
client_cert_enabled=webapp.client_cert_enabled,
enabled=webapp.enabled,
reserved=webapp.reserved,
client_affinity_enabled=webapp.client_affinity_enabled,
server_farm_id=webapp.server_farm_id,
host_names_disabled=webapp.host_names_disabled,
https_only=webapp.https_only if hasattr(webapp, 'https_only') else None,
skip_custom_domain_verification=webapp.skip_custom_domain_verification if hasattr(webapp, 'skip_custom_domain_verification') else None,
ttl_in_seconds=webapp.ttl_in_seconds if hasattr(webapp, 'ttl_in_seconds') else None,
state=webapp.state,
tags=webapp.tags if webapp.tags else None
)
def slot_to_dict(slot):
return dict(
id=slot.id,
resource_group=slot.resource_group,
server_farm_id=slot.server_farm_id,
target_swap_slot=slot.target_swap_slot,
enabled_host_names=slot.enabled_host_names,
slot_swap_status=slot.slot_swap_status,
name=slot.name,
location=slot.location,
enabled=slot.enabled,
reserved=slot.reserved,
host_names_disabled=slot.host_names_disabled,
state=slot.state,
repository_site_name=slot.repository_site_name,
default_host_name=slot.default_host_name,
kind=slot.kind,
site_config=slot.site_config,
tags=slot.tags if slot.tags else None
)
class Actions:
NoAction, CreateOrUpdate, UpdateAppSettings, Delete = range(4)
class AzureRMWebAppSlots(AzureRMModuleBase):
"""Configuration class for an Azure RM Web App slot resource"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
webapp_name=dict(
type='str',
required=True
),
location=dict(
type='str'
),
configuration_source=dict(
type='str'
),
auto_swap_slot_name=dict(
type='raw'
),
swap=dict(
type='dict',
options=swap_spec
),
frameworks=dict(
type='list',
elements='dict',
options=framework_spec
),
container_settings=dict(
type='dict',
options=container_settings_spec
),
deployment_source=dict(
type='dict',
options=deployment_source_spec
),
startup_file=dict(
type='str'
),
app_settings=dict(
type='dict'
),
purge_app_settings=dict(
type='bool',
default=False
),
app_state=dict(
type='str',
choices=['started', 'stopped', 'restarted'],
default='started'
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
mutually_exclusive = [['container_settings', 'frameworks']]
self.resource_group = None
self.name = None
self.webapp_name = None
self.location = None
self.auto_swap_slot_name = None
self.swap = None
self.tags = None
self.startup_file = None
self.configuration_source = None
self.clone = False
        # site config, e.g. app settings, ssl
self.site_config = dict()
self.app_settings = dict()
self.app_settings_strDic = None
# siteSourceControl
self.deployment_source = dict()
# site, used at level creation, or update.
self.site = None
# property for internal usage, not used for sdk
self.container_settings = None
self.purge_app_settings = False
self.app_state = 'started'
self.results = dict(
changed=False,
id=None,
)
self.state = None
self.to_do = Actions.NoAction
self.frameworks = None
# set site_config value from kwargs
self.site_config_updatable_frameworks = ["net_framework_version",
"java_version",
"php_version",
"python_version",
"linux_fx_version"]
self.supported_linux_frameworks = ['ruby', 'php', 'dotnetcore', 'node', 'java']
self.supported_windows_frameworks = ['net_framework', 'php', 'python', 'node', 'java']
super(AzureRMWebAppSlots, self).__init__(derived_arg_spec=self.module_arg_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()) + ['tags']:
if hasattr(self, key):
setattr(self, key, kwargs[key])
elif kwargs[key] is not None:
if key == "scm_type":
self.site_config[key] = kwargs[key]
old_response = None
response = None
to_be_updated = False
# set location
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
self.location = resource_group.location
# get web app
webapp_response = self.get_webapp()
if not webapp_response:
self.fail("Web app {0} does not exist in resource group {1}.".format(self.webapp_name, self.resource_group))
# get slot
old_response = self.get_slot()
# set is_linux
is_linux = True if webapp_response['reserved'] else False
if self.state == 'present':
if self.frameworks:
# java is mutually exclusive with other frameworks
if len(self.frameworks) > 1 and any(f['name'] == 'java' for f in self.frameworks):
self.fail('Java is mutually exclusive with other frameworks.')
if is_linux:
if len(self.frameworks) != 1:
self.fail('Can specify one framework only for Linux web app.')
if self.frameworks[0]['name'] not in self.supported_linux_frameworks:
self.fail('Unsupported framework {0} for Linux web app.'.format(self.frameworks[0]['name']))
self.site_config['linux_fx_version'] = (self.frameworks[0]['name'] + '|' + self.frameworks[0]['version']).upper()
if self.frameworks[0]['name'] == 'java':
if self.frameworks[0]['version'] != '8':
self.fail("Linux web app only supports java 8.")
if self.frameworks[0].get('settings', {}) and self.frameworks[0]['settings'].get('java_container', None) and \
self.frameworks[0]['settings']['java_container'].lower() != 'tomcat':
self.fail("Linux web app only supports tomcat container.")
if self.frameworks[0].get('settings', {}) and self.frameworks[0]['settings'].get('java_container', None) and \
self.frameworks[0]['settings']['java_container'].lower() == 'tomcat':
self.site_config['linux_fx_version'] = 'TOMCAT|' + self.frameworks[0]['settings']['java_container_version'] + '-jre8'
else:
self.site_config['linux_fx_version'] = 'JAVA|8-jre8'
else:
for fx in self.frameworks:
if fx.get('name') not in self.supported_windows_frameworks:
self.fail('Unsupported framework {0} for Windows web app.'.format(fx.get('name')))
else:
self.site_config[fx.get('name') + '_version'] = fx.get('version')
if 'settings' in fx and fx['settings'] is not None:
for key, value in fx['settings'].items():
self.site_config[key] = value
if not self.app_settings:
self.app_settings = dict()
if self.container_settings:
linux_fx_version = 'DOCKER|'
if self.container_settings.get('registry_server_url'):
self.app_settings['DOCKER_REGISTRY_SERVER_URL'] = 'https://' + self.container_settings['registry_server_url']
linux_fx_version += self.container_settings['registry_server_url'] + '/'
linux_fx_version += self.container_settings['name']
self.site_config['linux_fx_version'] = linux_fx_version
if self.container_settings.get('registry_server_user'):
self.app_settings['DOCKER_REGISTRY_SERVER_USERNAME'] = self.container_settings['registry_server_user']
if self.container_settings.get('registry_server_password'):
self.app_settings['DOCKER_REGISTRY_SERVER_PASSWORD'] = self.container_settings['registry_server_password']
# set auto_swap_slot_name
if self.auto_swap_slot_name and isinstance(self.auto_swap_slot_name, str):
self.site_config['auto_swap_slot_name'] = self.auto_swap_slot_name
if self.auto_swap_slot_name is False:
self.site_config['auto_swap_slot_name'] = None
# init site
self.site = Site(location=self.location, site_config=self.site_config)
# check if the slot already present in the webapp
if not old_response:
self.log("Web App slot doesn't exist")
to_be_updated = True
self.to_do = Actions.CreateOrUpdate
self.site.tags = self.tags
# if linux, setup startup_file
if self.startup_file:
self.site_config['app_command_line'] = self.startup_file
# set app setting
if self.app_settings:
app_settings = []
for key in self.app_settings.keys():
app_settings.append(NameValuePair(name=key, value=self.app_settings[key]))
self.site_config['app_settings'] = app_settings
# clone slot
if self.configuration_source:
self.clone = True
else:
# existing slot, do update
self.log("Web App slot already exists")
self.log('Result: {0}'.format(old_response))
update_tags, self.site.tags = self.update_tags(old_response.get('tags', None))
if update_tags:
to_be_updated = True
# check if site_config changed
old_config = self.get_configuration_slot(self.name)
if self.is_site_config_changed(old_config):
to_be_updated = True
self.to_do = Actions.CreateOrUpdate
self.app_settings_strDic = self.list_app_settings_slot(self.name)
# purge existing app_settings:
if self.purge_app_settings:
to_be_updated = True
self.to_do = Actions.UpdateAppSettings
self.app_settings_strDic = dict()
# check if app settings changed
if self.purge_app_settings or self.is_app_settings_changed():
to_be_updated = True
self.to_do = Actions.UpdateAppSettings
if self.app_settings:
for key in self.app_settings.keys():
self.app_settings_strDic[key] = self.app_settings[key]
elif self.state == 'absent':
if old_response:
self.log("Delete Web App slot")
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_slot()
self.log('Web App slot deleted')
else:
self.log("Web app slot {0} not exists.".format(self.name))
if to_be_updated:
self.log('Need to Create/Update web app')
self.results['changed'] = True
if self.check_mode:
return self.results
if self.to_do == Actions.CreateOrUpdate:
response = self.create_update_slot()
self.results['id'] = response['id']
if self.clone:
self.clone_slot()
if self.to_do == Actions.UpdateAppSettings:
self.update_app_settings_slot()
slot = None
if response:
slot = response
if old_response:
slot = old_response
if slot:
if (slot['state'] != 'Stopped' and self.app_state == 'stopped') or \
(slot['state'] != 'Running' and self.app_state == 'started') or \
self.app_state == 'restarted':
self.results['changed'] = True
if self.check_mode:
return self.results
self.set_state_slot(self.app_state)
if self.swap:
self.results['changed'] = True
if self.check_mode:
return self.results
self.swap_slot()
return self.results
# compare site config
def is_site_config_changed(self, existing_config):
for fx_version in self.site_config_updatable_frameworks:
if self.site_config.get(fx_version):
if not getattr(existing_config, fx_version) or \
getattr(existing_config, fx_version).upper() != self.site_config.get(fx_version).upper():
return True
if self.auto_swap_slot_name is False and existing_config.auto_swap_slot_name is not None:
return True
elif self.auto_swap_slot_name and self.auto_swap_slot_name != getattr(existing_config, 'auto_swap_slot_name', None):
return True
return False
# comparing existing app setting with input, determine whether it's changed
def is_app_settings_changed(self):
if self.app_settings:
if len(self.app_settings_strDic) != len(self.app_settings):
return True
if self.app_settings_strDic != self.app_settings:
return True
return False
    # comparing deployment source with input, determine whether it's changed
def is_deployment_source_changed(self, existing_webapp):
if self.deployment_source:
if self.deployment_source.get('url') \
and self.deployment_source['url'] != existing_webapp.get('site_source_control')['url']:
return True
if self.deployment_source.get('branch') \
and self.deployment_source['branch'] != existing_webapp.get('site_source_control')['branch']:
return True
return False
def create_update_slot(self):
'''
Creates or updates Web App slot with the specified configuration.
:return: deserialized Web App instance state dictionary
'''
self.log(
"Creating / Updating the Web App slot {0}".format(self.name))
try:
response = self.web_client.web_apps.create_or_update_slot(resource_group_name=self.resource_group,
slot=self.name,
name=self.webapp_name,
site_envelope=self.site)
if isinstance(response, LROPoller):
response = self.get_poller_result(response)
except CloudError as exc:
self.log('Error attempting to create the Web App slot instance.')
self.fail("Error creating the Web App slot: {0}".format(str(exc)))
return slot_to_dict(response)
def delete_slot(self):
'''
Deletes specified Web App slot in the specified subscription and resource group.
:return: True
'''
self.log("Deleting the Web App slot {0}".format(self.name))
try:
response = self.web_client.web_apps.delete_slot(resource_group_name=self.resource_group,
name=self.webapp_name,
slot=self.name)
except CloudError as e:
self.log('Error attempting to delete the Web App slot.')
self.fail(
"Error deleting the Web App slots: {0}".format(str(e)))
return True
def get_webapp(self):
'''
Gets the properties of the specified Web App.
:return: deserialized Web App instance state dictionary
'''
self.log(
"Checking if the Web App instance {0} is present".format(self.webapp_name))
response = None
try:
response = self.web_client.web_apps.get(resource_group_name=self.resource_group,
name=self.webapp_name)
# Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError
if response is not None:
self.log("Response : {0}".format(response))
self.log("Web App instance : {0} found".format(response.name))
return webapp_to_dict(response)
except CloudError as ex:
pass
self.log("Didn't find web app {0} in resource group {1}".format(
self.webapp_name, self.resource_group))
return False
def get_slot(self):
'''
Gets the properties of the specified Web App slot.
:return: deserialized Web App slot state dictionary
'''
self.log(
"Checking if the Web App slot {0} is present".format(self.name))
response = None
try:
response = self.web_client.web_apps.get_slot(resource_group_name=self.resource_group,
name=self.webapp_name,
slot=self.name)
# Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError
if response is not None:
self.log("Response : {0}".format(response))
self.log("Web App slot: {0} found".format(response.name))
return slot_to_dict(response)
except CloudError as ex:
pass
self.log("Does not find web app slot {0} in resource group {1}".format(self.name, self.resource_group))
return False
def list_app_settings(self):
'''
List webapp application settings
:return: deserialized list response
'''
self.log("List webapp application setting")
try:
response = self.web_client.web_apps.list_application_settings(
resource_group_name=self.resource_group, name=self.webapp_name)
self.log("Response : {0}".format(response))
return response.properties
except CloudError as ex:
self.fail("Failed to list application settings for web app {0} in resource group {1}: {2}".format(
self.name, self.resource_group, str(ex)))
def list_app_settings_slot(self, slot_name):
'''
List application settings
:return: deserialized list response
'''
self.log("List application setting")
try:
response = self.web_client.web_apps.list_application_settings_slot(
resource_group_name=self.resource_group, name=self.webapp_name, slot=slot_name)
self.log("Response : {0}".format(response))
return response.properties
except CloudError as ex:
self.fail("Failed to list application settings for web app slot {0} in resource group {1}: {2}".format(
self.name, self.resource_group, str(ex)))
def update_app_settings_slot(self, slot_name=None, app_settings=None):
'''
Update application settings
:return: deserialized updating response
'''
self.log("Update application setting")
if slot_name is None:
slot_name = self.name
if app_settings is None:
app_settings = self.app_settings_strDic
try:
response = self.web_client.web_apps.update_application_settings_slot(resource_group_name=self.resource_group,
name=self.webapp_name,
slot=slot_name,
kind=None,
properties=app_settings)
self.log("Response : {0}".format(response))
return response.as_dict()
except CloudError as ex:
self.fail("Failed to update application settings for web app slot {0} in resource group {1}: {2}".format(
self.name, self.resource_group, str(ex)))
return response
def create_or_update_source_control_slot(self):
'''
Update site source control
:return: deserialized updating response
'''
self.log("Update site source control")
if self.deployment_source is None:
return False
self.deployment_source['is_manual_integration'] = False
self.deployment_source['is_mercurial'] = False
try:
            response = self.web_client.web_apps.create_or_update_source_control_slot(
resource_group_name=self.resource_group,
name=self.webapp_name,
site_source_control=self.deployment_source,
slot=self.name)
self.log("Response : {0}".format(response))
return response.as_dict()
except CloudError as ex:
self.fail("Failed to update site source control for web app slot {0} in resource group {1}: {2}".format(
self.name, self.resource_group, str(ex)))
def get_configuration(self):
'''
Get web app configuration
:return: deserialized web app configuration response
'''
self.log("Get web app configuration")
try:
response = self.web_client.web_apps.get_configuration(
resource_group_name=self.resource_group, name=self.webapp_name)
self.log("Response : {0}".format(response))
return response
except CloudError as ex:
self.fail("Failed to get configuration for web app {0} in resource group {1}: {2}".format(
self.webapp_name, self.resource_group, str(ex)))
def get_configuration_slot(self, slot_name):
'''
Get slot configuration
:return: deserialized slot configuration response
'''
self.log("Get web app slot configuration")
try:
response = self.web_client.web_apps.get_configuration_slot(
resource_group_name=self.resource_group, name=self.webapp_name, slot=slot_name)
self.log("Response : {0}".format(response))
return response
except CloudError as ex:
self.fail("Failed to get configuration for web app slot {0} in resource group {1}: {2}".format(
slot_name, self.resource_group, str(ex)))
def update_configuration_slot(self, slot_name=None, site_config=None):
'''
Update slot configuration
:return: deserialized slot configuration response
'''
self.log("Update web app slot configuration")
if slot_name is None:
slot_name = self.name
if site_config is None:
site_config = self.site_config
try:
response = self.web_client.web_apps.update_configuration_slot(
resource_group_name=self.resource_group, name=self.webapp_name, slot=slot_name, site_config=site_config)
self.log("Response : {0}".format(response))
return response
except CloudError as ex:
self.fail("Failed to update configuration for web app slot {0} in resource group {1}: {2}".format(
slot_name, self.resource_group, str(ex)))
def set_state_slot(self, appstate):
'''
Start/stop/restart web app slot
:return: deserialized updating response
'''
try:
if appstate == 'started':
response = self.web_client.web_apps.start_slot(resource_group_name=self.resource_group, name=self.webapp_name, slot=self.name)
elif appstate == 'stopped':
response = self.web_client.web_apps.stop_slot(resource_group_name=self.resource_group, name=self.webapp_name, slot=self.name)
elif appstate == 'restarted':
response = self.web_client.web_apps.restart_slot(resource_group_name=self.resource_group, name=self.webapp_name, slot=self.name)
else:
self.fail("Invalid web app slot state {0}".format(appstate))
self.log("Response : {0}".format(response))
return response
except CloudError as ex:
request_id = ex.request_id if ex.request_id else ''
self.fail("Failed to {0} web app slot {1} in resource group {2}, request_id {3} - {4}".format(
appstate, self.name, self.resource_group, request_id, str(ex)))
def swap_slot(self):
'''
Swap slot
:return: deserialized response
'''
self.log("Swap slot")
try:
if self.swap['action'] == 'swap':
if self.swap['target_slot'] is None:
response = self.web_client.web_apps.swap_slot_with_production(resource_group_name=self.resource_group,
name=self.webapp_name,
target_slot=self.name,
preserve_vnet=self.swap['preserve_vnet'])
else:
response = self.web_client.web_apps.swap_slot_slot(resource_group_name=self.resource_group,
name=self.webapp_name,
slot=self.name,
target_slot=self.swap['target_slot'],
preserve_vnet=self.swap['preserve_vnet'])
elif self.swap['action'] == 'preview':
if self.swap['target_slot'] is None:
response = self.web_client.web_apps.apply_slot_config_to_production(resource_group_name=self.resource_group,
name=self.webapp_name,
target_slot=self.name,
preserve_vnet=self.swap['preserve_vnet'])
else:
response = self.web_client.web_apps.apply_slot_configuration_slot(resource_group_name=self.resource_group,
name=self.webapp_name,
slot=self.name,
target_slot=self.swap['target_slot'],
preserve_vnet=self.swap['preserve_vnet'])
elif self.swap['action'] == 'reset':
if self.swap['target_slot'] is None:
response = self.web_client.web_apps.reset_production_slot_config(resource_group_name=self.resource_group,
name=self.webapp_name)
else:
response = self.web_client.web_apps.reset_slot_configuration_slot(resource_group_name=self.resource_group,
name=self.webapp_name,
slot=self.swap['target_slot'])
response = self.web_client.web_apps.reset_slot_configuration_slot(resource_group_name=self.resource_group,
name=self.webapp_name,
slot=self.name)
self.log("Response : {0}".format(response))
return response
except CloudError as ex:
self.fail("Failed to swap web app slot {0} in resource group {1}: {2}".format(self.name, self.resource_group, str(ex)))
def clone_slot(self):
if self.configuration_source:
src_slot = None if self.configuration_source.lower() == self.webapp_name.lower() else self.configuration_source
if src_slot is None:
site_config_clone_from = self.get_configuration()
else:
site_config_clone_from = self.get_configuration_slot(slot_name=src_slot)
self.update_configuration_slot(site_config=site_config_clone_from)
if src_slot is None:
app_setting_clone_from = self.list_app_settings()
else:
app_setting_clone_from = self.list_app_settings_slot(src_slot)
if self.app_settings:
app_setting_clone_from.update(self.app_settings)
self.update_app_settings_slot(app_settings=app_setting_clone_from)
def main():
"""Main execution"""
AzureRMWebAppSlots()
if __name__ == '__main__':
main()
| gpl-3.0 | -6,400,516,954,900,791,000 | 37.730476 | 153 | 0.531881 | false |
open-synergy/stock-logistics-warehouse | stock_available/models/product_template.py | 2 | 1628 | # -*- coding: utf-8 -*-
# © 2014 Numérigraphe SARL
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models, fields, api
from openerp.addons import decimal_precision as dp
class ProductTemplate(models.Model):
_inherit = 'product.template'
@api.multi
@api.depends('product_variant_ids.immediately_usable_qty')
def _immediately_usable_qty(self):
"""No-op implementation of the stock available to promise.
By default, available to promise = forecasted quantity.
**Each** sub-module **must** override this method in **both**
`product.product` **and** `product.template`, because we can't
decide in advance how to compute the template's quantity from the
variants.
"""
for tmpl in self:
tmpl.immediately_usable_qty = tmpl.virtual_available
def _search_immediately_usable_quantity(self, operator, value):
prod_obj = self.env['product.product']
product_variants = prod_obj.search(
[('immediately_usable_qty', operator, value)]
)
return [('product_variant_ids', 'in', product_variants.ids)]
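    # Illustrative note (hedged, hypothetical domain): a search such as
    # [('immediately_usable_qty', '>', 0)] is delegated to the variants and
    # rewritten by the method above as a domain on product_variant_ids.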
immediately_usable_qty = fields.Float(
digits=dp.get_precision('Product Unit of Measure'),
compute='_immediately_usable_qty',
search='_search_immediately_usable_quantity',
string='Available to promise',
help="Stock for this Product that can be safely proposed "
"for sale to Customers.\n"
"The definition of this value can be configured to suit "
"your needs")
| agpl-3.0 | 1,215,361,201,699,620,600 | 37.714286 | 77 | 0.643296 | false |
OTWillems/GEO1005 | SpatialDecision/external/networkx/algorithms/isomorphism/isomorphvf2.py | 76 | 36794 | # -*- coding: utf-8 -*-
"""
*************
VF2 Algorithm
*************
An implementation of VF2 algorithm for graph ismorphism testing.
The simplest interface to use this module is to call networkx.is_isomorphic().
Introduction
------------
The GraphMatcher and DiGraphMatcher are responsible for matching
graphs or directed graphs in a predetermined manner. This
usually means a check for an isomorphism, though other checks
are also possible. For example, a subgraph of one graph
can be checked for isomorphism to a second graph.
Matching is done via syntactic feasibility. It is also possible
to check for semantic feasibility. Feasibility, then, is defined
as the logical AND of the two functions.
To include a semantic check, the (Di)GraphMatcher class should be
subclassed, and the semantic_feasibility() function should be
redefined. By default, the semantic feasibility function always
returns True. The effect of this is that semantics are not
considered in the matching of G1 and G2.
Examples
--------
Suppose G1 and G2 are isomorphic graphs. Verification is as follows:
>>> from networkx.algorithms import isomorphism
>>> G1 = nx.path_graph(4)
>>> G2 = nx.path_graph(4)
>>> GM = isomorphism.GraphMatcher(G1,G2)
>>> GM.is_isomorphic()
True
GM.mapping stores the isomorphism mapping from G1 to G2.
>>> GM.mapping
{0: 0, 1: 1, 2: 2, 3: 3}
Suppose G1 and G2 are isomorphic directed graphs.
Verification is as follows:
>>> G1 = nx.path_graph(4, create_using=nx.DiGraph())
>>> G2 = nx.path_graph(4, create_using=nx.DiGraph())
>>> DiGM = isomorphism.DiGraphMatcher(G1,G2)
>>> DiGM.is_isomorphic()
True
DiGM.mapping stores the isomorphism mapping from G1 to G2.
>>> DiGM.mapping
{0: 0, 1: 1, 2: 2, 3: 3}
Subgraph Isomorphism
--------------------
Graph theory literature can be ambiguous about the meaning of the
above statement, and we seek to clarify it now.
In the VF2 literature, a mapping M is said to be a graph-subgraph
isomorphism iff M is an isomorphism between G2 and a subgraph of G1.
Thus, to say that G1 and G2 are graph-subgraph isomorphic is to say
that a subgraph of G1 is isomorphic to G2.
Other literature uses the phrase 'subgraph isomorphic' as in 'G1 does
not have a subgraph isomorphic to G2'. Another use is as an adverb
for isomorphic. Thus, to say that G1 and G2 are subgraph isomorphic
is to say that a subgraph of G1 is isomorphic to G2.
Finally, the term 'subgraph' can have multiple meanings. In this
context, 'subgraph' always means a 'node-induced subgraph'. Edge-induced
subgraph isomorphisms are not directly supported, but one should be
able to perform the check by making use of nx.line_graph(). For
subgraphs which are not induced, the term 'monomorphism' is preferred
over 'isomorphism'. Currently, it is not possible to check for
monomorphisms.
Let G=(N,E) be a graph with a set of nodes N and set of edges E.
If G'=(N',E') is a subgraph, then:
N' is a subset of N
E' is a subset of E
If G'=(N',E') is a node-induced subgraph, then:
N' is a subset of N
E' is the subset of edges in E relating nodes in N'
If G'=(N',E') is an edge-induced subgraph, then:
N' is the subset of nodes in N related by edges in E'
E' is a subset of E
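For example, a hedged doctest sketch of the node-induced semantics
(illustrative only):
>>> from networkx.algorithms import isomorphism
>>> G1 = nx.path_graph(4)
>>> G2 = nx.path_graph(3)
>>> GM = isomorphism.GraphMatcher(G1, G2)
>>> GM.subgraph_is_isomorphic()
True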
References
----------
[1] Luigi P. Cordella, Pasquale Foggia, Carlo Sansone, Mario Vento,
"A (Sub)Graph Isomorphism Algorithm for Matching Large Graphs",
IEEE Transactions on Pattern Analysis and Machine Intelligence,
vol. 26, no. 10, pp. 1367-1372, Oct., 2004.
http://ieeexplore.ieee.org/iel5/34/29305/01323804.pdf
[2] L. P. Cordella, P. Foggia, C. Sansone, M. Vento, "An Improved
Algorithm for Matching Large Graphs", 3rd IAPR-TC15 Workshop
on Graph-based Representations in Pattern Recognition, Cuen,
pp. 149-159, 2001.
http://amalfi.dis.unina.it/graph/db/papers/vf-algorithm.pdf
See Also
--------
syntactic_feasibliity(), semantic_feasibility()
Notes
-----
Modified to handle undirected graphs.
Modified to handle multiple edges.
In general, this problem is NP-Complete.
"""
# Copyright (C) 2007-2009 by the NetworkX maintainers
# All rights reserved.
# BSD license.
# This work was originally coded by Christopher Ellison
# as part of the Computational Mechanics Python (CMPy) project.
# James P. Crutchfield, principal investigator.
# Complexity Sciences Center and Physics Department, UC Davis.
import sys
import networkx as nx
__all__ = ['GraphMatcher',
'DiGraphMatcher']
class GraphMatcher(object):
"""Implementation of VF2 algorithm for matching undirected graphs.
Suitable for Graph and MultiGraph instances.
"""
def __init__(self, G1, G2):
"""Initialize GraphMatcher.
Parameters
----------
G1,G2: NetworkX Graph or MultiGraph instances.
The two graphs to check for isomorphism.
Examples
--------
To create a GraphMatcher which checks for syntactic feasibility:
>>> from networkx.algorithms import isomorphism
>>> G1 = nx.path_graph(4)
>>> G2 = nx.path_graph(4)
>>> GM = isomorphism.GraphMatcher(G1,G2)
"""
self.G1 = G1
self.G2 = G2
self.G1_nodes = set(G1.nodes())
self.G2_nodes = set(G2.nodes())
# Set recursion limit.
self.old_recursion_limit = sys.getrecursionlimit()
expected_max_recursion_level = len(self.G2)
if self.old_recursion_limit < 1.5 * expected_max_recursion_level:
# Give some breathing room.
sys.setrecursionlimit(int(1.5 * expected_max_recursion_level))
# Declare that we will be searching for a graph-graph isomorphism.
self.test = 'graph'
# Initialize state
self.initialize()
def reset_recursion_limit(self):
"""Restores the recursion limit."""
### TODO:
### Currently, we use recursion and set the recursion level higher.
### It would be nice to restore the level, but because the
### (Di)GraphMatcher classes make use of cyclic references, garbage
### collection will never happen when we define __del__() to
### restore the recursion level. The result is a memory leak.
### So for now, we do not automatically restore the recursion level,
### and instead provide a method to do this manually. Eventually,
### we should turn this into a non-recursive implementation.
sys.setrecursionlimit(self.old_recursion_limit)
def candidate_pairs_iter(self):
"""Iterator over candidate pairs of nodes in G1 and G2."""
# All computations are done using the current state!
G1_nodes = self.G1_nodes
G2_nodes = self.G2_nodes
# First we compute the inout-terminal sets.
T1_inout = [node for node in G1_nodes if (node in self.inout_1) and (node not in self.core_1)]
T2_inout = [node for node in G2_nodes if (node in self.inout_2) and (node not in self.core_2)]
# If T1_inout and T2_inout are both nonempty.
# P(s) = T1_inout x {min T2_inout}
if T1_inout and T2_inout:
for node in T1_inout:
yield node, min(T2_inout)
else:
# If T1_inout and T2_inout were both empty....
# P(s) = (N_1 - M_1) x {min (N_2 - M_2)}
##if not (T1_inout or T2_inout): # as suggested by [2], incorrect
if 1: # as inferred from [1], correct
# First we determine the candidate node for G2
other_node = min(G2_nodes - set(self.core_2))
for node in self.G1:
if node not in self.core_1:
yield node, other_node
# For all other cases, we don't have any candidate pairs.
def initialize(self):
"""Reinitializes the state of the algorithm.
This method should be redefined if using something other than GMState.
If only subclassing GraphMatcher, a redefinition is not necessary.
"""
# core_1[n] contains the index of the node paired with n, which is m,
# provided n is in the mapping.
# core_2[m] contains the index of the node paired with m, which is n,
# provided m is in the mapping.
self.core_1 = {}
self.core_2 = {}
# See the paper for definitions of M_x and T_x^{y}
# inout_1[n] is non-zero if n is in M_1 or in T_1^{inout}
# inout_2[m] is non-zero if m is in M_2 or in T_2^{inout}
#
# The value stored is the depth of the SSR tree when the node became
# part of the corresponding set.
self.inout_1 = {}
self.inout_2 = {}
# Practically, these sets simply store the nodes in the subgraph.
self.state = GMState(self)
        # Provide a convenient way to access the isomorphism mapping.
self.mapping = self.core_1.copy()
def is_isomorphic(self):
"""Returns True if G1 and G2 are isomorphic graphs."""
# Let's do two very quick checks!
# QUESTION: Should we call faster_graph_could_be_isomorphic(G1,G2)?
# For now, I just copy the code.
# Check global properties
if self.G1.order() != self.G2.order(): return False
# Check local properties
d1=sorted(self.G1.degree().values())
d2=sorted(self.G2.degree().values())
if d1 != d2: return False
try:
x = next(self.isomorphisms_iter())
return True
except StopIteration:
return False
def isomorphisms_iter(self):
"""Generator over isomorphisms between G1 and G2."""
# Declare that we are looking for a graph-graph isomorphism.
self.test = 'graph'
self.initialize()
for mapping in self.match():
yield mapping
def match(self):
"""Extends the isomorphism mapping.
This function is called recursively to determine if a complete
isomorphism can be found between G1 and G2. It cleans up the class
variables after each recursive call. If an isomorphism is found,
we yield the mapping.
"""
if len(self.core_1) == len(self.G2):
# Save the final mapping, otherwise garbage collection deletes it.
self.mapping = self.core_1.copy()
# The mapping is complete.
yield self.mapping
else:
for G1_node, G2_node in self.candidate_pairs_iter():
if self.syntactic_feasibility(G1_node, G2_node):
if self.semantic_feasibility(G1_node, G2_node):
# Recursive call, adding the feasible state.
newstate = self.state.__class__(self, G1_node, G2_node)
for mapping in self.match():
yield mapping
# restore data structures
newstate.restore()
def semantic_feasibility(self, G1_node, G2_node):
"""Returns True if adding (G1_node, G2_node) is symantically feasible.
The semantic feasibility function should return True if it is
acceptable to add the candidate pair (G1_node, G2_node) to the current
partial isomorphism mapping. The logic should focus on semantic
information contained in the edge data or a formalized node class.
By acceptable, we mean that the subsequent mapping can still become a
complete isomorphism mapping. Thus, if adding the candidate pair
definitely makes it so that the subsequent mapping cannot become a
complete isomorphism mapping, then this function must return False.
The default semantic feasibility function always returns True. The
effect is that semantics are not considered in the matching of G1
and G2.
The semantic checks might differ based on the what type of test is
being performed. A keyword description of the test is stored in
self.test. Here is a quick description of the currently implemented
tests::
test='graph'
Indicates that the graph matcher is looking for a graph-graph
isomorphism.
test='subgraph'
Indicates that the graph matcher is looking for a subgraph-graph
isomorphism such that a subgraph of G1 is isomorphic to G2.
Any subclass which redefines semantic_feasibility() must maintain
the above form to keep the match() method functional. Implementations
should consider multigraphs.
"""
return True
def subgraph_is_isomorphic(self):
"""Returns True if a subgraph of G1 is isomorphic to G2."""
try:
x = next(self.subgraph_isomorphisms_iter())
return True
except StopIteration:
return False
# subgraph_is_isomorphic.__doc__ += "\n" + subgraph.replace('\n','\n'+indent)
def subgraph_isomorphisms_iter(self):
"""Generator over isomorphisms between a subgraph of G1 and G2."""
# Declare that we are looking for graph-subgraph isomorphism.
self.test = 'subgraph'
self.initialize()
for mapping in self.match():
yield mapping
# subgraph_isomorphisms_iter.__doc__ += "\n" + subgraph.replace('\n','\n'+indent)
def syntactic_feasibility(self, G1_node, G2_node):
"""Returns True if adding (G1_node, G2_node) is syntactically feasible.
        This function returns True if adding the candidate pair
to the current partial isomorphism mapping is allowable. The addition
is allowable if the inclusion of the candidate pair does not make it
impossible for an isomorphism to be found.
"""
# The VF2 algorithm was designed to work with graphs having, at most,
# one edge connecting any two nodes. This is not the case when
        # dealing with MultiGraphs.
#
# Basically, when we test the look-ahead rules R_neighbor, we will
        # make sure that the number of edges is checked. We also add
        # an R_self check to verify that the number of selfloops is acceptable.
#
# Users might be comparing Graph instances with MultiGraph instances.
# So the generic GraphMatcher class must work with MultiGraphs.
# Care must be taken since the value in the innermost dictionary is a
# singlet for Graph instances. For MultiGraphs, the value in the
# innermost dictionary is a list.
###
### Test at each step to get a return value as soon as possible.
###
### Look ahead 0
# R_self
# The number of selfloops for G1_node must equal the number of
# self-loops for G2_node. Without this check, we would fail on
# R_neighbor at the next recursion level. But it is good to prune the
# search tree now.
if self.G1.number_of_edges(G1_node,G1_node) != self.G2.number_of_edges(G2_node,G2_node):
return False
# R_neighbor
# For each neighbor n' of n in the partial mapping, the corresponding
# node m' is a neighbor of m, and vice versa. Also, the number of
# edges must be equal.
for neighbor in self.G1[G1_node]:
if neighbor in self.core_1:
if not (self.core_1[neighbor] in self.G2[G2_node]):
return False
elif self.G1.number_of_edges(neighbor, G1_node) != self.G2.number_of_edges(self.core_1[neighbor], G2_node):
return False
for neighbor in self.G2[G2_node]:
if neighbor in self.core_2:
if not (self.core_2[neighbor] in self.G1[G1_node]):
return False
elif self.G1.number_of_edges(self.core_2[neighbor], G1_node) != self.G2.number_of_edges(neighbor, G2_node):
return False
### Look ahead 1
# R_terminout
# The number of neighbors of n that are in T_1^{inout} is equal to the
# number of neighbors of m that are in T_2^{inout}, and vice versa.
num1 = 0
for neighbor in self.G1[G1_node]:
if (neighbor in self.inout_1) and (neighbor not in self.core_1):
num1 += 1
num2 = 0
for neighbor in self.G2[G2_node]:
if (neighbor in self.inout_2) and (neighbor not in self.core_2):
num2 += 1
if self.test == 'graph':
if not (num1 == num2):
return False
else: # self.test == 'subgraph'
if not (num1 >= num2):
return False
### Look ahead 2
# R_new
# The number of neighbors of n that are neither in the core_1 nor
# T_1^{inout} is equal to the number of neighbors of m
# that are neither in core_2 nor T_2^{inout}.
num1 = 0
for neighbor in self.G1[G1_node]:
if neighbor not in self.inout_1:
num1 += 1
num2 = 0
for neighbor in self.G2[G2_node]:
if neighbor not in self.inout_2:
num2 += 1
if self.test == 'graph':
if not (num1 == num2):
return False
else: # self.test == 'subgraph'
if not (num1 >= num2):
return False
# Otherwise, this node pair is syntactically feasible!
return True
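# Illustrative sketch (hedged, not part of the original module): exercising
# the matcher on MultiGraphs, where parallel-edge counts must agree. The
# two graphs below are hypothetical test inputs.
def _demo_multigraph_matching():
    G1 = nx.MultiGraph([(0, 1), (0, 1), (1, 2)])
    G2 = nx.MultiGraph([('a', 'b'), ('a', 'b'), ('b', 'c')])
    # True: degrees and edge multiplicities line up under 0->a, 1->b, 2->c
    return GraphMatcher(G1, G2).is_isomorphic()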
class DiGraphMatcher(GraphMatcher):
"""Implementation of VF2 algorithm for matching directed graphs.
Suitable for DiGraph and MultiDiGraph instances.
"""
# __doc__ += "Notes\n%s-----" % (indent,) + sources.replace('\n','\n'+indent)
def __init__(self, G1, G2):
"""Initialize DiGraphMatcher.
        G1 and G2 should be nx.DiGraph or nx.MultiDiGraph instances.
Examples
--------
To create a GraphMatcher which checks for syntactic feasibility:
>>> from networkx.algorithms import isomorphism
>>> G1 = nx.DiGraph(nx.path_graph(4, create_using=nx.DiGraph()))
>>> G2 = nx.DiGraph(nx.path_graph(4, create_using=nx.DiGraph()))
>>> DiGM = isomorphism.DiGraphMatcher(G1,G2)
"""
super(DiGraphMatcher, self).__init__(G1, G2)
def candidate_pairs_iter(self):
"""Iterator over candidate pairs of nodes in G1 and G2."""
# All computations are done using the current state!
G1_nodes = self.G1_nodes
G2_nodes = self.G2_nodes
# First we compute the out-terminal sets.
T1_out = [node for node in G1_nodes if (node in self.out_1) and (node not in self.core_1)]
T2_out = [node for node in G2_nodes if (node in self.out_2) and (node not in self.core_2)]
# If T1_out and T2_out are both nonempty.
# P(s) = T1_out x {min T2_out}
if T1_out and T2_out:
node_2 = min(T2_out)
for node_1 in T1_out:
yield node_1, node_2
# If T1_out and T2_out were both empty....
# We compute the in-terminal sets.
##elif not (T1_out or T2_out): # as suggested by [2], incorrect
else: # as suggested by [1], correct
T1_in = [node for node in G1_nodes if (node in self.in_1) and (node not in self.core_1)]
T2_in = [node for node in G2_nodes if (node in self.in_2) and (node not in self.core_2)]
# If T1_in and T2_in are both nonempty.
            # P(s) = T1_in x {min T2_in}
if T1_in and T2_in:
node_2 = min(T2_in)
for node_1 in T1_in:
yield node_1, node_2
# If all terminal sets are empty...
# P(s) = (N_1 - M_1) x {min (N_2 - M_2)}
##elif not (T1_in or T2_in): # as suggested by [2], incorrect
else: # as inferred from [1], correct
node_2 = min(G2_nodes - set(self.core_2))
for node_1 in G1_nodes:
if node_1 not in self.core_1:
yield node_1, node_2
# For all other cases, we don't have any candidate pairs.
def initialize(self):
"""Reinitializes the state of the algorithm.
This method should be redefined if using something other than DiGMState.
        If only subclassing DiGraphMatcher, a redefinition is not necessary.
"""
# core_1[n] contains the index of the node paired with n, which is m,
# provided n is in the mapping.
# core_2[m] contains the index of the node paired with m, which is n,
# provided m is in the mapping.
self.core_1 = {}
self.core_2 = {}
# See the paper for definitions of M_x and T_x^{y}
# in_1[n] is non-zero if n is in M_1 or in T_1^{in}
# out_1[n] is non-zero if n is in M_1 or in T_1^{out}
#
# in_2[m] is non-zero if m is in M_2 or in T_2^{in}
# out_2[m] is non-zero if m is in M_2 or in T_2^{out}
#
# The value stored is the depth of the search tree when the node became
# part of the corresponding set.
self.in_1 = {}
self.in_2 = {}
self.out_1 = {}
self.out_2 = {}
self.state = DiGMState(self)
        # Provide a convenient way to access the isomorphism mapping.
self.mapping = self.core_1.copy()
def syntactic_feasibility(self, G1_node, G2_node):
"""Returns True if adding (G1_node, G2_node) is syntactically feasible.
        This function returns True if adding the candidate pair
to the current partial isomorphism mapping is allowable. The addition
is allowable if the inclusion of the candidate pair does not make it
impossible for an isomorphism to be found.
"""
# The VF2 algorithm was designed to work with graphs having, at most,
# one edge connecting any two nodes. This is not the case when
        # dealing with MultiGraphs.
#
# Basically, when we test the look-ahead rules R_pred and R_succ, we
# will make sure that the number of edges are checked. We also add
# a R_self check to verify that the number of selfloops is acceptable.
# Users might be comparing DiGraph instances with MultiDiGraph
# instances. So the generic DiGraphMatcher class must work with
# MultiDiGraphs. Care must be taken since the value in the innermost
# dictionary is a singlet for DiGraph instances. For MultiDiGraphs,
# the value in the innermost dictionary is a list.
###
### Test at each step to get a return value as soon as possible.
###
### Look ahead 0
# R_self
# The number of selfloops for G1_node must equal the number of
# self-loops for G2_node. Without this check, we would fail on R_pred
# at the next recursion level. This should prune the tree even further.
if self.G1.number_of_edges(G1_node,G1_node) != self.G2.number_of_edges(G2_node,G2_node):
return False
# R_pred
# For each predecessor n' of n in the partial mapping, the
# corresponding node m' is a predecessor of m, and vice versa. Also,
# the number of edges must be equal
for predecessor in self.G1.pred[G1_node]:
if predecessor in self.core_1:
if not (self.core_1[predecessor] in self.G2.pred[G2_node]):
return False
elif self.G1.number_of_edges(predecessor, G1_node) != self.G2.number_of_edges(self.core_1[predecessor], G2_node):
return False
for predecessor in self.G2.pred[G2_node]:
if predecessor in self.core_2:
if not (self.core_2[predecessor] in self.G1.pred[G1_node]):
return False
elif self.G1.number_of_edges(self.core_2[predecessor], G1_node) != self.G2.number_of_edges(predecessor, G2_node):
return False
# R_succ
# For each successor n' of n in the partial mapping, the corresponding
# node m' is a successor of m, and vice versa. Also, the number of
# edges must be equal.
for successor in self.G1[G1_node]:
if successor in self.core_1:
if not (self.core_1[successor] in self.G2[G2_node]):
return False
elif self.G1.number_of_edges(G1_node, successor) != self.G2.number_of_edges(G2_node, self.core_1[successor]):
return False
for successor in self.G2[G2_node]:
if successor in self.core_2:
if not (self.core_2[successor] in self.G1[G1_node]):
return False
elif self.G1.number_of_edges(G1_node, self.core_2[successor]) != self.G2.number_of_edges(G2_node, successor):
return False
### Look ahead 1
# R_termin
# The number of predecessors of n that are in T_1^{in} is equal to the
# number of predecessors of m that are in T_2^{in}.
num1 = 0
for predecessor in self.G1.pred[G1_node]:
if (predecessor in self.in_1) and (predecessor not in self.core_1):
num1 += 1
num2 = 0
for predecessor in self.G2.pred[G2_node]:
if (predecessor in self.in_2) and (predecessor not in self.core_2):
num2 += 1
if self.test == 'graph':
if not (num1 == num2):
return False
else: # self.test == 'subgraph'
if not (num1 >= num2):
return False
# The number of successors of n that are in T_1^{in} is equal to the
# number of successors of m that are in T_2^{in}.
num1 = 0
for successor in self.G1[G1_node]:
if (successor in self.in_1) and (successor not in self.core_1):
num1 += 1
num2 = 0
for successor in self.G2[G2_node]:
if (successor in self.in_2) and (successor not in self.core_2):
num2 += 1
if self.test == 'graph':
if not (num1 == num2):
return False
else: # self.test == 'subgraph'
if not (num1 >= num2):
return False
# R_termout
# The number of predecessors of n that are in T_1^{out} is equal to the
# number of predecessors of m that are in T_2^{out}.
num1 = 0
for predecessor in self.G1.pred[G1_node]:
if (predecessor in self.out_1) and (predecessor not in self.core_1):
num1 += 1
num2 = 0
for predecessor in self.G2.pred[G2_node]:
if (predecessor in self.out_2) and (predecessor not in self.core_2):
num2 += 1
if self.test == 'graph':
if not (num1 == num2):
return False
else: # self.test == 'subgraph'
if not (num1 >= num2):
return False
# The number of successors of n that are in T_1^{out} is equal to the
# number of successors of m that are in T_2^{out}.
num1 = 0
for successor in self.G1[G1_node]:
if (successor in self.out_1) and (successor not in self.core_1):
num1 += 1
num2 = 0
for successor in self.G2[G2_node]:
if (successor in self.out_2) and (successor not in self.core_2):
num2 += 1
if self.test == 'graph':
if not (num1 == num2):
return False
else: # self.test == 'subgraph'
if not (num1 >= num2):
return False
### Look ahead 2
# R_new
# The number of predecessors of n that are neither in the core_1 nor
# T_1^{in} nor T_1^{out} is equal to the number of predecessors of m
# that are neither in core_2 nor T_2^{in} nor T_2^{out}.
num1 = 0
for predecessor in self.G1.pred[G1_node]:
if (predecessor not in self.in_1) and (predecessor not in self.out_1):
num1 += 1
num2 = 0
for predecessor in self.G2.pred[G2_node]:
if (predecessor not in self.in_2) and (predecessor not in self.out_2):
num2 += 1
if self.test == 'graph':
if not (num1 == num2):
return False
else: # self.test == 'subgraph'
if not (num1 >= num2):
return False
# The number of successors of n that are neither in the core_1 nor
# T_1^{in} nor T_1^{out} is equal to the number of successors of m
# that are neither in core_2 nor T_2^{in} nor T_2^{out}.
num1 = 0
for successor in self.G1[G1_node]:
if (successor not in self.in_1) and (successor not in self.out_1):
num1 += 1
num2 = 0
for successor in self.G2[G2_node]:
if (successor not in self.in_2) and (successor not in self.out_2):
num2 += 1
if self.test == 'graph':
if not (num1 == num2):
return False
else: # self.test == 'subgraph'
if not (num1 >= num2):
return False
# Otherwise, this node pair is syntactically feasible!
return True
class GMState(object):
"""Internal representation of state for the GraphMatcher class.
This class is used internally by the GraphMatcher class. It is used
only to store state specific data. There will be at most G2.order() of
these objects in memory at a time, due to the depth-first search
strategy employed by the VF2 algorithm.
"""
def __init__(self, GM, G1_node=None, G2_node=None):
"""Initializes GMState object.
Pass in the GraphMatcher to which this GMState belongs and the
new node pair that will be added to the GraphMatcher's current
isomorphism mapping.
"""
self.GM = GM
# Initialize the last stored node pair.
self.G1_node = None
self.G2_node = None
self.depth = len(GM.core_1)
if G1_node is None or G2_node is None:
# Then we reset the class variables
GM.core_1 = {}
GM.core_2 = {}
GM.inout_1 = {}
GM.inout_2 = {}
# Watch out! G1_node == 0 should evaluate to True.
if G1_node is not None and G2_node is not None:
# Add the node pair to the isomorphism mapping.
GM.core_1[G1_node] = G2_node
GM.core_2[G2_node] = G1_node
# Store the node that was added last.
self.G1_node = G1_node
self.G2_node = G2_node
# Now we must update the other two vectors.
# We will add only if it is not in there already!
self.depth = len(GM.core_1)
# First we add the new nodes...
if G1_node not in GM.inout_1:
GM.inout_1[G1_node] = self.depth
if G2_node not in GM.inout_2:
GM.inout_2[G2_node] = self.depth
# Now we add every other node...
# Updates for T_1^{inout}
new_nodes = set([])
for node in GM.core_1:
new_nodes.update([neighbor for neighbor in GM.G1[node] if neighbor not in GM.core_1])
for node in new_nodes:
if node not in GM.inout_1:
GM.inout_1[node] = self.depth
# Updates for T_2^{inout}
new_nodes = set([])
for node in GM.core_2:
new_nodes.update([neighbor for neighbor in GM.G2[node] if neighbor not in GM.core_2])
for node in new_nodes:
if node not in GM.inout_2:
GM.inout_2[node] = self.depth
def restore(self):
"""Deletes the GMState object and restores the class variables."""
# First we remove the node that was added from the core vectors.
# Watch out! G1_node == 0 should evaluate to True.
if self.G1_node is not None and self.G2_node is not None:
del self.GM.core_1[self.G1_node]
del self.GM.core_2[self.G2_node]
# Now we revert the other two vectors.
# Thus, we delete all entries which have this depth level.
for vector in (self.GM.inout_1, self.GM.inout_2):
for node in list(vector.keys()):
if vector[node] == self.depth:
del vector[node]
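# Illustrative note (hedged): GMState objects form an implicit stack --
# constructing one records a candidate pair at the current search depth, and
# restore() deletes every entry tagged with that depth, which is how match()
# backtracks as the recursion unwinds.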
class DiGMState(object):
"""Internal representation of state for the DiGraphMatcher class.
This class is used internally by the DiGraphMatcher class. It is used
only to store state specific data. There will be at most G2.order() of
these objects in memory at a time, due to the depth-first search
strategy employed by the VF2 algorithm.
"""
def __init__(self, GM, G1_node=None, G2_node=None):
"""Initializes DiGMState object.
Pass in the DiGraphMatcher to which this DiGMState belongs and the
new node pair that will be added to the GraphMatcher's current
isomorphism mapping.
"""
self.GM = GM
# Initialize the last stored node pair.
self.G1_node = None
self.G2_node = None
self.depth = len(GM.core_1)
if G1_node is None or G2_node is None:
# Then we reset the class variables
GM.core_1 = {}
GM.core_2 = {}
GM.in_1 = {}
GM.in_2 = {}
GM.out_1 = {}
GM.out_2 = {}
# Watch out! G1_node == 0 should evaluate to True.
if G1_node is not None and G2_node is not None:
# Add the node pair to the isomorphism mapping.
GM.core_1[G1_node] = G2_node
GM.core_2[G2_node] = G1_node
# Store the node that was added last.
self.G1_node = G1_node
self.G2_node = G2_node
# Now we must update the other four vectors.
# We will add only if it is not in there already!
self.depth = len(GM.core_1)
# First we add the new nodes...
for vector in (GM.in_1, GM.out_1):
if G1_node not in vector:
vector[G1_node] = self.depth
for vector in (GM.in_2, GM.out_2):
if G2_node not in vector:
vector[G2_node] = self.depth
# Now we add every other node...
# Updates for T_1^{in}
new_nodes = set([])
for node in GM.core_1:
new_nodes.update([predecessor for predecessor in GM.G1.predecessors(node) if predecessor not in GM.core_1])
for node in new_nodes:
if node not in GM.in_1:
GM.in_1[node] = self.depth
# Updates for T_2^{in}
new_nodes = set([])
for node in GM.core_2:
new_nodes.update([predecessor for predecessor in GM.G2.predecessors(node) if predecessor not in GM.core_2])
for node in new_nodes:
if node not in GM.in_2:
GM.in_2[node] = self.depth
# Updates for T_1^{out}
new_nodes = set([])
for node in GM.core_1:
new_nodes.update([successor for successor in GM.G1.successors(node) if successor not in GM.core_1])
for node in new_nodes:
if node not in GM.out_1:
GM.out_1[node] = self.depth
# Updates for T_2^{out}
new_nodes = set([])
for node in GM.core_2:
new_nodes.update([successor for successor in GM.G2.successors(node) if successor not in GM.core_2])
for node in new_nodes:
if node not in GM.out_2:
GM.out_2[node] = self.depth
def restore(self):
"""Deletes the DiGMState object and restores the class variables."""
# First we remove the node that was added from the core vectors.
# Watch out! G1_node == 0 should evaluate to True.
if self.G1_node is not None and self.G2_node is not None:
del self.GM.core_1[self.G1_node]
del self.GM.core_2[self.G2_node]
# Now we revert the other four vectors.
# Thus, we delete all entries which have this depth level.
for vector in (self.GM.in_1, self.GM.in_2, self.GM.out_1, self.GM.out_2):
for node in list(vector.keys()):
if vector[node] == self.depth:
del vector[node]
| gpl-2.0 | -4,460,066,784,446,512,600 | 37.128497 | 129 | 0.586101 | false |
westinedu/similarinterest | django/utils/http.py | 12 | 7434 | import calendar
import datetime
import re
import sys
import urllib
import urlparse
from email.utils import formatdate
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import smart_str, force_unicode
from django.utils.functional import allow_lazy
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
def urlquote(url, safe='/'):
"""
A version of Python's urllib.quote() function that can operate on unicode
strings. The url is first UTF-8 encoded before quoting. The returned string
can safely be used as part of an argument to a subsequent iri_to_uri() call
without double-quoting occurring.
"""
return force_unicode(urllib.quote(smart_str(url), smart_str(safe)))
urlquote = allow_lazy(urlquote, unicode)
def urlquote_plus(url, safe=''):
"""
A version of Python's urllib.quote_plus() function that can operate on
unicode strings. The url is first UTF-8 encoded before quoting. The
returned string can safely be used as part of an argument to a subsequent
iri_to_uri() call without double-quoting occurring.
"""
return force_unicode(urllib.quote_plus(smart_str(url), smart_str(safe)))
urlquote_plus = allow_lazy(urlquote_plus, unicode)
def urlunquote(quoted_url):
"""
A wrapper for Python's urllib.unquote() function that can operate on
the result of django.utils.http.urlquote().
"""
return force_unicode(urllib.unquote(smart_str(quoted_url)))
urlunquote = allow_lazy(urlunquote, unicode)
def urlunquote_plus(quoted_url):
"""
A wrapper for Python's urllib.unquote_plus() function that can operate on
the result of django.utils.http.urlquote_plus().
"""
return force_unicode(urllib.unquote_plus(smart_str(quoted_url)))
urlunquote_plus = allow_lazy(urlunquote_plus, unicode)
def urlencode(query, doseq=0):
"""
A version of Python's urllib.urlencode() function that can operate on
    unicode strings. The parameters are first cast to UTF-8 encoded strings and
then encoded as per normal.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
return urllib.urlencode(
[(smart_str(k),
isinstance(v, (list,tuple)) and [smart_str(i) for i in v] or smart_str(v))
for k, v in query],
doseq)
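# Illustrative sketch (hedged, hypothetical data): because values are passed
# through smart_str() first, urlencode({'q': u'caf\xe9'}) yields the UTF-8
# percent-encoding 'q=caf%C3%A9'.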
def cookie_date(epoch_seconds=None):
"""
Formats the time to ensure compatibility with Netscape's cookie standard.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
"""
Formats the time to match the RFC1123 date format as specified by HTTP
RFC2616 section 3.3.1.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s GMT' % rfcdate[:25]
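# Illustrative sketch (hedged): formatting the Unix epoch.
# >>> http_date(0)
# 'Thu, 01 Jan 1970 00:00:00 GMT'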
def parse_http_date(date):
"""
Parses a date format as specified by HTTP RFC2616 section 3.3.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Returns an floating point number expressed in seconds since the epoch, in
UTC.
"""
# emails.Util.parsedate does the job for RFC1123 dates; unfortunately
# RFC2616 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception:
raise ValueError("%r is not a valid date" % date)
def parse_http_date_safe(date):
"""
Same as parse_http_date, but returns None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
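# Illustrative sketch (hedged): the RFC 2616 example date parses to epoch
# seconds, and the _safe variant swallows malformed input.
# >>> parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT')
# 784111777
# >>> parse_http_date_safe('not a date') is None
# True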
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
    Converts a base 36 string to an ``int``. Raises ``ValueError`` if the
input won't fit into an int.
"""
# To prevent overconsumption of server resources, reject any
    # base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
value = int(s, 36)
# ... then do a final check that the value will fit into an int.
if value > sys.maxint:
raise ValueError("Base36 input too large")
return value
def int_to_base36(i):
"""
Converts an integer to a base36 string
"""
digits = "0123456789abcdefghijklmnopqrstuvwxyz"
factor = 0
if not 0 <= i <= sys.maxint:
raise ValueError("Base36 conversion input too large or incorrect type.")
# Find starting factor
while True:
factor += 1
if i < 36 ** factor:
factor -= 1
break
base36 = []
# Construct base36 representation
while factor >= 0:
j = 36 ** factor
base36.append(digits[i // j])
i = i % j
factor -= 1
return ''.join(base36)
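# Illustrative round-trip sketch (hedged, values are hypothetical):
def _base36_roundtrip_demo():
    assert int_to_base36(12345) == '9ix'
    assert base36_to_int('9ix') == 12345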
def parse_etags(etag_str):
"""
Parses a string with one or several etags passed in If-None-Match and
If-Match headers by the rules in RFC 2616. Returns a list of etags
without surrounding double quotes (") and unescaped from \<CHAR>.
"""
etags = ETAG_MATCH.findall(etag_str)
if not etags:
# etag_str has wrong format, treat it as an opaque string then
return [etag_str]
etags = [e.decode('string_escape') for e in etags]
return etags
def quote_etag(etag):
"""
    Wraps a string in double quotes escaping contents as necessary.
"""
return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"')
def same_origin(url1, url2):
"""
Checks if two URLs are 'same-origin'
"""
p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)
return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)
| bsd-3-clause | 2,240,052,499,610,463,000 | 33.576744 | 83 | 0.635862 | false |
dlacombejr/deepy | deepy/utils/initializers.py | 2 | 4016 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
def get_fans(shape):
fan_in = shape[0] if len(shape) == 2 else np.prod(shape[1:])
fan_out = shape[1] if len(shape) == 2 else shape[0]
return fan_in, fan_out
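# Illustrative note (hedged, hypothetical shape): for a conv kernel of shape
# (out_channels, in_channels, kh, kw) such as (8, 3, 5, 5), fan_in collapses
# the trailing dims (3 * 5 * 5 = 75) while fan_out is the leading dim (8):
# assert get_fans((8, 3, 5, 5)) == (75, 8)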
class WeightInitializer(object):
"""
Initializer for creating weights.
"""
def __init__(self, seed=None):
if not seed:
seed = 3
self.rand = np.random.RandomState(seed)
def sample(self, shape):
"""
Sample parameters with given shape.
"""
raise NotImplementedError
class UniformInitializer(WeightInitializer):
"""
Uniform weight sampler.
"""
def __init__(self, scale=None, svd=False, seed=None):
super(UniformInitializer, self).__init__(seed)
self.scale = scale
self.svd = svd
def sample(self, shape):
if not self.scale:
scale = np.sqrt(6. / sum(get_fans(shape)))
else:
scale = self.scale
weight = self.rand.uniform(-1, 1, size=shape) * scale
if self.svd:
norm = np.sqrt((weight**2).sum())
ws = scale * weight / norm
_, v, _ = np.linalg.svd(ws)
ws = scale * ws / v[0]
return weight
class GaussianInitializer(WeightInitializer):
"""
Gaussian weight sampler.
"""
def __init__(self, mean=0, deviation=0.01, seed=None):
super(GaussianInitializer, self).__init__(seed)
self.mean = mean
self.deviation = deviation
def sample(self, shape):
weight = self.rand.normal(self.mean, self.deviation, size=shape)
return weight
class IdentityInitializer(WeightInitializer):
"""
Initialize weight as identity matrices.
"""
def __init__(self, scale=1):
super(IdentityInitializer, self).__init__()
        self.scale = scale
def sample(self, shape):
assert len(shape) == 2
return np.eye(*shape) * self.scale
class XavierGlorotInitializer(WeightInitializer):
"""
Xavier Glorot's weight initializer.
See http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
"""
def __init__(self, uniform=False, seed=None):
"""
Parameters:
uniform - uniform distribution, default Gaussian
seed - random seed
"""
super(XavierGlorotInitializer, self).__init__(seed)
self.uniform = uniform
def sample(self, shape):
scale = np.sqrt(2. / sum(get_fans(shape)))
if self.uniform:
return self.rand.uniform(-1, 1, size=shape) * scale
else:
return self.rand.randn(*shape) * scale
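# Illustrative sketch (hedged, not part of the original module): drawing a
# hypothetical 3x4 Glorot-initialized weight matrix.
def _demo_glorot_sample():
    init = XavierGlorotInitializer(seed=42)
    return init.sample((3, 4))  # Gaussian draws scaled by sqrt(2. / (3 + 4))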
class KaimingHeInitializer(WeightInitializer):
"""
Kaiming He's initialization scheme, especially made for ReLU.
See http://arxiv.org/abs/1502.01852.
"""
def __init__(self, uniform=False, seed=None):
"""
Parameters:
uniform - uniform distribution, default Gaussian
seed - random seed
"""
super(KaimingHeInitializer, self).__init__(seed)
self.uniform = uniform
def sample(self, shape):
fan_in, fan_out = get_fans(shape)
scale = np.sqrt(2. / fan_in)
if self.uniform:
return self.rand.uniform(-1, 1, size=shape) * scale
else:
return self.rand.randn(*shape) * scale
class OrthogonalInitializer(WeightInitializer):
"""
Orthogonal weight initializer.
"""
def __init__(self, scale=1.1, seed=None):
"""
Parameters:
scale - scale
seed - random seed
"""
super(OrthogonalInitializer, self).__init__(seed)
self.scale = scale
def sample(self, shape):
flat_shape = (shape[0], np.prod(shape[1:]))
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v
q = q.reshape(shape)
return self.scale * q[:shape[0], :shape[1]] | mit | 2,973,363,724,832,962,000 | 27.692857 | 72 | 0.573456 | false |
michitomo/ansible-modules-core | packaging/os/redhat_subscription.py | 29 | 16866 | #!/usr/bin/python
DOCUMENTATION = '''
---
module: redhat_subscription
short_description: Manage Red Hat Network registration and subscriptions using the C(subscription-manager) command
description:
- Manage registration and subscription to the Red Hat Network entitlement platform.
version_added: "1.2"
author: "James Laska (@jlaska)"
notes:
- In order to register a system, subscription-manager requires either a username and password, or an activationkey.
requirements:
- subscription-manager
options:
state:
description:
- whether to register and subscribe (C(present)), or unregister (C(absent)) a system
required: false
choices: [ "present", "absent" ]
default: "present"
username:
description:
- Red Hat Network username
required: False
default: null
password:
description:
- Red Hat Network password
required: False
default: null
server_hostname:
description:
- Specify an alternative Red Hat Network server
required: False
default: Current value from C(/etc/rhsm/rhsm.conf) is the default
server_insecure:
description:
- Allow traffic over insecure http
required: False
default: Current value from C(/etc/rhsm/rhsm.conf) is the default
rhsm_baseurl:
description:
- Specify CDN baseurl
required: False
default: Current value from C(/etc/rhsm/rhsm.conf) is the default
autosubscribe:
description:
- Upon successful registration, auto-consume available subscriptions
required: False
default: False
activationkey:
description:
- supply an activation key for use with registration
required: False
default: null
org_id:
description:
- Organisation ID to use in conjunction with activationkey
required: False
default: null
version_added: "2.0"
pool:
description:
- Specify a subscription pool name to consume. Regular expressions accepted.
required: False
default: '^$'
'''
EXAMPLES = '''
# Register as user (joe_user) with password (somepass) and auto-subscribe to available content.
- redhat_subscription: state=present username=joe_user password=somepass autosubscribe=true
# Register with activationkey (1-222333444) and consume subscriptions matching
# the names (Red hat Enterprise Server) and (Red Hat Virtualization)
- redhat_subscription: state=present
activationkey=1-222333444
pool='^(Red Hat Enterprise Server|Red Hat Virtualization)$'
# Update the consumed subscriptions from the previous example (remove the Red
# Hat Virtualization subscription)
- redhat_subscription: state=present
activationkey=1-222333444
pool='^Red Hat Enterprise Server$'
'''
import os
import re
import types
import ConfigParser
import shlex
class RegistrationBase(object):
def __init__(self, module, username=None, password=None):
self.module = module
self.username = username
self.password = password
def configure(self):
raise NotImplementedError("Must be implemented by a sub-class")
def enable(self):
# Remove any existing redhat.repo
redhat_repo = '/etc/yum.repos.d/redhat.repo'
if os.path.isfile(redhat_repo):
os.unlink(redhat_repo)
def register(self):
raise NotImplementedError("Must be implemented by a sub-class")
def unregister(self):
raise NotImplementedError("Must be implemented by a sub-class")
def unsubscribe(self):
raise NotImplementedError("Must be implemented by a sub-class")
def update_plugin_conf(self, plugin, enabled=True):
plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
if os.path.isfile(plugin_conf):
cfg = ConfigParser.ConfigParser()
cfg.read([plugin_conf])
if enabled:
cfg.set('main', 'enabled', 1)
else:
cfg.set('main', 'enabled', 0)
            fd = open(plugin_conf, 'w')
cfg.write(fd)
fd.close()
def subscribe(self, **kwargs):
raise NotImplementedError("Must be implemented by a sub-class")
class Rhsm(RegistrationBase):
def __init__(self, module, username=None, password=None):
RegistrationBase.__init__(self, module, username, password)
self.config = self._read_config()
self.module = module
def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'):
'''
Load RHSM configuration from /etc/rhsm/rhsm.conf.
Returns:
* ConfigParser object
'''
# Read RHSM defaults ...
cp = ConfigParser.ConfigParser()
cp.read(rhsm_conf)
# Add support for specifying a default value w/o having to standup some configuration
# Yeah, I know this should be subclassed ... but, oh well
def get_option_default(self, key, default=''):
sect, opt = key.split('.', 1)
if self.has_section(sect) and self.has_option(sect, opt):
return self.get(sect, opt)
else:
return default
cp.get_option = types.MethodType(get_option_default, cp, ConfigParser.ConfigParser)
return cp
def enable(self):
'''
Enable the system to receive updates from subscription-manager.
This involves updating affected yum plugins and removing any
conflicting yum repositories.
'''
RegistrationBase.enable(self)
self.update_plugin_conf('rhnplugin', False)
self.update_plugin_conf('subscription-manager', True)
def configure(self, **kwargs):
'''
Configure the system as directed for registration with RHN
Raises:
* Exception - if error occurs while running command
'''
args = ['subscription-manager', 'config']
# Pass supplied **kwargs as parameters to subscription-manager. Ignore
# non-configuration parameters and replace '_' with '.'. For example,
# 'server_hostname' becomes '--system.hostname'.
for k,v in kwargs.items():
if re.search(r'^(system|rhsm)_', k):
args.append('--%s=%s' % (k.replace('_','.'), v))
self.module.run_command(args, check_rc=True)
@property
def is_registered(self):
'''
        Determine whether the current system is registered to RHN.
Returns:
* Boolean - whether the current system is currently registered to
RHN.
'''
# Quick version...
if False:
return os.path.isfile('/etc/pki/consumer/cert.pem') and \
os.path.isfile('/etc/pki/consumer/key.pem')
args = ['subscription-manager', 'identity']
rc, stdout, stderr = self.module.run_command(args, check_rc=False)
if rc == 0:
return True
else:
return False
def register(self, username, password, autosubscribe, activationkey, org_id):
'''
Register the current system to the provided RHN server
Raises:
* Exception - if error occurs while running command
'''
args = ['subscription-manager', 'register']
# Generate command arguments
if activationkey:
args.extend(['--activationkey', activationkey])
if org_id:
args.extend(['--org', org_id])
else:
if autosubscribe:
args.append('--autosubscribe')
if username:
args.extend(['--username', username])
if password:
args.extend(['--password', password])
rc, stderr, stdout = self.module.run_command(args, check_rc=True)
def unsubscribe(self, serials=None):
'''
Unsubscribe a system from subscribed channels
Args:
serials(list or None): list of serials to unsubscribe. If
serials is none or an empty list, then
all subscribed channels will be removed.
Raises:
* Exception - if error occurs while running command
'''
items = []
if serials is not None and serials:
items = ["--serial=%s" % s for s in serials]
if serials is None:
items = ["--all"]
if items:
args = ['subscription-manager', 'unsubscribe'] + items
rc, stderr, stdout = self.module.run_command(args, check_rc=True)
return serials
def unregister(self):
'''
Unregister a currently registered system
Raises:
* Exception - if error occurs while running command
'''
args = ['subscription-manager', 'unregister']
        rc, stdout, stderr = self.module.run_command(args, check_rc=True)
def subscribe(self, regexp):
'''
Subscribe current system to available pools matching the specified
regular expression
Raises:
* Exception - if error occurs while running command
'''
# Available pools ready for subscription
available_pools = RhsmPools(self.module)
subscribed_pool_ids = []
for pool in available_pools.filter(regexp):
pool.subscribe()
subscribed_pool_ids.append(pool.get_pool_id())
return subscribed_pool_ids
def update_subscriptions(self, regexp):
        changed = False
        consumed_pools = RhsmPools(self.module, consumed=True)
        pool_ids_to_keep = [p.get_pool_id() for p in consumed_pools.filter(regexp)]
        serials_to_remove = [p.Serial for p in consumed_pools if p.get_pool_id() not in pool_ids_to_keep]
        serials = self.unsubscribe(serials=serials_to_remove)
        subscribed_pool_ids = self.subscribe(regexp)
        if subscribed_pool_ids or serials:
            changed = True
return {'changed': changed, 'subscribed_pool_ids': subscribed_pool_ids,
'unsubscribed_serials': serials}
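    # Shape of the result, with hypothetical IDs:
    #   {'changed': True, 'subscribed_pool_ids': ['8a85f981'],
    #    'unsubscribed_serials': ['5529360793']}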
class RhsmPool(object):
'''
Convenience class for housing subscription information
'''
def __init__(self, module, **kwargs):
self.module = module
        for k, v in kwargs.items():
setattr(self, k, v)
def __str__(self):
return str(self.__getattribute__('_name'))
def get_pool_id(self):
        # getattr's default argument is evaluated eagerly, so nesting the
        # getattr calls raises AttributeError when 'PoolID' is absent even
        # if 'PoolId' exists; fall back explicitly instead.
        return getattr(self, 'PoolId', None) or getattr(self, 'PoolID')
def subscribe(self):
args = "subscription-manager subscribe --pool %s" % self.get_pool_id()
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
        return rc == 0
class RhsmPools(object):
"""
This class is used for manipulating pools subscriptions with RHSM
"""
def __init__(self, module, consumed=False):
self.module = module
self.products = self._load_product_list(consumed)
def __iter__(self):
return self.products.__iter__()
def _load_product_list(self, consumed=False):
"""
Loads list of all available or consumed pools for system in data structure
Args:
consumed(bool): if True list consumed pools, else list available pools (default False)
"""
args = "subscription-manager list"
if consumed:
args += " --consumed"
else:
args += " --available"
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
products = []
for line in stdout.split('\n'):
# Remove leading+trailing whitespace
line = line.strip()
            # An empty line implies the end of an output group
if len(line) == 0:
continue
            # If a colon ':' is found, parse the line as a key/value pair
            elif ':' in line:
                (key, value) = line.split(':', 1)
                key = key.strip().replace(" ", "")  # normalize, e.g. 'Pool ID' -> 'PoolID'
                value = value.strip()
if key in ['ProductName', 'SubscriptionName']:
# Remember the name for later processing
products.append(RhsmPool(self.module, _name=value, key=value))
elif products:
# Associate value with most recently recorded product
products[-1].__setattr__(key, value)
# FIXME - log some warning?
#else:
# warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value))
return products
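    # Parsing sketch with hypothetical CLI output: a block such as
    #   Subscription Name: Red Hat Enterprise Linux
    #   Pool ID:           8a85f981
    #   Ends:              01/01/2022
    # yields one RhsmPool with _name='Red Hat Enterprise Linux',
    # PoolID='8a85f981' and Ends='01/01/2022' (spaces are stripped from keys).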
def filter(self, regexp='^$'):
        '''
        Yield the RhsmPools whose name matches the provided regular expression
        '''
r = re.compile(regexp)
for product in self.products:
if r.search(product._name):
yield product
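    # Usage sketch: RhsmPools(module).filter('^Red Hat') yields every pool
    # whose name starts with 'Red Hat'. The default pattern '^$' matches only
    # an empty name, i.e. effectively nothing.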
def main():
    # Instantiate early (module=None) so rhsm.conf defaults can seed the
    # argument spec below; rhn.module is attached once AnsibleModule exists.
rhn = Rhsm(None)
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['present', 'absent']),
username = dict(default=None, required=False),
password = dict(default=None, required=False),
server_hostname = dict(default=rhn.config.get_option('server.hostname'), required=False),
server_insecure = dict(default=rhn.config.get_option('server.insecure'), required=False),
rhsm_baseurl = dict(default=rhn.config.get_option('rhsm.baseurl'), required=False),
autosubscribe = dict(default=False, type='bool'),
activationkey = dict(default=None, required=False),
org_id = dict(default=None, required=False),
pool = dict(default='^$', required=False, type='str'),
)
)
rhn.module = module
state = module.params['state']
username = module.params['username']
password = module.params['password']
server_hostname = module.params['server_hostname']
server_insecure = module.params['server_insecure']
rhsm_baseurl = module.params['rhsm_baseurl']
    autosubscribe = module.params['autosubscribe']  # type='bool' already coerces
activationkey = module.params['activationkey']
org_id = module.params['org_id']
pool = module.params['pool']
# Ensure system is registered
if state == 'present':
# Check for missing parameters ...
        if not (activationkey or username or password):
            module.fail_json(msg="Missing arguments: must supply either an activationkey or a username and password")
        if not activationkey and not (username and password):
            module.fail_json(msg="Missing arguments: when registering without an activationkey, both username and password must be supplied")
# Register system
if rhn.is_registered:
if pool != '^$':
try:
result = rhn.update_subscriptions(pool)
except Exception, e:
module.fail_json(msg="Failed to update subscriptions for '%s': %s" % (server_hostname, e))
else:
module.exit_json(**result)
else:
module.exit_json(changed=False, msg="System already registered.")
else:
try:
rhn.enable()
rhn.configure(**module.params)
rhn.register(username, password, autosubscribe, activationkey, org_id)
subscribed_pool_ids = rhn.subscribe(pool)
except Exception, e:
module.fail_json(msg="Failed to register with '%s': %s" % (server_hostname, e))
else:
module.exit_json(changed=True,
msg="System successfully registered to '%s'." % server_hostname,
subscribed_pool_ids=subscribed_pool_ids)
# Ensure system is *not* registered
if state == 'absent':
if not rhn.is_registered:
module.exit_json(changed=False, msg="System already unregistered.")
else:
try:
rhn.unsubscribe()
rhn.unregister()
except Exception, e:
module.fail_json(msg="Failed to unregister: %s" % e)
else:
module.exit_json(changed=True, msg="System successfully unregistered from %s." % server_hostname)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 | -6,449,662,796,856,098,000 | 35.506494 | 161 | 0.583304 | false |
brianlsharp/MissionPlanner | Lib/site-packages/numpy/linalg/tests/test_build.py | 81 | 1631 | from subprocess import call, PIPE, Popen
import sys
import re
import numpy as np
from numpy.linalg import lapack_lite
from numpy.testing import TestCase, dec
from numpy.compat import asbytes_nested
class FindDependenciesLdd:
def __init__(self):
self.cmd = ['ldd']
try:
st = call(self.cmd, stdout=PIPE, stderr=PIPE)
except OSError:
raise RuntimeError("command %s cannot be run" % self.cmd)
def get_dependencies(self, file):
p = Popen(self.cmd + [file], stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
        if p.returncode != 0:
            raise RuntimeError("Failed to check dependencies for %s" % file)
return stdout
def grep_dependencies(self, file, deps):
stdout = self.get_dependencies(file)
rdeps = dict([(dep, re.compile(dep)) for dep in deps])
founds = []
for l in stdout.splitlines():
for k, v in rdeps.items():
if v.search(l):
founds.append(k)
return founds
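    # Usage sketch (path hypothetical):
    #   f = FindDependenciesLdd()
    #   f.grep_dependencies('/usr/lib/liblapack.so',
    #                       asbytes_nested(['libg2c', 'libgfortran']))
    # returns the subset of the given names found in the ldd output.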
class TestF77Mismatch(TestCase):
@dec.skipif(not(sys.platform[:5] == 'linux'),
"Skipping fortran compiler mismatch on non Linux platform")
def test_lapack(self):
f = FindDependenciesLdd()
deps = f.grep_dependencies(lapack_lite.__file__,
asbytes_nested(['libg2c', 'libgfortran']))
self.assertFalse(len(deps) > 1,
"""Both g77 and gfortran runtimes linked in lapack_lite ! This is likely to
cause random crashes and wrong results. See numpy INSTALL.txt for more
information.""")
| gpl-3.0 | -1,720,254,437,328,524,000 | 31.62 | 79 | 0.61496 | false |
sittingbull/artefact.connectors | artefact/connectors/adwords/api/report_field.py | 2 | 1472 | class ReportField(object):
REPORT_FIELD_TYPES_KNOWN = ['double', 'string', 'date', 'datetime', 'boolean']
REPORT_FIELD_TYPES_FALLBACK = 'string'
REPORT_FIELD_TYPE_MAP = {
'money': 'double',
'integer': 'int',
'bid': 'double',
'long': 'int'
}
def __init__(self, definition):
self._name = unicode(definition.fieldName)
self._type = unicode(self.__map_field_types(definition.fieldType))
self._display_name = unicode(definition.displayFieldName)
def __eq__(self, other):
return isinstance(other, ReportField) and self.__dict__ == other.__dict__
def __hash__(self):
return hash("".join([self.__dict__[key] for key in sorted(self.__dict__.keys())]))
def __repr__(self):
return "ReportField(name={name}, type={type}, display_name={display_name})".format(name=self.name, type=self.type, display_name=self.display_name)
@property
def name(self):
return self._name
@property
def type(self):
return self._type
@property
def display_name(self):
return self._display_name
def __map_field_types(self, field_type):
downcased = field_type.lower()
if downcased in self.REPORT_FIELD_TYPES_KNOWN:
return downcased
elif downcased in self.REPORT_FIELD_TYPE_MAP:
return self.REPORT_FIELD_TYPE_MAP[downcased]
else:
return self.REPORT_FIELD_TYPES_FALLBACK
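    # Mapping sketch: 'Money' -> 'double', 'Long' -> 'int', 'Date' -> 'date',
    # and an unknown type such as 'Enum' falls back to 'string'.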
| apache-2.0 | 382,767,283,652,394,240 | 31.711111 | 154 | 0.606658 | false |
2014c2g4/2015cda0623 | static/Brython3.1.0-20150301-090019/Lib/multiprocessing/__init__.py | 693 | 6866 | #
# Package analogous to 'threading.py' but using processes
#
# multiprocessing/__init__.py
#
# This package is intended to duplicate the functionality (and much of
# the API) of threading.py but uses processes instead of threads. A
# subpackage 'multiprocessing.dummy' has the same API but is a simple
# wrapper for 'threading'.
#
# Try calling `multiprocessing.doc.main()` to read the html
# documentation in a webbrowser.
#
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__version__ = '0.70a1'
__all__ = [
'Process', 'current_process', 'active_children', 'freeze_support',
'Manager', 'Pipe', 'cpu_count', 'log_to_stderr', 'get_logger',
'allow_connection_pickling', 'BufferTooShort', 'TimeoutError',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
'Event', 'Barrier', 'Queue', 'SimpleQueue', 'JoinableQueue', 'Pool',
'Value', 'Array', 'RawValue', 'RawArray', 'SUBDEBUG', 'SUBWARNING',
]
__author__ = 'R. Oudkerk ([email protected])'
#
# Imports
#
import os
import sys
from multiprocessing.process import Process, current_process, active_children
from multiprocessing.util import SUBDEBUG, SUBWARNING
#
# Exceptions
#
class ProcessError(Exception):
pass
class BufferTooShort(ProcessError):
pass
class TimeoutError(ProcessError):
pass
class AuthenticationError(ProcessError):
pass
import _multiprocessing
#
# Definitions not depending on native semaphores
#
def Manager():
'''
Returns a manager associated with a running server process
The managers methods such as `Lock()`, `Condition()` and `Queue()`
can be used to create shared objects.
'''
from multiprocessing.managers import SyncManager
m = SyncManager()
m.start()
return m
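# Usage sketch: proxies created by the manager can be shared across processes.
#   m = Manager()
#   q = m.Queue()
#   lock = m.Lock()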
#brython fix me
#def Pipe(duplex=True):
# '''
# Returns two connection object connected by a pipe
# '''
# from multiprocessing.connection import Pipe
# return Pipe(duplex)
def cpu_count():
'''
Returns the number of CPUs in the system
'''
if sys.platform == 'win32':
try:
num = int(os.environ['NUMBER_OF_PROCESSORS'])
except (ValueError, KeyError):
num = 0
elif 'bsd' in sys.platform or sys.platform == 'darwin':
comm = '/sbin/sysctl -n hw.ncpu'
if sys.platform == 'darwin':
comm = '/usr' + comm
try:
with os.popen(comm) as p:
num = int(p.read())
except ValueError:
num = 0
else:
try:
num = os.sysconf('SC_NPROCESSORS_ONLN')
except (ValueError, OSError, AttributeError):
num = 0
if num >= 1:
return num
else:
raise NotImplementedError('cannot determine number of cpus')
def freeze_support():
'''
Check whether this is a fake forked process in a frozen executable.
If so then run code specified by commandline and exit.
'''
if sys.platform == 'win32' and getattr(sys, 'frozen', False):
from multiprocessing.forking import freeze_support
freeze_support()
def get_logger():
'''
Return package logger -- if it does not already exist then it is created
'''
from multiprocessing.util import get_logger
return get_logger()
def log_to_stderr(level=None):
'''
Turn on logging and add a handler which prints to stderr
'''
from multiprocessing.util import log_to_stderr
return log_to_stderr(level)
#brython fix me
#def allow_connection_pickling():
# '''
# Install support for sending connections and sockets between processes
# '''
# # This is undocumented. In previous versions of multiprocessing
# # its only effect was to make socket objects inheritable on Windows.
# import multiprocessing.connection
#
# Definitions depending on native semaphores
#
def Lock():
'''
Returns a non-recursive lock object
'''
from multiprocessing.synchronize import Lock
return Lock()
def RLock():
'''
Returns a recursive lock object
'''
from multiprocessing.synchronize import RLock
return RLock()
def Condition(lock=None):
'''
Returns a condition object
'''
from multiprocessing.synchronize import Condition
return Condition(lock)
def Semaphore(value=1):
'''
Returns a semaphore object
'''
from multiprocessing.synchronize import Semaphore
return Semaphore(value)
def BoundedSemaphore(value=1):
'''
Returns a bounded semaphore object
'''
from multiprocessing.synchronize import BoundedSemaphore
return BoundedSemaphore(value)
def Event():
'''
Returns an event object
'''
from multiprocessing.synchronize import Event
return Event()
def Barrier(parties, action=None, timeout=None):
'''
Returns a barrier object
'''
from multiprocessing.synchronize import Barrier
return Barrier(parties, action, timeout)
def Queue(maxsize=0):
'''
Returns a queue object
'''
from multiprocessing.queues import Queue
return Queue(maxsize)
def JoinableQueue(maxsize=0):
'''
Returns a queue object
'''
from multiprocessing.queues import JoinableQueue
return JoinableQueue(maxsize)
def SimpleQueue():
'''
Returns a queue object
'''
from multiprocessing.queues import SimpleQueue
return SimpleQueue()
def Pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None):
'''
Returns a process pool object
'''
from multiprocessing.pool import Pool
return Pool(processes, initializer, initargs, maxtasksperchild)
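# Usage sketch (Python 3.3+, where Pool is a context manager):
#   with Pool(processes=4) as pool:
#       print(pool.map(abs, [-1, -2, 3]))   # [1, 2, 3]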
def RawValue(typecode_or_type, *args):
'''
Returns a shared object
'''
from multiprocessing.sharedctypes import RawValue
return RawValue(typecode_or_type, *args)
def RawArray(typecode_or_type, size_or_initializer):
'''
Returns a shared array
'''
from multiprocessing.sharedctypes import RawArray
return RawArray(typecode_or_type, size_or_initializer)
def Value(typecode_or_type, *args, lock=True):
'''
Returns a synchronized shared object
'''
from multiprocessing.sharedctypes import Value
return Value(typecode_or_type, *args, lock=lock)
def Array(typecode_or_type, size_or_initializer, *, lock=True):
'''
Returns a synchronized shared array
'''
from multiprocessing.sharedctypes import Array
return Array(typecode_or_type, size_or_initializer, lock=lock)
#
#
#
if sys.platform == 'win32':
def set_executable(executable):
'''
Sets the path to a python.exe or pythonw.exe binary used to run
child processes on Windows instead of sys.executable.
Useful for people embedding Python.
'''
from multiprocessing.forking import set_executable
set_executable(executable)
__all__ += ['set_executable']
| gpl-3.0 | -3,824,946,195,541,241,300 | 24.524164 | 79 | 0.668075 | false |
ABaldwinHunter/django-clone | tests/forms_tests/tests/test_forms.py | 17 | 154413 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import copy
import datetime
import json
import uuid
from django.core.exceptions import NON_FIELD_ERRORS
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.validators import RegexValidator
from django.forms import (
BooleanField, CharField, CheckboxSelectMultiple, ChoiceField, DateField,
DateTimeField, EmailField, FileField, FloatField, Form, HiddenInput,
ImageField, IntegerField, MultipleChoiceField, MultipleHiddenInput,
MultiValueField, NullBooleanField, PasswordInput, RadioSelect, Select,
SplitDateTimeField, SplitHiddenDateTimeWidget, Textarea, TextInput,
TimeField, ValidationError, forms,
)
from django.forms.utils import ErrorList
from django.http import QueryDict
from django.template import Context, Template
from django.test import SimpleTestCase
from django.test.utils import str_prefix
from django.utils import six
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.html import format_html
from django.utils.safestring import SafeData, mark_safe
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
class PersonNew(Form):
first_name = CharField(widget=TextInput(attrs={'id': 'first_name_id'}))
last_name = CharField()
birthday = DateField()
class FormsTestCase(SimpleTestCase):
# A Form is a collection of Fields. It knows how to validate a set of data and it
# knows how to render itself in a couple of default ways (e.g., an HTML table).
# You can pass it data in __init__(), as a dictionary.
def test_form(self):
# Pass a dictionary to a Form's __init__().
p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9'})
self.assertTrue(p.is_bound)
self.assertEqual(p.errors, {})
self.assertTrue(p.is_valid())
self.assertHTMLEqual(p.errors.as_ul(), '')
self.assertEqual(p.errors.as_text(), '')
self.assertEqual(p.cleaned_data["first_name"], 'John')
self.assertEqual(p.cleaned_data["last_name"], 'Lennon')
self.assertEqual(p.cleaned_data["birthday"], datetime.date(1940, 10, 9))
self.assertHTMLEqual(
str(p['first_name']),
'<input type="text" name="first_name" value="John" id="id_first_name" />'
)
self.assertHTMLEqual(
str(p['last_name']),
'<input type="text" name="last_name" value="Lennon" id="id_last_name" />'
)
self.assertHTMLEqual(
str(p['birthday']),
'<input type="text" name="birthday" value="1940-10-9" id="id_birthday" />'
)
nonexistenterror = "Key u?'nonexistentfield' not found in 'Person'"
with six.assertRaisesRegex(self, KeyError, nonexistenterror):
p['nonexistentfield']
self.fail('Attempts to access non-existent fields should fail.')
form_output = []
for boundfield in p:
form_output.append(str(boundfield))
self.assertHTMLEqual(
'\n'.join(form_output),
"""<input type="text" name="first_name" value="John" id="id_first_name" />
<input type="text" name="last_name" value="Lennon" id="id_last_name" />
<input type="text" name="birthday" value="1940-10-9" id="id_birthday" />"""
)
form_output = []
for boundfield in p:
form_output.append([boundfield.label, boundfield.data])
self.assertEqual(form_output, [
['First name', 'John'],
['Last name', 'Lennon'],
['Birthday', '1940-10-9']
])
self.assertHTMLEqual(
str(p),
"""<tr><th><label for="id_first_name">First name:</label></th><td>
<input type="text" name="first_name" value="John" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td>
<input type="text" name="last_name" value="Lennon" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td>
<input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></td></tr>"""
)
def test_empty_dict(self):
# Empty dictionaries are valid, too.
p = Person({})
self.assertTrue(p.is_bound)
self.assertEqual(p.errors['first_name'], ['This field is required.'])
self.assertEqual(p.errors['last_name'], ['This field is required.'])
self.assertEqual(p.errors['birthday'], ['This field is required.'])
self.assertFalse(p.is_valid())
self.assertEqual(p.cleaned_data, {})
self.assertHTMLEqual(
str(p),
"""<tr><th><label for="id_first_name">First name:</label></th><td>
<ul class="errorlist"><li>This field is required.</li></ul>
<input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th>
<td><ul class="errorlist"><li>This field is required.</li></ul>
<input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td>
<ul class="errorlist"><li>This field is required.</li></ul>
<input type="text" name="birthday" id="id_birthday" /></td></tr>"""
)
self.assertHTMLEqual(
p.as_table(),
"""<tr><th><label for="id_first_name">First name:</label></th><td>
<ul class="errorlist"><li>This field is required.</li></ul>
<input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th>
<td><ul class="errorlist"><li>This field is required.</li></ul>
<input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th>
<td><ul class="errorlist"><li>This field is required.</li></ul>
<input type="text" name="birthday" id="id_birthday" /></td></tr>"""
)
self.assertHTMLEqual(
p.as_ul(),
"""<li><ul class="errorlist"><li>This field is required.</li></ul>
<label for="id_first_name">First name:</label>
<input type="text" name="first_name" id="id_first_name" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
<label for="id_last_name">Last name:</label>
<input type="text" name="last_name" id="id_last_name" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
<label for="id_birthday">Birthday:</label>
<input type="text" name="birthday" id="id_birthday" /></li>"""
)
self.assertHTMLEqual(
p.as_p(),
"""<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_first_name">First name:</label>
<input type="text" name="first_name" id="id_first_name" /></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_last_name">Last name:</label>
<input type="text" name="last_name" id="id_last_name" /></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_birthday">Birthday:</label>
<input type="text" name="birthday" id="id_birthday" /></p>"""
)
def test_unbound_form(self):
# If you don't pass any values to the Form's __init__(), or if you pass None,
# the Form will be considered unbound and won't do any validation. Form.errors
# will be an empty dictionary *but* Form.is_valid() will return False.
p = Person()
self.assertFalse(p.is_bound)
self.assertEqual(p.errors, {})
self.assertFalse(p.is_valid())
try:
p.cleaned_data
self.fail('Attempts to access cleaned_data when validation fails should fail.')
except AttributeError:
pass
self.assertHTMLEqual(
str(p),
"""<tr><th><label for="id_first_name">First name:</label></th><td>
<input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td>
<input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td>
<input type="text" name="birthday" id="id_birthday" /></td></tr>"""
)
self.assertHTMLEqual(
p.as_table(),
"""<tr><th><label for="id_first_name">First name:</label></th><td>
<input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td>
<input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td>
<input type="text" name="birthday" id="id_birthday" /></td></tr>"""
)
self.assertHTMLEqual(
p.as_ul(),
"""<li><label for="id_first_name">First name:</label>
<input type="text" name="first_name" id="id_first_name" /></li>
<li><label for="id_last_name">Last name:</label>
<input type="text" name="last_name" id="id_last_name" /></li>
<li><label for="id_birthday">Birthday:</label>
<input type="text" name="birthday" id="id_birthday" /></li>"""
)
self.assertHTMLEqual(
p.as_p(),
"""<p><label for="id_first_name">First name:</label>
<input type="text" name="first_name" id="id_first_name" /></p>
<p><label for="id_last_name">Last name:</label>
<input type="text" name="last_name" id="id_last_name" /></p>
<p><label for="id_birthday">Birthday:</label>
<input type="text" name="birthday" id="id_birthday" /></p>"""
)
def test_unicode_values(self):
# Unicode values are handled properly.
p = Person({
'first_name': 'John',
'last_name': '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111',
'birthday': '1940-10-9'
})
self.assertHTMLEqual(
p.as_table(),
'<tr><th><label for="id_first_name">First name:</label></th><td>'
'<input type="text" name="first_name" value="John" id="id_first_name" /></td></tr>\n'
'<tr><th><label for="id_last_name">Last name:</label>'
'</th><td><input type="text" name="last_name" '
'value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111"'
'id="id_last_name" /></td></tr>\n'
'<tr><th><label for="id_birthday">Birthday:</label></th><td>'
'<input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></td></tr>'
)
self.assertHTMLEqual(
p.as_ul(),
'<li><label for="id_first_name">First name:</label> '
'<input type="text" name="first_name" value="John" id="id_first_name" /></li>\n'
'<li><label for="id_last_name">Last name:</label> '
'<input type="text" name="last_name" '
'value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" /></li>\n'
'<li><label for="id_birthday">Birthday:</label> '
'<input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></li>'
)
self.assertHTMLEqual(
p.as_p(),
'<p><label for="id_first_name">First name:</label> '
'<input type="text" name="first_name" value="John" id="id_first_name" /></p>\n'
'<p><label for="id_last_name">Last name:</label> '
'<input type="text" name="last_name" '
'value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" /></p>\n'
'<p><label for="id_birthday">Birthday:</label> '
'<input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></p>'
)
p = Person({'last_name': 'Lennon'})
self.assertEqual(p.errors['first_name'], ['This field is required.'])
self.assertEqual(p.errors['birthday'], ['This field is required.'])
self.assertFalse(p.is_valid())
self.assertDictEqual(
p.errors,
{'birthday': ['This field is required.'], 'first_name': ['This field is required.']}
)
self.assertEqual(p.cleaned_data, {'last_name': 'Lennon'})
self.assertEqual(p['first_name'].errors, ['This field is required.'])
self.assertHTMLEqual(
p['first_name'].errors.as_ul(),
'<ul class="errorlist"><li>This field is required.</li></ul>'
)
self.assertEqual(p['first_name'].errors.as_text(), '* This field is required.')
p = Person()
self.assertHTMLEqual(str(p['first_name']), '<input type="text" name="first_name" id="id_first_name" />')
self.assertHTMLEqual(str(p['last_name']), '<input type="text" name="last_name" id="id_last_name" />')
self.assertHTMLEqual(str(p['birthday']), '<input type="text" name="birthday" id="id_birthday" />')
def test_cleaned_data_only_fields(self):
# cleaned_data will always *only* contain a key for fields defined in the
# Form, even if you pass extra data when you define the Form. In this
# example, we pass a bunch of extra fields to the form constructor,
# but cleaned_data contains only the form's fields.
data = {
'first_name': 'John',
'last_name': 'Lennon',
'birthday': '1940-10-9',
'extra1': 'hello',
'extra2': 'hello',
}
p = Person(data)
self.assertTrue(p.is_valid())
self.assertEqual(p.cleaned_data['first_name'], 'John')
self.assertEqual(p.cleaned_data['last_name'], 'Lennon')
self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9))
def test_optional_data(self):
# cleaned_data will include a key and value for *all* fields defined in the Form,
# even if the Form's data didn't include a value for fields that are not
# required. In this example, the data dictionary doesn't include a value for the
# "nick_name" field, but cleaned_data includes it. For CharFields, it's set to the
# empty string.
class OptionalPersonForm(Form):
first_name = CharField()
last_name = CharField()
nick_name = CharField(required=False)
data = {'first_name': 'John', 'last_name': 'Lennon'}
f = OptionalPersonForm(data)
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['nick_name'], '')
self.assertEqual(f.cleaned_data['first_name'], 'John')
self.assertEqual(f.cleaned_data['last_name'], 'Lennon')
# For DateFields, it's set to None.
class OptionalPersonForm(Form):
first_name = CharField()
last_name = CharField()
birth_date = DateField(required=False)
data = {'first_name': 'John', 'last_name': 'Lennon'}
f = OptionalPersonForm(data)
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['birth_date'], None)
self.assertEqual(f.cleaned_data['first_name'], 'John')
self.assertEqual(f.cleaned_data['last_name'], 'Lennon')
def test_auto_id(self):
# "auto_id" tells the Form to add an "id" attribute to each form element.
# If it's a string that contains '%s', Django will use that as a format string
# into which the field's name will be inserted. It will also put a <label> around
# the human-readable labels for a field.
p = Person(auto_id='%s_id')
self.assertHTMLEqual(
p.as_table(),
"""<tr><th><label for="first_name_id">First name:</label></th><td>
<input type="text" name="first_name" id="first_name_id" /></td></tr>
<tr><th><label for="last_name_id">Last name:</label></th><td>
<input type="text" name="last_name" id="last_name_id" /></td></tr>
<tr><th><label for="birthday_id">Birthday:</label></th><td>
<input type="text" name="birthday" id="birthday_id" /></td></tr>"""
)
self.assertHTMLEqual(
p.as_ul(),
"""<li><label for="first_name_id">First name:</label>
<input type="text" name="first_name" id="first_name_id" /></li>
<li><label for="last_name_id">Last name:</label>
<input type="text" name="last_name" id="last_name_id" /></li>
<li><label for="birthday_id">Birthday:</label>
<input type="text" name="birthday" id="birthday_id" /></li>"""
)
self.assertHTMLEqual(
p.as_p(),
"""<p><label for="first_name_id">First name:</label>
<input type="text" name="first_name" id="first_name_id" /></p>
<p><label for="last_name_id">Last name:</label>
<input type="text" name="last_name" id="last_name_id" /></p>
<p><label for="birthday_id">Birthday:</label>
<input type="text" name="birthday" id="birthday_id" /></p>"""
)
def test_auto_id_true(self):
# If auto_id is any True value whose str() does not contain '%s', the "id"
# attribute will be the name of the field.
p = Person(auto_id=True)
self.assertHTMLEqual(
p.as_ul(),
"""<li><label for="first_name">First name:</label>
<input type="text" name="first_name" id="first_name" /></li>
<li><label for="last_name">Last name:</label>
<input type="text" name="last_name" id="last_name" /></li>
<li><label for="birthday">Birthday:</label>
<input type="text" name="birthday" id="birthday" /></li>"""
)
def test_auto_id_false(self):
# If auto_id is any False value, an "id" attribute won't be output unless it
# was manually entered.
p = Person(auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>"""
)
def test_id_on_field(self):
# In this example, auto_id is False, but the "id" attribute for the "first_name"
# field is given. Also note that field gets a <label>, while the others don't.
p = PersonNew(auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li><label for="first_name_id">First name:</label>
<input type="text" id="first_name_id" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>"""
)
def test_auto_id_on_form_and_field(self):
# If the "id" attribute is specified in the Form and auto_id is True, the "id"
# attribute in the Form gets precedence.
p = PersonNew(auto_id=True)
self.assertHTMLEqual(
p.as_ul(),
"""<li><label for="first_name_id">First name:</label>
<input type="text" id="first_name_id" name="first_name" /></li>
<li><label for="last_name">Last name:</label>
<input type="text" name="last_name" id="last_name" /></li>
<li><label for="birthday">Birthday:</label>
<input type="text" name="birthday" id="birthday" /></li>"""
)
def test_various_boolean_values(self):
class SignupForm(Form):
email = EmailField()
get_spam = BooleanField()
f = SignupForm(auto_id=False)
self.assertHTMLEqual(str(f['email']), '<input type="email" name="email" />')
self.assertHTMLEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" />')
f = SignupForm({'email': '[email protected]', 'get_spam': True}, auto_id=False)
self.assertHTMLEqual(str(f['email']), '<input type="email" name="email" value="[email protected]" />')
self.assertHTMLEqual(str(f['get_spam']), '<input checked="checked" type="checkbox" name="get_spam" />')
# 'True' or 'true' should be rendered without a value attribute
f = SignupForm({'email': '[email protected]', 'get_spam': 'True'}, auto_id=False)
self.assertHTMLEqual(str(f['get_spam']), '<input checked="checked" type="checkbox" name="get_spam" />')
f = SignupForm({'email': '[email protected]', 'get_spam': 'true'}, auto_id=False)
self.assertHTMLEqual(str(f['get_spam']), '<input checked="checked" type="checkbox" name="get_spam" />')
# A value of 'False' or 'false' should be rendered unchecked
f = SignupForm({'email': '[email protected]', 'get_spam': 'False'}, auto_id=False)
self.assertHTMLEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" />')
f = SignupForm({'email': '[email protected]', 'get_spam': 'false'}, auto_id=False)
self.assertHTMLEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" />')
# A value of '0' should be interpreted as a True value (#16820)
f = SignupForm({'email': '[email protected]', 'get_spam': '0'})
self.assertTrue(f.is_valid())
self.assertTrue(f.cleaned_data.get('get_spam'))
def test_widget_output(self):
# Any Field can have a Widget class passed to its constructor:
class ContactForm(Form):
subject = CharField()
message = CharField(widget=Textarea)
f = ContactForm(auto_id=False)
self.assertHTMLEqual(str(f['subject']), '<input type="text" name="subject" />')
self.assertHTMLEqual(str(f['message']), '<textarea name="message" rows="10" cols="40"></textarea>')
# as_textarea(), as_text() and as_hidden() are shortcuts for changing the output
# widget type:
self.assertHTMLEqual(f['subject'].as_textarea(), '<textarea name="subject" rows="10" cols="40"></textarea>')
self.assertHTMLEqual(f['message'].as_text(), '<input type="text" name="message" />')
self.assertHTMLEqual(f['message'].as_hidden(), '<input type="hidden" name="message" />')
# The 'widget' parameter to a Field can also be an instance:
class ContactForm(Form):
subject = CharField()
message = CharField(widget=Textarea(attrs={'rows': 80, 'cols': 20}))
f = ContactForm(auto_id=False)
self.assertHTMLEqual(str(f['message']), '<textarea name="message" rows="80" cols="20"></textarea>')
# Instance-level attrs are *not* carried over to as_textarea(), as_text() and
# as_hidden():
self.assertHTMLEqual(f['message'].as_text(), '<input type="text" name="message" />')
f = ContactForm({'subject': 'Hello', 'message': 'I love you.'}, auto_id=False)
self.assertHTMLEqual(
f['subject'].as_textarea(),
'<textarea rows="10" cols="40" name="subject">Hello</textarea>'
)
self.assertHTMLEqual(f['message'].as_text(), '<input type="text" name="message" value="I love you." />')
self.assertHTMLEqual(f['message'].as_hidden(), '<input type="hidden" name="message" value="I love you." />')
def test_forms_with_choices(self):
# For a form with a <select>, use ChoiceField:
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')])
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select name="language">
<option value="P" selected="selected">Python</option>
<option value="J">Java</option>
</select>""")
        # A subtlety: if one of the choices' values is the empty string and the form is
        # unbound, then the <option> for the empty-string choice will get selected="selected".
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(choices=[('', '------'), ('P', 'Python'), ('J', 'Java')])
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select name="language">
<option value="" selected="selected">------</option>
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
# You can specify widget attributes in the Widget constructor.
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=Select(attrs={'class': 'foo'}))
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language">
<option value="P" selected="selected">Python</option>
<option value="J">Java</option>
</select>""")
# When passing a custom widget instance to ChoiceField, note that setting
# 'choices' on the widget is meaningless. The widget will use the choices
# defined on the Field, not the ones defined on the Widget.
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(
choices=[('P', 'Python'), ('J', 'Java')],
widget=Select(choices=[('R', 'Ruby'), ('P', 'Perl')], attrs={'class': 'foo'}),
)
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language">
<option value="P" selected="selected">Python</option>
<option value="J">Java</option>
</select>""")
# You can set a ChoiceField's choices after the fact.
class FrameworkForm(Form):
name = CharField()
language = ChoiceField()
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select name="language">
</select>""")
f.fields['language'].choices = [('P', 'Python'), ('J', 'Java')]
self.assertHTMLEqual(str(f['language']), """<select name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
def test_forms_with_radio(self):
# Add widget=RadioSelect to use that widget with a ChoiceField.
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=RadioSelect)
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(str(f['language']), """<ul>
<li><label><input type="radio" name="language" value="P" /> Python</label></li>
<li><label><input type="radio" name="language" value="J" /> Java</label></li>
</ul>""")
self.assertHTMLEqual(f.as_table(), """<tr><th>Name:</th><td><input type="text" name="name" /></td></tr>
<tr><th>Language:</th><td><ul>
<li><label><input type="radio" name="language" value="P" /> Python</label></li>
<li><label><input type="radio" name="language" value="J" /> Java</label></li>
</ul></td></tr>""")
self.assertHTMLEqual(f.as_ul(), """<li>Name: <input type="text" name="name" /></li>
<li>Language: <ul>
<li><label><input type="radio" name="language" value="P" /> Python</label></li>
<li><label><input type="radio" name="language" value="J" /> Java</label></li>
</ul></li>""")
# Regarding auto_id and <label>, RadioSelect is a special case. Each radio button
# gets a distinct ID, formed by appending an underscore plus the button's
# zero-based index.
f = FrameworkForm(auto_id='id_%s')
self.assertHTMLEqual(
str(f['language']),
"""<ul id="id_language">
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul>"""
)
# When RadioSelect is used with auto_id, and the whole form is printed using
# either as_table() or as_ul(), the label for the RadioSelect will point to the
# ID of the *first* radio button.
self.assertHTMLEqual(
f.as_table(),
"""<tr><th><label for="id_name">Name:</label></th><td><input type="text" name="name" id="id_name" /></td></tr>
<tr><th><label for="id_language_0">Language:</label></th><td><ul id="id_language">
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul></td></tr>"""
)
self.assertHTMLEqual(
f.as_ul(),
"""<li><label for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></li>
<li><label for="id_language_0">Language:</label> <ul id="id_language">
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul></li>"""
)
self.assertHTMLEqual(
f.as_p(),
"""<p><label for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></p>
<p><label for="id_language_0">Language:</label> <ul id="id_language">
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul></p>"""
)
# Test iterating on individual radios in a template
t = Template('{% for radio in form.language %}<div class="myradio">{{ radio }}</div>{% endfor %}')
self.assertHTMLEqual(
t.render(Context({'form': f})),
"""<div class="myradio"><label for="id_language_0">
<input id="id_language_0" name="language" type="radio" value="P" /> Python</label></div>
<div class="myradio"><label for="id_language_1">
<input id="id_language_1" name="language" type="radio" value="J" /> Java</label></div>"""
)
def test_form_with_iterable_boundfield(self):
class BeatleForm(Form):
name = ChoiceField(
choices=[('john', 'John'), ('paul', 'Paul'), ('george', 'George'), ('ringo', 'Ringo')],
widget=RadioSelect,
)
f = BeatleForm(auto_id=False)
self.assertHTMLEqual(
'\n'.join(str(bf) for bf in f['name']),
"""<label><input type="radio" name="name" value="john" /> John</label>
<label><input type="radio" name="name" value="paul" /> Paul</label>
<label><input type="radio" name="name" value="george" /> George</label>
<label><input type="radio" name="name" value="ringo" /> Ringo</label>"""
)
self.assertHTMLEqual(
'\n'.join('<div>%s</div>' % bf for bf in f['name']),
"""<div><label><input type="radio" name="name" value="john" /> John</label></div>
<div><label><input type="radio" name="name" value="paul" /> Paul</label></div>
<div><label><input type="radio" name="name" value="george" /> George</label></div>
<div><label><input type="radio" name="name" value="ringo" /> Ringo</label></div>"""
)
def test_form_with_noniterable_boundfield(self):
# You can iterate over any BoundField, not just those with widget=RadioSelect.
class BeatleForm(Form):
name = CharField()
f = BeatleForm(auto_id=False)
self.assertHTMLEqual('\n'.join(str(bf) for bf in f['name']), '<input type="text" name="name" />')
def test_forms_with_multiple_choice(self):
# MultipleChoiceField is a special case, as its data is required to be a list:
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField()
f = SongForm(auto_id=False)
self.assertHTMLEqual(str(f['composers']), """<select multiple="multiple" name="composers">
</select>""")
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')])
f = SongForm(auto_id=False)
self.assertHTMLEqual(str(f['composers']), """<select multiple="multiple" name="composers">
<option value="J">John Lennon</option>
<option value="P">Paul McCartney</option>
</select>""")
f = SongForm({'name': 'Yesterday', 'composers': ['P']}, auto_id=False)
self.assertHTMLEqual(str(f['name']), '<input type="text" name="name" value="Yesterday" />')
self.assertHTMLEqual(str(f['composers']), """<select multiple="multiple" name="composers">
<option value="J">John Lennon</option>
<option value="P" selected="selected">Paul McCartney</option>
</select>""")
def test_form_with_disabled_fields(self):
class PersonForm(Form):
name = CharField()
birthday = DateField(disabled=True)
class PersonFormFieldInitial(Form):
name = CharField()
birthday = DateField(disabled=True, initial=datetime.date(1974, 8, 16))
# Disabled fields are generally not transmitted by user agents.
# The value from the form's initial data is used.
f1 = PersonForm({'name': 'John Doe'}, initial={'birthday': datetime.date(1974, 8, 16)})
f2 = PersonFormFieldInitial({'name': 'John Doe'})
for form in (f1, f2):
self.assertTrue(form.is_valid())
self.assertEqual(
form.cleaned_data,
{'birthday': datetime.date(1974, 8, 16), 'name': 'John Doe'}
)
# Values provided in the form's data are ignored.
data = {'name': 'John Doe', 'birthday': '1984-11-10'}
f1 = PersonForm(data, initial={'birthday': datetime.date(1974, 8, 16)})
f2 = PersonFormFieldInitial(data)
for form in (f1, f2):
self.assertTrue(form.is_valid())
self.assertEqual(
form.cleaned_data,
{'birthday': datetime.date(1974, 8, 16), 'name': 'John Doe'}
)
def test_hidden_data(self):
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')])
# MultipleChoiceField rendered as_hidden() is a special case. Because it can
# have multiple values, its as_hidden() renders multiple <input type="hidden">
# tags.
f = SongForm({'name': 'Yesterday', 'composers': ['P']}, auto_id=False)
self.assertHTMLEqual(f['composers'].as_hidden(), '<input type="hidden" name="composers" value="P" />')
f = SongForm({'name': 'From Me To You', 'composers': ['P', 'J']}, auto_id=False)
self.assertHTMLEqual(f['composers'].as_hidden(), """<input type="hidden" name="composers" value="P" />
<input type="hidden" name="composers" value="J" />""")
# DateTimeField rendered as_hidden() is special too
class MessageForm(Form):
when = SplitDateTimeField()
f = MessageForm({'when_0': '1992-01-01', 'when_1': '01:01'})
self.assertTrue(f.is_valid())
self.assertHTMLEqual(
str(f['when']),
'<input type="text" name="when_0" value="1992-01-01" id="id_when_0" />'
'<input type="text" name="when_1" value="01:01" id="id_when_1" />'
)
self.assertHTMLEqual(
f['when'].as_hidden(),
'<input type="hidden" name="when_0" value="1992-01-01" id="id_when_0" />'
'<input type="hidden" name="when_1" value="01:01" id="id_when_1" />'
)
def test_multiple_choice_checkbox(self):
# MultipleChoiceField can also be used with the CheckboxSelectMultiple widget.
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(
choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')],
widget=CheckboxSelectMultiple,
)
f = SongForm(auto_id=False)
self.assertHTMLEqual(str(f['composers']), """<ul>
<li><label><input type="checkbox" name="composers" value="J" /> John Lennon</label></li>
<li><label><input type="checkbox" name="composers" value="P" /> Paul McCartney</label></li>
</ul>""")
f = SongForm({'composers': ['J']}, auto_id=False)
self.assertHTMLEqual(str(f['composers']), """<ul>
<li><label><input checked="checked" type="checkbox" name="composers" value="J" /> John Lennon</label></li>
<li><label><input type="checkbox" name="composers" value="P" /> Paul McCartney</label></li>
</ul>""")
f = SongForm({'composers': ['J', 'P']}, auto_id=False)
self.assertHTMLEqual(str(f['composers']), """<ul>
<li><label><input checked="checked" type="checkbox" name="composers" value="J" /> John Lennon</label></li>
<li><label><input checked="checked" type="checkbox" name="composers" value="P" /> Paul McCartney</label></li>
</ul>""")
# Test iterating on individual checkboxes in a template
t = Template('{% for checkbox in form.composers %}<div class="mycheckbox">{{ checkbox }}</div>{% endfor %}')
self.assertHTMLEqual(t.render(Context({'form': f})), """<div class="mycheckbox"><label>
<input checked="checked" name="composers" type="checkbox" value="J" /> John Lennon</label></div>
<div class="mycheckbox"><label>
<input checked="checked" name="composers" type="checkbox" value="P" /> Paul McCartney</label></div>""")
def test_checkbox_auto_id(self):
# Regarding auto_id, CheckboxSelectMultiple is a special case. Each checkbox
# gets a distinct ID, formed by appending an underscore plus the checkbox's
# zero-based index.
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(
choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')],
widget=CheckboxSelectMultiple,
)
f = SongForm(auto_id='%s_id')
self.assertHTMLEqual(
str(f['composers']),
"""<ul id="composers_id">
<li><label for="composers_id_0">
<input type="checkbox" name="composers" value="J" id="composers_id_0" /> John Lennon</label></li>
<li><label for="composers_id_1">
<input type="checkbox" name="composers" value="P" id="composers_id_1" /> Paul McCartney</label></li>
</ul>"""
)
def test_multiple_choice_list_data(self):
# Data for a MultipleChoiceField should be a list. QueryDict and
# MultiValueDict conveniently work with this.
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(
choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')],
widget=CheckboxSelectMultiple,
)
data = {'name': 'Yesterday', 'composers': ['J', 'P']}
f = SongForm(data)
self.assertEqual(f.errors, {})
data = QueryDict('name=Yesterday&composers=J&composers=P')
f = SongForm(data)
self.assertEqual(f.errors, {})
data = MultiValueDict(dict(name=['Yesterday'], composers=['J', 'P']))
f = SongForm(data)
self.assertEqual(f.errors, {})
def test_multiple_hidden(self):
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(
choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')],
widget=CheckboxSelectMultiple,
)
# The MultipleHiddenInput widget renders multiple values as hidden fields.
class SongFormHidden(Form):
name = CharField()
composers = MultipleChoiceField(
choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')],
widget=MultipleHiddenInput,
)
f = SongFormHidden(MultiValueDict(dict(name=['Yesterday'], composers=['J', 'P'])), auto_id=False)
self.assertHTMLEqual(
f.as_ul(),
"""<li>Name: <input type="text" name="name" value="Yesterday" />
<input type="hidden" name="composers" value="J" />
<input type="hidden" name="composers" value="P" /></li>"""
)
        # When using CheckboxSelectMultiple, the framework expects a list of
        # values on input and returns a list of values.
f = SongForm({'name': 'Yesterday'}, auto_id=False)
self.assertEqual(f.errors['composers'], ['This field is required.'])
f = SongForm({'name': 'Yesterday', 'composers': ['J']}, auto_id=False)
self.assertEqual(f.errors, {})
self.assertEqual(f.cleaned_data['composers'], ['J'])
self.assertEqual(f.cleaned_data['name'], 'Yesterday')
f = SongForm({'name': 'Yesterday', 'composers': ['J', 'P']}, auto_id=False)
self.assertEqual(f.errors, {})
self.assertEqual(f.cleaned_data['composers'], ['J', 'P'])
self.assertEqual(f.cleaned_data['name'], 'Yesterday')
def test_escaping(self):
# Validation errors are HTML-escaped when output as HTML.
class EscapingForm(Form):
special_name = CharField(label="<em>Special</em> Field")
special_safe_name = CharField(label=mark_safe("<em>Special</em> Field"))
def clean_special_name(self):
raise ValidationError("Something's wrong with '%s'" % self.cleaned_data['special_name'])
def clean_special_safe_name(self):
raise ValidationError(
mark_safe("'<b>%s</b>' is a safe string" % self.cleaned_data['special_safe_name'])
)
f = EscapingForm({
'special_name':
"Nothing to escape",
'special_safe_name': "Nothing to escape",
}, auto_id=False)
self.assertHTMLEqual(
f.as_table(),
"""<tr><th><em>Special</em> Field:</th><td>
<ul class="errorlist"><li>Something's wrong with 'Nothing to escape'</li></ul>
<input type="text" name="special_name" value="Nothing to escape" /></td></tr>
<tr><th><em>Special</em> Field:</th><td>
<ul class="errorlist"><li>'<b>Nothing to escape</b>' is a safe string</li></ul>
<input type="text" name="special_safe_name" value="Nothing to escape" /></td></tr>"""
)
f = EscapingForm({
'special_name': "Should escape < & > and <script>alert('xss')</script>",
'special_safe_name': "<i>Do not escape</i>"
}, auto_id=False)
self.assertHTMLEqual(
f.as_table(),
"""<tr><th><em>Special</em> Field:</th><td>
<ul class="errorlist"><li>Something's wrong with 'Should escape < & > and
<script>alert('xss')</script>'</li></ul>
<input type="text" name="special_name"
value="Should escape < & > and <script>alert('xss')</script>" /></td></tr>
<tr><th><em>Special</em> Field:</th><td>
<ul class="errorlist"><li>'<b><i>Do not escape</i></b>' is a safe string</li></ul>
<input type="text" name="special_safe_name" value="<i>Do not escape</i>" /></td></tr>"""
)
def test_validating_multiple_fields(self):
# There are a couple of ways to do multiple-field validation. If you want the
# validation message to be associated with a particular field, implement the
# clean_XXX() method on the Form, where XXX is the field name. As in
# Field.clean(), the clean_XXX() method should return the cleaned value. In the
# clean_XXX() method, you have access to self.cleaned_data, which is a dictionary
# of all the data that has been cleaned *so far*, in order by the fields,
# including the current field (e.g., the field XXX if you're in clean_XXX()).
class UserRegistration(Form):
username = CharField(max_length=10)
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput)
def clean_password2(self):
if (self.cleaned_data.get('password1') and self.cleaned_data.get('password2')
and self.cleaned_data['password1'] != self.cleaned_data['password2']):
raise ValidationError('Please make sure your passwords match.')
return self.cleaned_data['password2']
f = UserRegistration(auto_id=False)
self.assertEqual(f.errors, {})
f = UserRegistration({}, auto_id=False)
self.assertEqual(f.errors['username'], ['This field is required.'])
self.assertEqual(f.errors['password1'], ['This field is required.'])
self.assertEqual(f.errors['password2'], ['This field is required.'])
f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)
self.assertEqual(f.errors['password2'], ['Please make sure your passwords match.'])
f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'foo'}, auto_id=False)
self.assertEqual(f.errors, {})
self.assertEqual(f.cleaned_data['username'], 'adrian')
self.assertEqual(f.cleaned_data['password1'], 'foo')
self.assertEqual(f.cleaned_data['password2'], 'foo')
# Another way of doing multiple-field validation is by implementing the
# Form's clean() method. Usually ValidationError raised by that method
# will not be associated with a particular field and will have a
# special-case association with the field named '__all__'. It's
# possible to associate the errors to particular field with the
# Form.add_error() method or by passing a dictionary that maps each
# field to one or more errors.
#
# Note that in Form.clean(), you have access to self.cleaned_data, a
# dictionary of all the fields/values that have *not* raised a
# ValidationError. Also note Form.clean() is required to return a
# dictionary of all clean data.
class UserRegistration(Form):
username = CharField(max_length=10)
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput)
def clean(self):
# Test raising a ValidationError as NON_FIELD_ERRORS.
if (self.cleaned_data.get('password1') and self.cleaned_data.get('password2')
and self.cleaned_data['password1'] != self.cleaned_data['password2']):
raise ValidationError('Please make sure your passwords match.')
# Test raising ValidationError that targets multiple fields.
errors = {}
if self.cleaned_data.get('password1') == 'FORBIDDEN_VALUE':
errors['password1'] = 'Forbidden value.'
if self.cleaned_data.get('password2') == 'FORBIDDEN_VALUE':
errors['password2'] = ['Forbidden value.']
if errors:
raise ValidationError(errors)
# Test Form.add_error()
if self.cleaned_data.get('password1') == 'FORBIDDEN_VALUE2':
self.add_error(None, 'Non-field error 1.')
self.add_error('password1', 'Forbidden value 2.')
if self.cleaned_data.get('password2') == 'FORBIDDEN_VALUE2':
self.add_error('password2', 'Forbidden value 2.')
raise ValidationError('Non-field error 2.')
return self.cleaned_data
f = UserRegistration(auto_id=False)
self.assertEqual(f.errors, {})
f = UserRegistration({}, auto_id=False)
self.assertHTMLEqual(
f.as_table(),
"""<tr><th>Username:</th><td>
<ul class="errorlist"><li>This field is required.</li></ul>
<input type="text" name="username" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><ul class="errorlist"><li>This field is required.</li></ul>
<input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><ul class="errorlist"><li>This field is required.</li></ul>
<input type="password" name="password2" /></td></tr>"""
)
self.assertEqual(f.errors['username'], ['This field is required.'])
self.assertEqual(f.errors['password1'], ['This field is required.'])
self.assertEqual(f.errors['password2'], ['This field is required.'])
f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)
self.assertEqual(f.errors['__all__'], ['Please make sure your passwords match.'])
self.assertHTMLEqual(
f.as_table(),
"""<tr><td colspan="2">
<ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul></td></tr>
<tr><th>Username:</th><td><input type="text" name="username" value="adrian" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><input type="password" name="password2" /></td></tr>"""
)
self.assertHTMLEqual(
f.as_ul(),
"""<li><ul class="errorlist nonfield">
<li>Please make sure your passwords match.</li></ul></li>
<li>Username: <input type="text" name="username" value="adrian" maxlength="10" /></li>
<li>Password1: <input type="password" name="password1" /></li>
<li>Password2: <input type="password" name="password2" /></li>"""
)
f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'foo'}, auto_id=False)
self.assertEqual(f.errors, {})
self.assertEqual(f.cleaned_data['username'], 'adrian')
self.assertEqual(f.cleaned_data['password1'], 'foo')
self.assertEqual(f.cleaned_data['password2'], 'foo')
f = UserRegistration({
'username': 'adrian',
'password1': 'FORBIDDEN_VALUE',
'password2': 'FORBIDDEN_VALUE',
}, auto_id=False)
self.assertEqual(f.errors['password1'], ['Forbidden value.'])
self.assertEqual(f.errors['password2'], ['Forbidden value.'])
f = UserRegistration({
'username': 'adrian',
'password1': 'FORBIDDEN_VALUE2',
'password2': 'FORBIDDEN_VALUE2',
}, auto_id=False)
self.assertEqual(f.errors['__all__'], ['Non-field error 1.', 'Non-field error 2.'])
self.assertEqual(f.errors['password1'], ['Forbidden value 2.'])
self.assertEqual(f.errors['password2'], ['Forbidden value 2.'])
with six.assertRaisesRegex(self, ValueError, "has no field named"):
f.add_error('missing_field', 'Some error.')
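# A small extra sketch, reusing the UserRegistration form above: add_error()
# also accepts a ValidationError instance, and the (hypothetical) error code
# given here is what has_error() matches against.
f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'foo'}, auto_id=False)
f.add_error('username', ValidationError('Name is taken.', code='taken'))
self.assertEqual(f.errors['username'], ['Name is taken.'])
self.assertTrue(f.has_error('username', 'taken'))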
def test_update_error_dict(self):
class CodeForm(Form):
code = CharField(max_length=10)
def clean(self):
try:
raise ValidationError({'code': [ValidationError('Code error 1.')]})
except ValidationError as e:
self._errors = e.update_error_dict(self._errors)
try:
raise ValidationError({'code': [ValidationError('Code error 2.')]})
except ValidationError as e:
self._errors = e.update_error_dict(self._errors)
try:
raise ValidationError({'code': forms.ErrorList(['Code error 3.'])})
except ValidationError as e:
self._errors = e.update_error_dict(self._errors)
try:
raise ValidationError('Non-field error 1.')
except ValidationError as e:
self._errors = e.update_error_dict(self._errors)
try:
raise ValidationError([ValidationError('Non-field error 2.')])
except ValidationError as e:
self._errors = e.update_error_dict(self._errors)
# Ensure that the newly added list of errors is an instance of ErrorList.
for field, error_list in self._errors.items():
if not isinstance(error_list, self.error_class):
self._errors[field] = self.error_class(error_list)
form = CodeForm({'code': 'hello'})
# Trigger validation.
self.assertFalse(form.is_valid())
# Check that update_error_dict didn't lose track of the ErrorDict type.
self.assertIsInstance(form._errors, forms.ErrorDict)
self.assertEqual(dict(form.errors), {
'code': ['Code error 1.', 'Code error 2.', 'Code error 3.'],
NON_FIELD_ERRORS: ['Non-field error 1.', 'Non-field error 2.'],
})
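# A minimal sketch of update_error_dict() in isolation (field name and
# messages are assumed): a dict-style ValidationError merges per field,
# extending any existing list rather than replacing it.
e = ValidationError({'code': ['Code error 4.']})
merged = e.update_error_dict({'code': ['Code error 0.']})
self.assertEqual(list(merged), ['code'])
self.assertEqual(len(merged['code']), 2)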
def test_has_error(self):
class UserRegistration(Form):
username = CharField(max_length=10)
password1 = CharField(widget=PasswordInput, min_length=5)
password2 = CharField(widget=PasswordInput)
def clean(self):
if (self.cleaned_data.get('password1') and self.cleaned_data.get('password2')
and self.cleaned_data['password1'] != self.cleaned_data['password2']):
raise ValidationError(
'Please make sure your passwords match.',
code='password_mismatch',
)
f = UserRegistration(data={})
self.assertTrue(f.has_error('password1'))
self.assertTrue(f.has_error('password1', 'required'))
self.assertFalse(f.has_error('password1', 'anything'))
f = UserRegistration(data={'password1': 'Hi', 'password2': 'Hi'})
self.assertTrue(f.has_error('password1'))
self.assertTrue(f.has_error('password1', 'min_length'))
self.assertFalse(f.has_error('password1', 'anything'))
self.assertFalse(f.has_error('password2'))
self.assertFalse(f.has_error('password2', 'anything'))
f = UserRegistration(data={'password1': 'Bonjour', 'password2': 'Hello'})
self.assertFalse(f.has_error('password1'))
self.assertFalse(f.has_error('password1', 'required'))
self.assertTrue(f.has_error(NON_FIELD_ERRORS))
self.assertTrue(f.has_error(NON_FIELD_ERRORS, 'password_mismatch'))
self.assertFalse(f.has_error(NON_FIELD_ERRORS, 'anything'))
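# Sketch: an unbound form never runs validation, so has_error() is always
# False there, whatever the field or code.
f = UserRegistration()
self.assertFalse(f.has_error('password1'))
self.assertFalse(f.has_error(NON_FIELD_ERRORS))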
def test_dynamic_construction(self):
# It's possible to construct a Form dynamically by adding to the self.fields
# dictionary in __init__(). Don't forget to call Form.__init__() within the
# subclass' __init__().
class Person(Form):
first_name = CharField()
last_name = CharField()
def __init__(self, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
self.fields['birthday'] = DateField()
p = Person(auto_id=False)
self.assertHTMLEqual(
p.as_table(),
"""<tr><th>First name:</th><td><input type="text" name="first_name" /></td></tr>
<tr><th>Last name:</th><td><input type="text" name="last_name" /></td></tr>
<tr><th>Birthday:</th><td><input type="text" name="birthday" /></td></tr>"""
)
# Fields added at runtime in this way do not persist from one Form instance
# to the next.
class MyForm(Form):
def __init__(self, data=None, auto_id=False, field_list=()):
Form.__init__(self, data, auto_id=auto_id)
for field in field_list:
self.fields[field[0]] = field[1]
field_list = [('field1', CharField()), ('field2', CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(
my_form.as_table(),
"""<tr><th>Field1:</th><td><input type="text" name="field1" /></td></tr>
<tr><th>Field2:</th><td><input type="text" name="field2" /></td></tr>"""
)
field_list = [('field3', CharField()), ('field4', CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(
my_form.as_table(),
"""<tr><th>Field3:</th><td><input type="text" name="field3" /></td></tr>
<tr><th>Field4:</th><td><input type="text" name="field4" /></td></tr>"""
)
class MyForm(Form):
default_field_1 = CharField()
default_field_2 = CharField()
def __init__(self, data=None, auto_id=False, field_list=()):
Form.__init__(self, data, auto_id=auto_id)
for field in field_list:
self.fields[field[0]] = field[1]
field_list = [('field1', CharField()), ('field2', CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(
my_form.as_table(),
"""<tr><th>Default field 1:</th><td><input type="text" name="default_field_1" /></td></tr>
<tr><th>Default field 2:</th><td><input type="text" name="default_field_2" /></td></tr>
<tr><th>Field1:</th><td><input type="text" name="field1" /></td></tr>
<tr><th>Field2:</th><td><input type="text" name="field2" /></td></tr>"""
)
field_list = [('field3', CharField()), ('field4', CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(
my_form.as_table(),
"""<tr><th>Default field 1:</th><td><input type="text" name="default_field_1" /></td></tr>
<tr><th>Default field 2:</th><td><input type="text" name="default_field_2" /></td></tr>
<tr><th>Field3:</th><td><input type="text" name="field3" /></td></tr>
<tr><th>Field4:</th><td><input type="text" name="field4" /></td></tr>"""
)
# Similarly, changes to field attributes do not persist from one Form instance
# to the next.
class Person(Form):
first_name = CharField(required=False)
last_name = CharField(required=False)
def __init__(self, names_required=False, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
if names_required:
self.fields['first_name'].required = True
self.fields['first_name'].widget.attrs['class'] = 'required'
self.fields['last_name'].required = True
self.fields['last_name'].widget.attrs['class'] = 'required'
f = Person(names_required=False)
self.assertEqual((f['first_name'].field.required, f['last_name'].field.required), (False, False))
self.assertEqual((f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs), ({}, {}))
f = Person(names_required=True)
self.assertEqual((f['first_name'].field.required, f['last_name'].field.required), (True, True))
self.assertEqual(
(f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs),
({'class': 'required'}, {'class': 'required'})
)
f = Person(names_required=False)
self.assertEqual((f['first_name'].field.required, f['last_name'].field.required), (False, False))
self.assertEqual((f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs), ({}, {}))
class Person(Form):
first_name = CharField(max_length=30)
last_name = CharField(max_length=30)
def __init__(self, name_max_length=None, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
if name_max_length:
self.fields['first_name'].max_length = name_max_length
self.fields['last_name'].max_length = name_max_length
f = Person(name_max_length=None)
self.assertEqual((f['first_name'].field.max_length, f['last_name'].field.max_length), (30, 30))
f = Person(name_max_length=20)
self.assertEqual((f['first_name'].field.max_length, f['last_name'].field.max_length), (20, 20))
f = Person(name_max_length=None)
self.assertEqual((f['first_name'].field.max_length, f['last_name'].field.max_length), (30, 30))
# Similarly, choices do not persist from one Form instance to the next.
# Refs #15127.
class Person(Form):
first_name = CharField(required=False)
last_name = CharField(required=False)
gender = ChoiceField(choices=(('f', 'Female'), ('m', 'Male')))
def __init__(self, allow_unspec_gender=False, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
if allow_unspec_gender:
self.fields['gender'].choices += (('u', 'Unspecified'),)
f = Person()
self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male')])
f = Person(allow_unspec_gender=True)
self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male'), ('u', 'Unspecified')])
f = Person()
self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male')])
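# A short sketch of the mechanism behind all of the above (assumed: each
# instance deep-copies the class-level field declarations into self.fields):
# per-instance *removal* is just as isolated as addition.
class ShrinkingForm(Form):
a = CharField()
b = CharField()
def __init__(self, *args, **kwargs):
super(ShrinkingForm, self).__init__(*args, **kwargs)
del self.fields['b']
f = ShrinkingForm()
self.assertEqual(list(f.fields.keys()), ['a'])
self.assertEqual(list(ShrinkingForm.base_fields.keys()), ['a', 'b'])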
def test_validators_independence(self):
""" Test that we are able to modify a form field validators list without polluting
other forms """
from django.core.validators import MaxValueValidator
class MyForm(Form):
myfield = CharField(max_length=25)
f1 = MyForm()
f2 = MyForm()
f1.fields['myfield'].validators[0] = MaxValueValidator(12)
self.assertNotEqual(f1.fields['myfield'].validators[0], f2.fields['myfield'].validators[0])
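# Sketch of why the swap above is safe (assumes CharField appends a
# MaxLengthValidator when max_length is given): f2 still holds its own,
# untouched copy of the validators list.
from django.core.validators import MaxLengthValidator
self.assertIsInstance(f2.fields['myfield'].validators[0], MaxLengthValidator)
self.assertIsInstance(f1.fields['myfield'].validators[0], MaxValueValidator)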
def test_hidden_widget(self):
# HiddenInput widgets are displayed differently in the as_table(), as_ul(),
# and as_p() output of a Form -- their verbose names are not displayed, and a
# separate row is not displayed. They're displayed in the last row of the
# form, directly after that row's form element.
class Person(Form):
first_name = CharField()
last_name = CharField()
hidden_text = CharField(widget=HiddenInput)
birthday = DateField()
p = Person(auto_id=False)
self.assertHTMLEqual(
p.as_table(),
"""<tr><th>First name:</th><td><input type="text" name="first_name" /></td></tr>
<tr><th>Last name:</th><td><input type="text" name="last_name" /></td></tr>
<tr><th>Birthday:</th><td><input type="text" name="birthday" /><input type="hidden" name="hidden_text" /></td></tr>"""
)
self.assertHTMLEqual(
p.as_ul(),
"""<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /><input type="hidden" name="hidden_text" /></li>"""
)
self.assertHTMLEqual(
p.as_p(), """<p>First name: <input type="text" name="first_name" /></p>
<p>Last name: <input type="text" name="last_name" /></p>
<p>Birthday: <input type="text" name="birthday" /><input type="hidden" name="hidden_text" /></p>"""
)
# With auto_id set, a HiddenInput still gets an ID, but it doesn't get a label.
p = Person(auto_id='id_%s')
self.assertHTMLEqual(
p.as_table(),
"""<tr><th><label for="id_first_name">First name:</label></th><td>
<input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td>
<input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td>
<input type="text" name="birthday" id="id_birthday" />
<input type="hidden" name="hidden_text" id="id_hidden_text" /></td></tr>"""
)
self.assertHTMLEqual(
p.as_ul(),
"""<li><label for="id_first_name">First name:</label>
<input type="text" name="first_name" id="id_first_name" /></li>
<li><label for="id_last_name">Last name:</label>
<input type="text" name="last_name" id="id_last_name" /></li>
<li><label for="id_birthday">Birthday:</label>
<input type="text" name="birthday" id="id_birthday" />
<input type="hidden" name="hidden_text" id="id_hidden_text" /></li>"""
)
self.assertHTMLEqual(
p.as_p(),
"""<p><label for="id_first_name">First name:</label>
<input type="text" name="first_name" id="id_first_name" /></p>
<p><label for="id_last_name">Last name:</label>
<input type="text" name="last_name" id="id_last_name" /></p>
<p><label for="id_birthday">Birthday:</label>
<input type="text" name="birthday" id="id_birthday" />
<input type="hidden" name="hidden_text" id="id_hidden_text" /></p>"""
)
# If a field with a HiddenInput has errors, the as_table() and as_ul() output
# will include the error message(s) with the text "(Hidden field [fieldname]) "
# prepended. This message is displayed at the top of the output, regardless of
# its field's order in the form.
p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9'}, auto_id=False)
self.assertHTMLEqual(
p.as_table(),
"""<tr><td colspan="2">
<ul class="errorlist nonfield"><li>(Hidden field hidden_text) This field is required.</li></ul></td></tr>
<tr><th>First name:</th><td><input type="text" name="first_name" value="John" /></td></tr>
<tr><th>Last name:</th><td><input type="text" name="last_name" value="Lennon" /></td></tr>
<tr><th>Birthday:</th><td><input type="text" name="birthday" value="1940-10-9" />
<input type="hidden" name="hidden_text" /></td></tr>"""
)
self.assertHTMLEqual(
p.as_ul(),
"""<li><ul class="errorlist nonfield"><li>(Hidden field hidden_text) This field is required.</li></ul></li>
<li>First name: <input type="text" name="first_name" value="John" /></li>
<li>Last name: <input type="text" name="last_name" value="Lennon" /></li>
<li>Birthday: <input type="text" name="birthday" value="1940-10-9" />
<input type="hidden" name="hidden_text" /></li>"""
)
self.assertHTMLEqual(
p.as_p(),
"""<ul class="errorlist nonfield"><li>(Hidden field hidden_text) This field is required.</li></ul>
<p>First name: <input type="text" name="first_name" value="John" /></p>
<p>Last name: <input type="text" name="last_name" value="Lennon" /></p>
<p>Birthday: <input type="text" name="birthday" value="1940-10-9" />
<input type="hidden" name="hidden_text" /></p>"""
)
# A corner case: It's possible for a form to have only HiddenInputs.
class TestForm(Form):
foo = CharField(widget=HiddenInput)
bar = CharField(widget=HiddenInput)
p = TestForm(auto_id=False)
self.assertHTMLEqual(p.as_table(), '<input type="hidden" name="foo" /><input type="hidden" name="bar" />')
self.assertHTMLEqual(p.as_ul(), '<input type="hidden" name="foo" /><input type="hidden" name="bar" />')
self.assertHTMLEqual(p.as_p(), '<input type="hidden" name="foo" /><input type="hidden" name="bar" />')
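# Sketch: BoundField exposes the hidden/visible split that drives the
# special-cased rendering above.
p = TestForm(auto_id=False)
self.assertTrue(p['foo'].is_hidden)
self.assertEqual([bf.name for bf in p.hidden_fields()], ['foo', 'bar'])
self.assertEqual(p.visible_fields(), [])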
def test_field_order(self):
# A Form's fields are displayed in the same order in which they were defined.
class TestForm(Form):
field1 = CharField()
field2 = CharField()
field3 = CharField()
field4 = CharField()
field5 = CharField()
field6 = CharField()
field7 = CharField()
field8 = CharField()
field9 = CharField()
field10 = CharField()
field11 = CharField()
field12 = CharField()
field13 = CharField()
field14 = CharField()
p = TestForm(auto_id=False)
self.assertHTMLEqual(p.as_table(), """<tr><th>Field1:</th><td><input type="text" name="field1" /></td></tr>
<tr><th>Field2:</th><td><input type="text" name="field2" /></td></tr>
<tr><th>Field3:</th><td><input type="text" name="field3" /></td></tr>
<tr><th>Field4:</th><td><input type="text" name="field4" /></td></tr>
<tr><th>Field5:</th><td><input type="text" name="field5" /></td></tr>
<tr><th>Field6:</th><td><input type="text" name="field6" /></td></tr>
<tr><th>Field7:</th><td><input type="text" name="field7" /></td></tr>
<tr><th>Field8:</th><td><input type="text" name="field8" /></td></tr>
<tr><th>Field9:</th><td><input type="text" name="field9" /></td></tr>
<tr><th>Field10:</th><td><input type="text" name="field10" /></td></tr>
<tr><th>Field11:</th><td><input type="text" name="field11" /></td></tr>
<tr><th>Field12:</th><td><input type="text" name="field12" /></td></tr>
<tr><th>Field13:</th><td><input type="text" name="field13" /></td></tr>
<tr><th>Field14:</th><td><input type="text" name="field14" /></td></tr>""")
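# Sketch of the assumed mechanism: declaration order is tracked by
# Field.creation_counter, which increments once per Field instantiation.
f1, f2 = CharField(), CharField()
self.assertLess(f1.creation_counter, f2.creation_counter)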
def test_explicit_field_order(self):
class TestFormParent(Form):
field1 = CharField()
field2 = CharField()
field4 = CharField()
field5 = CharField()
field6 = CharField()
field_order = ['field6', 'field5', 'field4', 'field2', 'field1']
class TestForm(TestFormParent):
field3 = CharField()
field_order = ['field2', 'field4', 'field3', 'field5', 'field6']
class TestFormRemove(TestForm):
field1 = None
class TestFormMissing(TestForm):
field_order = ['field2', 'field4', 'field3', 'field5', 'field6', 'field1']
field1 = None
class TestFormInit(TestFormParent):
field3 = CharField()
field_order = None
def __init__(self, **kwargs):
super(TestFormInit, self).__init__(**kwargs)
self.order_fields(field_order=TestForm.field_order)
p = TestFormParent()
self.assertEqual(list(p.fields.keys()), TestFormParent.field_order)
p = TestFormRemove()
self.assertEqual(list(p.fields.keys()), TestForm.field_order)
p = TestFormMissing()
self.assertEqual(list(p.fields.keys()), TestForm.field_order)
p = TestForm()
self.assertEqual(list(p.fields.keys()), TestFormMissing.field_order)
p = TestFormInit()
order = list(TestForm.field_order) + ['field1']
self.assertEqual(list(p.fields.keys()), order)
TestForm.field_order = ['unknown']
p = TestForm()
self.assertEqual(list(p.fields.keys()), ['field1', 'field2', 'field4', 'field5', 'field6', 'field3'])
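# Sketch: order_fields() can also be called directly on any instance, which
# is exactly what TestFormInit does in its __init__() above.
p = TestFormParent()
p.order_fields(['field1', 'field2', 'field4', 'field5', 'field6'])
self.assertEqual(list(p.fields.keys()), ['field1', 'field2', 'field4', 'field5', 'field6'])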
def test_form_html_attributes(self):
# Some Field classes have an effect on the HTML attributes of their associated
# Widget. If you set max_length in a CharField and its associated widget is
# either a TextInput or PasswordInput, then the widget's rendered HTML will
# include the "maxlength" attribute.
class UserRegistration(Form):
username = CharField(max_length=10) # uses TextInput by default
password = CharField(max_length=10, widget=PasswordInput)
realname = CharField(max_length=10, widget=TextInput) # redundantly define widget, just to test
address = CharField() # no max_length defined here
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(),
"""<li>Username: <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" maxlength="10" /></li>
<li>Realname: <input type="text" name="realname" maxlength="10" /></li>
<li>Address: <input type="text" name="address" /></li>""")
# If you specify a custom "attrs" that includes the "maxlength" attribute,
# the Field's max_length attribute will override whatever "maxlength" you specify
# in "attrs".
class UserRegistration(Form):
username = CharField(max_length=10, widget=TextInput(attrs={'maxlength': 20}))
password = CharField(max_length=10, widget=PasswordInput)
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(),
"""<li>Username: <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" maxlength="10" /></li>""")
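# Sketch of the assumed hook behind this behavior: Field.widget_attrs() is
# what contributes the "maxlength" attribute for text-style widgets.
field = CharField(max_length=10)
self.assertEqual(field.widget_attrs(TextInput()), {'maxlength': '10'})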
def test_specifying_labels(self):
# You can specify the label for a field by using the 'label' argument to a Field
# class. If you don't specify 'label', Django will use the field name with
# underscores converted to spaces, and the initial letter capitalized.
class UserRegistration(Form):
username = CharField(max_length=10, label='Your username')
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput, label='Contraseña (de nuevo)')
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(),
"""<li>Your username: <input type="text" name="username" maxlength="10" /></li>
<li>Password1: <input type="password" name="password1" /></li>
<li>Contraseña (de nuevo): <input type="password" name="password2" /></li>""")
# Labels for as_* methods will only end in a colon if they don't end in other
# punctuation already.
class Questions(Form):
q1 = CharField(label='The first question')
q2 = CharField(label='What is your name?')
q3 = CharField(label='The answer to life is:')
q4 = CharField(label='Answer this question!')
q5 = CharField(label='The last question. Period.')
self.assertHTMLEqual(
Questions(auto_id=False).as_p(),
"""<p>The first question: <input type="text" name="q1" /></p>
<p>What is your name? <input type="text" name="q2" /></p>
<p>The answer to life is: <input type="text" name="q3" /></p>
<p>Answer this question! <input type="text" name="q4" /></p>
<p>The last question. Period. <input type="text" name="q5" /></p>"""
)
self.assertHTMLEqual(
Questions().as_p(),
"""<p><label for="id_q1">The first question:</label> <input type="text" name="q1" id="id_q1" /></p>
<p><label for="id_q2">What is your name?</label> <input type="text" name="q2" id="id_q2" /></p>
<p><label for="id_q3">The answer to life is:</label> <input type="text" name="q3" id="id_q3" /></p>
<p><label for="id_q4">Answer this question!</label> <input type="text" name="q4" id="id_q4" /></p>
<p><label for="id_q5">The last question. Period.</label> <input type="text" name="q5" id="id_q5" /></p>"""
)
# If a label is set to the empty string for a field, that field won't get a label.
class UserRegistration(Form):
username = CharField(max_length=10, label='')
password = CharField(widget=PasswordInput)
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li> <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
p = UserRegistration(auto_id='id_%s')
self.assertHTMLEqual(
p.as_ul(),
"""<li> <input id="id_username" type="text" name="username" maxlength="10" /></li>
<li><label for="id_password">Password:</label> <input type="password" name="password" id="id_password" /></li>"""
)
# If label is None, Django will auto-create the label from the field name. This
# is the default behavior.
class UserRegistration(Form):
username = CharField(max_length=10, label=None)
password = CharField(widget=PasswordInput)
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(),
"""<li>Username: <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
p = UserRegistration(auto_id='id_%s')
self.assertHTMLEqual(
p.as_ul(),
"""<li><label for="id_username">Username:</label>
<input id="id_username" type="text" name="username" maxlength="10" /></li>
<li><label for="id_password">Password:</label> <input type="password" name="password" id="id_password" /></li>"""
)
def test_label_suffix(self):
# You can specify the 'label_suffix' argument to a Form class to modify the
# punctuation symbol used at the end of a label. By default, the colon (:) is
# used, and is only appended to the label if the label doesn't already end with a
# punctuation symbol: ., !, ? or :. If you specify a different suffix, it will
# be appended regardless of the last character of the label.
class FavoriteForm(Form):
color = CharField(label='Favorite color?')
animal = CharField(label='Favorite animal')
answer = CharField(label='Secret answer', label_suffix=' =')
f = FavoriteForm(auto_id=False)
self.assertHTMLEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" /></li>
<li>Favorite animal: <input type="text" name="animal" /></li>
<li>Secret answer = <input type="text" name="answer" /></li>""")
f = FavoriteForm(auto_id=False, label_suffix='?')
self.assertHTMLEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" /></li>
<li>Favorite animal? <input type="text" name="animal" /></li>
<li>Secret answer = <input type="text" name="answer" /></li>""")
f = FavoriteForm(auto_id=False, label_suffix='')
self.assertHTMLEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" /></li>
<li>Favorite animal <input type="text" name="animal" /></li>
<li>Secret answer = <input type="text" name="answer" /></li>""")
f = FavoriteForm(auto_id=False, label_suffix='\u2192')
self.assertHTMLEqual(
f.as_ul(),
'<li>Favorite color? <input type="text" name="color" /></li>\n'
'<li>Favorite animal\u2192 <input type="text" name="animal" /></li>\n'
'<li>Secret answer = <input type="text" name="answer" /></li>'
)
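# Sketch: the suffix can also be overridden per call via
# BoundField.label_tag()'s label_suffix argument.
f = FavoriteForm(auto_id=False)
self.assertEqual(f['animal'].label_tag(label_suffix='!'), 'Favorite animal!')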
def test_initial_data(self):
# You can specify initial data for a field by using the 'initial' argument to a
# Field class. This initial data is displayed when a Form is rendered with *no*
# data. It is not displayed when a Form is rendered with any data (including an
# empty dictionary). Also, the initial value is *not* used if data for a
# particular required field isn't provided.
class UserRegistration(Form):
username = CharField(max_length=10, initial='django')
password = CharField(widget=PasswordInput)
# Here, we're not submitting any data, so the initial value will be displayed.
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>"""
)
# Here, we're submitting data, so the initial value will *not* be displayed.
p = UserRegistration({}, auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li><ul class="errorlist"><li>This field is required.</li></ul>
Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
Password: <input type="password" name="password" /></li>"""
)
p = UserRegistration({'username': ''}, auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li><ul class="errorlist"><li>This field is required.</li></ul>
Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
Password: <input type="password" name="password" /></li>"""
)
p = UserRegistration({'username': 'foo'}, auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li>Username: <input type="text" name="username" value="foo" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
Password: <input type="password" name="password" /></li>"""
)
# An 'initial' value is *not* used as a fallback if data is not provided. In this
# example, we don't provide a value for 'username', and the form raises a
# validation error rather than using the initial value for 'username'.
p = UserRegistration({'password': 'secret'})
self.assertEqual(p.errors['username'], ['This field is required.'])
self.assertFalse(p.is_valid())
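# Sketch: the same rule, seen through BoundField.value() -- unbound forms
# report the initial, bound forms report the submitted data (or None).
p = UserRegistration(auto_id=False)
self.assertEqual(p['username'].value(), 'django')
p = UserRegistration({}, auto_id=False)
self.assertIsNone(p['username'].value())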
def test_dynamic_initial_data(self):
# The previous technique dealt with "hard-coded" initial data, but it's also
# possible to specify initial data after you've already created the Form class
# (i.e., at runtime). Use the 'initial' parameter to the Form constructor. This
# should be a dictionary containing initial values for one or more fields in the
# form, keyed by field name.
class UserRegistration(Form):
username = CharField(max_length=10)
password = CharField(widget=PasswordInput)
# Here, we're not submitting any data, so the initial value will be displayed.
p = UserRegistration(initial={'username': 'django'}, auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>"""
)
p = UserRegistration(initial={'username': 'stephane'}, auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li>Username: <input type="text" name="username" value="stephane" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>"""
)
# The 'initial' parameter is meaningless if you pass data.
p = UserRegistration({}, initial={'username': 'django'}, auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li><ul class="errorlist"><li>This field is required.</li></ul>
Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
Password: <input type="password" name="password" /></li>"""
)
p = UserRegistration({'username': ''}, initial={'username': 'django'}, auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li><ul class="errorlist"><li>This field is required.</li></ul>
Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
Password: <input type="password" name="password" /></li>"""
)
p = UserRegistration({'username': 'foo'}, initial={'username': 'django'}, auto_id=False)
self.assertHTMLEqual(
p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
Password: <input type="password" name="password" /></li>"""
)
# A dynamic 'initial' value is *not* used as a fallback if data is not provided.
# In this example, we don't provide a value for 'username', and the form raises a
# validation error rather than using the initial value for 'username'.
p = UserRegistration({'password': 'secret'}, initial={'username': 'django'})
self.assertEqual(p.errors['username'], ['This field is required.'])
self.assertFalse(p.is_valid())
# If a Form defines 'initial' *and* 'initial' is passed as a parameter to Form(),
# then the latter will get precedence.
class UserRegistration(Form):
username = CharField(max_length=10, initial='django')
password = CharField(widget=PasswordInput)
p = UserRegistration(initial={'username': 'babik'}, auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li>Username: <input type="text" name="username" value="babik" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>"""
)
def test_callable_initial_data(self):
# The previous technique dealt with raw values as initial data, but it's also
# possible to specify callable initial data.
class UserRegistration(Form):
username = CharField(max_length=10)
password = CharField(widget=PasswordInput)
options = MultipleChoiceField(choices=[('f', 'foo'), ('b', 'bar'), ('w', 'whiz')])
# We need to define functions that get called later.
def initial_django():
return 'django'
def initial_stephane():
return 'stephane'
def initial_options():
return ['f', 'b']
def initial_other_options():
return ['b', 'w']
# Here, we're not submitting any data, so the initial value will be displayed.
p = UserRegistration(initial={'username': initial_django, 'options': initial_options}, auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f" selected="selected">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w">whiz</option>
</select></li>"""
)
# The 'initial' parameter is meaningless if you pass data.
p = UserRegistration({}, initial={'username': initial_django, 'options': initial_options}, auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li><ul class="errorlist"><li>This field is required.</li></ul>
Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
Password: <input type="password" name="password" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
Options: <select multiple="multiple" name="options">
<option value="f">foo</option>
<option value="b">bar</option>
<option value="w">whiz</option>
</select></li>"""
)
p = UserRegistration({'username': ''}, initial={'username': initial_django}, auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li><ul class="errorlist"><li>This field is required.</li></ul>
Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
Password: <input type="password" name="password" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
Options: <select multiple="multiple" name="options">
<option value="f">foo</option>
<option value="b">bar</option>
<option value="w">whiz</option>
</select></li>"""
)
p = UserRegistration(
{'username': 'foo', 'options': ['f', 'b']}, initial={'username': initial_django}, auto_id=False
)
self.assertHTMLEqual(
p.as_ul(),
"""<li>Username: <input type="text" name="username" value="foo" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f" selected="selected">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w">whiz</option>
</select></li>"""
)
# A callable 'initial' value is *not* used as a fallback if data is not provided.
# In this example, we don't provide a value for 'username', and the form raises a
# validation error rather than using the initial value for 'username'.
p = UserRegistration({'password': 'secret'}, initial={'username': initial_django, 'options': initial_options})
self.assertEqual(p.errors['username'], ['This field is required.'])
self.assertFalse(p.is_valid())
# If a Form defines 'initial' *and* 'initial' is passed as a parameter to Form(),
# then the latter will get precedence.
class UserRegistration(Form):
username = CharField(max_length=10, initial=initial_django)
password = CharField(widget=PasswordInput)
options = MultipleChoiceField(
choices=[('f', 'foo'), ('b', 'bar'), ('w', 'whiz')],
initial=initial_other_options,
)
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w" selected="selected">whiz</option>
</select></li>"""
)
p = UserRegistration(initial={'username': initial_stephane, 'options': initial_options}, auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li>Username: <input type="text" name="username" value="stephane" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f" selected="selected">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w">whiz</option>
</select></li>"""
)
def test_changed_data(self):
class Person(Form):
first_name = CharField(initial='Hans')
last_name = CharField(initial='Greatel')
birthday = DateField(initial=datetime.date(1974, 8, 16))
p = Person(data={'first_name': 'Hans', 'last_name': 'Scrmbl',
'birthday': '1974-08-16'})
self.assertTrue(p.is_valid())
self.assertNotIn('first_name', p.changed_data)
self.assertIn('last_name', p.changed_data)
self.assertNotIn('birthday', p.changed_data)
# A field that raises a ValidationError is always included in changed_data.
class PedanticField(forms.Field):
def to_python(self, value):
raise ValidationError('Whatever')
class Person2(Person):
pedantic = PedanticField(initial='whatever', show_hidden_initial=True)
p = Person2(data={'first_name': 'Hans', 'last_name': 'Scrmbl',
'birthday': '1974-08-16', 'initial-pedantic': 'whatever'})
self.assertFalse(p.is_valid())
self.assertIn('pedantic', p.changed_data)
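# Sketch of the assumed per-field hook behind changed_data:
# Field.has_changed(initial, data) does the actual comparison.
self.assertTrue(CharField().has_changed('Hans', 'Scrmbl'))
self.assertFalse(CharField().has_changed('Hans', 'Hans'))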
def test_boundfield_values(self):
# It's possible to get the value that would be used to render a field's
# widget by using the BoundField's value() method.
class UserRegistration(Form):
username = CharField(max_length=10, initial='djangonaut')
password = CharField(widget=PasswordInput)
unbound = UserRegistration()
bound = UserRegistration({'password': 'foo'})
self.assertEqual(bound['username'].value(), None)
self.assertEqual(unbound['username'].value(), 'djangonaut')
self.assertEqual(bound['password'].value(), 'foo')
self.assertEqual(unbound['password'].value(), None)
def test_boundfield_initial_called_once(self):
"""
Multiple calls to BoundField().value() in an unbound form should return
the same result each time (#24391).
"""
class MyForm(Form):
name = CharField(max_length=10, initial=uuid.uuid4)
form = MyForm()
name = form['name']
self.assertEqual(name.value(), name.value())
# BoundField is also cached
self.assertIs(form['name'], name)
def test_boundfield_rendering(self):
"""
Python 2 issue: Test that rendering a BoundField with bytestring content
doesn't lose its safe string status (#22950).
"""
class CustomWidget(TextInput):
def render(self, name, value, attrs=None):
return format_html(str('<input{} />'), ' id=custom')
class SampleForm(Form):
name = CharField(widget=CustomWidget)
f = SampleForm(data={'name': 'bar'})
self.assertIsInstance(force_text(f['name']), SafeData)
def test_custom_boundfield(self):
class CustomField(CharField):
def get_bound_field(self, form, name):
return (form, name)
class SampleForm(Form):
name = CustomField()
f = SampleForm()
self.assertEqual(f['name'], (f, 'name'))
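# Sketch for comparison: the stock CharField's get_bound_field() returns a
# regular BoundField (import path assumed per the boundfield module split).
from django.forms import BoundField
class PlainForm(Form):
name = CharField()
self.assertIsInstance(PlainForm()['name'], BoundField)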
def test_initial_datetime_values(self):
now = datetime.datetime.now()
# Nix microseconds (since they should be ignored). #22502
now_no_ms = now.replace(microsecond=0)
if now == now_no_ms:
now = now.replace(microsecond=1)
def delayed_now():
return now
def delayed_now_time():
return now.time()
class HiddenInputWithoutMicrosec(HiddenInput):
supports_microseconds = False
class TextInputWithoutMicrosec(TextInput):
supports_microseconds = False
class DateTimeForm(Form):
auto_timestamp = DateTimeField(initial=delayed_now)
auto_time_only = TimeField(initial=delayed_now_time)
supports_microseconds = DateTimeField(initial=delayed_now, widget=TextInput)
hi_default_microsec = DateTimeField(initial=delayed_now, widget=HiddenInput)
hi_without_microsec = DateTimeField(initial=delayed_now, widget=HiddenInputWithoutMicrosec)
ti_without_microsec = DateTimeField(initial=delayed_now, widget=TextInputWithoutMicrosec)
unbound = DateTimeForm()
self.assertEqual(unbound['auto_timestamp'].value(), now_no_ms)
self.assertEqual(unbound['auto_time_only'].value(), now_no_ms.time())
self.assertEqual(unbound['supports_microseconds'].value(), now)
self.assertEqual(unbound['hi_default_microsec'].value(), now)
self.assertEqual(unbound['hi_without_microsec'].value(), now_no_ms)
self.assertEqual(unbound['ti_without_microsec'].value(), now_no_ms)
def test_help_text(self):
# You can specify descriptive text for a field by using the 'help_text' argument.
class UserRegistration(Form):
username = CharField(max_length=10, help_text='e.g., [email protected]')
password = CharField(widget=PasswordInput, help_text='Wählen Sie mit Bedacht.')
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li>Username: <input type="text" name="username" maxlength="10" />
<span class="helptext">e.g., [email protected]</span></li>
<li>Password: <input type="password" name="password" />
<span class="helptext">Wählen Sie mit Bedacht.</span></li>"""
)
self.assertHTMLEqual(
p.as_p(),
"""<p>Username: <input type="text" name="username" maxlength="10" />
<span class="helptext">e.g., [email protected]</span></p>
<p>Password: <input type="password" name="password" />
<span class="helptext">Wählen Sie mit Bedacht.</span></p>"""
)
self.assertHTMLEqual(
p.as_table(),
"""<tr><th>Username:</th><td><input type="text" name="username" maxlength="10" /><br />
<span class="helptext">e.g., [email protected]</span></td></tr>
<tr><th>Password:</th><td><input type="password" name="password" /><br />
<span class="helptext">Wählen Sie mit Bedacht.</span></td></tr>"""
)
# The help text is displayed whether or not data is provided for the form.
p = UserRegistration({'username': 'foo'}, auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li>Username: <input type="text" name="username" value="foo" maxlength="10" />
<span class="helptext">e.g., [email protected]</span></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" />
<span class="helptext">Wählen Sie mit Bedacht.</span></li>"""
)
# help_text is not displayed for hidden fields. It can be used for documentation
# purposes, though.
class UserRegistration(Form):
username = CharField(max_length=10, help_text='e.g., [email protected]')
password = CharField(widget=PasswordInput)
next = CharField(widget=HiddenInput, initial='/', help_text='Redirect destination')
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li>Username: <input type="text" name="username" maxlength="10" />
<span class="helptext">e.g., [email protected]</span></li>
<li>Password: <input type="password" name="password" />
<input type="hidden" name="next" value="/" /></li>"""
)
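# Sketch: the raw help_text is still reachable on the BoundField, even for
# the hidden field whose text was omitted from the rendered output.
self.assertEqual(p['username'].help_text, 'e.g., [email protected]')
self.assertEqual(p['next'].help_text, 'Redirect destination')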
def test_subclassing_forms(self):
# You can subclass a Form to add fields. The resulting form subclass will have
# all of the fields of the parent Form, plus whichever fields you define in the
# subclass.
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
class Musician(Person):
instrument = CharField()
p = Person(auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>"""
)
m = Musician(auto_id=False)
self.assertHTMLEqual(
m.as_ul(),
"""<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>
<li>Instrument: <input type="text" name="instrument" /></li>"""
)
# Yes, you can subclass multiple forms. The fields are added in the order in
# which the parent classes are listed.
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
class Instrument(Form):
instrument = CharField()
class Beatle(Person, Instrument):
haircut_type = CharField()
b = Beatle(auto_id=False)
self.assertHTMLEqual(b.as_ul(), """<li>Instrument: <input type="text" name="instrument" /></li>
<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>
<li>Haircut type: <input type="text" name="haircut_type" /></li>""")
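# Sketch of the assumed merge rule: declared fields are collected along the
# reversed MRO, which is why Instrument's field comes first above.
self.assertEqual(
list(Beatle.base_fields.keys()),
['instrument', 'first_name', 'last_name', 'birthday', 'haircut_type']
)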
def test_forms_with_prefixes(self):
# Sometimes it's necessary to have multiple forms display on the same HTML page,
# or multiple copies of the same form. We can accomplish this with form prefixes.
# Pass the keyword argument 'prefix' to the Form constructor to use this feature.
# This value will be prepended to each HTML form field name. One way to think
# about this is "namespaces for HTML forms". Notice that in the data argument,
# each field's key has the prefix, in this case 'person1', prepended to the
# actual field name.
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
data = {
'person1-first_name': 'John',
'person1-last_name': 'Lennon',
'person1-birthday': '1940-10-9'
}
p = Person(data, prefix='person1')
self.assertHTMLEqual(
p.as_ul(),
"""<li><label for="id_person1-first_name">First name:</label>
<input type="text" name="person1-first_name" value="John" id="id_person1-first_name" /></li>
<li><label for="id_person1-last_name">Last name:</label>
<input type="text" name="person1-last_name" value="Lennon" id="id_person1-last_name" /></li>
<li><label for="id_person1-birthday">Birthday:</label>
<input type="text" name="person1-birthday" value="1940-10-9" id="id_person1-birthday" /></li>"""
)
self.assertHTMLEqual(
str(p['first_name']),
'<input type="text" name="person1-first_name" value="John" id="id_person1-first_name" />'
)
self.assertHTMLEqual(
str(p['last_name']),
'<input type="text" name="person1-last_name" value="Lennon" id="id_person1-last_name" />'
)
self.assertHTMLEqual(
str(p['birthday']),
'<input type="text" name="person1-birthday" value="1940-10-9" id="id_person1-birthday" />'
)
self.assertEqual(p.errors, {})
self.assertTrue(p.is_valid())
self.assertEqual(p.cleaned_data['first_name'], 'John')
self.assertEqual(p.cleaned_data['last_name'], 'Lennon')
self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9))
# Let's try submitting some bad data to make sure form.errors and field.errors
# work as expected.
data = {
'person1-first_name': '',
'person1-last_name': '',
'person1-birthday': ''
}
p = Person(data, prefix='person1')
self.assertEqual(p.errors['first_name'], ['This field is required.'])
self.assertEqual(p.errors['last_name'], ['This field is required.'])
self.assertEqual(p.errors['birthday'], ['This field is required.'])
self.assertEqual(p['first_name'].errors, ['This field is required.'])
# Attempts to access non-existent (e.g. prefixed) field names should fail.
with self.assertRaises(KeyError):
p['person1-first_name'].errors
# In this example, the data doesn't have a prefix, but the form requires it, so
# the form doesn't "see" the fields.
data = {
'first_name': 'John',
'last_name': 'Lennon',
'birthday': '1940-10-9'
}
p = Person(data, prefix='person1')
self.assertEqual(p.errors['first_name'], ['This field is required.'])
self.assertEqual(p.errors['last_name'], ['This field is required.'])
self.assertEqual(p.errors['birthday'], ['This field is required.'])
# With prefixes, a single data dictionary can hold data for multiple instances
# of the same form.
data = {
'person1-first_name': 'John',
'person1-last_name': 'Lennon',
'person1-birthday': '1940-10-9',
'person2-first_name': 'Jim',
'person2-last_name': 'Morrison',
'person2-birthday': '1943-12-8'
}
p1 = Person(data, prefix='person1')
self.assertTrue(p1.is_valid())
self.assertEqual(p1.cleaned_data['first_name'], 'John')
self.assertEqual(p1.cleaned_data['last_name'], 'Lennon')
self.assertEqual(p1.cleaned_data['birthday'], datetime.date(1940, 10, 9))
p2 = Person(data, prefix='person2')
self.assertTrue(p2.is_valid())
self.assertEqual(p2.cleaned_data['first_name'], 'Jim')
self.assertEqual(p2.cleaned_data['last_name'], 'Morrison')
self.assertEqual(p2.cleaned_data['birthday'], datetime.date(1943, 12, 8))
# By default, forms append a hyphen between the prefix and the field name, but a
# form can alter that behavior by implementing the add_prefix() method. This
# method takes a field name and returns the prefixed field name, according
# to self.prefix.
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
def add_prefix(self, field_name):
return '%s-prefix-%s' % (self.prefix, field_name) if self.prefix else field_name
p = Person(prefix='foo')
self.assertHTMLEqual(
p.as_ul(),
"""<li><label for="id_foo-prefix-first_name">First name:</label>
<input type="text" name="foo-prefix-first_name" id="id_foo-prefix-first_name" /></li>
<li><label for="id_foo-prefix-last_name">Last name:</label>
<input type="text" name="foo-prefix-last_name" id="id_foo-prefix-last_name" /></li>
<li><label for="id_foo-prefix-birthday">Birthday:</label>
<input type="text" name="foo-prefix-birthday" id="id_foo-prefix-birthday" /></li>"""
)
data = {
'foo-prefix-first_name': 'John',
'foo-prefix-last_name': 'Lennon',
'foo-prefix-birthday': '1940-10-9'
}
p = Person(data, prefix='foo')
self.assertTrue(p.is_valid())
self.assertEqual(p.cleaned_data['first_name'], 'John')
self.assertEqual(p.cleaned_data['last_name'], 'Lennon')
self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9))
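# Sketch of the default being overridden above: add_prefix() joins the
# prefix and field name with a hyphen, and is a no-op without a prefix.
class PlainPerson(Form):
first_name = CharField()
self.assertEqual(PlainPerson(prefix='p1').add_prefix('first_name'), 'p1-first_name')
self.assertEqual(PlainPerson().add_prefix('first_name'), 'first_name')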
def test_class_prefix(self):
# Prefix can be also specified at the class level.
class Person(Form):
first_name = CharField()
prefix = 'foo'
p = Person()
self.assertEqual(p.prefix, 'foo')
p = Person(prefix='bar')
self.assertEqual(p.prefix, 'bar')
def test_forms_with_null_boolean(self):
# NullBooleanField is a bit of a special case because its presentation (widget)
# is different from its data. This is handled transparently, though.
class Person(Form):
name = CharField()
is_cool = NullBooleanField()
p = Person({'name': 'Joe'}, auto_id=False)
self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select>""")
p = Person({'name': 'Joe', 'is_cool': '1'}, auto_id=False)
self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select>""")
p = Person({'name': 'Joe', 'is_cool': '2'}, auto_id=False)
self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>""")
p = Person({'name': 'Joe', 'is_cool': '3'}, auto_id=False)
self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected="selected">No</option>
</select>""")
p = Person({'name': 'Joe', 'is_cool': True}, auto_id=False)
self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>""")
p = Person({'name': 'Joe', 'is_cool': False}, auto_id=False)
self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected="selected">No</option>
</select>""")
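# Sketch: the '1'/'2'/'3' option values are translated back to Python by the
# widget, not the field (NullBooleanSelect.value_from_datadict()).
from django.forms.widgets import NullBooleanSelect
w = NullBooleanSelect()
self.assertIsNone(w.value_from_datadict({'is_cool': '1'}, {}, 'is_cool'))
self.assertIs(w.value_from_datadict({'is_cool': '2'}, {}, 'is_cool'), True)
self.assertIs(w.value_from_datadict({'is_cool': '3'}, {}, 'is_cool'), False)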
def test_forms_with_file_fields(self):
# FileFields are a special case because they take their data from
# request.FILES, not request.POST.
class FileForm(Form):
file1 = FileField()
f = FileForm(auto_id=False)
self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1" /></td></tr>')
f = FileForm(data={}, files={}, auto_id=False)
self.assertHTMLEqual(
f.as_table(),
'<tr><th>File1:</th><td>'
'<ul class="errorlist"><li>This field is required.</li></ul>'
'<input type="file" name="file1" /></td></tr>'
)
f = FileForm(data={}, files={'file1': SimpleUploadedFile('name', b'')}, auto_id=False)
self.assertHTMLEqual(
f.as_table(),
'<tr><th>File1:</th><td>'
'<ul class="errorlist"><li>The submitted file is empty.</li></ul>'
'<input type="file" name="file1" /></td></tr>'
)
f = FileForm(data={}, files={'file1': 'something that is not a file'}, auto_id=False)
self.assertHTMLEqual(
f.as_table(),
'<tr><th>File1:</th><td>'
'<ul class="errorlist"><li>No file was submitted. Check the '
'encoding type on the form.</li></ul>'
'<input type="file" name="file1" /></td></tr>'
)
f = FileForm(data={}, files={'file1': SimpleUploadedFile('name', b'some content')}, auto_id=False)
self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1" /></td></tr>')
self.assertTrue(f.is_valid())
file1 = SimpleUploadedFile('我隻氣墊船裝滿晒鱔.txt', 'मेरी मँडराने वाली नाव सर्पमीनों से भरी ह'.encode('utf-8'))
f = FileForm(data={}, files={'file1': file1}, auto_id=False)
self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1" /></td></tr>')
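# Sketch: on success, cleaned_data holds the uploaded file object itself.
f = FileForm(data={}, files={'file1': SimpleUploadedFile('name', b'some content')}, auto_id=False)
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['file1'].name, 'name')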
def test_basic_processing_in_view(self):
class UserRegistration(Form):
username = CharField(max_length=10)
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput)
def clean(self):
if (self.cleaned_data.get('password1') and self.cleaned_data.get('password2')
and self.cleaned_data['password1'] != self.cleaned_data['password2']):
raise ValidationError('Please make sure your passwords match.')
return self.cleaned_data
def my_function(method, post_data):
if method == 'POST':
form = UserRegistration(post_data, auto_id=False)
else:
form = UserRegistration(auto_id=False)
if form.is_valid():
return 'VALID: %r' % sorted(six.iteritems(form.cleaned_data))
t = Template(
'<form action="" method="post">\n'
'<table>\n{{ form }}\n</table>\n<input type="submit" />\n</form>'
)
return t.render(Context({'form': form}))
# Case 1: GET (an empty form, with no errors).
self.assertHTMLEqual(my_function('GET', {}), """<form action="" method="post">
<table>
<tr><th>Username:</th><td><input type="text" name="username" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><input type="password" name="password2" /></td></tr>
</table>
<input type="submit" />
</form>""")
# Case 2: POST with erroneous data (a redisplayed form, with errors).
self.assertHTMLEqual(
my_function('POST', {'username': 'this-is-a-long-username', 'password1': 'foo', 'password2': 'bar'}),
"""<form action="" method="post">
<table>
<tr><td colspan="2"><ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul></td></tr>
<tr><th>Username:</th><td><ul class="errorlist">
<li>Ensure this value has at most 10 characters (it has 23).</li></ul>
<input type="text" name="username" value="this-is-a-long-username" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><input type="password" name="password2" /></td></tr>
</table>
<input type="submit" />
</form>"""
)
# Case 3: POST with valid data (the success message).
self.assertEqual(
my_function('POST', {'username': 'adrian', 'password1': 'secret', 'password2': 'secret'}),
str_prefix(
"VALID: [('password1', %(_)s'secret'), ('password2', %(_)s'secret'), ('username', %(_)s'adrian')]"
)
)
def test_templates_with_forms(self):
class UserRegistration(Form):
username = CharField(max_length=10, help_text="Good luck picking a username that doesn't already exist.")
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput)
def clean(self):
if (self.cleaned_data.get('password1') and self.cleaned_data.get('password2')
and self.cleaned_data['password1'] != self.cleaned_data['password2']):
raise ValidationError('Please make sure your passwords match.')
return self.cleaned_data
# You have full flexibility in displaying form fields in a template. Just pass a
# Form instance to the template, and use "dot" access to refer to individual
# fields. Note, however, that this flexibility comes with the responsibility of
# displaying all the errors, including any that might not be associated with a
# particular field.
t = Template('''<form action="">
{{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p>
{{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p>
{{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p>
<input type="submit" />
</form>''')
self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action="">
<p><label>Your username: <input type="text" name="username" maxlength="10" /></label></p>
<p><label>Password: <input type="password" name="password1" /></label></p>
<p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
self.assertHTMLEqual(
t.render(Context({'form': UserRegistration({'username': 'django'}, auto_id=False)})),
"""<form action="">
<p><label>Your username: <input type="text" name="username" value="django" maxlength="10" /></label></p>
<ul class="errorlist"><li>This field is required.</li></ul><p>
<label>Password: <input type="password" name="password1" /></label></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>"""
)
# Use form.[field].label to output a field's label. You can specify the label for
# a field by using the 'label' argument to a Field class. If you don't specify
# 'label', Django will use the field name with underscores converted to spaces,
# and the initial letter capitalized.
t = Template('''<form action="">
<p><label>{{ form.username.label }}: {{ form.username }}</label></p>
<p><label>{{ form.password1.label }}: {{ form.password1 }}</label></p>
<p><label>{{ form.password2.label }}: {{ form.password2 }}</label></p>
<input type="submit" />
</form>''')
self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action="">
<p><label>Username: <input type="text" name="username" maxlength="10" /></label></p>
<p><label>Password1: <input type="password" name="password1" /></label></p>
<p><label>Password2: <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
        # Use form.[field].label_tag to output a field's label with a <label> tag
# wrapped around it, but *only* if the given field has an "id" attribute.
# Recall from above that passing the "auto_id" argument to a Form gives each
# field an "id" attribute.
t = Template('''<form action="">
<p>{{ form.username.label_tag }} {{ form.username }}</p>
<p>{{ form.password1.label_tag }} {{ form.password1 }}</p>
<p>{{ form.password2.label_tag }} {{ form.password2 }}</p>
<input type="submit" />
</form>''')
self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action="">
<p>Username: <input type="text" name="username" maxlength="10" /></p>
<p>Password1: <input type="password" name="password1" /></p>
<p>Password2: <input type="password" name="password2" /></p>
<input type="submit" />
</form>""")
self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id='id_%s')})), """<form action="">
<p><label for="id_username">Username:</label> <input id="id_username" type="text" name="username" maxlength="10" /></p>
<p><label for="id_password1">Password1:</label> <input type="password" name="password1" id="id_password1" /></p>
<p><label for="id_password2">Password2:</label> <input type="password" name="password2" id="id_password2" /></p>
<input type="submit" />
</form>""")
        # Use form.[field].help_text to output a field's help text. If the given field
# does not have help text, nothing will be output.
t = Template('''<form action="">
<p>{{ form.username.label_tag }} {{ form.username }}<br />{{ form.username.help_text }}</p>
<p>{{ form.password1.label_tag }} {{ form.password1 }}</p>
<p>{{ form.password2.label_tag }} {{ form.password2 }}</p>
<input type="submit" />
</form>''')
self.assertHTMLEqual(
t.render(Context({'form': UserRegistration(auto_id=False)})),
"""<form action="">
<p>Username: <input type="text" name="username" maxlength="10" /><br />
Good luck picking a username that doesn't already exist.</p>
<p>Password1: <input type="password" name="password1" /></p>
<p>Password2: <input type="password" name="password2" /></p>
<input type="submit" />
</form>"""
)
self.assertEqual(
Template('{{ form.password1.help_text }}').render(Context({'form': UserRegistration(auto_id=False)})),
''
)
# To display the errors that aren't associated with a particular field -- e.g.,
# the errors caused by Form.clean() -- use {{ form.non_field_errors }} in the
# template. If used on its own, it is displayed as a <ul> (or an empty string, if
# the list of errors is empty). You can also use it in {% if %} statements.
t = Template('''<form action="">
{{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p>
{{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p>
{{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p>
<input type="submit" />
</form>''')
self.assertHTMLEqual(
t.render(Context({
'form': UserRegistration({'username': 'django', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)
})),
"""<form action="">
<p><label>Your username: <input type="text" name="username" value="django" maxlength="10" /></label></p>
<p><label>Password: <input type="password" name="password1" /></label></p>
<p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>"""
)
t = Template('''<form action="">
{{ form.non_field_errors }}
{{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p>
{{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p>
{{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p>
<input type="submit" />
</form>''')
self.assertHTMLEqual(
t.render(Context({
'form': UserRegistration({'username': 'django', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)
})),
"""<form action="">
<ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul>
<p><label>Your username: <input type="text" name="username" value="django" maxlength="10" /></label></p>
<p><label>Password: <input type="password" name="password1" /></label></p>
<p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>"""
)
def test_empty_permitted(self):
# Sometimes (pretty much in formsets) we want to allow a form to pass validation
# if it is completely empty. We can accomplish this by using the empty_permitted
# argument to a form constructor.
class SongForm(Form):
artist = CharField()
name = CharField()
        # First let's show what happens if empty_permitted=False (the default):
data = {'artist': '', 'song': ''}
form = SongForm(data, empty_permitted=False)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'name': ['This field is required.'], 'artist': ['This field is required.']})
self.assertEqual(form.cleaned_data, {})
# Now let's show what happens when empty_permitted=True and the form is empty.
form = SongForm(data, empty_permitted=True)
self.assertTrue(form.is_valid())
self.assertEqual(form.errors, {})
self.assertEqual(form.cleaned_data, {})
# But if we fill in data for one of the fields, the form is no longer empty and
# the whole thing must pass validation.
data = {'artist': 'The Doors', 'song': ''}
form = SongForm(data, empty_permitted=False)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'name': ['This field is required.']})
self.assertEqual(form.cleaned_data, {'artist': 'The Doors'})
        # If a field is not given in the data then None is returned for its data. Let's
        # make sure that None is treated accordingly when checking for empty_permitted.
data = {'artist': None, 'song': ''}
form = SongForm(data, empty_permitted=True)
self.assertTrue(form.is_valid())
        # However, we *really* need to be sure we are checking for None specifically,
        # because any initial data that is merely falsy (e.g. 0.0) must be treated
        # literally.
class PriceForm(Form):
amount = FloatField()
qty = IntegerField()
data = {'amount': '0.0', 'qty': ''}
form = PriceForm(data, initial={'amount': 0.0}, empty_permitted=True)
self.assertTrue(form.is_valid())
def test_extracting_hidden_and_visible(self):
class SongForm(Form):
token = CharField(widget=HiddenInput)
artist = CharField()
name = CharField()
form = SongForm()
self.assertEqual([f.name for f in form.hidden_fields()], ['token'])
self.assertEqual([f.name for f in form.visible_fields()], ['artist', 'name'])
def test_hidden_initial_gets_id(self):
class MyForm(Form):
field1 = CharField(max_length=50, show_hidden_initial=True)
self.assertHTMLEqual(
MyForm().as_table(),
'<tr><th><label for="id_field1">Field1:</label></th>'
'<td><input id="id_field1" type="text" name="field1" maxlength="50" />'
'<input type="hidden" name="initial-field1" id="initial-id_field1" /></td></tr>'
)
def test_error_html_required_html_classes(self):
class Person(Form):
name = CharField()
is_cool = NullBooleanField()
email = EmailField(required=False)
age = IntegerField()
p = Person({})
p.error_css_class = 'error'
p.required_css_class = 'required'
self.assertHTMLEqual(
p.as_ul(),
"""<li class="required error"><ul class="errorlist"><li>This field is required.</li></ul>
<label class="required" for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></li>
<li class="required"><label class="required" for="id_is_cool">Is cool:</label> <select name="is_cool" id="id_is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select></li>
<li><label for="id_email">Email:</label> <input type="email" name="email" id="id_email" /></li>
<li class="required error"><ul class="errorlist"><li>This field is required.</li></ul>
<label class="required" for="id_age">Age:</label> <input type="number" name="age" id="id_age" /></li>"""
)
self.assertHTMLEqual(
p.as_p(),
"""<ul class="errorlist"><li>This field is required.</li></ul>
<p class="required error"><label class="required" for="id_name">Name:</label>
<input type="text" name="name" id="id_name" /></p>
<p class="required"><label class="required" for="id_is_cool">Is cool:</label>
<select name="is_cool" id="id_is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select></p>
<p><label for="id_email">Email:</label> <input type="email" name="email" id="id_email" /></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p class="required error"><label class="required" for="id_age">Age:</label>
<input type="number" name="age" id="id_age" /></p>"""
)
self.assertHTMLEqual(
p.as_table(),
"""<tr class="required error">
<th><label class="required" for="id_name">Name:</label></th>
<td><ul class="errorlist"><li>This field is required.</li></ul>
<input type="text" name="name" id="id_name" /></td></tr>
<tr class="required"><th><label class="required" for="id_is_cool">Is cool:</label></th>
<td><select name="is_cool" id="id_is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select></td></tr>
<tr><th><label for="id_email">Email:</label></th><td>
<input type="email" name="email" id="id_email" /></td></tr>
<tr class="required error"><th><label class="required" for="id_age">Age:</label></th>
<td><ul class="errorlist"><li>This field is required.</li></ul>
<input type="number" name="age" id="id_age" /></td></tr>"""
)
def test_label_has_required_css_class(self):
"""
#17922 - required_css_class is added to the label_tag() of required fields.
"""
class SomeForm(Form):
required_css_class = 'required'
field = CharField(max_length=10)
field2 = IntegerField(required=False)
f = SomeForm({'field': 'test'})
self.assertHTMLEqual(f['field'].label_tag(), '<label for="id_field" class="required">Field:</label>')
self.assertHTMLEqual(
f['field'].label_tag(attrs={'class': 'foo'}),
'<label for="id_field" class="foo required">Field:</label>'
)
self.assertHTMLEqual(f['field2'].label_tag(), '<label for="id_field2">Field2:</label>')
def test_label_split_datetime_not_displayed(self):
class EventForm(Form):
happened_at = SplitDateTimeField(widget=SplitHiddenDateTimeWidget)
form = EventForm()
self.assertHTMLEqual(
form.as_ul(),
'<input type="hidden" name="happened_at_0" id="id_happened_at_0" />'
'<input type="hidden" name="happened_at_1" id="id_happened_at_1" />'
)
def test_multivalue_field_validation(self):
def bad_names(value):
if value == 'bad value':
raise ValidationError('bad value not allowed')
class NameField(MultiValueField):
def __init__(self, fields=(), *args, **kwargs):
fields = (CharField(label='First name', max_length=10),
CharField(label='Last name', max_length=10))
super(NameField, self).__init__(fields=fields, *args, **kwargs)
def compress(self, data_list):
return ' '.join(data_list)
class NameForm(Form):
name = NameField(validators=[bad_names])
form = NameForm(data={'name': ['bad', 'value']})
form.full_clean()
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'name': ['bad value not allowed']})
form = NameForm(data={'name': ['should be overly', 'long for the field names']})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'name': ['Ensure this value has at most 10 characters (it has 16).',
'Ensure this value has at most 10 characters (it has 24).']})
form = NameForm(data={'name': ['fname', 'lname']})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data, {'name': 'fname lname'})
def test_multivalue_deep_copy(self):
"""
#19298 -- MultiValueField needs to override the default as it needs
to deep-copy subfields:
"""
class ChoicesField(MultiValueField):
def __init__(self, fields=(), *args, **kwargs):
fields = (ChoiceField(label='Rank',
choices=((1, 1), (2, 2))),
CharField(label='Name', max_length=10))
super(ChoicesField, self).__init__(fields=fields, *args, **kwargs)
field = ChoicesField()
field2 = copy.deepcopy(field)
self.assertIsInstance(field2, ChoicesField)
self.assertIsNot(field2.fields, field.fields)
self.assertIsNot(field2.fields[0].choices, field.fields[0].choices)
def test_multivalue_initial_data(self):
"""
#23674 -- invalid initial data should not break form.changed_data()
"""
class DateAgeField(MultiValueField):
def __init__(self, fields=(), *args, **kwargs):
fields = (DateField(label="Date"), IntegerField(label="Age"))
super(DateAgeField, self).__init__(fields=fields, *args, **kwargs)
class DateAgeForm(Form):
date_age = DateAgeField()
data = {"date_age": ["1998-12-06", 16]}
form = DateAgeForm(data, initial={"date_age": ["200-10-10", 14]})
self.assertTrue(form.has_changed())
def test_multivalue_optional_subfields(self):
class PhoneField(MultiValueField):
def __init__(self, *args, **kwargs):
fields = (
CharField(label='Country Code', validators=[
RegexValidator(r'^\+[0-9]{1,2}$', message='Enter a valid country code.')]),
CharField(label='Phone Number'),
CharField(label='Extension', error_messages={'incomplete': 'Enter an extension.'}),
CharField(label='Label', required=False, help_text='E.g. home, work.'),
)
super(PhoneField, self).__init__(fields, *args, **kwargs)
def compress(self, data_list):
if data_list:
return '%s.%s ext. %s (label: %s)' % tuple(data_list)
return None
# An empty value for any field will raise a `required` error on a
# required `MultiValueField`.
f = PhoneField()
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, [])
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, ['+61'])
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, ['+61', '287654321', '123'])
self.assertEqual('+61.287654321 ext. 123 (label: Home)', f.clean(['+61', '287654321', '123', 'Home']))
self.assertRaisesMessage(ValidationError,
"'Enter a valid country code.'", f.clean, ['61', '287654321', '123', 'Home'])
# Empty values for fields will NOT raise a `required` error on an
# optional `MultiValueField`
f = PhoneField(required=False)
self.assertIsNone(f.clean(''))
self.assertIsNone(f.clean(None))
self.assertIsNone(f.clean([]))
self.assertEqual('+61. ext. (label: )', f.clean(['+61']))
self.assertEqual('+61.287654321 ext. 123 (label: )', f.clean(['+61', '287654321', '123']))
self.assertEqual('+61.287654321 ext. 123 (label: Home)', f.clean(['+61', '287654321', '123', 'Home']))
self.assertRaisesMessage(ValidationError,
"'Enter a valid country code.'", f.clean, ['61', '287654321', '123', 'Home'])
# For a required `MultiValueField` with `require_all_fields=False`, a
# `required` error will only be raised if all fields are empty. Fields
# can individually be required or optional. An empty value for any
# required field will raise an `incomplete` error.
f = PhoneField(require_all_fields=False)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, [])
self.assertRaisesMessage(ValidationError, "'Enter a complete value.'", f.clean, ['+61'])
self.assertEqual('+61.287654321 ext. 123 (label: )', f.clean(['+61', '287654321', '123']))
six.assertRaisesRegex(self, ValidationError,
"'Enter a complete value\.', u?'Enter an extension\.'", f.clean, ['', '', '', 'Home'])
self.assertRaisesMessage(ValidationError,
"'Enter a valid country code.'", f.clean, ['61', '287654321', '123', 'Home'])
# For an optional `MultiValueField` with `require_all_fields=False`, we
# don't get any `required` error but we still get `incomplete` errors.
f = PhoneField(required=False, require_all_fields=False)
self.assertIsNone(f.clean(''))
self.assertIsNone(f.clean(None))
self.assertIsNone(f.clean([]))
self.assertRaisesMessage(ValidationError, "'Enter a complete value.'", f.clean, ['+61'])
self.assertEqual('+61.287654321 ext. 123 (label: )', f.clean(['+61', '287654321', '123']))
six.assertRaisesRegex(self, ValidationError,
"'Enter a complete value\.', u?'Enter an extension\.'", f.clean, ['', '', '', 'Home'])
self.assertRaisesMessage(ValidationError,
"'Enter a valid country code.'", f.clean, ['61', '287654321', '123', 'Home'])
def test_custom_empty_values(self):
"""
Test that form fields can customize what is considered as an empty value
for themselves (#19997).
"""
class CustomJSONField(CharField):
empty_values = [None, '']
def to_python(self, value):
# Fake json.loads
if value == '{}':
return {}
return super(CustomJSONField, self).to_python(value)
class JSONForm(forms.Form):
json = CustomJSONField()
form = JSONForm(data={'json': '{}'})
form.full_clean()
self.assertEqual(form.cleaned_data, {'json': {}})
def test_boundfield_label_tag(self):
class SomeForm(Form):
field = CharField()
boundfield = SomeForm()['field']
testcases = [ # (args, kwargs, expected)
# without anything: just print the <label>
((), {}, '<label for="id_field">Field:</label>'),
# passing just one argument: overrides the field's label
(('custom',), {}, '<label for="id_field">custom:</label>'),
# the overridden label is escaped
            (('custom&',), {}, '<label for="id_field">custom&amp;:</label>'),
((mark_safe('custom&'),), {}, '<label for="id_field">custom&:</label>'),
# Passing attrs to add extra attributes on the <label>
((), {'attrs': {'class': 'pretty'}}, '<label for="id_field" class="pretty">Field:</label>')
]
for args, kwargs, expected in testcases:
self.assertHTMLEqual(boundfield.label_tag(*args, **kwargs), expected)
def test_boundfield_label_tag_no_id(self):
"""
If a widget has no id, label_tag just returns the text with no
surrounding <label>.
"""
class SomeForm(Form):
field = CharField()
boundfield = SomeForm(auto_id='')['field']
self.assertHTMLEqual(boundfield.label_tag(), 'Field:')
        self.assertHTMLEqual(boundfield.label_tag('Custom&'), 'Custom&amp;:')
def test_boundfield_label_tag_custom_widget_id_for_label(self):
class CustomIdForLabelTextInput(TextInput):
def id_for_label(self, id):
return 'custom_' + id
class EmptyIdForLabelTextInput(TextInput):
def id_for_label(self, id):
return None
class SomeForm(Form):
custom = CharField(widget=CustomIdForLabelTextInput)
empty = CharField(widget=EmptyIdForLabelTextInput)
form = SomeForm()
self.assertHTMLEqual(form['custom'].label_tag(), '<label for="custom_id_custom">Custom:</label>')
self.assertHTMLEqual(form['empty'].label_tag(), '<label>Empty:</label>')
def test_boundfield_empty_label(self):
class SomeForm(Form):
field = CharField(label='')
boundfield = SomeForm()['field']
self.assertHTMLEqual(boundfield.label_tag(), '<label for="id_field"></label>')
def test_boundfield_id_for_label(self):
class SomeForm(Form):
field = CharField(label='')
self.assertEqual(SomeForm()['field'].id_for_label, 'id_field')
def test_boundfield_id_for_label_override_by_attrs(self):
"""
If an id is provided in `Widget.attrs`, it overrides the generated ID,
unless it is `None`.
"""
class SomeForm(Form):
field = CharField(widget=TextInput(attrs={'id': 'myCustomID'}))
field_none = CharField(widget=TextInput(attrs={'id': None}))
form = SomeForm()
self.assertEqual(form['field'].id_for_label, 'myCustomID')
self.assertEqual(form['field_none'].id_for_label, 'id_field_none')
def test_label_tag_override(self):
"""
BoundField label_suffix (if provided) overrides Form label_suffix
"""
class SomeForm(Form):
field = CharField()
boundfield = SomeForm(label_suffix='!')['field']
self.assertHTMLEqual(boundfield.label_tag(label_suffix='$'), '<label for="id_field">Field$</label>')
def test_field_name(self):
"""#5749 - `field_name` may be used as a key in _html_output()."""
class SomeForm(Form):
some_field = CharField()
def as_p(self):
return self._html_output(
normal_row='<p id="p_%(field_name)s"></p>',
error_row='%s',
row_ender='</p>',
help_text_html=' %s',
errors_on_separate_row=True,
)
form = SomeForm()
self.assertHTMLEqual(form.as_p(), '<p id="p_some_field"></p>')
def test_field_without_css_classes(self):
"""
`css_classes` may be used as a key in _html_output() (empty classes).
"""
class SomeForm(Form):
some_field = CharField()
def as_p(self):
return self._html_output(
normal_row='<p class="%(css_classes)s"></p>',
error_row='%s',
row_ender='</p>',
help_text_html=' %s',
errors_on_separate_row=True,
)
form = SomeForm()
self.assertHTMLEqual(form.as_p(), '<p class=""></p>')
def test_field_with_css_class(self):
"""
`css_classes` may be used as a key in _html_output() (class comes
from required_css_class in this case).
"""
class SomeForm(Form):
some_field = CharField()
required_css_class = 'foo'
def as_p(self):
return self._html_output(
normal_row='<p class="%(css_classes)s"></p>',
error_row='%s',
row_ender='</p>',
help_text_html=' %s',
errors_on_separate_row=True,
)
form = SomeForm()
self.assertHTMLEqual(form.as_p(), '<p class="foo"></p>')
def test_field_name_with_hidden_input(self):
"""
BaseForm._html_output() should merge all the hidden input fields and
put them in the last row.
"""
class SomeForm(Form):
hidden1 = CharField(widget=HiddenInput)
custom = CharField()
hidden2 = CharField(widget=HiddenInput)
def as_p(self):
return self._html_output(
normal_row='<p%(html_class_attr)s>%(field)s %(field_name)s</p>',
error_row='%s',
row_ender='</p>',
help_text_html=' %s',
errors_on_separate_row=True,
)
form = SomeForm()
self.assertHTMLEqual(
form.as_p(),
'<p><input id="id_custom" name="custom" type="text" /> custom'
'<input id="id_hidden1" name="hidden1" type="hidden" />'
'<input id="id_hidden2" name="hidden2" type="hidden" /></p>'
)
def test_field_name_with_hidden_input_and_non_matching_row_ender(self):
"""
BaseForm._html_output() should merge all the hidden input fields and
put them in the last row ended with the specific row ender.
"""
class SomeForm(Form):
hidden1 = CharField(widget=HiddenInput)
custom = CharField()
hidden2 = CharField(widget=HiddenInput)
def as_p(self):
return self._html_output(
normal_row='<p%(html_class_attr)s>%(field)s %(field_name)s</p>',
error_row='%s',
row_ender='<hr/><hr/>',
help_text_html=' %s',
errors_on_separate_row=True
)
form = SomeForm()
self.assertHTMLEqual(
form.as_p(),
'<p><input id="id_custom" name="custom" type="text" /> custom</p>\n'
'<input id="id_hidden1" name="hidden1" type="hidden" />'
'<input id="id_hidden2" name="hidden2" type="hidden" /><hr/><hr/>'
)
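    # Taken together, the tests above exercise the substitution keys that
    # BaseForm._html_output() exposes to normal_row: %(field)s, %(field_name)s,
    # %(css_classes)s and %(html_class_attr)s.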
def test_error_dict(self):
class MyForm(Form):
foo = CharField()
bar = CharField()
def clean(self):
raise ValidationError('Non-field error.', code='secret', params={'a': 1, 'b': 2})
form = MyForm({})
self.assertEqual(form.is_valid(), False)
errors = form.errors.as_text()
control = [
'* foo\n * This field is required.',
'* bar\n * This field is required.',
'* __all__\n * Non-field error.',
]
for error in control:
self.assertIn(error, errors)
errors = form.errors.as_ul()
control = [
'<li>foo<ul class="errorlist"><li>This field is required.</li></ul></li>',
'<li>bar<ul class="errorlist"><li>This field is required.</li></ul></li>',
'<li>__all__<ul class="errorlist nonfield"><li>Non-field error.</li></ul></li>',
]
for error in control:
self.assertInHTML(error, errors)
errors = json.loads(form.errors.as_json())
control = {
'foo': [{'code': 'required', 'message': 'This field is required.'}],
'bar': [{'code': 'required', 'message': 'This field is required.'}],
'__all__': [{'code': 'secret', 'message': 'Non-field error.'}]
}
self.assertEqual(errors, control)
def test_error_dict_as_json_escape_html(self):
"""#21962 - adding html escape flag to ErrorDict"""
class MyForm(Form):
foo = CharField()
bar = CharField()
def clean(self):
raise ValidationError('<p>Non-field error.</p>',
code='secret',
params={'a': 1, 'b': 2})
control = {
'foo': [{'code': 'required', 'message': 'This field is required.'}],
'bar': [{'code': 'required', 'message': 'This field is required.'}],
'__all__': [{'code': 'secret', 'message': '<p>Non-field error.</p>'}]
}
form = MyForm({})
self.assertFalse(form.is_valid())
errors = json.loads(form.errors.as_json())
self.assertEqual(errors, control)
errors = json.loads(form.errors.as_json(escape_html=True))
        control['__all__'][0]['message'] = '&lt;p&gt;Non-field error.&lt;/p&gt;'
self.assertEqual(errors, control)
def test_error_list(self):
e = ErrorList()
e.append('Foo')
e.append(ValidationError('Foo%(bar)s', code='foobar', params={'bar': 'bar'}))
self.assertIsInstance(e, list)
self.assertIn('Foo', e)
self.assertIn('Foo', forms.ValidationError(e))
self.assertEqual(
e.as_text(),
'* Foo\n* Foobar'
)
self.assertEqual(
e.as_ul(),
'<ul class="errorlist"><li>Foo</li><li>Foobar</li></ul>'
)
self.assertEqual(
json.loads(e.as_json()),
[{"message": "Foo", "code": ""}, {"message": "Foobar", "code": "foobar"}]
)
def test_error_list_class_not_specified(self):
e = ErrorList()
e.append('Foo')
e.append(ValidationError('Foo%(bar)s', code='foobar', params={'bar': 'bar'}))
self.assertEqual(
e.as_ul(),
'<ul class="errorlist"><li>Foo</li><li>Foobar</li></ul>'
)
def test_error_list_class_has_one_class_specified(self):
e = ErrorList(error_class='foobar-error-class')
e.append('Foo')
e.append(ValidationError('Foo%(bar)s', code='foobar', params={'bar': 'bar'}))
self.assertEqual(
e.as_ul(),
'<ul class="errorlist foobar-error-class"><li>Foo</li><li>Foobar</li></ul>'
)
def test_error_list_with_hidden_field_errors_has_correct_class(self):
class Person(Form):
first_name = CharField()
last_name = CharField(widget=HiddenInput)
p = Person({'first_name': 'John'})
self.assertHTMLEqual(
p.as_ul(),
"""<li><ul class="errorlist nonfield">
<li>(Hidden field last_name) This field is required.</li></ul></li><li>
<label for="id_first_name">First name:</label>
<input id="id_first_name" name="first_name" type="text" value="John" />
<input id="id_last_name" name="last_name" type="hidden" /></li>"""
)
self.assertHTMLEqual(
p.as_p(),
"""<ul class="errorlist nonfield"><li>(Hidden field last_name) This field is required.</li></ul>
<p><label for="id_first_name">First name:</label>
<input id="id_first_name" name="first_name" type="text" value="John" />
<input id="id_last_name" name="last_name" type="hidden" /></p>"""
)
self.assertHTMLEqual(
p.as_table(),
"""<tr><td colspan="2"><ul class="errorlist nonfield">
<li>(Hidden field last_name) This field is required.</li></ul></td></tr>
<tr><th><label for="id_first_name">First name:</label></th><td>
<input id="id_first_name" name="first_name" type="text" value="John" />
<input id="id_last_name" name="last_name" type="hidden" /></td></tr>"""
)
def test_error_list_with_non_field_errors_has_correct_class(self):
class Person(Form):
first_name = CharField()
last_name = CharField()
def clean(self):
raise ValidationError('Generic validation error')
p = Person({'first_name': 'John', 'last_name': 'Lennon'})
self.assertHTMLEqual(
str(p.non_field_errors()),
'<ul class="errorlist nonfield"><li>Generic validation error</li></ul>'
)
self.assertHTMLEqual(
p.as_ul(),
"""<li>
<ul class="errorlist nonfield"><li>Generic validation error</li></ul></li>
<li><label for="id_first_name">First name:</label>
<input id="id_first_name" name="first_name" type="text" value="John" /></li>
<li><label for="id_last_name">Last name:</label>
<input id="id_last_name" name="last_name" type="text" value="Lennon" /></li>"""
)
self.assertHTMLEqual(
p.non_field_errors().as_text(),
'* Generic validation error'
)
self.assertHTMLEqual(
p.as_p(),
"""<ul class="errorlist nonfield"><li>Generic validation error</li></ul>
<p><label for="id_first_name">First name:</label>
<input id="id_first_name" name="first_name" type="text" value="John" /></p>
<p><label for="id_last_name">Last name:</label>
<input id="id_last_name" name="last_name" type="text" value="Lennon" /></p>"""
)
self.assertHTMLEqual(
p.as_table(),
"""<tr><td colspan="2"><ul class="errorlist nonfield"><li>Generic validation error</li></ul></td></tr>
<tr><th><label for="id_first_name">First name:</label></th><td>
<input id="id_first_name" name="first_name" type="text" value="John" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td>
<input id="id_last_name" name="last_name" type="text" value="Lennon" /></td></tr>"""
)
def test_errorlist_override(self):
@python_2_unicode_compatible
class DivErrorList(ErrorList):
def __str__(self):
return self.as_divs()
def as_divs(self):
if not self:
return ''
return '<div class="errorlist">%s</div>' % ''.join(
'<div class="error">%s</div>' % force_text(e) for e in self)
class CommentForm(Form):
name = CharField(max_length=50, required=False)
email = EmailField()
comment = CharField()
data = dict(email='invalid')
f = CommentForm(data, auto_id=False, error_class=DivErrorList)
self.assertHTMLEqual(f.as_p(), """<p>Name: <input type="text" name="name" maxlength="50" /></p>
<div class="errorlist"><div class="error">Enter a valid email address.</div></div>
<p>Email: <input type="email" name="email" value="invalid" /></p>
<div class="errorlist"><div class="error">This field is required.</div></div>
<p>Comment: <input type="text" name="comment" /></p>""")
def test_baseform_repr(self):
"""
BaseForm.__repr__() should contain some basic information about the
form.
"""
p = Person()
self.assertEqual(repr(p), "<Person bound=False, valid=Unknown, fields=(first_name;last_name;birthday)>")
p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9'})
self.assertEqual(repr(p), "<Person bound=True, valid=Unknown, fields=(first_name;last_name;birthday)>")
p.is_valid()
self.assertEqual(repr(p), "<Person bound=True, valid=True, fields=(first_name;last_name;birthday)>")
p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': 'fakedate'})
p.is_valid()
self.assertEqual(repr(p), "<Person bound=True, valid=False, fields=(first_name;last_name;birthday)>")
def test_baseform_repr_dont_trigger_validation(self):
"""
BaseForm.__repr__() shouldn't trigger the form validation.
"""
p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': 'fakedate'})
repr(p)
self.assertRaises(AttributeError, lambda: p.cleaned_data)
self.assertFalse(p.is_valid())
self.assertEqual(p.cleaned_data, {'first_name': 'John', 'last_name': 'Lennon'})
def test_accessing_clean(self):
class UserForm(Form):
username = CharField(max_length=10)
password = CharField(widget=PasswordInput)
def clean(self):
data = self.cleaned_data
if not self.errors:
data['username'] = data['username'].lower()
return data
f = UserForm({'username': 'SirRobin', 'password': 'blue'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['username'], 'sirrobin')
def test_changing_cleaned_data_nothing_returned(self):
class UserForm(Form):
username = CharField(max_length=10)
password = CharField(widget=PasswordInput)
def clean(self):
self.cleaned_data['username'] = self.cleaned_data['username'].lower()
# don't return anything
f = UserForm({'username': 'SirRobin', 'password': 'blue'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['username'], 'sirrobin')
def test_changing_cleaned_data_in_clean(self):
class UserForm(Form):
username = CharField(max_length=10)
password = CharField(widget=PasswordInput)
def clean(self):
data = self.cleaned_data
# Return a different dict. We have not changed self.cleaned_data.
return {
'username': data['username'].lower(),
'password': 'this_is_not_a_secret',
}
f = UserForm({'username': 'SirRobin', 'password': 'blue'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['username'], 'sirrobin')
def test_multipart_encoded_form(self):
class FormWithoutFile(Form):
username = CharField()
class FormWithFile(Form):
username = CharField()
file = FileField()
class FormWithImage(Form):
image = ImageField()
self.assertFalse(FormWithoutFile().is_multipart())
self.assertTrue(FormWithFile().is_multipart())
self.assertTrue(FormWithImage().is_multipart())
def test_html_safe(self):
class SimpleForm(Form):
username = CharField()
form = SimpleForm()
self.assertTrue(hasattr(SimpleForm, '__html__'))
self.assertEqual(force_text(form), form.__html__())
self.assertTrue(hasattr(form['username'], '__html__'))
self.assertEqual(force_text(form['username']), form['username'].__html__())
| bsd-3-clause | 3,051,549,394,321,279,000 | 45.806794 | 122 | 0.588762 | false |
google/googleapps-message-recall | message_recall/lib/oauth2client/django_orm.py | 261 | 3833 | # Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OAuth 2.0 utilities for Django.
Utilities for using OAuth 2.0 in conjunction with
the Django datastore.
"""
__author__ = '[email protected] (Joe Gregorio)'
import oauth2client
import base64
import pickle
from django.db import models
from oauth2client.client import Storage as BaseStorage
class CredentialsField(models.Field):
__metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
if 'null' not in kwargs:
kwargs['null'] = True
super(CredentialsField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return "TextField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, oauth2client.client.Credentials):
return value
return pickle.loads(base64.b64decode(value))
def get_db_prep_value(self, value, connection, prepared=False):
if value is None:
return None
return base64.b64encode(pickle.dumps(value))
class FlowField(models.Field):
__metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
if 'null' not in kwargs:
kwargs['null'] = True
super(FlowField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return "TextField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, oauth2client.client.Flow):
return value
return pickle.loads(base64.b64decode(value))
def get_db_prep_value(self, value, connection, prepared=False):
if value is None:
return None
return base64.b64encode(pickle.dumps(value))
class Storage(BaseStorage):
"""Store and retrieve a single credential to and from
the datastore.
This Storage helper presumes the Credentials
  have been stored as a CredentialsField
on a db model class.
"""
def __init__(self, model_class, key_name, key_value, property_name):
"""Constructor for Storage.
Args:
      model_class: db.Model, model class
key_name: string, key name for the entity that has the credentials
key_value: string, key value for the entity that has the credentials
      property_name: string, name of the property that is a CredentialsField
"""
self.model_class = model_class
self.key_name = key_name
self.key_value = key_value
self.property_name = property_name
def locked_get(self):
"""Retrieve Credential from datastore.
Returns:
oauth2client.Credentials
"""
credential = None
query = {self.key_name: self.key_value}
entities = self.model_class.objects.filter(**query)
if len(entities) > 0:
credential = getattr(entities[0], self.property_name)
if credential and hasattr(credential, 'set_store'):
credential.set_store(self)
return credential
def locked_put(self, credentials):
"""Write a Credentials to the datastore.
Args:
credentials: Credentials, the credentials to store.
"""
args = {self.key_name: self.key_value}
entity = self.model_class(**args)
setattr(entity, self.property_name, credentials)
entity.save()
def locked_delete(self):
"""Delete Credentials from the datastore."""
query = {self.key_name: self.key_value}
    self.model_class.objects.filter(**query).delete()
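# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of how CredentialsField and Storage fit together, assuming
# a hypothetical Django model; `CredentialsModel` and `user` below are example
# names, not part of this library's API:
#
#   from django.contrib.auth.models import User
#   from oauth2client.django_orm import CredentialsField, Storage
#
#   class CredentialsModel(models.Model):
#       id = models.ForeignKey(User, primary_key=True)
#       credential = CredentialsField()
#
#   storage = Storage(CredentialsModel, 'id', user, 'credential')
#   credential = storage.get()    # delegates to locked_get() above
#   storage.put(credential)       # delegates to locked_put() above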
| apache-2.0 | -1,595,591,232,636,126,200 | 27.604478 | 80 | 0.694234 | false |
yfdyh000/olympia | apps/amo/cron.py | 14 | 8565 | import itertools
from datetime import datetime, timedelta
from subprocess import Popen, PIPE
from django.conf import settings
from django.utils import translation
from django.db import connection, transaction
import cronjobs
import commonware.log
import amo
from amo.utils import chunked
from amo.helpers import user_media_path
from bandwagon.models import Collection
from constants.base import VALID_STATUSES
from devhub.models import ActivityLog
from lib.es.utils import raise_if_reindex_in_progress
from sharing import SERVICES_LIST, LOCAL_SERVICES_LIST
from stats.models import AddonShareCount, Contribution
from . import tasks
log = commonware.log.getLogger('z.cron')
@cronjobs.register
def gc(test_result=True):
"""Site-wide garbage collections."""
def days_ago(days):
return datetime.today() - timedelta(days=days)
log.debug('Collecting data to delete')
logs = (ActivityLog.objects.filter(created__lt=days_ago(90))
.exclude(action__in=amo.LOG_KEEP).values_list('id', flat=True))
# Paypal only keeps retrying to verify transactions for up to 3 days. If we
# still have an unverified transaction after 6 days, we might as well get
# rid of it.
contributions_to_delete = (
Contribution.objects
.filter(transaction_id__isnull=True, created__lt=days_ago(6))
.values_list('id', flat=True))
collections_to_delete = (
Collection.objects.filter(created__lt=days_ago(2),
type=amo.COLLECTION_ANONYMOUS)
.values_list('id', flat=True))
for chunk in chunked(logs, 100):
tasks.delete_logs.delay(chunk)
for chunk in chunked(contributions_to_delete, 100):
tasks.delete_stale_contributions.delay(chunk)
for chunk in chunked(collections_to_delete, 100):
tasks.delete_anonymous_collections.delay(chunk)
# Incomplete addons cannot be deleted here because when an addon is
# rejected during a review it is marked as incomplete. See bug 670295.
log.debug('Cleaning up sharing services.')
service_names = [s.shortname for s in SERVICES_LIST]
# collect local service names
original_language = translation.get_language()
for language in settings.LANGUAGES:
translation.activate(language)
service_names.extend([unicode(s.shortname)
for s in LOCAL_SERVICES_LIST])
translation.activate(original_language)
AddonShareCount.objects.exclude(service__in=set(service_names)).delete()
log.debug('Cleaning up test results extraction cache.')
    # Guard against an unset or '/' MEDIA_ROOT, which would make the find
    # below destructive.
if settings.MEDIA_ROOT and settings.MEDIA_ROOT != '/':
cmd = ('find', settings.MEDIA_ROOT, '-maxdepth', '1', '-name',
'validate-*', '-mtime', '+7', '-type', 'd',
'-exec', 'rm', '-rf', "{}", ';')
output = Popen(cmd, stdout=PIPE).communicate()[0]
for line in output.split("\n"):
log.debug(line)
else:
log.warning('MEDIA_ROOT not defined.')
if user_media_path('collection_icons'):
log.debug('Cleaning up uncompressed icons.')
cmd = ('find', user_media_path('collection_icons'),
'-name', '*__unconverted', '-mtime', '+1', '-type', 'f',
'-exec', 'rm', '{}', ';')
output = Popen(cmd, stdout=PIPE).communicate()[0]
for line in output.split("\n"):
log.debug(line)
USERPICS_PATH = user_media_path('userpics')
if USERPICS_PATH:
log.debug('Cleaning up uncompressed userpics.')
cmd = ('find', USERPICS_PATH,
'-name', '*__unconverted', '-mtime', '+1', '-type', 'f',
'-exec', 'rm', '{}', ';')
output = Popen(cmd, stdout=PIPE).communicate()[0]
for line in output.split("\n"):
log.debug(line)
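# A minimal sketch of the `chunked` helper used above (imported from
# amo.utils); this is an assumption based on how it is called here, not a
# copy of the real implementation:
#
#   def chunked(seq, n):
#       """Yield successive n-sized slices from seq."""
#       for i in range(0, len(seq), n):
#           yield seq[i:i + n]
#
#   list(chunked([1, 2, 3, 4, 5], 2))  # -> [[1, 2], [3, 4], [5]]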
@cronjobs.register
def category_totals():
"""
Update category counts for sidebar navigation.
"""
log.debug('Starting category counts update...')
p = ",".join(['%s'] * len(VALID_STATUSES))
cursor = connection.cursor()
cursor.execute("""
UPDATE categories AS t INNER JOIN (
SELECT at.category_id, COUNT(DISTINCT Addon.id) AS ct
FROM addons AS Addon
INNER JOIN versions AS Version ON (Addon.id = Version.addon_id)
INNER JOIN applications_versions AS av ON (av.version_id = Version.id)
INNER JOIN addons_categories AS at ON (at.addon_id = Addon.id)
INNER JOIN files AS File ON (Version.id = File.version_id
AND File.status IN (%s))
WHERE Addon.status IN (%s) AND Addon.inactive = 0
GROUP BY at.category_id)
AS j ON (t.id = j.category_id)
SET t.count = j.ct
""" % (p, p), VALID_STATUSES * 2)
transaction.commit_unless_managed()
@cronjobs.register
def collection_subscribers():
"""
Collection weekly and monthly subscriber counts.
"""
log.debug('Starting collection subscriber update...')
cursor = connection.cursor()
cursor.execute("""
UPDATE collections SET weekly_subscribers = 0, monthly_subscribers = 0
""")
cursor.execute("""
UPDATE collections AS c
INNER JOIN (
SELECT
COUNT(collection_id) AS count,
collection_id
FROM collection_subscriptions
WHERE created >= DATE_SUB(CURDATE(), INTERVAL 7 DAY)
GROUP BY collection_id
) AS weekly ON (c.id = weekly.collection_id)
INNER JOIN (
SELECT
COUNT(collection_id) AS count,
collection_id
FROM collection_subscriptions
WHERE created >= DATE_SUB(CURDATE(), INTERVAL 31 DAY)
GROUP BY collection_id
) AS monthly ON (c.id = monthly.collection_id)
SET c.weekly_subscribers = weekly.count,
c.monthly_subscribers = monthly.count
""")
transaction.commit_unless_managed()
@cronjobs.register
def unconfirmed():
"""
Delete user accounts that have not been confirmed for two weeks.
"""
log.debug("Removing user accounts that haven't been confirmed "
"for two weeks...")
cursor = connection.cursor()
cursor.execute("""
DELETE users
FROM users
        LEFT JOIN addons_users ON users.id = addons_users.user_id
LEFT JOIN addons_collections ON users.id=addons_collections.user_id
LEFT JOIN collections_users ON users.id=collections_users.user_id
WHERE users.created < DATE_SUB(CURDATE(), INTERVAL 2 WEEK)
AND users.confirmationcode != ''
AND addons_users.user_id IS NULL
AND addons_collections.user_id IS NULL
AND collections_users.user_id IS NULL
""")
transaction.commit_unless_managed()
@cronjobs.register
def share_count_totals():
"""
Sum share counts for each addon & service.
"""
cursor = connection.cursor()
cursor.execute("""
REPLACE INTO stats_share_counts_totals (addon_id, service, count)
(SELECT addon_id, service, SUM(count)
FROM stats_share_counts
RIGHT JOIN addons ON addon_id = addons.id
WHERE service IN (%s)
GROUP BY addon_id, service)
""" % ','.join(['%s'] * len(SERVICES_LIST)),
[s.shortname for s in SERVICES_LIST])
transaction.commit_unless_managed()
@cronjobs.register
def weekly_downloads():
"""
Update 7-day add-on download counts.
"""
raise_if_reindex_in_progress('amo')
cursor = connection.cursor()
cursor.execute("""
SELECT addon_id, SUM(count) AS weekly_count
FROM download_counts
WHERE `date` >= DATE_SUB(CURDATE(), INTERVAL 7 DAY)
GROUP BY addon_id
ORDER BY addon_id""")
counts = cursor.fetchall()
addon_ids = [r[0] for r in counts]
if not addon_ids:
return
cursor.execute("""
SELECT id, 0
FROM addons
WHERE id NOT IN %s""", (addon_ids,))
counts += cursor.fetchall()
cursor.execute("""
CREATE TEMPORARY TABLE tmp_wd
(addon_id INT PRIMARY KEY, count INT)""")
cursor.execute('INSERT INTO tmp_wd VALUES %s' %
','.join(['(%s,%s)'] * len(counts)),
list(itertools.chain(*counts)))
cursor.execute("""
UPDATE addons INNER JOIN tmp_wd
ON addons.id = tmp_wd.addon_id
SET weeklydownloads = tmp_wd.count""")
cursor.execute("DROP TABLE IF EXISTS tmp_wd")
transaction.commit_unless_managed()
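# Note on weekly_downloads() above: the counts are staged in a TEMPORARY
# table and applied with a single UPDATE ... JOIN, which keeps the write to
# `addons` to one statement instead of one UPDATE per add-on.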
| bsd-3-clause | 3,741,104,273,846,964,000 | 33.817073 | 79 | 0.618447 | false |
wavefrontHQ/python-client | wavefront_api_client/models/module_layer.py | 1 | 2765 | # coding: utf-8
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ModuleLayer(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""ModuleLayer - a model defined in Swagger""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ModuleLayer, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ModuleLayer):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
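# --- Illustrative usage (not part of the generated client) ---
# ModuleLayer declares no properties, so the generated helpers reduce to
# trivial output; a quick sanity check, assuming the import path used by
# this package:
#
#   from wavefront_api_client.models.module_layer import ModuleLayer
#
#   layer = ModuleLayer()
#   layer.to_dict()          # -> {}
#   layer.to_str()           # -> '{}'
#   layer == ModuleLayer()   # -> True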
| apache-2.0 | 9,094,145,418,465,221,000 | 30.781609 | 409 | 0.56528 | false |
tsabi/Odoo-tsabi-fixes | addons/l10n_bo/__openerp__.py | 114 | 1779 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012 Cubic ERP - Teradata SAC (<http://cubicerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Bolivia Localization Chart Account",
"version": "1.0",
"description": """
Bolivian accounting chart and tax localization.
Plan contable boliviano e impuestos de acuerdo a disposiciones vigentes
""",
"author": "Cubic ERP",
"website": "http://cubicERP.com",
"category": "Localization/Account Charts",
"depends": [
"account_chart",
],
"data":[
"account_tax_code.xml",
"l10n_bo_chart.xml",
"account_tax.xml",
"l10n_bo_wizard.xml",
],
"demo_xml": [
],
"data": [
],
"active": False,
"installable": True,
"certificate" : "",
'images': ['images/config_chart_l10n_bo.jpeg','images/l10n_bo_chart.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 8,123,427,026,190,797,000 | 33.192308 | 79 | 0.598089 | false |
alexgorban/models | research/vid2depth/model.py | 5 | 16378 | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Build model for inference or training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import nets
from ops import icp_grad # pylint: disable=unused-import
from ops.icp_op import icp
import project
import reader
import tensorflow as tf
import util
gfile = tf.gfile
slim = tf.contrib.slim
NUM_SCALES = 4
class Model(object):
"""Model code from SfMLearner."""
def __init__(self,
data_dir=None,
is_training=True,
learning_rate=0.0002,
beta1=0.9,
reconstr_weight=0.85,
smooth_weight=0.05,
ssim_weight=0.15,
icp_weight=0.0,
batch_size=4,
img_height=128,
img_width=416,
seq_length=3,
legacy_mode=False):
self.data_dir = data_dir
self.is_training = is_training
self.learning_rate = learning_rate
self.reconstr_weight = reconstr_weight
self.smooth_weight = smooth_weight
self.ssim_weight = ssim_weight
self.icp_weight = icp_weight
self.beta1 = beta1
self.batch_size = batch_size
self.img_height = img_height
self.img_width = img_width
self.seq_length = seq_length
self.legacy_mode = legacy_mode
logging.info('data_dir: %s', data_dir)
logging.info('learning_rate: %s', learning_rate)
logging.info('beta1: %s', beta1)
logging.info('smooth_weight: %s', smooth_weight)
logging.info('ssim_weight: %s', ssim_weight)
logging.info('icp_weight: %s', icp_weight)
logging.info('batch_size: %s', batch_size)
logging.info('img_height: %s', img_height)
logging.info('img_width: %s', img_width)
logging.info('seq_length: %s', seq_length)
logging.info('legacy_mode: %s', legacy_mode)
if self.is_training:
self.reader = reader.DataReader(self.data_dir, self.batch_size,
self.img_height, self.img_width,
self.seq_length, NUM_SCALES)
self.build_train_graph()
else:
self.build_depth_test_graph()
self.build_egomotion_test_graph()
# At this point, the model is ready. Print some info on model params.
util.count_parameters()
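  # Typical construction (illustrative): Model(data_dir=..., is_training=True)
  # builds the full training graph via build_train_graph(), while
  # Model(is_training=False) builds only the placeholder-fed depth and
  # egomotion test graphs.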
def build_train_graph(self):
self.build_inference_for_training()
self.build_loss()
self.build_train_op()
self.build_summaries()
def build_inference_for_training(self):
"""Invokes depth and ego-motion networks and computes clouds if needed."""
(self.image_stack, self.intrinsic_mat, self.intrinsic_mat_inv) = (
self.reader.read_data())
with tf.name_scope('egomotion_prediction'):
self.egomotion, _ = nets.egomotion_net(self.image_stack, is_training=True,
legacy_mode=self.legacy_mode)
with tf.variable_scope('depth_prediction'):
# Organized by ...[i][scale]. Note that the order is flipped in
# variables in build_loss() below.
self.disp = {}
self.depth = {}
if self.icp_weight > 0:
self.cloud = {}
for i in range(self.seq_length):
image = self.image_stack[:, :, :, 3 * i:3 * (i + 1)]
multiscale_disps_i, _ = nets.disp_net(image, is_training=True)
multiscale_depths_i = [1.0 / d for d in multiscale_disps_i]
self.disp[i] = multiscale_disps_i
self.depth[i] = multiscale_depths_i
if self.icp_weight > 0:
multiscale_clouds_i = [
project.get_cloud(d,
self.intrinsic_mat_inv[:, s, :, :],
name='cloud%d_%d' % (s, i))
for (s, d) in enumerate(multiscale_depths_i)
]
self.cloud[i] = multiscale_clouds_i
# Reuse the same depth graph for all images.
tf.get_variable_scope().reuse_variables()
logging.info('disp: %s', util.info(self.disp))
def build_loss(self):
"""Adds ops for computing loss."""
with tf.name_scope('compute_loss'):
self.reconstr_loss = 0
self.smooth_loss = 0
self.ssim_loss = 0
self.icp_transform_loss = 0
self.icp_residual_loss = 0
# self.images is organized by ...[scale][B, h, w, seq_len * 3].
self.images = [{} for _ in range(NUM_SCALES)]
# Following nested lists are organized by ...[scale][source-target].
self.warped_image = [{} for _ in range(NUM_SCALES)]
self.warp_mask = [{} for _ in range(NUM_SCALES)]
self.warp_error = [{} for _ in range(NUM_SCALES)]
self.ssim_error = [{} for _ in range(NUM_SCALES)]
self.icp_transform = [{} for _ in range(NUM_SCALES)]
self.icp_residual = [{} for _ in range(NUM_SCALES)]
self.middle_frame_index = util.get_seq_middle(self.seq_length)
# Compute losses at each scale.
for s in range(NUM_SCALES):
# Scale image stack.
height_s = int(self.img_height / (2**s))
width_s = int(self.img_width / (2**s))
self.images[s] = tf.image.resize_area(self.image_stack,
[height_s, width_s])
# Smoothness.
if self.smooth_weight > 0:
for i in range(self.seq_length):
# In legacy mode, use the depth map from the middle frame only.
if not self.legacy_mode or i == self.middle_frame_index:
self.smooth_loss += 1.0 / (2**s) * self.depth_smoothness(
self.disp[i][s], self.images[s][:, :, :, 3 * i:3 * (i + 1)])
for i in range(self.seq_length):
for j in range(self.seq_length):
# Only consider adjacent frames.
if i == j or abs(i - j) != 1:
continue
# In legacy mode, only consider the middle frame as target.
if self.legacy_mode and j != self.middle_frame_index:
continue
source = self.images[s][:, :, :, 3 * i:3 * (i + 1)]
target = self.images[s][:, :, :, 3 * j:3 * (j + 1)]
target_depth = self.depth[j][s]
key = '%d-%d' % (i, j)
# Extract ego-motion from i to j
egomotion_index = min(i, j)
egomotion_mult = 1
if i > j:
            # Need to invert the egomotion when going backward in the sequence.
egomotion_mult *= -1
          # For compatibility with SfMLearner, interpret all egomotion vectors
# as pointing toward the middle frame. Note that unlike SfMLearner,
# each vector captures the motion to/from its next frame, and not
# the center frame. Although with seq_length == 3, there is no
# difference.
if self.legacy_mode:
if egomotion_index >= self.middle_frame_index:
egomotion_mult *= -1
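          # Worked example (seq_length == 3, legacy_mode == False): the pair
          # (i=0, j=1) uses +egomotion[:, 0], its reverse (i=1, j=0) uses
          # -egomotion[:, 0], and likewise (i=1, j=2) / (i=2, j=1) use
          # +/- egomotion[:, 1].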
egomotion = egomotion_mult * self.egomotion[:, egomotion_index, :]
# Inverse warp the source image to the target image frame for
# photometric consistency loss.
self.warped_image[s][key], self.warp_mask[s][key] = (
project.inverse_warp(source,
target_depth,
egomotion,
self.intrinsic_mat[:, s, :, :],
self.intrinsic_mat_inv[:, s, :, :]))
# Reconstruction loss.
self.warp_error[s][key] = tf.abs(self.warped_image[s][key] - target)
self.reconstr_loss += tf.reduce_mean(
self.warp_error[s][key] * self.warp_mask[s][key])
# SSIM.
if self.ssim_weight > 0:
self.ssim_error[s][key] = self.ssim(self.warped_image[s][key],
target)
# TODO(rezama): This should be min_pool2d().
ssim_mask = slim.avg_pool2d(self.warp_mask[s][key], 3, 1, 'VALID')
self.ssim_loss += tf.reduce_mean(
self.ssim_error[s][key] * ssim_mask)
# 3D loss.
if self.icp_weight > 0:
cloud_a = self.cloud[j][s]
cloud_b = self.cloud[i][s]
self.icp_transform[s][key], self.icp_residual[s][key] = icp(
cloud_a, egomotion, cloud_b)
self.icp_transform_loss += 1.0 / (2**s) * tf.reduce_mean(
tf.abs(self.icp_transform[s][key]))
self.icp_residual_loss += 1.0 / (2**s) * tf.reduce_mean(
tf.abs(self.icp_residual[s][key]))
self.total_loss = self.reconstr_weight * self.reconstr_loss
if self.smooth_weight > 0:
self.total_loss += self.smooth_weight * self.smooth_loss
if self.ssim_weight > 0:
self.total_loss += self.ssim_weight * self.ssim_loss
if self.icp_weight > 0:
self.total_loss += self.icp_weight * (self.icp_transform_loss +
self.icp_residual_loss)
def gradient_x(self, img):
return img[:, :, :-1, :] - img[:, :, 1:, :]
def gradient_y(self, img):
return img[:, :-1, :, :] - img[:, 1:, :, :]
def depth_smoothness(self, depth, img):
"""Computes image-aware depth smoothness loss."""
depth_dx = self.gradient_x(depth)
depth_dy = self.gradient_y(depth)
image_dx = self.gradient_x(img)
image_dy = self.gradient_y(img)
weights_x = tf.exp(-tf.reduce_mean(tf.abs(image_dx), 3, keepdims=True))
weights_y = tf.exp(-tf.reduce_mean(tf.abs(image_dy), 3, keepdims=True))
smoothness_x = depth_dx * weights_x
smoothness_y = depth_dy * weights_y
return tf.reduce_mean(abs(smoothness_x)) + tf.reduce_mean(abs(smoothness_y))
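  # The exp(-|dI|) weights above make the smoothness term edge-aware: depth
  # gradients are penalized strongly in flat image regions and only weakly
  # across image edges, where depth discontinuities are expected.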
def ssim(self, x, y):
"""Computes a differentiable structured image similarity measure."""
c1 = 0.01**2
c2 = 0.03**2
mu_x = slim.avg_pool2d(x, 3, 1, 'VALID')
mu_y = slim.avg_pool2d(y, 3, 1, 'VALID')
sigma_x = slim.avg_pool2d(x**2, 3, 1, 'VALID') - mu_x**2
sigma_y = slim.avg_pool2d(y**2, 3, 1, 'VALID') - mu_y**2
sigma_xy = slim.avg_pool2d(x * y, 3, 1, 'VALID') - mu_x * mu_y
ssim_n = (2 * mu_x * mu_y + c1) * (2 * sigma_xy + c2)
ssim_d = (mu_x**2 + mu_y**2 + c1) * (sigma_x + sigma_y + c2)
ssim = ssim_n / ssim_d
return tf.clip_by_value((1 - ssim) / 2, 0, 1)
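  # c1 and c2 are the standard SSIM stabilizing constants (K1*L)^2 and
  # (K2*L)^2 with K1=0.01, K2=0.03 and dynamic range L=1 for inputs in
  # [0, 1]; the final clip maps SSIM in [-1, 1] to a loss in [0, 1] via
  # (1 - ssim) / 2.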
def build_train_op(self):
with tf.name_scope('train_op'):
optim = tf.train.AdamOptimizer(self.learning_rate, self.beta1)
self.train_op = slim.learning.create_train_op(self.total_loss, optim)
self.global_step = tf.Variable(0, name='global_step', trainable=False)
self.incr_global_step = tf.assign(self.global_step, self.global_step + 1)
def build_summaries(self):
"""Adds scalar and image summaries for TensorBoard."""
tf.summary.scalar('total_loss', self.total_loss)
tf.summary.scalar('reconstr_loss', self.reconstr_loss)
if self.smooth_weight > 0:
tf.summary.scalar('smooth_loss', self.smooth_loss)
if self.ssim_weight > 0:
tf.summary.scalar('ssim_loss', self.ssim_loss)
if self.icp_weight > 0:
tf.summary.scalar('icp_transform_loss', self.icp_transform_loss)
tf.summary.scalar('icp_residual_loss', self.icp_residual_loss)
for i in range(self.seq_length - 1):
tf.summary.histogram('tx%d' % i, self.egomotion[:, i, 0])
tf.summary.histogram('ty%d' % i, self.egomotion[:, i, 1])
tf.summary.histogram('tz%d' % i, self.egomotion[:, i, 2])
tf.summary.histogram('rx%d' % i, self.egomotion[:, i, 3])
tf.summary.histogram('ry%d' % i, self.egomotion[:, i, 4])
tf.summary.histogram('rz%d' % i, self.egomotion[:, i, 5])
for s in range(NUM_SCALES):
for i in range(self.seq_length):
tf.summary.image('scale%d_image%d' % (s, i),
self.images[s][:, :, :, 3 * i:3 * (i + 1)])
if i in self.depth:
tf.summary.histogram('scale%d_depth%d' % (s, i), self.depth[i][s])
tf.summary.histogram('scale%d_disp%d' % (s, i), self.disp[i][s])
tf.summary.image('scale%d_disparity%d' % (s, i), self.disp[i][s])
for key in self.warped_image[s]:
tf.summary.image('scale%d_warped_image%s' % (s, key),
self.warped_image[s][key])
tf.summary.image('scale%d_warp_mask%s' % (s, key),
self.warp_mask[s][key])
tf.summary.image('scale%d_warp_error%s' % (s, key),
self.warp_error[s][key])
if self.ssim_weight > 0:
tf.summary.image('scale%d_ssim_error%s' % (s, key),
self.ssim_error[s][key])
if self.icp_weight > 0:
tf.summary.image('scale%d_icp_residual%s' % (s, key),
self.icp_residual[s][key])
transform = self.icp_transform[s][key]
tf.summary.histogram('scale%d_icp_tx%s' % (s, key), transform[:, 0])
tf.summary.histogram('scale%d_icp_ty%s' % (s, key), transform[:, 1])
tf.summary.histogram('scale%d_icp_tz%s' % (s, key), transform[:, 2])
tf.summary.histogram('scale%d_icp_rx%s' % (s, key), transform[:, 3])
tf.summary.histogram('scale%d_icp_ry%s' % (s, key), transform[:, 4])
tf.summary.histogram('scale%d_icp_rz%s' % (s, key), transform[:, 5])
def build_depth_test_graph(self):
"""Builds depth model reading from placeholders."""
with tf.name_scope('depth_prediction'):
with tf.variable_scope('depth_prediction'):
input_uint8 = tf.placeholder(
tf.uint8, [self.batch_size, self.img_height, self.img_width, 3],
name='raw_input')
input_float = tf.image.convert_image_dtype(input_uint8, tf.float32)
# TODO(rezama): Retrain published model with batchnorm params and set
# is_training to False.
est_disp, _ = nets.disp_net(input_float, is_training=True)
est_depth = 1.0 / est_disp[0]
self.inputs_depth = input_uint8
self.est_depth = est_depth
def build_egomotion_test_graph(self):
"""Builds egomotion model reading from placeholders."""
input_uint8 = tf.placeholder(
tf.uint8,
[self.batch_size, self.img_height, self.img_width * self.seq_length, 3],
name='raw_input')
input_float = tf.image.convert_image_dtype(input_uint8, tf.float32)
image_seq = input_float
image_stack = self.unpack_image_batches(image_seq)
with tf.name_scope('egomotion_prediction'):
# TODO(rezama): Retrain published model with batchnorm params and set
# is_training to False.
egomotion, _ = nets.egomotion_net(image_stack, is_training=True,
legacy_mode=self.legacy_mode)
self.inputs_egomotion = input_uint8
self.est_egomotion = egomotion
def unpack_image_batches(self, image_seq):
"""[B, h, w * seq_length, 3] -> [B, h, w, 3 * seq_length]."""
with tf.name_scope('unpack_images'):
image_list = [
image_seq[:, :, i * self.img_width:(i + 1) * self.img_width, :]
for i in range(self.seq_length)
]
image_stack = tf.concat(image_list, axis=3)
image_stack.set_shape([
self.batch_size, self.img_height, self.img_width, self.seq_length * 3
])
return image_stack
def inference(self, inputs, sess, mode):
"""Runs depth or egomotion inference from placeholders."""
fetches = {}
if mode == 'depth':
fetches['depth'] = self.est_depth
inputs_ph = self.inputs_depth
if mode == 'egomotion':
fetches['egomotion'] = self.est_egomotion
inputs_ph = self.inputs_egomotion
results = sess.run(fetches, feed_dict={inputs_ph: inputs})
return results
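# Illustrative sketch (not part of the original model): a NumPy-only demo
# of the layout change performed by unpack_image_batches above, where a
# horizontally concatenated sequence [B, h, w * seq_length, 3] becomes a
# channel-stacked tensor [B, h, w, 3 * seq_length]. Shapes are hypothetical.
def _unpack_demo():
    import numpy as np  # local import keeps the sketch self-contained
    batch, height, width, seq_length = 2, 4, 5, 3
    image_seq = np.random.rand(batch, height, width * seq_length, 3)
    image_list = [image_seq[:, :, i * width:(i + 1) * width, :]
                  for i in range(seq_length)]
    image_stack = np.concatenate(image_list, axis=3)
    assert image_stack.shape == (batch, height, width, 3 * seq_length)
    return image_stack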
| apache-2.0 | -378,245,796,341,218,940 | 41.986877 | 80 | 0.572231 | false |
lastweek/gem5 | src/arch/x86/isa/insts/simd64/integer/arithmetic/addition.py | 91 | 4737 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop PADDB_MMX_MMX {
maddi mmx, mmx, mmxm, size=1, ext=0
};
def macroop PADDB_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
maddi mmx, mmx, ufp1, size=1, ext=0
};
def macroop PADDB_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
maddi mmx, mmx, ufp1, size=1, ext=0
};
def macroop PADDW_MMX_MMX {
maddi mmx, mmx, mmxm, size=2, ext=0
};
def macroop PADDW_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
maddi mmx, mmx, ufp1, size=2, ext=0
};
def macroop PADDW_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
maddi mmx, mmx, ufp1, size=2, ext=0
};
def macroop PADDD_MMX_MMX {
maddi mmx, mmx, mmxm, size=4, ext=0
};
def macroop PADDD_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
maddi mmx, mmx, ufp1, size=4, ext=0
};
def macroop PADDD_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
maddi mmx, mmx, ufp1, size=4, ext=0
};
def macroop PADDQ_MMX_MMX {
maddi mmx, mmx, mmxm, size=8, ext=0
};
def macroop PADDQ_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
maddi mmx, mmx, ufp1, size=8, ext=0
};
def macroop PADDQ_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
maddi mmx, mmx, ufp1, size=8, ext=0
};
def macroop PADDSB_MMX_MMX {
maddi mmx, mmx, mmxm, size=1, ext = "2 |" + Signed
};
def macroop PADDSB_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
maddi mmx, mmx, ufp1, size=1, ext = "2 |" + Signed
};
def macroop PADDSB_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
maddi mmx, mmx, ufp1, size=1, ext = "2 |" + Signed
};
def macroop PADDSW_MMX_MMX {
maddi mmx, mmx, mmxm, size=2, ext = "2 |" + Signed
};
def macroop PADDSW_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
maddi mmx, mmx, ufp1, size=2, ext = "2 |" + Signed
};
def macroop PADDSW_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
maddi mmx, mmx, ufp1, size=2, ext = "2 |" + Signed
};
def macroop PADDUSB_MMX_MMX {
maddi mmx, mmx, mmxm, size=1, ext=2
};
def macroop PADDUSB_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
maddi mmx, mmx, ufp1, size=1, ext=2
};
def macroop PADDUSB_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
maddi mmx, mmx, ufp1, size=1, ext=2
};
def macroop PADDUSW_MMX_MMX {
maddi mmx, mmx, mmxm, size=2, ext=2
};
def macroop PADDUSW_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
maddi mmx, mmx, ufp1, size=2, ext=2
};
def macroop PADDUSW_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
maddi mmx, mmx, ufp1, size=2, ext=2
};
'''
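# Illustrative sketch (not part of the gem5 source): the ext flags above
# select saturating adds. PADDSB clamps each signed-byte sum to
# [-128, 127]; PADDUSB clamps each unsigned-byte sum to [0, 255]. A
# plain-Python model of that per-element behaviour:
def _saturating_add_demo():
    def padd_signed_byte(a, b):
        return max(-128, min(127, a + b))
    def padd_unsigned_byte(a, b):
        return max(0, min(255, a + b))
    assert padd_signed_byte(100, 100) == 127
    assert padd_signed_byte(-100, -100) == -128
    assert padd_unsigned_byte(200, 100) == 255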
| bsd-3-clause | -7,121,462,339,753,888,000 | 28.981013 | 72 | 0.698332 | false |
simon-pepin/scikit-learn | sklearn/semi_supervised/label_propagation.py | 128 | 15312 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. Running them can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
  The algorithm tries to learn distributions of labels over the dataset. In the
  "Hard Clamp" mode, the ground truth labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
  implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propagation algorithm,
    but uses an affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
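# Illustrative sketch (not part of scikit-learn): the clamped update used
# inside BaseLabelPropagation.fit, in plain NumPy. Each row's graph update
# is scaled by its clamp weight and the static (1 - alpha) share of the
# original labels is added back; fit row-normalizes the result once the
# iteration converges. The toy values below are hypothetical.
def _propagation_step_demo():
    graph = np.array([[0.0, 1.0], [1.0, 0.0]])     # toy affinity matrix
    dist = np.array([[1.0, 0.0], [0.5, 0.5]])      # current distributions
    y_static = np.array([[0.8, 0.0], [0.0, 0.0]])  # (1 - alpha) * labels
    clamp = np.array([[0.2], [1.0]])               # per-row clamp weights
    new_dist = clamp * graph.dot(dist) + y_static
    new_dist /= new_dist.sum(axis=1, keepdims=True)
    return new_dist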
| bsd-3-clause | -1,563,828,953,288,922,000 | 33.10245 | 79 | 0.621016 | false |
JuliaSprenger/python-neo | neo/test/iotest/test_brainwaresrcio.py | 3 | 13068 | """
Tests of neo.io.brainwaresrcio
"""
import logging
import os.path
import unittest
import numpy as np
import quantities as pq
from neo.core import (Block, Event,
Group, Segment, SpikeTrain)
from neo.io import BrainwareSrcIO, brainwaresrcio
from neo.test.iotest.common_io_test import BaseTestIO
from neo.test.tools import (assert_same_sub_schema,
assert_neo_object_is_compliant)
from neo.test.iotest.tools import create_generic_reader
FILES_TO_TEST = ['block_300ms_4rep_1clust_part_ch1.src',
'block_500ms_5rep_empty_fullclust_ch1.src',
'block_500ms_5rep_empty_partclust_ch1.src',
'interleaved_500ms_5rep_ch2.src',
'interleaved_500ms_5rep_nospikes_ch1.src',
'interleaved_500ms_7rep_noclust_ch1.src',
'long_170s_1rep_1clust_ch2.src',
'multi_500ms_mulitrep_ch1.src',
'random_500ms_12rep_noclust_part_ch2.src',
'sequence_500ms_5rep_ch2.src']
FILES_TO_COMPARE = ['block_300ms_4rep_1clust_part_ch1',
'block_500ms_5rep_empty_fullclust_ch1',
'block_500ms_5rep_empty_partclust_ch1',
'interleaved_500ms_5rep_ch2',
'interleaved_500ms_5rep_nospikes_ch1',
'interleaved_500ms_7rep_noclust_ch1',
'',
'multi_500ms_mulitrep_ch1',
'random_500ms_12rep_noclust_part_ch2',
'sequence_500ms_5rep_ch2']
def proc_src(filename):
'''Load an src file that has already been processed by the official matlab
file converter. That matlab data is saved to an m-file, which is then
converted to a numpy '.npz' file. This numpy file is the file actually
loaded. This function converts it to a neo block and returns the block.
This block can be compared to the block produced by BrainwareSrcIO to
make sure BrainwareSrcIO is working properly
block = proc_src(filename)
filename: The file name of the numpy file to load. It should end with
'*_src_py?.npz'. This will be converted to a neo 'file_origin' property
with the value '*.src', so the filename to compare should fit that pattern.
'py?' should be 'py2' for the python 2 version of the numpy file or 'py3'
for the python 3 version of the numpy file.
example: filename = 'file1_src_py2.npz'
src file name = 'file1.src'
'''
with np.load(filename, allow_pickle=True) as srcobj:
srcfile = list(srcobj.items())[0][1]
filename = os.path.basename(filename[:-12] + '.src')
block = Block(file_origin=filename)
NChannels = srcfile['NChannels'][0, 0][0, 0]
side = str(srcfile['side'][0, 0][0])
ADperiod = srcfile['ADperiod'][0, 0][0, 0]
comm_seg = proc_src_comments(srcfile, filename)
block.segments.append(comm_seg)
all_units = proc_src_units(srcfile, filename)
block.groups.extend(all_units)
for rep in srcfile['sets'][0, 0].flatten():
proc_src_condition(rep, filename, ADperiod, side, block)
block.create_many_to_one_relationship()
return block
def proc_src_comments(srcfile, filename):
    '''Get the comments in an src file that has been processed by the official
matlab function. See proc_src for details'''
comm_seg = Segment(name='Comments', file_origin=filename)
commentarray = srcfile['comments'].flatten()[0]
senders = [res[0] for res in commentarray['sender'].flatten()]
texts = [res[0] for res in commentarray['text'].flatten()]
timeStamps = [res[0, 0] for res in commentarray['timeStamp'].flatten()]
timeStamps = np.array(timeStamps, dtype=np.float32)
t_start = timeStamps.min()
timeStamps = pq.Quantity(timeStamps - t_start, units=pq.d).rescale(pq.s)
texts = np.array(texts, dtype='U')
senders = np.array(senders, dtype='S')
t_start = brainwaresrcio.convert_brainwaresrc_timestamp(t_start.tolist())
comments = Event(times=timeStamps, labels=texts, senders=senders)
comm_seg.events = [comments]
comm_seg.rec_datetime = t_start
return comm_seg
def proc_src_units(srcfile, filename):
'''Get the units in an src file that has been processed by the official
matlab function. See proc_src for details'''
all_units = []
un_unit = Group(name='UnassignedSpikes', file_origin=filename,
elliptic=[], boundaries=[], timestamp=[], max_valid=[])
all_units.append(un_unit)
sortInfo = srcfile['sortInfo'][0, 0]
timeslice = sortInfo['timeslice'][0, 0]
maxValid = timeslice['maxValid'][0, 0]
cluster = timeslice['cluster'][0, 0]
if len(cluster):
maxValid = maxValid[0, 0]
elliptic = [res.flatten() for res in cluster['elliptic'].flatten()]
boundaries = [res.flatten() for res in cluster['boundaries'].flatten()]
fullclust = zip(elliptic, boundaries)
for ielliptic, iboundaries in fullclust:
unit = Group(file_origin=filename,
boundaries=[iboundaries],
elliptic=[ielliptic], timeStamp=[],
max_valid=[maxValid])
all_units.append(unit)
return all_units
def proc_src_condition(rep, filename, ADperiod, side, block):
'''Get the condition in a src file that has been processed by the official
matlab function. See proc_src for details'''
stim = rep['stim'].flatten()
params = [str(res[0]) for res in stim['paramName'][0].flatten()]
values = [res for res in stim['paramVal'][0].flatten()]
stim = dict(zip(params, values))
sweepLen = rep['sweepLen'][0, 0]
if not len(rep):
return
unassignedSpikes = rep['unassignedSpikes'].flatten()
if len(unassignedSpikes):
damaIndexes = [res[0, 0] for res in unassignedSpikes['damaIndex']]
timeStamps = [res[0, 0] for res in unassignedSpikes['timeStamp']]
spikeunit = [res.flatten() for res in unassignedSpikes['spikes']]
respWin = np.array([], dtype=np.int32)
trains = proc_src_condition_unit(spikeunit, sweepLen, side, ADperiod,
respWin, damaIndexes, timeStamps,
filename)
block.groups[0].spiketrains.extend(trains)
atrains = [trains]
else:
damaIndexes = []
timeStamps = []
atrains = []
clusters = rep['clusters'].flatten()
if len(clusters):
IdStrings = [res[0] for res in clusters['IdString']]
sweepLens = [res[0, 0] for res in clusters['sweepLen']]
respWins = [res.flatten() for res in clusters['respWin']]
spikeunits = []
for cluster in clusters['sweeps']:
if len(cluster):
spikes = [res.flatten() for res in
cluster['spikes'].flatten()]
else:
spikes = []
spikeunits.append(spikes)
else:
IdStrings = []
sweepLens = []
respWins = []
spikeunits = []
for unit, IdString in zip(block.groups[1:], IdStrings):
unit.name = str(IdString)
fullunit = zip(spikeunits, block.groups[1:], sweepLens, respWins)
for spikeunit, unit, sweepLen, respWin in fullunit:
trains = proc_src_condition_unit(spikeunit, sweepLen, side, ADperiod,
respWin, damaIndexes, timeStamps,
filename)
atrains.append(trains)
unit.spiketrains.extend(trains)
atrains = zip(*atrains)
for trains in atrains:
segment = Segment(file_origin=filename, feature_type=-1,
go_by_closest_unit_center=False,
include_unit_bounds=False, **stim)
block.segments.append(segment)
segment.spiketrains = trains
def proc_src_condition_unit(spikeunit, sweepLen, side, ADperiod, respWin,
damaIndexes, timeStamps, filename):
'''Get the unit in a condition in a src file that has been processed by
the official matlab function. See proc_src for details'''
if not damaIndexes:
damaIndexes = [0] * len(spikeunit)
timeStamps = [0] * len(spikeunit)
trains = []
for sweep, damaIndex, timeStamp in zip(spikeunit, damaIndexes,
timeStamps):
timeStamp = brainwaresrcio.convert_brainwaresrc_timestamp(timeStamp)
train = proc_src_condition_unit_repetition(sweep, damaIndex,
timeStamp, sweepLen,
side, ADperiod, respWin,
filename)
trains.append(train)
return trains
def proc_src_condition_unit_repetition(sweep, damaIndex, timeStamp, sweepLen,
side, ADperiod, respWin, filename):
    '''Get the repetition for a unit in a condition in a src file that has been
processed by the official matlab function. See proc_src for details'''
damaIndex = damaIndex.astype('int32')
if len(sweep):
times = np.array([res[0, 0] for res in sweep['time']])
shapes = np.concatenate([res.flatten()[np.newaxis][np.newaxis] for res
in sweep['shape']], axis=0)
trig2 = np.array([res[0, 0] for res in sweep['trig2']])
else:
times = np.array([])
shapes = np.array([[[]]])
trig2 = np.array([])
times = pq.Quantity(times, units=pq.ms, dtype=np.float32)
t_start = pq.Quantity(0, units=pq.ms, dtype=np.float32)
t_stop = pq.Quantity(sweepLen, units=pq.ms, dtype=np.float32)
trig2 = pq.Quantity(trig2, units=pq.ms, dtype=np.uint8)
waveforms = pq.Quantity(shapes, dtype=np.int8, units=pq.mV)
sampling_period = pq.Quantity(ADperiod, units=pq.us)
train = SpikeTrain(times=times, t_start=t_start, t_stop=t_stop,
trig2=trig2, dtype=np.float32, timestamp=timeStamp,
dama_index=damaIndex, side=side, copy=True,
respwin=respWin, waveforms=waveforms,
file_origin=filename)
train.annotations['side'] = side
train.sampling_period = sampling_period
return train
class BrainwareSrcIOTestCase(BaseTestIO, unittest.TestCase):
'''
Unit test testcase for neo.io.BrainwareSrcIO
'''
ioclass = BrainwareSrcIO
read_and_write_is_bijective = False
# These are the files it tries to read and test for compliance
files_to_test = FILES_TO_TEST
# these are reference files to compare to
files_to_compare = FILES_TO_COMPARE
# add the suffix
for i, fname in enumerate(files_to_compare):
if fname:
files_to_compare[i] += '_src_py3.npz'
# Will fetch from g-node if they don't already exist locally
# How does it know to do this before any of the other tests?
files_to_download = files_to_test + files_to_compare
def setUp(self):
super().setUp()
def test_reading_same(self):
for ioobj, path in self.iter_io_objects(return_path=True):
obj_reader_all = create_generic_reader(ioobj, readall=True)
obj_reader_base = create_generic_reader(ioobj, target=False)
obj_reader_next = create_generic_reader(ioobj, target='next_block')
obj_reader_single = create_generic_reader(ioobj)
obj_all = obj_reader_all()
obj_base = obj_reader_base()
obj_single = obj_reader_single()
obj_next = [obj_reader_next()]
while ioobj._isopen:
obj_next.append(obj_reader_next())
try:
assert_same_sub_schema(obj_all, obj_base)
assert_same_sub_schema(obj_all[0], obj_single)
assert_same_sub_schema(obj_all, obj_next)
except BaseException as exc:
exc.args += ('from ' + os.path.basename(path),)
raise
self.assertEqual(len(obj_all), len(obj_next))
def test_against_reference(self):
for filename, refname in zip(self.files_to_test,
self.files_to_compare):
if not refname:
continue
obj = self.read_file(filename=filename, readall=True)[0]
refobj = proc_src(self.get_filename_path(refname))
try:
assert_neo_object_is_compliant(obj)
assert_neo_object_is_compliant(refobj)
#assert_same_sub_schema(obj, refobj) # commented out until IO is adapted to use Group
except BaseException as exc:
exc.args += ('from ' + filename,)
raise
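# Illustrative sketch (not part of the original test suite) of the
# filename convention proc_src relies on: '<name>_src_py?.npz' maps back
# to '<name>.src'. The file name below is hypothetical.
def _reference_filename_demo():
    npz_name = 'file1_src_py3.npz'
    src_name = os.path.basename(npz_name[:-12] + '.src')
    assert src_name == 'file1.src'
    return src_name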
if __name__ == '__main__':
logger = logging.getLogger(BrainwareSrcIO.__module__ +
'.' +
BrainwareSrcIO.__name__)
logger.setLevel(100)
unittest.main()
| bsd-3-clause | -1,596,888,692,930,027,300 | 38.720365 | 102 | 0.595654 | false |
dennis-sheil/commandergenius | project/jni/python/src/Lib/test/test_heapq.py | 56 | 13195 | """Unittests for heapq."""
import random
import unittest
from test import test_support
import sys
# We do a bit of trickery here to be able to test both the C implementation
# and the Python implementation of the module.
# Make it impossible to import the C implementation anymore.
sys.modules['_heapq'] = 0
# We must also handle the case that heapq was imported before.
if 'heapq' in sys.modules:
del sys.modules['heapq']
# Now we can import the module and get the pure Python implementation.
import heapq as py_heapq
# Restore everything to normal.
del sys.modules['_heapq']
del sys.modules['heapq']
# This is now the module with the C implementation.
import heapq as c_heapq
class TestHeap(unittest.TestCase):
module = None
def test_push_pop(self):
# 1) Push 256 random numbers and pop them off, verifying all's OK.
heap = []
data = []
self.check_invariant(heap)
for i in range(256):
item = random.random()
data.append(item)
self.module.heappush(heap, item)
self.check_invariant(heap)
results = []
while heap:
item = self.module.heappop(heap)
self.check_invariant(heap)
results.append(item)
data_sorted = data[:]
data_sorted.sort()
self.assertEqual(data_sorted, results)
# 2) Check that the invariant holds for a sorted array
self.check_invariant(results)
self.assertRaises(TypeError, self.module.heappush, [])
try:
self.assertRaises(TypeError, self.module.heappush, None, None)
self.assertRaises(TypeError, self.module.heappop, None)
except AttributeError:
pass
def check_invariant(self, heap):
# Check the heap invariant.
for pos, item in enumerate(heap):
if pos: # pos 0 has no parent
parentpos = (pos-1) >> 1
self.assert_(heap[parentpos] <= item)
def test_heapify(self):
for size in range(30):
heap = [random.random() for dummy in range(size)]
self.module.heapify(heap)
self.check_invariant(heap)
self.assertRaises(TypeError, self.module.heapify, None)
def test_naive_nbest(self):
data = [random.randrange(2000) for i in range(1000)]
heap = []
for item in data:
self.module.heappush(heap, item)
if len(heap) > 10:
self.module.heappop(heap)
heap.sort()
self.assertEqual(heap, sorted(data)[-10:])
def heapiter(self, heap):
# An iterator returning a heap's elements, smallest-first.
try:
while 1:
yield self.module.heappop(heap)
except IndexError:
pass
def test_nbest(self):
# Less-naive "N-best" algorithm, much faster (if len(data) is big
# enough <wink>) than sorting all of data. However, if we had a max
# heap instead of a min heap, it could go faster still via
# heapify'ing all of data (linear time), then doing 10 heappops
# (10 log-time steps).
data = [random.randrange(2000) for i in range(1000)]
heap = data[:10]
self.module.heapify(heap)
for item in data[10:]:
if item > heap[0]: # this gets rarer the longer we run
self.module.heapreplace(heap, item)
self.assertEqual(list(self.heapiter(heap)), sorted(data)[-10:])
self.assertRaises(TypeError, self.module.heapreplace, None)
self.assertRaises(TypeError, self.module.heapreplace, None, None)
self.assertRaises(IndexError, self.module.heapreplace, [], None)
def test_nbest_with_pushpop(self):
data = [random.randrange(2000) for i in range(1000)]
heap = data[:10]
self.module.heapify(heap)
for item in data[10:]:
self.module.heappushpop(heap, item)
self.assertEqual(list(self.heapiter(heap)), sorted(data)[-10:])
self.assertEqual(self.module.heappushpop([], 'x'), 'x')
def test_heappushpop(self):
h = []
x = self.module.heappushpop(h, 10)
self.assertEqual((h, x), ([], 10))
h = [10]
x = self.module.heappushpop(h, 10.0)
self.assertEqual((h, x), ([10], 10.0))
self.assertEqual(type(h[0]), int)
self.assertEqual(type(x), float)
h = [10];
x = self.module.heappushpop(h, 9)
self.assertEqual((h, x), ([10], 9))
h = [10];
x = self.module.heappushpop(h, 11)
self.assertEqual((h, x), ([11], 10))
def test_heapsort(self):
# Exercise everything with repeated heapsort checks
for trial in xrange(100):
size = random.randrange(50)
data = [random.randrange(25) for i in range(size)]
if trial & 1: # Half of the time, use heapify
heap = data[:]
self.module.heapify(heap)
else: # The rest of the time, use heappush
heap = []
for item in data:
self.module.heappush(heap, item)
heap_sorted = [self.module.heappop(heap) for i in range(size)]
self.assertEqual(heap_sorted, sorted(data))
def test_merge(self):
inputs = []
for i in xrange(random.randrange(5)):
row = sorted(random.randrange(1000) for j in range(random.randrange(10)))
inputs.append(row)
self.assertEqual(sorted(chain(*inputs)), list(self.module.merge(*inputs)))
self.assertEqual(list(self.module.merge()), [])
def test_merge_stability(self):
class Int(int):
pass
inputs = [[], [], [], []]
for i in range(20000):
stream = random.randrange(4)
x = random.randrange(500)
obj = Int(x)
obj.pair = (x, stream)
inputs[stream].append(obj)
for stream in inputs:
stream.sort()
result = [i.pair for i in self.module.merge(*inputs)]
self.assertEqual(result, sorted(result))
def test_nsmallest(self):
data = [(random.randrange(2000), i) for i in range(1000)]
for f in (None, lambda x: x[0] * 547 % 2000):
for n in (0, 1, 2, 10, 100, 400, 999, 1000, 1100):
self.assertEqual(self.module.nsmallest(n, data), sorted(data)[:n])
self.assertEqual(self.module.nsmallest(n, data, key=f),
sorted(data, key=f)[:n])
def test_nlargest(self):
data = [(random.randrange(2000), i) for i in range(1000)]
for f in (None, lambda x: x[0] * 547 % 2000):
for n in (0, 1, 2, 10, 100, 400, 999, 1000, 1100):
self.assertEqual(self.module.nlargest(n, data),
sorted(data, reverse=True)[:n])
self.assertEqual(self.module.nlargest(n, data, key=f),
sorted(data, key=f, reverse=True)[:n])
class TestHeapPython(TestHeap):
module = py_heapq
class TestHeapC(TestHeap):
module = c_heapq
def test_comparison_operator(self):
# Issue 3501: Make sure heapq works with both __lt__ and __le__
def hsort(data, comp):
data = map(comp, data)
self.module.heapify(data)
return [self.module.heappop(data).x for i in range(len(data))]
class LT:
def __init__(self, x):
self.x = x
def __lt__(self, other):
return self.x > other.x
class LE:
def __init__(self, x):
self.x = x
def __le__(self, other):
return self.x >= other.x
data = [random.random() for i in range(100)]
target = sorted(data, reverse=True)
self.assertEqual(hsort(data, LT), target)
self.assertEqual(hsort(data, LE), target)
#==============================================================================
class LenOnly:
"Dummy sequence class defining __len__ but not __getitem__."
def __len__(self):
return 10
class GetOnly:
"Dummy sequence class defining __getitem__ but not __len__."
def __getitem__(self, ndx):
return 10
class CmpErr:
"Dummy element that always raises an error during comparison"
def __cmp__(self, other):
raise ZeroDivisionError
def R(seqn):
'Regular generator'
for i in seqn:
yield i
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def next(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def next(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class N:
'Iterator missing next()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def next(self):
3 // 0
class S:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def next(self):
raise StopIteration
from itertools import chain, imap
def L(seqn):
'Test multiple tiers of iterators'
return chain(imap(lambda x:x, R(Ig(G(seqn)))))
class TestErrorHandling(unittest.TestCase):
# only for C implementation
module = c_heapq
def test_non_sequence(self):
for f in (self.module.heapify, self.module.heappop):
self.assertRaises(TypeError, f, 10)
for f in (self.module.heappush, self.module.heapreplace,
self.module.nlargest, self.module.nsmallest):
self.assertRaises(TypeError, f, 10, 10)
def test_len_only(self):
for f in (self.module.heapify, self.module.heappop):
self.assertRaises(TypeError, f, LenOnly())
for f in (self.module.heappush, self.module.heapreplace):
self.assertRaises(TypeError, f, LenOnly(), 10)
for f in (self.module.nlargest, self.module.nsmallest):
self.assertRaises(TypeError, f, 2, LenOnly())
def test_get_only(self):
for f in (self.module.heapify, self.module.heappop):
self.assertRaises(TypeError, f, GetOnly())
for f in (self.module.heappush, self.module.heapreplace):
self.assertRaises(TypeError, f, GetOnly(), 10)
for f in (self.module.nlargest, self.module.nsmallest):
self.assertRaises(TypeError, f, 2, GetOnly())
    def test_cmp_err(self):
seq = [CmpErr(), CmpErr(), CmpErr()]
for f in (self.module.heapify, self.module.heappop):
self.assertRaises(ZeroDivisionError, f, seq)
for f in (self.module.heappush, self.module.heapreplace):
self.assertRaises(ZeroDivisionError, f, seq, 10)
for f in (self.module.nlargest, self.module.nsmallest):
self.assertRaises(ZeroDivisionError, f, 2, seq)
def test_arg_parsing(self):
for f in (self.module.heapify, self.module.heappop,
self.module.heappush, self.module.heapreplace,
self.module.nlargest, self.module.nsmallest):
self.assertRaises(TypeError, f, 10)
def test_iterable_args(self):
for f in (self.module.nlargest, self.module.nsmallest):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, L, R):
self.assertEqual(f(2, g(s)), f(2,s))
self.assertEqual(f(2, S(s)), [])
self.assertRaises(TypeError, f, 2, X(s))
self.assertRaises(TypeError, f, 2, N(s))
self.assertRaises(ZeroDivisionError, f, 2, E(s))
#==============================================================================
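# An illustrative sketch (not exercised by the tests above) of the
# "N-best" pattern from test_nbest, written against the public heapq API:
# keep a size-n min-heap and replace its smallest element whenever a
# larger item arrives.
def nbest_demo(data, n=10):
    heap = data[:n]
    c_heapq.heapify(heap)
    for item in data[n:]:
        if item > heap[0]:
            c_heapq.heapreplace(heap, item)
    return sorted(heap)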
def test_main(verbose=None):
from types import BuiltinFunctionType
test_classes = [TestHeapPython, TestHeapC, TestErrorHandling]
test_support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in xrange(len(counts)):
test_support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print counts
if __name__ == "__main__":
test_main(verbose=True)
| lgpl-2.1 | -6,174,846,925,579,488,000 | 33.007732 | 85 | 0.56726 | false |
chromium/chromium | third_party/blink/web_tests/http/tests/websocket/expect-unfragmented_wsh.py | 7 | 1154 | # Read 32 messages and verify that they are not fragmented.
# This can be removed if the "reassemble small messages" feature is removed. See
# https://crbug.com/1086273.
from mod_pywebsocket import common
from mod_pywebsocket import msgutil
NUMBER_OF_MESSAGES = 32
def web_socket_do_extra_handshake(request):
# Disable permessage-deflate because it may reassemble messages.
request.ws_extension_processors = []
def web_socket_transfer_data(request):
for i in range(NUMBER_OF_MESSAGES):
# We need to use an internal function to verify that the frame has the
# "final" flag set.
opcode, recv_payload, final, reserved1, reserved2, reserved3 = \
request.ws_stream._receive_frame()
# We assume that the browser will not send any control messages.
if opcode != common.OPCODE_BINARY:
msgutil.send_message(request, 'FAIL: message %r was not opcode binary' % i)
return
if not final:
msgutil.send_message(request, 'FAIL: message %r was fragmented' % i)
return
msgutil.send_message(request, 'OK: message %r not fragmented' % i)
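# Illustrative sketch (not used by the handler above): in the WebSocket
# wire format (RFC 6455), the "final" flag checked above is the top bit
# of a frame's first header byte.
def _is_final_frame(first_header_byte):
    return bool(first_header_byte & 0x80)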
| bsd-3-clause | 366,798,977,242,418,050 | 35.0625 | 87 | 0.677643 | false |
smartfile/django-1.4 | django/conf/project_template/project_name/settings.py | 41 | 5380 | # Django settings for {{ project_name }} project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.4/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '{{ secret_key }}'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = '{{ project_name }}.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
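# A hypothetical usage sketch, kept commented out so this settings module
# stays side-effect free: loggers configured above are fetched by name
# from application code.
#
#   import logging
#   logger = logging.getLogger('django.request')
#   logger.error('Unhandled error')  # mailed to ADMINS when DEBUG is False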
| bsd-3-clause | 1,032,081,707,042,921,900 | 33.709677 | 101 | 0.680112 | false |
soaplib/soaplib | examples/multiple_services.py | 1 | 1055 |
from classserializer import UserManager, User
from soaplib.core import Application
from soaplib.core.model.clazz import ClassModel
from soaplib.core.model.primitive import Integer, String
from soaplib.core.service import soap, DefinitionBase
from soaplib.core.server import wsgi
computer_database = {}
computerid_seq = 1
class Computer(ClassModel):
__namespace__ = "assets"
assetid = Integer
description = String
class ComputerManager(DefinitionBase):
@soap(Computer, _returns=Computer)
def add_computer(self, computer):
global computer_database
global computerid_seq
computer.assetid = computerid_seq
computerid_seq += 1
computer_database[computer.assetid] = computer
        return computer
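# A hypothetical client-side sketch (assumes the third-party `suds`
# library, which is not part of this example; the WSDL URL follows
# soaplib's usual WSGI convention):
#
#   from suds.client import Client
#   client = Client("http://localhost:7789/?wsdl")
#   computer = client.factory.create('Computer')
#   computer.description = 'test box'
#   saved = client.service.add_computer(computer)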
if __name__ == "__main__":
from wsgiref.simple_server import make_server
soap_app = Application([ComputerManager, UserManager],tns="itServices")
wsgi_app = wsgi.Application(soap_app)
server = make_server("localhost", 7789, wsgi_app)
    server.serve_forever()
| lgpl-2.1 | -6,641,727,588,634,941,000 | 24.756098 | 75 | 0.716588 | false |
alanljj/oca-partner-contact | account_partner_merge/partner_merge.py | 37 | 1703 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Vaucher
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm
class MergePartnerAutomatic(orm.TransientModel):
_inherit = 'base.partner.merge.automatic.wizard'
def _update_values(self, cr, uid, src_partners, dst_partner, context=None):
"""Make sure we don't forget to update the stored value of
invoice field commercial_partner_id
"""
super(MergePartnerAutomatic, self)._update_values(
cr, uid, src_partners, dst_partner, context=context
)
invoice_obj = self.pool.get('account.invoice')
invoice_ids = invoice_obj.search(
cr, uid, [('partner_id', '=', dst_partner.id)], context=context
)
# call write to refresh stored value
invoice_obj.write(cr, uid, invoice_ids, {}, context=context)
| agpl-3.0 | 4,508,805,494,636,485,600 | 41.575 | 79 | 0.616559 | false |
antonve/s4-project-mooc | lms/djangoapps/bulk_email/migrations/0008_add_course_authorizations.py | 114 | 6426 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CourseAuthorization'
db.create_table('bulk_email_courseauthorization', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('course_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('email_enabled', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('bulk_email', ['CourseAuthorization'])
def backwards(self, orm):
# Deleting model 'CourseAuthorization'
db.delete_table('bulk_email_courseauthorization')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'bulk_email.courseauthorization': {
'Meta': {'object_name': 'CourseAuthorization'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'email_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'bulk_email.courseemail': {
'Meta': {'object_name': 'CourseEmail'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'html_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'text_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'to_option': ('django.db.models.fields.CharField', [], {'default': "'myself'", 'max_length': '64'})
},
'bulk_email.courseemailtemplate': {
'Meta': {'object_name': 'CourseEmailTemplate'},
'html_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'plain_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'bulk_email.optout': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'Optout'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['bulk_email']
| agpl-3.0 | -5,952,370,044,820,813,000 | 66.642105 | 182 | 0.554466 | false |
robclark/xbmc | lib/libUPnP/Neptune/Extras/Tools/Logging/NeptuneLogConsole.py | 22 | 2839 | #!/usr/bin/env python
from socket import *
from optparse import OptionParser
UDP_ADDR = "0.0.0.0"
UDP_PORT = 7724
BUFFER_SIZE = 65536
#HEADER_KEYS = ['Logger', 'Level', 'Source-File', 'Source-Function', 'Source-Line', 'TimeStamp']
HEADER_KEYS = {
    'mini': ('Level',),  # trailing comma makes this a one-element tuple, not a str
'standard': ('Logger', 'Level', 'Source-Function'),
'long': ('Logger', 'Level', 'Source-File', 'Source-Line', 'Source-Function'),
'all': ('Logger', 'Level', 'Source-File', 'Source-Line', 'Source-Function', 'TimeStamp'),
'custom': ()
}
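# Each datagram is parsed as an HTTP-style message: "Key: value" header lines
# separated by CRLF; the first line without a colon (normally the blank
# separator line) ends the headers and everything after it is the log body.
# This layout is inferred from the LogRecord parser below.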
Senders = {}
class LogRecord:
def __init__(self, data):
offset = 0
self.headers = {}
for line in data.split("\r\n"):
offset += len(line)+2
if ':' not in line: break
key,value=line.split(":",1)
self.headers[key] = value.strip()
self.body = data[offset:]
def __getitem__(self, index):
return self.headers[index]
def format(self, sender_index, keys):
parts = ['['+str(sender_index)+']']
if 'Level' in keys:
parts.append('['+self.headers['Level']+']')
if 'Logger' in keys:
parts.append(self.headers['Logger'])
if 'Source-File' in keys:
if 'Source-Line' in keys:
parts.append(self.headers['Source-File']+':'+self.headers['Source-Line'])
else:
parts.append(self.headers['Source-File'])
if 'TimeStamp' in keys:
parts.append(self.headers['TimeStamp'])
if 'Source-Function' in keys:
parts.append(self.headers['Source-Function'])
parts.append(self.body)
return ' '.join(parts)
class Listener:
def __init__(self, format='standard', port=UDP_PORT):
self.socket = socket(AF_INET,SOCK_DGRAM)
self.socket.bind((UDP_ADDR, port))
self.format_keys = HEADER_KEYS[format]
def listen(self):
while True:
data,addr = self.socket.recvfrom(BUFFER_SIZE)
            sender_index = len(Senders)
if addr in Senders:
sender_index = Senders[addr]
else:
print "### NEW SENDER:", addr
Senders[addr] = sender_index
record = LogRecord(data)
print record.format(sender_index, self.format_keys)
### main
parser = OptionParser(usage="%prog [options]")
parser.add_option("-p", "--port", dest="port", help="port number to listen on", type="int", default=UDP_PORT)
parser.add_option("-f", "--format", dest="format", help="log format (mini, standard, long, or all)", choices=('mini', 'standard', 'long', 'all'), default='standard')
(options, args) = parser.parse_args()
print "Listening on port", options.port
l = Listener(format=options.format, port=options.port)
l.listen()
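# Example invocation (using the options defined above):
#   python NeptuneLogConsole.py --port 7724 --format long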
| gpl-2.0 | 5,449,301,233,682,222,000 | 34.4875 | 165 | 0.567101 | false |
kartikp1995/gnuradio | gr-analog/python/analog/qa_pll_freqdet.py | 40 | 6611 | #!/usr/bin/env python
#
# Copyright 2004,2007,2010-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import math
from gnuradio import gr, gr_unittest, analog, blocks
class test_pll_freqdet(gr_unittest.TestCase):
    def setUp(self):
        self.tb = gr.top_block()
    def tearDown(self):
        self.tb = None
def test_pll_freqdet(self):
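        # expected_result holds the detector's frequency estimates in Hz:
        # the loop starts at 0 Hz and pulls in toward the 100 Hz tone
        # generated further down (freq = sampling_freq / 100).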
expected_result = (0.0,
4.338889228818161e-08,
0.3776331578612825,
1.0993741049896133,
2.1332509128284287,
3.448827166947317,
5.017193050406445,
6.810936277840595,
8.804128662605573,
10.972292025122194,
13.292363360097312,
15.742678902380248,
18.302902979158944,
20.954030233328815,
23.678333003762834,
26.459293141999492,
29.2815901542755,
32.13105969864019,
34.99462836613535,
37.860284035876894,
40.71702547869386,
43.5548208542428,
46.364569172614004,
49.138038040003174,
51.86783994277676,
54.547378886619114,
57.17080592915505,
59.73298657053974,
62.229444428114014,
64.65634937843706,
67.01044048049889,
69.28902004673668,
71.48990028218192,
73.61137363954212,
75.65217724529884,
77.61146325478951,
79.48876920728905,
81.28396466515709,
82.9972452848542,
84.62912095897468,
86.18033873945902,
87.65188876657749,
89.0449983399466,
90.36106669970881,
91.6016768844999,
92.76854829957963,
93.86354857479924,
94.88865206171563,
95.84592204664062,
96.73751075064077,
97.56564154258655,
98.33257336525031,
99.04061259327368,
99.69208931723288,
100.28935141465512,
100.83475862103487,
101.33065881389933,
101.77937615484109,
102.18323480545271,
102.54452335342484,
102.8654948125462,
103.14836662270359,
103.39530879191456,
103.6084320383601,
103.78982336428665,
103.94148676616939,
104.06536695064705,
104.16337305045634,
104.23733119256288,
104.28900821409572,
104.32008794641274,
104.33220678900258,
104.32694185151738,
104.30578723783803,
104.27016590404165,
104.22144151636876,
104.16091845122337,
104.08982993720561,
104.00932619714447,
103.9205337379343,
103.82447234476369,
103.72213808688659,
103.6144440277858,
103.50225579907487,
103.38636788456353,
103.26755105212685,
103.14649306386876,
103.02383425002395,
102.90019122489248,
102.7761213129379,
102.65211069081985,
102.5286218192634,
102.40608158509168,
102.28486944325857,
102.16532927481605,
102.04778124488143,
101.93248622873554,
101.81969324369186,
101.70961573316195,
101.60243156665544)
sampling_freq = 10e3
freq = sampling_freq / 100
loop_bw = math.pi/100.0
maxf = 1
minf = -1
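        # loop_bw is the PLL loop bandwidth in radians/sample; maxf/minf
        # bound the frequency estimate in normalized frequency (these
        # descriptions reflect the usual pll_freqdet_cf parameters).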
src = analog.sig_source_c(sampling_freq, analog.GR_COS_WAVE, freq, 1.0)
pll = analog.pll_freqdet_cf(loop_bw, maxf, minf)
        head = blocks.head(gr.sizeof_float, int(freq))
dst = blocks.vector_sink_f()
self.tb.connect(src, pll, head)
self.tb.connect(head, dst)
self.tb.run()
dst_data = dst.data()
# convert it from normalized frequency to absolute frequency (Hz)
dst_data = [i*(sampling_freq/(2*math.pi)) for i in dst_data]
self.assertFloatTuplesAlmostEqual(expected_result, dst_data, 3)
if __name__ == '__main__':
gr_unittest.run(test_pll_freqdet, "test_pll_freqdet.xml")
| gpl-3.0 | 4,644,895,739,234,520,000 | 40.062112 | 79 | 0.439117 | false |
etos/django | docs/conf.py | 16 | 12406 | # Django documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 27 09:06:53 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't picklable (module imports are okay, they're removed automatically).
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
from os.path import abspath, dirname, join
# Workaround for sphinx-build recursion limit overflow:
# pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
# RuntimeError: maximum recursion depth exceeded while pickling an object
#
# Python's default allowed recursion depth is 1000 but this isn't enough for
# building docs/ref/settings.txt sometimes.
# https://groups.google.com/d/topic/sphinx-dev/MtRf64eGtv4/discussion
sys.setrecursionlimit(2000)
# Make sure we get the version of this copy of Django
sys.path.insert(1, dirname(dirname(abspath(__file__))))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(abspath(join(dirname(__file__), "_ext")))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.3' # Actually 1.3.4, but micro versions aren't supported here.
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"djangodocs",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"ticket_role",
"cve_role",
]
# Spelling check needs an additional module that is not installed by default.
# Add it only if spelling check is requested so docs can be generated without it.
if 'spelling' in sys.argv:
extensions.append("sphinxcontrib.spelling")
# Workaround for https://bitbucket.org/dhellmann/sphinxcontrib-spelling/issues/13
html_use_smartypants = False
# Spelling language.
spelling_lang = 'en_US'
# Location of word list.
spelling_word_list_filename = 'spelling_wordlist'
# Add any paths that contain templates here, relative to this directory.
# templates_path = []
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General substitutions.
project = 'Django'
copyright = 'Django Software Foundation and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0'
# The full version, including alpha/beta/rc tags.
try:
from django import VERSION, get_version
except ImportError:
release = version
else:
def django_release():
pep440ver = get_version()
if VERSION[3:5] == ('alpha', 0) and 'dev' not in pep440ver:
return pep440ver + '.dev'
return pep440ver
release = django_release()
# The "development version" of Django
django_next_version = '2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# Location for .po/.mo translation files used when language is set
locale_dirs = ['locale/']
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'trac'
# Links to Python's docs should reference the most recent version of the 3.x
# branch, which is located at this URL.
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'sphinx': ('http://sphinx-doc.org/', None),
'psycopg2': ('http://initd.org/psycopg/docs/', None),
}
# Python's docs don't change every week.
intersphinx_cache_limit = 90 # days
# The 'versionadded' and 'versionchanged' directives are overridden.
suppress_warnings = ['app.add_directive']
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "djangodocs"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_theme"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# Content template for the index page.
# html_index = ''
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Djangodoc'
modindex_common_prefix = ["django."]
# Appended to every page
rst_epilog = """
.. |django-users| replace:: :ref:`django-users <django-users-mailing-list>`
.. |django-core-mentorship| replace:: :ref:`django-core-mentorship <django-core-mentorship-mailing-list>`
.. |django-developers| replace:: :ref:`django-developers <django-developers-mailing-list>`
.. |django-announce| replace:: :ref:`django-announce <django-announce-mailing-list>`
.. |django-updates| replace:: :ref:`django-updates <django-updates-mailing-list>`
"""
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
'preamble': (
'\\DeclareUnicodeCharacter{2264}{\\ensuremath{\\le}}'
'\\DeclareUnicodeCharacter{2265}{\\ensuremath{\\ge}}'
'\\DeclareUnicodeCharacter{2665}{[unicode-heart]}'
'\\DeclareUnicodeCharacter{2713}{[unicode-checkmark]}'
),
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
# latex_documents = []
latex_documents = [
('contents', 'django.tex', 'Django Documentation',
'Django Software Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(
'ref/django-admin',
'django-admin',
'Utility script for the Django Web framework',
['Django Software Foundation'],
1
), ]
# -- Options for Texinfo output ------------------------------------------------
# List of tuples (startdocname, targetname, title, author, dir_entry,
# description, category, toctree_only)
texinfo_documents = [(
master_doc, "django", "", "", "Django",
"Documentation of the Django framework", "Web development", False
)]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = 'Django Software Foundation'
epub_publisher = 'Django Software Foundation'
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = 'Django'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
epub_theme = 'djangodocs-epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
epub_cover = ('', 'epub-cover.html')
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
# epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
# -- custom extension options --------------------------------------------------
cve_url = 'https://nvd.nist.gov/view/vuln/detail?vulnId=%s'
ticket_url = 'https://code.djangoproject.com/ticket/%s'
| bsd-3-clause | -5,356,771,640,173,593,000 | 32.439353 | 105 | 0.701112 | false |
babycaseny/audacity | lib-src/lv2/sord/waflib/Tools/gfortran.py | 276 | 1966 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import re
from waflib import Utils
from waflib.Tools import fc,fc_config,fc_scan,ar
from waflib.Configure import conf
@conf
def find_gfortran(conf):
fc=conf.find_program(['gfortran','g77'],var='FC')
fc=conf.cmd_to_list(fc)
conf.get_gfortran_version(fc)
conf.env.FC_NAME='GFORTRAN'
@conf
def gfortran_flags(conf):
v=conf.env
v['FCFLAGS_fcshlib']=['-fPIC']
v['FORTRANMODFLAG']=['-J','']
v['FCFLAGS_DEBUG']=['-Werror']
@conf
def gfortran_modifier_win32(conf):
fc_config.fortran_modifier_win32(conf)
@conf
def gfortran_modifier_cygwin(conf):
fc_config.fortran_modifier_cygwin(conf)
@conf
def gfortran_modifier_darwin(conf):
fc_config.fortran_modifier_darwin(conf)
@conf
def gfortran_modifier_platform(conf):
dest_os=conf.env['DEST_OS']or Utils.unversioned_sys_platform()
gfortran_modifier_func=getattr(conf,'gfortran_modifier_'+dest_os,None)
if gfortran_modifier_func:
gfortran_modifier_func()
@conf
def get_gfortran_version(conf,fc):
version_re=re.compile(r"GNU\s*Fortran",re.I).search
cmd=fc+['--version']
out,err=fc_config.getoutput(conf,cmd,stdin=False)
if out:match=version_re(out)
else:match=version_re(err)
if not match:
conf.fatal('Could not determine the compiler type')
cmd=fc+['-dM','-E','-']
out,err=fc_config.getoutput(conf,cmd,stdin=True)
if out.find('__GNUC__')<0:
conf.fatal('Could not determine the compiler type')
k={}
out=out.split('\n')
import shlex
for line in out:
lst=shlex.split(line)
if len(lst)>2:
key=lst[1]
val=lst[2]
k[key]=val
def isD(var):
return var in k
def isT(var):
return var in k and k[var]!='0'
conf.env['FC_VERSION']=(k['__GNUC__'],k['__GNUC_MINOR__'],k['__GNUC_PATCHLEVEL__'])
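# get_gfortran_version() above identifies the compiler from its --version
# banner, then reads the GCC release out of the predefined-macro dump
# produced by ``fc -dM -E -`` (__GNUC__/__GNUC_MINOR__/__GNUC_PATCHLEVEL__).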
def configure(conf):
conf.find_gfortran()
conf.find_ar()
conf.fc_flags()
conf.fc_add_flags()
conf.gfortran_flags()
conf.gfortran_modifier_platform()
| gpl-2.0 | 1,890,840,450,924,494,300 | 27.492754 | 102 | 0.709054 | false |