code | repo_name | path | language | license | size
---|---|---|---|---|---
import time, os
import paho.mqtt.client as mqtt
from pymongo import MongoClient
cliente = MongoClient(os.getenv('MONGO_SERVER', 'nuvem.sj.ifsc.edu.br'))
db = cliente[os.getenv('DATABASE', 'estacao')]
def on_connect(client, userdata, flags, rc):
client.subscribe(os.getenv('TOPIC', 'estacao/#'))
def on_message(client, userdata, msg):
topico = msg.topic.strip().split('/')
arduino = topico[0]
sensor = topico[1]
valoratual = bytes(msg.payload).decode('utf-8')
data_atual = time.time()
posts = db[sensor]
post = {'nome':arduino, 'valor':valoratual, 'data':data_atual}
post_id = posts.insert_one(post).inserted_id
print('Tópico: ' + topico[0]
+ ', sensor: ' + topico[1]
+ ', valor: ' + valoratual + '\n')
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect(os.getenv('MQTT_BROKER', 'nuvem.sj.ifsc.edu.br'), 1883, 60)
client.loop_forever()
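
# --- Illustrative companion sketch (not part of the original script) ---
# A message published on a topic matching the subscribed "estacao/#" pattern
# ends up as one document in the MongoDB collection named after the second
# topic segment, with the first segment stored as 'nome'. The topic, payload
# and broker below are assumptions for illustration only.
#
# import paho.mqtt.publish as publish
# publish.single('estacao/temperatura', payload='23.5',
#                hostname='nuvem.sj.ifsc.edu.br')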
| kididcca/mqtt-mongo | assinante.py | Python | mit | 951 |
import rospy
from std_msgs.msg import String
#name, type, readQuery, readResponse
UsedServices = ()
| mpomarlan/UE_ROS_Bridge | scripts/UE_ROS_Bridge_UsedServices.py | Python | bsd-2-clause | 101 |
#! /usr/bin/env python3
### Author: Kevin Pouget, Grenoble, France, July 2014
import queue
import sh
SOURCE_DIR = "/home/kevin/cl-specfem3d/SPECFEM3D_GLOBE"
BUILD_DIR = "/home/kevin/specfem-testsuite"
PAR_FILE = "DATA/Par_file"
DEBUG = False
to_import = ("cd", "mkdir", "make", "sed", "grep", "gnuplot", "{}/configure".format(SOURCE_DIR))
def process_output(line):
print(line)
def bake_echo(name):
    def echo(*args):
        print("| {} {}".format(name, " ".join(args)))
        return "(running {} {})".format(name, " ".join(args))
return echo
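# Bind each entry of to_import as a module-level callable: the real command via
# the sh module normally, or a printing stub built by bake_echo() when DEBUG is set.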
for imp in to_import:
name = imp.rpartition("/")[-1]
globals()[name] = sh.__getattr__(imp) if not DEBUG else bake_echo(name)
def lst_to_ctes(*lst):
for cte in lst:
globals()[cte.upper()] = cte
return lst
###############################################################################################
CUDA_CONF = ["CUDA_INC=/usr/local/cuda-5.5.22/include/", "CUDA_LIB=/usr/local/cuda-5.5.22/lib64"]
# The runtime constants (OPENCL, CUDA, BOTH) must exist before they are used
# as dictionary keys, so RUNTIMES is defined first.
RUNTIMES = lst_to_ctes("opencl", "cuda", "both")
CONFIGURE_OPT = {
    OPENCL: ["--with-opencl"],
    CUDA: ["--with-cuda=cuda5"] + CUDA_CONF,
    BOTH: ["--with-opencl", "--with-cuda=cuda5"] + CUDA_CONF
}
FLAGS = ("USE_TEXTURES_FIELDS", "USE_TEXTURES_CONSTANTS", "MANUALLY_UNROLLED_LOOPS")
CONFFILE_DEFAULT_OPT = {
"GPU_MODE": ".true.",
"GPU_RUNTIME": "0",
"GPU_PLATFORM": "NVIDIA",
"GPU_DEVICE": "Tesla"
}
GPU_RUNTIME_OPT = {
OPENCL: "2",
CUDA: "1",
BOTH: "0"
}
def analyze_results(scenarii):
cd(BUILD_DIR)
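    # Early return: the gnuplot-based comparison below is currently disabled
    # (it also references VERSIONS_TO_TEST, which is not defined in this script).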
return
to_plot = []
seismo_glob = "{}/inout/OUTPUT_FILES/*.ascii".format(VERSIONS_TO_TEST[0])
to_plot.append("set terminal pdf")
to_plot.append("set output 'out.pdf'")
for seismo in sh.glob(seismo_glob):
plot = "plot "
for i, version in enumerate(VERSIONS_TO_TEST):
right_seismo = seismo.replace(VERSIONS_TO_TEST[0], version)
plot += "'{}' using 1:2 {}".format(right_seismo, "" if i == len(VERSIONS_TO_TEST) - 1 else ", ")
to_plot.append(plot)
to_plot.append("quit")
plotter = gnuplot(_in=[p+"\n" for p in to_plot])
def set_config_options(options=CONFFILE_DEFAULT_OPT):
for opt, val in options.items():
sed("-i", "/{}/d".format(opt), PAR_FILE)
if DEBUG:
print("(add to {}: {} = {})".format(PAR_FILE, opt, val))
continue
with open(PAR_FILE, "a+") as par_file:
print("{} = {}".format(opt, val), file=par_file)
class Scenario:
built_runtimes = []
scenarii = []
cpt = 0
def __init__(self, runtime, make_flags=[], config_options={}):
self.uid = Scenario.cpt
Scenario.cpt += 1
self.runtime = runtime
self.make_flags = make_flags
self.config_options = config_options
Scenario.scenarii.append(self)
def run(self):
print("------<Test #{}>----------".format(Scenario.cpt))
scenario.initialize_build_dir()
scenario.setup_environment()
scenario.run_execution()
print("------</Test #{}>----------\n".format(Scenario.cpt))
def initialize_build_dir(self):
if self.runtime in Scenario.built_runtimes:
set_config_options()
return
cd(BUILD_DIR)
mkdir("{}".format(self.runtime), "-p")
cd("{}".format(self.runtime))
print("Configure {}".format(self.runtime))
print(configure(CONFIGURE_OPT[self.runtime]))
set_config_options()
Scenario.built_runtimes.append(self.runtime)
def setup_environment(self):
print("prepare the example")
make("prepare-example")
set_config_options(self.config_options)
print("building the example")
cflags = "'{}'".format(" ".join(["-D{}".format(flag) for flag in self.make_flags])) if self.make_flags else ""
print(make("CFLAGS={}".format(cflags), _out=process_output, _err=process_output))
print(make("build-example", _out=process_output, _err=process_output))
def run_execution(self):
print("run the example")
print(make("run-example"))
def save_results(self):
print("save the results (TODO)")
if __name__ == "__main__":
mkdir(BUILD_DIR, "-p")
for rt in RUNTIMES:
if rt == OPENCL: continue
if rt == BOTH:
for each in (OPENCL, CUDA):
Scenario(rt, config_options={"GPU_RUNTIME" : GPU_RUNTIME_OPT[each]})
continue
Scenario(rt)
for flag in FLAGS:
Scenario(rt, [flag])
for scenario in Scenario.scenarii:
scenario.run()
analyze_results(Scenario.scenarii)
| geodynamics/specfem3d_globe | utils/BuildBot/test_suite_for_BuildBot.py | Python | gpl-3.0 | 4,675 |
"""Python wrappers around Brain.
This file is MACHINE GENERATED! Do not edit.
"""
import collections as _collections
from google.protobuf import text_format as _text_format
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
_assign_add_variable_op_outputs = [""]
def assign_add_variable_op(resource, value, name=None):
r"""Adds a value to the current value of a variable.
Any ReadVariableOp which depends directly or indirectly on this assign is
guaranteed to see the incremented value or a subsequent newer one.
Outputs the incremented value, which can be used to totally order the
increments to this variable.
Args:
resource: A `Tensor` of type `resource`.
handle to the resource in which to store the variable.
value: A `Tensor`. the value by which the variable will be incremented.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
result = _op_def_lib.apply_op("AssignAddVariableOp", resource=resource,
value=value, name=name)
return result
_assign_variable_op_outputs = [""]
def assign_variable_op(resource, value, name=None):
r"""Assigns a new value to a variable.
Any ReadVariableOp with a control dependency on this op is guaranteed to return
this value or a subsequent newer value of the variable.
Args:
resource: A `Tensor` of type `resource`.
handle to the resource in which to store the variable.
value: A `Tensor`. the value to set the new tensor to use.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
result = _op_def_lib.apply_op("AssignVariableOp", resource=resource,
value=value, name=name)
return result
_read_variable_op_outputs = ["value"]
def read_variable_op(resource, dtype, name=None):
r"""Reads the value of a variable.
The tensor returned by this operation is immutable.
The value returned by this operation is guaranteed to be influenced by all the
writes on which this operation depends directly or indirectly, and to not be
influenced by any of the writes which depend directly or indirectly on this
operation.
Args:
resource: A `Tensor` of type `resource`.
handle to the resource in which to store the variable.
dtype: A `tf.DType`. the dtype of the value.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `dtype`.
"""
result = _op_def_lib.apply_op("ReadVariableOp", resource=resource,
dtype=dtype, name=name)
return result
_resource_gather_outputs = ["output"]
def resource_gather(resource, indices, dtype, validate_indices=None,
name=None):
r"""Gather slices from the variable pointed to by `resource` according to `indices`.
`indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
```python
# Scalar indices
output[:, ..., :] = params[indices, :, ... :]
# Vector indices
output[i, :, ..., :] = params[indices[i], :, ... :]
# Higher rank indices
output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
```
Args:
resource: A `Tensor` of type `resource`.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
dtype: A `tf.DType`.
validate_indices: An optional `bool`. Defaults to `True`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `dtype`.
"""
result = _op_def_lib.apply_op("ResourceGather", resource=resource,
indices=indices, dtype=dtype,
validate_indices=validate_indices, name=name)
return result
_resource_scatter_add_outputs = [""]
def resource_scatter_add(resource, indices, updates, name=None):
r"""Adds sparse updates to the variable referenced by `resource`.
This operation computes
# Scalar indices
ref[indices, ...] += updates[...]
# Vector indices (for each i)
ref[indices[i], ...] += updates[i, ...]
# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
Duplicate entries are handled correctly: if multiple `indices` reference
the same location, their contributions add.
Requires `updates.shape = indices.shape + ref.shape[1:]`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="../../images/ScatterAdd.png" alt>
</div>
Args:
resource: A `Tensor` of type `resource`. Should be from a `Variable` node.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A tensor of indices into the first dimension of `ref`.
updates: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
A tensor of updated values to add to `ref`.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
result = _op_def_lib.apply_op("ResourceScatterAdd", resource=resource,
indices=indices, updates=updates, name=name)
return result
_var_handle_op_outputs = ["resource"]
def var_handle_op(dtype, shape, container=None, shared_name=None, name=None):
r"""Creates a handle to a Variable resource.
Args:
dtype: A `tf.DType`. the type of this variable. Must agree with the dtypes
of all ops using this variable.
shape: A `tf.TensorShape` or list of `ints`.
The (possibly partially specified) shape of this variable.
container: An optional `string`. Defaults to `""`.
the container this variable is placed in.
shared_name: An optional `string`. Defaults to `""`.
the name by which this variable is referred to.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `resource`.
"""
result = _op_def_lib.apply_op("VarHandleOp", dtype=dtype, shape=shape,
container=container, shared_name=shared_name,
name=name)
return result
_var_is_initialized_op_outputs = ["is_initialized"]
def var_is_initialized_op(resource, name=None):
r"""Checks whether a resource handle-based variable has been initialized.
Args:
resource: A `Tensor` of type `resource`. the input resource handle.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
a scalar boolean which is true if the variable has been
initialized.
"""
result = _op_def_lib.apply_op("VarIsInitializedOp", resource=resource,
name=name)
return result
def _InitOpDefLibrary():
op_list = _op_def_pb2.OpList()
_text_format.Merge(_InitOpDefLibrary.op_list_ascii, op_list)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
_InitOpDefLibrary.op_list_ascii = """op {
name: "AssignAddVariableOp"
input_arg {
name: "resource"
type: DT_RESOURCE
}
input_arg {
name: "value"
type_attr: "dtype"
}
attr {
name: "dtype"
type: "type"
}
}
op {
name: "AssignVariableOp"
input_arg {
name: "resource"
type: DT_RESOURCE
}
input_arg {
name: "value"
type_attr: "dtype"
}
attr {
name: "dtype"
type: "type"
}
}
op {
name: "ReadVariableOp"
input_arg {
name: "resource"
type: DT_RESOURCE
}
output_arg {
name: "value"
type_attr: "dtype"
}
attr {
name: "dtype"
type: "type"
}
}
op {
name: "ResourceGather"
input_arg {
name: "resource"
type: DT_RESOURCE
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
output_arg {
name: "output"
type_attr: "dtype"
}
attr {
name: "validate_indices"
type: "bool"
default_value {
b: true
}
}
attr {
name: "dtype"
type: "type"
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "ResourceScatterAdd"
input_arg {
name: "resource"
type: DT_RESOURCE
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
input_arg {
name: "updates"
type_attr: "dtype"
}
attr {
name: "dtype"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "VarHandleOp"
output_arg {
name: "resource"
type: DT_RESOURCE
}
attr {
name: "container"
type: "string"
default_value {
s: ""
}
}
attr {
name: "shared_name"
type: "string"
default_value {
s: ""
}
}
attr {
name: "dtype"
type: "type"
}
attr {
name: "shape"
type: "shape"
}
is_stateful: true
}
op {
name: "VarIsInitializedOp"
input_arg {
name: "resource"
type: DT_RESOURCE
}
output_arg {
name: "is_initialized"
type: DT_BOOL
}
}
"""
_op_def_lib = _InitOpDefLibrary()
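
# --- Illustrative usage sketch (not part of the machine-generated code) ---
# The ops above compose as: create a variable handle, assign a value, then read
# it back. This is an assumption-laden example for a TF 1.x graph/session
# setup, kept commented out so the generated module remains import-only.
#
# import tensorflow as tf
# handle = var_handle_op(dtype=tf.float32, shape=[2], shared_name="v")
# init = assign_variable_op(handle, tf.constant([1.0, 2.0]))
# with tf.control_dependencies([init]):
#     value = read_variable_op(handle, dtype=tf.float32)
# with tf.Session() as sess:
#     print(sess.run(value))  # -> [1. 2.]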
| jjas0nn/solvem | tensorflow/lib/python2.7/site-packages/tensorflow/python/ops/gen_resource_variable_ops.py | Python | mit | 9,981 |
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('janta_tribesman')
mobileTemplate.setLevel(77)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(3)
mobileTemplate.setMaxSpawnDistance(5)
mobileTemplate.setDeathblow(True)
mobileTemplate.setSocialGroup('janta tribe')
mobileTemplate.setAssistRange(12)
templates = Vector()
templates.add('object/mobile/shared_dantari_male.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/knife/shared_knife_janta.iff', WeaponType.ONEHANDEDMELEE, 1.0, 5, 'kinetic')
weaponTemplates.add(weapontemplate)
weapontemplate = WeaponTemplate('object/weapon/melee/polearm/shared_lance_staff_janta.iff', WeaponType.POLEARMMELEE, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('meleehit')
mobileTemplate.setAttacks(attacks)
lootPoolNames_1 = ['Junk']
lootPoolChances_1 = [100]
lootGroupChance_1 = 65
mobileTemplate.addToLootGroups(lootPoolNames_1,lootPoolChances_1,lootGroupChance_1)
lootPoolNames_2 = ['random_loot_primitives']
lootPoolChances_2 = [100]
lootGroupChance_2 = 35
mobileTemplate.addToLootGroups(lootPoolNames_2,lootPoolChances_2,lootGroupChance_2)
core.spawnService.addMobileTemplate('janta_tribesman', mobileTemplate)
| agry/NGECore2 | scripts/mobiles/dantooine/janta_tribesman.py | Python | lgpl-3.0 | 1,720 |
# WSAdminExtras is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# WSAdminExtras is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import logging
from SocketServer import StreamRequestHandler
from ru.durdyev.wsadminextras.exceptions.HeadersNotSetException import HeadersNotSetException
from ru.durdyev.wsadminextras.utils.ServerCodes import ServerCodes
from ru.durdyev.wsadminextras.utils.ServerCommands import ServerCommands
from ru.durdyev.wsadminextras.exceptions.BaseException import BaseException
from ru.durdyev.wsadminextras.utils.NetUtils import NetUtils
from ru.durdyev.wsadminextras.utils.ServerHeaders import ServerHeaders
from ru.durdyev.wsadminextras.utils.ServerMessages import ServerMessages
# base request handler class.
class RequestHandler(StreamRequestHandler):
#request_id
_request_id = None
#comand
_command = None
#headers
_headers = None
#content len
_content_len = None
#content type
_content_type = None
#profile
_profile = None
_server_commands = ServerCommands()
_server_codes = ServerCodes()
_server_headers = ServerHeaders()
_server_messages = ServerMessages()
_wsadminQueue = None
def setQueue(self, queue):
self._wsadminQueue = queue
#override method to catch a packets.
def handle(self):
while True:
self._request_id = NetUtils.generate_request_id()
self._headers = self.request.recv(self._server_headers.headers_len)
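            # The fixed-size header block is expected to contain lines such as
            # "Command: <name>", "Content-length: <bytes>",
            # "Content-type: <type>" and "Profile: <profile>"; the regular
            # expressions below extract each value from the raw block.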
regexp_headers = re.search('.*Command:(.*).*', self.headers)
regexp_content_len = re.search(".*Content-length:(.*).*", self.headers)
regexp_content_type = re.search(".*Content-type:(.*).*", self.headers)
regexp_profile = re.search(".*Profile:(.*).*", self.headers)
try:
if regexp_content_len is not None:
self._content_len = regexp_content_len.group(1).strip()
else:
raise HeadersNotSetException(self.server_codes.code_headers_not_recived,
                                         'Content-length header was not received.')
if regexp_content_type is not None:
self._content_type = regexp_content_type.group(1).strip()
else:
raise HeadersNotSetException(self.server_codes.code_headers_not_recived,
                                         'Content-type header was not received.')
if regexp_profile is not None:
self._profile = regexp_profile.group(1).strip()
else:
raise HeadersNotSetException(self.server_codes.code_headers_not_recived,
                                         'Profile header was not received.')
if regexp_headers is not None:
comand_code = regexp_headers.group(1).strip()
if len(comand_code) > 0:
self._command = comand_code + '_handler'
else:
                        logging.info('command is not set.')
                        raise HeadersNotSetException(self.server_codes.code_headers_not_recived,
                                                     'command is not set.')
commandHandler = getattr(self, self.command)
if commandHandler is not None:
commandHandler()
else:
self.send_response(self.server_codes.code_headers_not_recived)
return
except BaseException as e:
#headers not recived
logging.info('Headers not set error.' + e.msg)
self.send_response(self.server_codes.code_headers_not_recived)
return
#recv bytes
    def recv_bytes(self, begin, end):
size = 1
output = ''
while size <= end:
if size >= begin <= end:
output += self.request.recv(size - len(output))
size += 1
return output
    # send arbitrary response headers (and an optional payload) to the client;
    # named distinctly so it is not shadowed by send_response(code, message) below
    def send_raw_response(self, headers, data):
response_headers = ''
for key in headers:
response_headers += key + headers[key]
self.request.send(response_headers)
if data is not None:
self.request.send(data)
#sending an error
def send_response(self, code, message=None):
        response_data = self.request_id + NetUtils.delimeter_n()
response_data += self.server_headers.response_code + code
response_data += str(NetUtils.delimeter_n())
if message is not None:
response_data += self.server_headers.response_message + message
response_data += str(NetUtils.delimeter_n())
self.request.send(response_data)
@property
def request_id(self):
return self._request_id
@property
def command(self):
return self._command
@property
def headers(self):
return self._headers
@property
def content_len(self):
return int(self._content_len)
@property
def content_type(self):
return self._content_type
@property
def profile(self):
return self._profile
@property
def server_headers(self):
return self._server_headers
@property
def server_codes(self):
return self._server_codes
@property
def server_commands(self):
return self._server_commands
@property
def server_messages(self):
return self._server_messages
@property
def wsadminQueue(self):
return self._wsadminQueue | durdyev/WSAdminExtras | lib/ru/durdyev/wsadminextras/server/RequestHandler.py | Python | gpl-3.0 | 6,507 |
# find_packages and entry_points below require setuptools' setup(); importing
# distutils.core.setup afterwards would shadow it and silently drop the
# console-script entry points.
from setuptools import setup, find_packages
import os, glob, sys, re
import distutils.sysconfig
name = 'scutools'
version = '0.6.3'
setup(
name = name,
version = version,
packages = find_packages(),
scripts = glob.glob('scripts/*-*'),
# Project uses reStructuredText, so ensure that the docutils get
# installed or upgraded on the target machine
# install_requires = ['docutils >= 0.3'],
data_files = [
('share/doc/' + name + '-' + version, ['scutools.conf', 'COPYING']),
('share/man/man1', ['man/pexec.1']),
],
# package_data = {
# # If any package contains *.txt or *.rst files, include them:
# '': ['*.txt', '*.rst'],
# # And include any *.msg files found in the 'hello' package, too:
# 'hello': ['*.msg'],
# },
# metadata for upload to PyPI
entry_points = {
'console_scripts': [
'pexec = scutools.app:main',
'pls = scutools.app:main',
'pps = scutools.app:main',
'pcp = scutools.app:main',
'pmv = scutools.app:main',
'prm = scutools.app:main',
'pcat = scutools.app:main',
'pfind = scutools.app:main',
'pdist = scutools.app:main',
'pfps = scutools.app:main',
'pkillps = scutools.app:main',
'pkillu = scutools.app:main',
'ppred = scutools.app:main',
'ptest = scutools.app:main',
'phost = scutools.app:main',
]
},
author = "Somsak Sriprayoonsakul",
author_email = "[email protected]",
description = "Scalable Unix Tool packages",
license = "GPLv3",
keywords = "cluster grid unix",
url = "http://code.google.com/p/scutools", # project home page, if any
# could also include long_description, download_url, classifiers, etc.
)
| somsak/scutools | setup.py | Python | gpl-3.0 | 1,930 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gcp_devrel.testing import eventually_consistent
from google.cloud import pubsub
import pytest
import publisher
TEST_TOPIC = 'publisher-test-topic'
@pytest.fixture
def test_topic():
client = pubsub.Client()
topic = client.topic(TEST_TOPIC)
yield topic
if topic.exists():
topic.delete()
def test_list(test_topic, capsys):
test_topic.create()
@eventually_consistent.call
def _():
publisher.list_topics()
out, _ = capsys.readouterr()
assert test_topic.name in out
def test_create(test_topic):
publisher.create_topic(test_topic.name)
@eventually_consistent.call
def _():
assert test_topic.exists()
def test_delete(test_topic):
test_topic.create()
publisher.delete_topic(test_topic.name)
@eventually_consistent.call
def _():
assert not test_topic.exists()
def test_publish(test_topic, capsys):
test_topic.create()
publisher.publish_message(test_topic.name, 'hello')
out, _ = capsys.readouterr()
assert 'published' in out
| JavaRabbit/CS496_capstone | pubsub/cloud-client/publisher_test.py | Python | apache-2.0 | 1,655 |
# -*- coding: utf-8 -*-
#! \file ./tests/test_text/test_pgen/__init__.py
#! \author Jiří Kučera, <[email protected]>
#! \stamp 2016-04-07 09:49:32 (UTC+01:00, DST+01:00)
#! \project DoIt!: Tools and Libraries for Building DSLs
#! \license MIT
#! \version 0.0.0
#! \fdesc @pyfile.docstr
#
"""\
DoIt! test_pgen package initialization file.\
"""
__license__ = """\
Copyright (c) 2014 - 2017 Jiří Kučera.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.\
"""
import unittest
from . import test_errors, test_utils, test_models, test_readers
def suite():
suite = unittest.TestSuite()
suite.addTest(test_errors.suite())
suite.addTest(test_utils.suite())
suite.addTest(test_models.suite())
suite.addTest(test_readers.suite())
return suite
#-def
| i386x/doit | tests/test_text/test_pgen/__init__.py | Python | mit | 1,811 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014 by Filip H.F. "FiXato" Slagter <[email protected]>
#
# Shutup: a quick WeeChat script to replace text from specified users with
# random or preset text as a way to hide their actual text.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# 2014-01-31: FiXato, (freenode.#weechat)
# 0.1 : initial release
#
# requires: WeeChat version 0.3.6 or higher
#
# Thanks go out to nils_2 for providing his skeleton.py template, available at https://github.com/weechatter/weechat-scripts/blob/master/python/skeleton.py
#
# Development is currently hosted at
# https://github.com/FiXato/weechat_scripts
try:
import weechat,re
from random import choice
except Exception:
print "This script must be run under WeeChat."
print "Get WeeChat now at: http://www.weechat.org/"
quit()
SCRIPT_NAME = "shutup"
SCRIPT_AUTHOR = 'Filip H.F. "FiXato" Slagter <[email protected]>'
SCRIPT_VERSION = "0.2"
SCRIPT_LICENSE = "GPL"
SCRIPT_DESC = "Replace text from specified IRC users with random or preset text as a way to hide their actual text. Unlike /filter it won't hide the line (and thus can't be toggled either), and has access to the entire hostmask for comparison. Can be useful to mute users while still seeing that they are active."
OPTIONS = {
'replacement_text' : ('','Replacement text for everything the muted user says. Leave empty to use random lines from the Jabberwocky poem.'),
'muted_masks' : ('','Space-separated regular expressions that will be matched against the [email protected]. Any user matching will get their message muted. Can also include a comma-separated list of channels for every regular expression separated from the regexp by a colon. Prefix regexp with (?i) if you want it to be case insensitive. Example: "@\S+\.aol\.com$:#comcast,#AT&T (?i)!root@\S+" would mute messages in channels #comcast and #AT&T from users whose hosts end in *.aol.com, as well as all users who have any case variation of root as ident regardless of channel.'),
}
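# Example (illustrative values): mute every case variation of ident "root",
# regardless of channel, by setting the option from within WeeChat:
#   /set plugins.var.python.shutup.muted_masks "(?i)!root@\S+"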
DEBUG = False
jabberwocky = """
'Twas brillig, and the slithy toves
Did gyre and gimble in the wabe;
All mimsy were the borogoves,
And the mome raths outgrabe.
"Beware the Jabberwock, my son!
The jaws that bite, the claws that catch!
Beware the Jubjub bird, and shun
The frumious Bandersnatch!"
He took his vorpal sword in hand:
Long time the manxome foe he sought—
So rested he by the Tumtum tree,
And stood awhile in thought.
And as in uffish thought he stood,
The Jabberwock, with eyes of flame,
Came whiffling through the tulgey wood,
And burbled as it came!
One, two! One, two! and through and through
The vorpal blade went snicker-snack!
He left it dead, and with its head
He went galumphing back.
"And hast thou slain the Jabberwock?
Come to my arms, my beamish boy!
O frabjous day! Callooh! Callay!"
He chortled in his joy.
'Twas brillig, and the slithy toves
Did gyre and gimble in the wabe;
All mimsy were the borogoves,
And the mome raths outgrabe.
"""
replacement_lines = filter(None, jabberwocky.splitlines())
def random_replacement_line(lines = replacement_lines):
return choice(lines)
def replacement_line():
global OPTIONS
if OPTIONS['replacement_text'] == '':
return random_replacement_line()
return OPTIONS['replacement_text']
# Easily use weechat colors in the script
# text = substitute_colors('my text ${color:yellow}yellow${color:default} colored.')
# eval_expression(): to match ${color:nn} tags
regex_color=re.compile('\$\{color:([^\{\}]+)\}')
def substitute_colors(text):
if int(version) >= 0x00040200:
return weechat.string_eval_expression(text,{},{},{})
# substitute colors in output
return re.sub(regex_color, lambda match: weechat.color(match.group(1)), text)
# ===================[ weechat options & description ]===================
def init_options():
for option,value in OPTIONS.items():
if not weechat.config_is_set_plugin(option):
weechat.config_set_plugin(option, value[0])
toggle_refresh(None, 'plugins.var.python.' + SCRIPT_NAME + '.' + option, value[0])
else:
toggle_refresh(None, 'plugins.var.python.' + SCRIPT_NAME + '.' + option, weechat.config_get_plugin(option))
weechat.config_set_desc_plugin(option, '%s (default: "%s")' % (value[1], value[0]))
def debug(str):
if DEBUG:
weechat.prnt("", str)
def update_muted_masks(masks):
global muted_masks
muted_masks = {}
for mask in masks.split():
if '#' in mask:
mask, chan = mask.split(':',1)
channels = [channel.lower() for channel in chan.split(',')]
else:
channels = []
muted_masks[mask] = [re.compile(mask), channels]
debug('muted masks: %s' % muted_masks)
def toggle_refresh(pointer, name, value):
global OPTIONS
option = name[len('plugins.var.python.' + SCRIPT_NAME + '.'):] # get optionname
OPTIONS[option] = value # save new value
if option == 'muted_masks':
update_muted_masks(value)
return weechat.WEECHAT_RC_OK
def shutup_cb(data, modifier, modifier_data, string):
dict_in = { "message": string }
message_ht = weechat.info_get_hashtable("irc_message_parse", dict_in)
hostmask = message_ht['host']
arguments = message_ht['arguments']
channel = message_ht['channel']
new_arguments = re.sub(r'^%s :.+' % channel, lambda x: '%s :%s' % (channel, replacement_line()), arguments)
new_string = re.sub(r'%s$' % re.escape(arguments), lambda x: new_arguments, string)
for key, [mask_regexp, channels] in muted_masks.iteritems():
# If there is one or more channels listed for this mask regexp, and none of them match the current channel, continue to the next mute mask
if len(channels) > 0 and channel.lower() not in channels:
debug("%s doesn't match any of the listed channels: %s" % (channel, channels))
continue
# If the hostmask matches the mask regular expression, return the new, manipulated, string.
debug("comparing %s to %s" % (mask_regexp.pattern, hostmask))
if mask_regexp.search(hostmask):
debug(" %s matches %s" % (mask_regexp.pattern, hostmask))
return new_string
# Nothing matches, so return the original, unmodified, string
return string
# ================================[ main ]===============================
if __name__ == "__main__":
if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE, SCRIPT_DESC, '', ''):
version = weechat.info_get("version_number", "") or 0
if int(version) >= 0x00030600:
# init options from your script
init_options()
# create a hook for your options
weechat.hook_config( 'plugins.var.python.' + SCRIPT_NAME + '.*', 'toggle_refresh', '' )
else:
weechat.prnt("","%s%s %s" % (weechat.prefix("error"),SCRIPT_NAME,": needs version 0.3.6 or higher"))
weechat.command("","/wait 1ms /python unload %s" % SCRIPT_NAME)
hook = weechat.hook_modifier("irc_in_privmsg", "shutup_cb", "")
| qguv/config | weechat/plugins/python/shutup.py | Python | gpl-3.0 | 7,664 |
""" Test ``dockci.api.jwt`` against the DB """
import json
import pytest
class TestJwtServiceNew(object):
""" Test the ``JwtServiceNew`` resource """
@pytest.mark.usefixtures('db')
def test_agent_role(self, client, admin_user):
""" Test creating a service token with the agent internal role """
response = client.post('/api/v1/jwt/service', headers={
'x_dockci_username': admin_user.email,
'x_dockci_password': 'testpass',
}, data={
'name': 'test',
'roles': ['agent'],
})
assert response.status_code == 201
response_data = json.loads(response.data.decode())
token = response_data.pop('token')
assert response_data == {}
response = client.get('/api/v1/me/jwt', headers={
'x_dockci_api_key': token
})
response_data = json.loads(response.data.decode())
response_data.pop('iat')
assert response_data == dict(
name='test',
roles=['agent'],
sub='service',
sub_detail='/api/v1/users/service',
)
@pytest.mark.usefixtures('db')
def test_non_admin(self, client, user):
""" Test creating a service token without admin """
response = client.post('/api/v1/jwt/service', headers={
'x_dockci_username': user.email,
'x_dockci_password': 'testpass',
}, data={
'name': 'test',
'roles': ['agent'],
})
assert response.status_code == 401
@pytest.mark.usefixtures('db')
def test_unknown_role(self, client, admin_user):
""" Test creating a service token with the agent internal role """
response = client.post('/api/v1/jwt/service', headers={
'x_dockci_username': admin_user.email,
'x_dockci_password': 'testpass',
}, data={
'name': 'test',
'roles': ['faketest'],
})
assert response.status_code == 400
response_data = json.loads(response.data.decode())
assert response_data == {
'message': {'roles': 'Roles not found: faketest'}
}
| RickyCook/DockCI | tests/db/api/test_jwt_api_db.py | Python | isc | 2,180 |
from unittest.mock import Mock, MagicMock, patch, call, DEFAULT, mock_open
from .util import mock_globbing, mock_path, mock_multiple_opens
import toml
import itertools
import sh
import click
import os
import sys
import glob
from click.testing import CliRunner
from borg_summon import command_line, config_parser
from locale import getpreferredencoding
DEFAULT_ENCODING = getpreferredencoding() or "utf-8"
def mock_default_config(config_name):
with open(os.path.join(os.path.dirname(__file__), 'configs', config_name), 'r') as f:
m = mock_multiple_opens(itertools.chain([f.read()], itertools.repeat('')))
def decorator(func):
@patch('borg_summon.config_parser.open', m, create=True)
def func_wrapper(*args, **kwargs):
func(*args, **kwargs)
return func_wrapper
return decorator
def call_matches(call, args, options, env):
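    """Return True if a recorded os.spawnve mock call matches the expected
    positional arguments, --option flags and environment dictionary."""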
cname, cargs, ckwargs = call
options = options.copy()
args = args.copy()
cargs = list(cargs)
cargs[1] = cargs[1].decode(DEFAULT_ENCODING)
cargs[2] = list(map(lambda a: a.decode(DEFAULT_ENCODING), cargs[2]))
print("Match?", cargs, args)
if not cargs[0] == os.P_WAIT:
return False
if not cargs[1] == args[0]:
print("No match because of command name")
return False
if not cargs[3] == env:
print("No match because of environment")
return False
for arg in cargs[2]:
if arg.startswith('--'):
eqpos = arg.find('=')
if eqpos == -1:
if arg[2:] in options and options[arg[2:]] == True:
del options[arg[2:]]
else:
print("No match because of missing option A", arg)
return False
else:
if arg[2:eqpos] in options and options[arg[2:eqpos]] == arg[(eqpos+1):]:
del options[arg[2:eqpos]]
else:
print("No match because of missing option B", arg)
return False
else:
if len(args) > 0 and args[0] == arg:
del args[0]
else:
print("No match because of missing option C", arg)
return False
if len(args) == 0 and len(options) == 0:
return True
else:
print("No match because of not empty")
return False
def any_call_matches(mock, args, options, env):
for call in mock.mock_calls:
if call_matches(call, args, options, env):
return True
return False
def test_root_command_help():
runner = CliRunner()
result = runner.invoke(command_line.main, ['--help'])
assert result.exit_code == 0
def test_root_backup_help():
runner = CliRunner()
result = runner.invoke(command_line.main, ['backup', '--help'])
assert result.exit_code == 0
def test_root_maintain_help():
runner = CliRunner()
result = runner.invoke(command_line.main, ['maintain', '--help'])
assert result.exit_code == 0
@mock_path
@mock_default_config('minimal.toml')
@patch('os.spawnve', return_value=0)
def test_backup_init_minimal(borg_mock):
# Set up
runner = CliRunner()
# Perform
result = runner.invoke(command_line.main, ['--config=test.toml', 'backup', '--init'], catch_exceptions=False)
# Assert
assert any_call_matches(os.spawnve,
['/path/borg', 'init', 'remote_location_a/source_A'], {}, {})
assert any_call_matches(os.spawnve,
['/path/borg', 'init', 'remote_location_a/source_B'], {}, {})
assert any_call_matches(os.spawnve,
['/path/borg', 'init', 'remote_location_b/source_A'], {}, {})
assert any_call_matches(os.spawnve,
['/path/borg', 'init', 'remote_location_b/source_B'], {}, {})
# assert os.spawnve.call_count == 4
assert result.exit_code == 0
@mock_path
@mock_default_config('maximal.toml')
@patch('os.spawnve', return_value=0)
def test_backup_init_maximal(spawnve):
# Set up
runner = CliRunner()
# Perform
result = runner.invoke(command_line.main, ['backup', '--init'], catch_exceptions=False)
# Assert
env = {
'BORG_RSH': 'ssh_command',
'BORG_PASSPHRASE': 'passphrase1',
'BORG_DISPLAY_PASSPHRASE': 'n',
}
kwargs = {
'append-only': True,
'encryption': 'repokey',
'info':True,
'remote-path': 'remote_borg_path',
'umask': '0007',
}
assert any_call_matches(os.spawnve, ['/path/sudo', '-S', '/path/borg', 'init',
'remote_location_a/source_A'], kwargs, env)
assert any_call_matches(os.spawnve, ['/path/sudo', '-S', '/path/borg', 'init',
'remote_location_b/source_A'], kwargs, env)
env['BORG_PASSPHRASE'] = 'passphrase2'
assert any_call_matches(os.spawnve, ['/path/sudo', '-S', '-u', 'user_b', '/path/borg', 'init',
'remote_location_b/source_B'], kwargs, env)
# assert os.spawnve.call_count == 3
assert result.exit_code == 0
@mock_path
@mock_default_config('minimal.toml')
@patch('os.spawnve', return_value=0)
def test_backup_create_minimal(borg_mock):
# Set up
mock_globbing()
runner = CliRunner()
# Perform
result = runner.invoke(command_line.main, ['backup'], catch_exceptions=False)
# Assert
assert any_call_matches(os.spawnve, ['/path/borg', 'create',
'remote_location_a/source_A::archive_name', 'pathA1', 'pathA2'], {}, {})
assert any_call_matches(os.spawnve, ['/path/borg', 'create',
'remote_location_a/source_B::archive_name', 'pathB1', 'pathB2'], {}, {})
assert any_call_matches(os.spawnve, ['/path/borg', 'create',
'remote_location_b/source_A::archive_name', 'pathA1', 'pathA2'], {}, {})
assert any_call_matches(os.spawnve, ['/path/borg', 'create',
'remote_location_b/source_B::archive_name', 'pathB1', 'pathB2'], {}, {})
# assert os.spawnve.call_count == 4
assert result.exit_code == 0
@mock_path
@mock_default_config('maximal.toml')
@patch('os.spawnve', return_value=0)
def test_backup_create_maximal(borg_mock):
# TODO verify order of hook executions
# Set up
mock_globbing()
runner = CliRunner()
# Perform
result = runner.invoke(command_line.main, ['backup', '--create'], catch_exceptions=False)
# Assert
env = {
'BORG_RSH': 'ssh_command',
'BORG_PASSPHRASE': 'passphrase1',
'BORG_DISPLAY_PASSPHRASE': 'n',
}
kwargs = {
'info': True,
'remote-path': 'remote_borg_path',
'umask': '0007',
'stats': True,
'progress': True,
'exclude-from': 'exclude_file',
'exclude-caches': True,
'one-file-system': True,
'compression': 'lz4',
}
assert any_call_matches(os.spawnve, ['/path/pre-create-A.sh'], {}, {})
assert any_call_matches(os.spawnve, ['/path/sudo', '-S', '/path/borg', 'create',
'remote_location_a/source_A::archive_name', 'pathA1', 'pathA2'], kwargs, env)
assert any_call_matches(os.spawnve, ['/path/sudo', '-S', '/path/borg', 'create',
'remote_location_b/source_A::archive_name', 'pathA1', 'pathA2'], kwargs, env)
assert any_call_matches(os.spawnve, ['/path/sudo', '-S', '/path/post-create-A.sh'], {}, {})
env['BORG_PASSPHRASE'] = 'passphrase2'
assert any_call_matches(os.spawnve, ['/path/sudo', '-S', '-u', 'user_b', '/path/pre-create-B.sh'], {'verbose': True}, {})
assert any_call_matches(os.spawnve, ['/path/sudo', '-S', '-u', 'user_b', '/path/borg', 'create',
'remote_location_b/source_B::archive_name', 'pathB1', 'pathB2'], kwargs, env)
assert any_call_matches(os.spawnve, ['/path/sudo', '-S', '-u', 'hook_user', '/path/post-create-B.sh'], {}, {})
# assert os.spawnve.call_count == 7
assert result.exit_code == 0
| grensjo/borg-summon | test/test_cmd.py | Python | mit | 7,815 |
"""Compute statistics over midi files.
Usage
-----
# Run with default parameters
$ python midi_tools.py "data/*.mid" stats.json
# Run with full verbosity
$ python midi_tools.py "data/*.mid" stats.json --verbose 50
# Run with only one CPU
$ python midi_tools.py "data/*.mid" stats.json --n_jobs 1
# Run with two CPUs and a verbosity level of 20
$ python midi_tools.py "data/*.mid" stats.json --n_jobs 2 --verbose 20
"""
import argparse
import glob
from joblib import Parallel, delayed
import json
import os
import pretty_midi
def compute_pitch_histogram(filename):
"""Compute weighted pitch counts over a MIDI file.
Parameters
----------
filename : str
Path to a midi file on disk.
Returns
-------
counts : dict
Pitch counts over the file, keyed by pitch class.
"""
pitch_counts = {pc: 0 for pc in range(12)}
name = os.path.split(filename)[-1]
try:
midi = pretty_midi.PrettyMIDI(filename)
for inst in midi.instruments:
if inst.is_drum:
continue
for note in inst.notes:
pc = note.pitch % 12
pitch_counts[pc] += (note.end - note.start)
except IOError as derp:
print("woah buddy, {} died: {}".format(name, derp))
finally:
return {'name': name,
'pitches': pitch_counts}
def process_many(filenames, n_jobs, verbose):
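    """Compute pitch histograms for all files in parallel with a joblib pool."""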
pool = Parallel(verbose=verbose, n_jobs=n_jobs)
fx = delayed(compute_pitch_histogram)
return pool(fx(fn) for fn in filenames)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"filepattern", type=str,
help="Filepattern for finding MIDI files, e.g. 'data/*.mid'")
parser.add_argument(
"output_file", type=str,
help="Output file for writing results, e.g. 'data.json'")
parser.add_argument(
"--n_jobs", metavar='n_jobs', type=int, default=-2,
help="Number of CPUs to use for processing.")
parser.add_argument(
"--verbose", metavar='verbose', type=int, default=0,
help="Verbosity level for writing outputs.")
args = parser.parse_args()
filenames = glob.glob(args.filepattern)
results = process_many(filenames, args.n_jobs, args.verbose)
with open(args.output_file, 'w') as fp:
json.dump(results, fp, indent=2)
| ejhumphrey/osr-intro | midi_tools.py | Python | mit | 2,374 |
#!/usr/bin/env python
# coding: utf-8
from .settings_manager import Settings
__all__ = ['settings']
settings = Settings()
| ymero/workin | workin/conf/__init__.py | Python | bsd-3-clause | 125 |
import random
import threading
class Proxy(object):
def __init__(self, proxy_string=None):
if proxy_string:
self.parse_proxy_string(proxy_string)
else:
self.proxy_string = None
def parse_proxy_string(self, proxy_string):
split_string = proxy_string.strip('\n').split(':')
self.ip = split_string[0]
self.port = split_string[1]
self.proxy_string = '{0}:{1}'.format(self.ip, self.port)
self.authenticated = len(split_string) == 4
if self.authenticated:
self.username = split_string[2]
self.password = split_string[3]
self.proxy_string = '{0}:{1}@{2}'.format(self.username, self.password, self.proxy_string)
def get_dict(self):
return {
'http': 'http://{}'.format(self.proxy_string),
'https': 'https://{}'.format(self.proxy_string)
} if self.proxy_string else {}
class ProxyManager(object):
def __init__(self, proxy_file_path=None):
self.proxies = self.load_proxies_from_file(proxy_file_path) if proxy_file_path else [Proxy()]
self.lock = threading.Lock()
self.current_proxy = 0
@staticmethod
def load_proxies_from_file(proxy_file_path):
proxies = []
with open(proxy_file_path) as proxy_file:
for proxy_string in proxy_file.readlines():
proxies.append(Proxy(proxy_string))
return proxies
def random_proxy(self):
return random.choice(self.proxies)
def next_proxy(self):
        # Do the wrap-around check and the increment under the same lock so
        # concurrent callers never read a stale or out-of-range index.
        with self.lock:
            if self.current_proxy >= len(self.proxies):
                self.current_proxy = 0
            proxy = self.proxies[self.current_proxy]
            self.current_proxy += 1
        return proxy
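
# --- Illustrative usage sketch (assumes a "proxies.txt" file with one
# "ip:port" or "ip:port:user:pass" entry per line, and the requests library) ---
#
# import requests
# manager = ProxyManager('proxies.txt')
# response = requests.get('https://example.com',
#                         proxies=manager.next_proxy().get_dict())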
| kfichter/proxy-manager | proxymanager/manager.py | Python | mit | 1,780 |
import ssl
from amqpstorm import AMQPInvalidArgument
from amqpstorm import Connection
from amqpstorm.tests.utility import TestFramework
class ConnectionExceptionTests(TestFramework):
def test_connection_set_hostname(self):
connection = Connection('127.0.0.1', 'guest', 'guest', lazy=True)
self.assertEqual(connection.parameters['username'], 'guest')
def test_connection_set_username(self):
connection = Connection('127.0.0.1', 'guest', 'guest', lazy=True)
self.assertEqual(connection.parameters['username'], 'guest')
def test_connection_set_password(self):
connection = Connection('127.0.0.1', 'guest', 'guest', lazy=True)
self.assertEqual(connection.parameters['username'], 'guest')
def test_connection_set_parameters(self):
connection = Connection(
'127.0.0.1', 'guest', 'guest',
virtual_host='travis',
heartbeat=120,
timeout=180,
ssl=True,
ssl_options={
'ssl_version': ssl.PROTOCOL_TLSv1
},
lazy=True
)
self.assertEqual(connection.parameters['virtual_host'], 'travis')
self.assertEqual(connection.parameters['heartbeat'], 120)
self.assertEqual(connection.parameters['timeout'], 180)
self.assertEqual(connection.parameters['ssl'], True)
self.assertEqual(connection.parameters['ssl_options']['ssl_version'],
ssl.PROTOCOL_TLSv1)
def test_connection_invalid_hostname(self):
self.assertRaisesRegex(
AMQPInvalidArgument,
'hostname should be a string',
Connection, 1, 'guest', 'guest', lazy=True
)
def test_connection_invalid_username(self):
self.assertRaisesRegex(
AMQPInvalidArgument,
'username should be a string',
Connection, '127.0.0.1', 2, 'guest', lazy=True
)
self.assertRaisesRegex(
AMQPInvalidArgument,
'username should be a string',
Connection, '127.0.0.1', None, 'guest', lazy=True
)
def test_connection_invalid_password(self):
self.assertRaisesRegex(
AMQPInvalidArgument,
'password should be a string',
Connection, '127.0.0.1', 'guest', 3, lazy=True
)
self.assertRaisesRegex(
AMQPInvalidArgument,
'password should be a string',
Connection, '127.0.0.1', 'guest', None, lazy=True
)
def test_connection_invalid_virtual_host(self):
self.assertRaisesRegex(
AMQPInvalidArgument,
'virtual_host should be a string',
Connection, '127.0.0.1', 'guest', 'guest', virtual_host=4,
lazy=True
)
self.assertRaisesRegex(
AMQPInvalidArgument,
'virtual_host should be a string',
Connection, '127.0.0.1', 'guest', 'guest', virtual_host=None,
lazy=True
)
def test_connection_invalid_port(self):
self.assertRaisesRegex(
AMQPInvalidArgument,
'port should be an integer',
Connection, '127.0.0.1', 'guest', 'guest', port='', lazy=True
)
self.assertRaisesRegex(
AMQPInvalidArgument,
'port should be an integer',
Connection, '127.0.0.1', 'guest', 'guest', port=None, lazy=True
)
def test_connection_invalid_heartbeat(self):
self.assertRaisesRegex(
AMQPInvalidArgument,
'heartbeat should be an integer',
Connection, '127.0.0.1', 'guest', 'guest', heartbeat='5',
lazy=True
)
self.assertRaisesRegex(
AMQPInvalidArgument,
'heartbeat should be an integer',
Connection, '127.0.0.1', 'guest', 'guest', heartbeat=None,
lazy=True
)
def test_connection_invalid_timeout(self):
self.assertRaisesRegex(
AMQPInvalidArgument,
'timeout should be an integer or float',
Connection, '127.0.0.1', 'guest', 'guest', timeout='6', lazy=True
)
self.assertRaisesRegex(
AMQPInvalidArgument,
'timeout should be an integer or float',
Connection, '127.0.0.1', 'guest', 'guest', timeout=None, lazy=True
)
def test_connection_invalid_timeout_on_channel(self):
connection = Connection(
'127.0.0.1', 'guest', 'guest', timeout=0.1,
lazy=True
)
self.assertRaisesRegex(
AMQPInvalidArgument,
'rpc_timeout should be an integer',
connection.channel, None
)
| eandersson/amqpstorm | amqpstorm/tests/unit/connection/test_connection_exception.py | Python | mit | 4,722 |
#!/usr/bin/env python3
"""Get a TSV of (insertion size, length, seq, genome) sampled from a HAL
file."""
from argparse import ArgumentParser
from sonLib.bioio import popenCatch, system, getTempFile, fastaRead
from jobTree.scriptTree.stack import Stack
from jobTree.scriptTree.target import Target
def countMaskedBases(string):
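    """Count bases that are not uppercase A, C, G or T, i.e. soft-masked
    (lowercase) or ambiguous characters such as N."""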
ret = 0
for char in string:
if char != 'A' and char != 'C' and char != 'T' and char != 'G':
ret += 1
return ret
class Setup(Target):
def __init__(self, opts):
Target.__init__(self)
self.opts = opts
def run(self):
genomes = popenCatch("halStats --genomes %s" % self.opts.halPath).split()
# main outputs, entirely inserted sequence outputs, total inserted bases outputs
outputss = [[], [], []]
for genome in genomes:
# Get a temp file to hold the genome's output, which will
# be concatenated with the others at the end
tempOutput = getTempFile(rootDir=self.getGlobalTempDir())
outputss[0].append(tempOutput)
# Create a temp file to hold entirely inserted seqs, if needed
tempEntirelyInsertedSequencesPath = None
if self.opts.entirelyInsertedSequencesPath is not None:
tempEntirelyInsertedSequencesPath = getTempFile(rootDir=self.getGlobalTempDir())
outputss[1].append(tempEntirelyInsertedSequencesPath)
# Create a temp file to hold total inserted bases, if needed
tempTotalInsertedBasesPath = None
if self.opts.totalInsertedBasesPath is not None:
tempTotalInsertedBasesPath = getTempFile(rootDir=self.getGlobalTempDir())
outputss[2].append(tempTotalInsertedBasesPath)
self.addChildTarget(ExtractInsertions(self.opts.halPath, genome, tempOutput, self.opts.samplePerGenome, self.opts.samples, self.opts.noGaps, tempEntirelyInsertedSequencesPath, tempTotalInsertedBasesPath))
self.setFollowOnTarget(ReduceOutputs(outputss, [self.opts.output, self.opts.entirelyInsertedSequencesPath, self.opts.totalInsertedBasesPath], [not self.opts.samplePerGenome, False, False], [self.opts.samples, None, None], ['insertionSize\tgenome\tseq\tmaskedBases', 'insertionSize\tgenome\tseq\tmaskedBases', 'genome\ttotalInsertedBases']))
class ExtractInsertions(Target):
def __init__(self, halPath, genome, output, doSampling, numSamples, removeGaps, entirelyInsertedSequencePath, totalInsertedBasesPath):
Target.__init__(self)
self.halPath = halPath
self.genome = genome
self.output = output
self.doSampling = doSampling
self.numSamples = numSamples
self.removeGaps = removeGaps
self.entirelyInsertedSequencePath = entirelyInsertedSequencePath
self.totalInsertedBasesPath = totalInsertedBasesPath
def getFastaDict(self):
temp = getTempFile(rootDir=self.getGlobalTempDir())
system("hal2fasta %s %s > %s" % (self.halPath, self.genome, temp))
ret = {}
for header, seq in fastaRead(temp):
ret[header] = seq
return ret
def logEntirelyInsertedSequences(self, fastaDict, chromSizes, insertionBed):
outFile = open(self.entirelyInsertedSequencePath, 'w')
for line in open(insertionBed):
fields = line.split()
if len(fields) >= 3:
seq = fields[0]
start = int(fields[1])
end = int(fields[2])
if end - start == chromSizes[seq]:
dna = fastaDict[seq][start:end]
maskedBases = countMaskedBases(dna)
outFile.write("%d\t%s\t%s\t%s\n" % (end - start, self.genome, seq, maskedBases))
def logTotalInsertedBases(self, insertionBed):
outFile = open(self.totalInsertedBasesPath, 'w')
total = 0
for line in open(insertionBed):
fields = line.split()
if len(fields) >= 3:
total += int(fields[2]) - int(fields[1])
outFile.write('%s\t%d\n' % (self.genome, total))
def run(self):
fastaDict = self.getFastaDict()
chromSizes = dict([(x[0], len(x[1])) for x in list(fastaDict.items())])
insertionBed = getTempFile(rootDir=self.getGlobalTempDir())
system("halAlignedExtract --complement %s %s > %s" % (self.halPath, self.genome, insertionBed))
if self.entirelyInsertedSequencePath is not None:
# Look for insertions that cover an entire sequence
self.logEntirelyInsertedSequences(fastaDict, chromSizes, insertionBed)
if self.totalInsertedBasesPath is not None:
# Output the total number of inserted bases in this genome
self.logTotalInsertedBases(insertionBed)
if self.doSampling:
# Sample per-genome instead of overall
temp = getTempFile(rootDir=self.getGlobalTempDir())
system("shuf %s | head -n %d > %s" % (insertionBed, self.numSamples, temp))
system("mv %s %s" % (temp, insertionBed))
outFile = open(self.output, 'w')
for line in open(insertionBed):
fields = line.split()
if len(fields) >= 3:
seq = fields[0]
start = int(fields[1])
end = int(fields[2])
dna = fastaDict[seq][start:end]
if self.removeGaps:
# Get rid of gaps.
if 'N' in dna or 'n' in dna:
# Found a gap
continue
maskedBases = countMaskedBases(dna)
outFile.write("%d\t%s\t%s\t%s\n" % (end - start, self.genome, seq, maskedBases))
outFile.close()
class ReduceOutputs(Target):
def __init__(self, outputss, outPaths, doSamplings, numSampless, headers):
Target.__init__(self)
self.outputss = outputss
self.outPaths = outPaths
self.doSamplings = doSamplings
self.numSampless = numSampless
self.headers = headers
def run(self):
for outputs, outPath, doSampling, numSamples, header in zip(self.outputss, self.outPaths, self.doSamplings, self.numSampless, self.headers):
if outPath is None:
# No output for this file
continue
outFile = open(outPath, 'w')
if header is not None:
outFile.write('%s\n' % (header))
for output in outputs:
for line in open(output):
outFile.write(line)
if doSampling:
# Sample overall instead of per-genome
temp = getTempFile(rootDir=self.getGlobalTempDir())
system("shuf %s | head -n %d > %s" % (outPath, numSamples, temp))
system("mv %s %s" % (temp, outPath))
if __name__ == '__main__':
from getInsertionStats import * # required for jobTree
parser = ArgumentParser(description=__doc__)
parser.add_argument('halPath', help='hal file')
parser.add_argument('output', help='output tsv for a sample of insertions')
parser.add_argument('--samples', help='number of samples', default=10000, type=int)
parser.add_argument('--samplePerGenome', action="store_true", help="sample n samples per genome instead of n samples overall")
parser.add_argument('--noGaps', action="store_true", help="remove any sequences with Ns in them")
parser.add_argument('--entirelyInsertedSequencesPath', help="tsv to store information about any sequences that are completely unaligned")
parser.add_argument('--totalInsertedBasesPath', help="tsv to store total inserted bases per-genome")
Stack.addJobTreeOptions(parser)
opts = parser.parse_args()
Stack(Setup(opts)).startJobTree(opts)
| glennhickey/hal | extra/insertionStats/getInsertionStats.py | Python | mit | 7,825 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet.green import httplib
from lxml import etree
import mox
from oslo.config import cfg
import webob
import webob.dec
import webob.exc
from nova.api import ec2
from nova import context
from nova import exception
from nova.openstack.common import timeutils
from nova import test
from nova import wsgi
CONF = cfg.CONF
@webob.dec.wsgify
def conditional_forbid(req):
"""Helper wsgi app returns 403 if param 'die' is 1."""
if 'die' in req.params and req.params['die'] == '1':
raise webob.exc.HTTPForbidden()
return 'OK'
class LockoutTestCase(test.NoDBTestCase):
"""Test case for the Lockout middleware."""
def setUp(self): # pylint: disable=C0103
super(LockoutTestCase, self).setUp()
timeutils.set_time_override()
self.lockout = ec2.Lockout(conditional_forbid)
def tearDown(self): # pylint: disable=C0103
timeutils.clear_time_override()
super(LockoutTestCase, self).tearDown()
def _send_bad_attempts(self, access_key, num_attempts=1):
"""Fail x."""
for i in xrange(num_attempts):
req = webob.Request.blank('/?AWSAccessKeyId=%s&die=1' % access_key)
self.assertEqual(req.get_response(self.lockout).status_int, 403)
def _is_locked_out(self, access_key):
"""Sends a test request to see if key is locked out."""
req = webob.Request.blank('/?AWSAccessKeyId=%s' % access_key)
return (req.get_response(self.lockout).status_int == 403)
def test_lockout(self):
self._send_bad_attempts('test', CONF.lockout_attempts)
self.assertTrue(self._is_locked_out('test'))
def test_timeout(self):
self._send_bad_attempts('test', CONF.lockout_attempts)
self.assertTrue(self._is_locked_out('test'))
timeutils.advance_time_seconds(CONF.lockout_minutes * 60)
self.assertFalse(self._is_locked_out('test'))
def test_multiple_keys(self):
self._send_bad_attempts('test1', CONF.lockout_attempts)
self.assertTrue(self._is_locked_out('test1'))
self.assertFalse(self._is_locked_out('test2'))
timeutils.advance_time_seconds(CONF.lockout_minutes * 60)
self.assertFalse(self._is_locked_out('test1'))
self.assertFalse(self._is_locked_out('test2'))
def test_window_timeout(self):
self._send_bad_attempts('test', CONF.lockout_attempts - 1)
self.assertFalse(self._is_locked_out('test'))
timeutils.advance_time_seconds(CONF.lockout_window * 60)
self._send_bad_attempts('test', CONF.lockout_attempts - 1)
self.assertFalse(self._is_locked_out('test'))
class ExecutorTestCase(test.NoDBTestCase):
def setUp(self):
super(ExecutorTestCase, self).setUp()
self.executor = ec2.Executor()
def _execute(self, invoke):
class Fake(object):
pass
fake_ec2_request = Fake()
fake_ec2_request.invoke = invoke
fake_wsgi_request = Fake()
fake_wsgi_request.environ = {
'nova.context': context.get_admin_context(),
'ec2.request': fake_ec2_request,
}
return self.executor(fake_wsgi_request)
def _extract_message(self, result):
tree = etree.fromstring(result.body)
return tree.findall('./Errors')[0].find('Error/Message').text
def _extract_code(self, result):
tree = etree.fromstring(result.body)
return tree.findall('./Errors')[0].find('Error/Code').text
def test_instance_not_found(self):
def not_found(context):
raise exception.InstanceNotFound(instance_id=5)
result = self._execute(not_found)
self.assertIn('i-00000005', self._extract_message(result))
self.assertEqual('InvalidInstanceID.NotFound',
self._extract_code(result))
def test_instance_not_found_none(self):
def not_found(context):
raise exception.InstanceNotFound(instance_id=None)
# NOTE(mikal): we want no exception to be raised here, which was what
# was happening in bug/1080406
result = self._execute(not_found)
self.assertIn('None', self._extract_message(result))
self.assertEqual('InvalidInstanceID.NotFound',
self._extract_code(result))
def test_snapshot_not_found(self):
def not_found(context):
raise exception.SnapshotNotFound(snapshot_id=5)
result = self._execute(not_found)
self.assertIn('snap-00000005', self._extract_message(result))
self.assertEqual('InvalidSnapshot.NotFound',
self._extract_code(result))
def test_volume_not_found(self):
def not_found(context):
raise exception.VolumeNotFound(volume_id=5)
result = self._execute(not_found)
self.assertIn('vol-00000005', self._extract_message(result))
self.assertEqual('InvalidVolume.NotFound', self._extract_code(result))
class FakeResponse(object):
reason = "Test Reason"
def __init__(self, status=400):
self.status = status
def read(self):
return '{}'
class KeystoneAuthTestCase(test.NoDBTestCase):
def setUp(self):
super(KeystoneAuthTestCase, self).setUp()
self.kauth = ec2.EC2KeystoneAuth(conditional_forbid)
def _validate_ec2_error(self, response, http_status, ec2_code):
self.assertEqual(response.status_code, http_status,
'Expected HTTP status %s' % http_status)
root_e = etree.XML(response.body)
self.assertEqual(root_e.tag, 'Response',
"Top element must be Response.")
errors_e = root_e.find('Errors')
error_e = errors_e[0]
code_e = error_e.find('Code')
self.assertIsNotNone(code_e, "Code element must be present.")
self.assertEqual(code_e.text, ec2_code)
def test_no_signature(self):
req = wsgi.Request.blank('/test')
resp = self.kauth(req)
self._validate_ec2_error(resp, 400, 'AuthFailure')
def test_no_key_id(self):
req = wsgi.Request.blank('/test')
req.GET['Signature'] = 'test-signature'
resp = self.kauth(req)
self._validate_ec2_error(resp, 400, 'AuthFailure')
def test_communication_failure(self):
req = wsgi.Request.blank('/test')
req.GET['Signature'] = 'test-signature'
req.GET['AWSAccessKeyId'] = 'test-key-id'
conn = httplib.HTTPConnection('/mock')
self.mox.StubOutWithMock(httplib.HTTPConnection, 'request')
self.mox.StubOutWithMock(httplib.HTTPConnection, 'getresponse')
conn.request('POST', mox.IgnoreArg(), body=mox.IgnoreArg(),
headers=mox.IgnoreArg())
resp = FakeResponse()
conn.getresponse().AndReturn(resp)
self.mox.ReplayAll()
resp = self.kauth(req)
self._validate_ec2_error(resp, 400, 'AuthFailure')
def test_no_result_data(self):
req = wsgi.Request.blank('/test')
req.GET['Signature'] = 'test-signature'
req.GET['AWSAccessKeyId'] = 'test-key-id'
conn = httplib.HTTPConnection('/mock')
self.mox.StubOutWithMock(httplib.HTTPConnection, 'request')
self.mox.StubOutWithMock(httplib.HTTPConnection, 'getresponse')
self.mox.StubOutWithMock(httplib.HTTPConnection, 'close')
conn.request('POST', mox.IgnoreArg(), body=mox.IgnoreArg(),
headers=mox.IgnoreArg())
resp = FakeResponse(200)
conn.getresponse().AndReturn(resp)
conn.close()
self.mox.ReplayAll()
resp = self.kauth(req)
self._validate_ec2_error(resp, 400, 'AuthFailure')
| berrange/nova | nova/tests/api/ec2/test_middleware.py | Python | apache-2.0 | 8,424 |
from enum import IntEnum
class Commands(IntEnum):
WALK_FORWARD = 1
DO_PUSHUPS = 2
STAND_UP = 3
STOP = 5
HOME = 6
READY = 10
DONE = 19
SUCCESS = 20
FAILURE = 21 | bobbyluig/Eclipse | src/cerebral/pack2/commands.py | Python | mit | 191 |
# -*- coding: utf-8 -*-
'''Letter statistics
'''
# http://www.prezydent.pl/prezydent/priorytety#nowoczesny_patriotyzm
TEXT = '''
W opinii Prezydenta RP patriotyzm – pozostając miłością Ojczyzny i troską o jej bezpieczeństwo – przejawia się w budowaniu nowoczesnego państwa, w działaniach obywatelskich na rzecz regionu oraz miejsca, w którym mieszkamy i pracujemy. Osiągnięcia 25-lat wolności kształtują poczucie dumy z sukcesu Polski. Dla Bronisława Komorowskiego szczególne znaczenie mają zaplanowane na cały 2014 rok inicjatywy, które będą przypominały światu, że przemiany demokratyczne w Europie Środkowo-Wschodniej i upadek komunizmu miały początek właśnie w Polsce.
'''
# http://budapest.hu/Lapok/default.aspx
#TEXT = '''
#A városvezetés kész tervekkel rendelkezik a ciklusra; elfogadták Budapest új városfejlesztési koncepcióját, a területfejlesztési koncepciót, a kerületekkel közösen megalkották a tematikus fejlesztési programokat, elkészült az új, integrált településfejlesztési stratégia, elfogadták a közlekedésfejlesztésről szóló Balázs Mór-terv egyeztetési változatát, a Duna-menti területek fejlesztési tanulmánytervét, valamint szintén elkészült a Margitsziget fejlesztési koncepciója.
#'''
# http://www.insse.ro/cms/ro/content/prezentare-generala
#TEXT = '''
#Statistica oficiala în România se desfasoara prin serviciile de statistica oficiala si este organizata si coordonata de Institutul National de Statistica, organ de specialitate al administratiei publice centrale, cu personalitate juridica, în subordinea Guvernului, finantat de la bugetul de stat.
#'''
def main():
    # Count how often each letter appears in TEXT (case-insensitive)
    # and print the counts, most frequent first.
    counts = {}
    for char in TEXT.lower():
        if char.isalpha():
            counts[char] = counts.get(char, 0) + 1
    for char in sorted(counts, key=counts.get, reverse=True):
        print('%s: %d' % (char, counts[char]))
if __name__ == '__main__':
main()
# vim: ts=4:sw=4:et:fdm=indent:ff=unix
| CodeCarrots/warsztaty | sesja03/letters_stat.py | Python | cc0-1.0 | 1,796 |
# -*- coding: utf-8 -*-
import requests
from modularodm import Q
from modularodm.exceptions import ModularOdmException
from framework.auth import Auth
from website import util
from website import settings
from website.project import new_node
from website.models import Node, MailRecord
def record_message(message, nodes_created, users_created):
record = MailRecord.objects.create(
data=message.raw,
)
    record.users_created.add(*users_created)
record.nodes_created.add(*nodes_created)
record.save()
def get_or_create_node(title, user):
"""Get or create node by title and creating user.
:param str title: Node title
:param User user: User creating node
:return: Tuple of (node, created)
"""
try:
node = Node.find_one(
Q('title', 'iexact', title)
& Q('contributors', 'eq', user)
)
return node, False
except ModularOdmException:
node = new_node('project', title, user)
return node, True
def provision_node(conference, message, node, user):
"""
:param Conference conference:
:param ConferenceMessage message:
:param Node node:
:param User user:
"""
auth = Auth(user=user)
node.update_node_wiki('home', message.text, auth)
if conference.admins.exists():
node.add_contributors(prepare_contributors(conference.admins.all()), log=False)
if not message.is_spam and conference.public_projects:
node.set_privacy('public', meeting_creation=True, auth=auth)
node.add_tag(message.conference_name, auth=auth)
node.add_tag(message.conference_category, auth=auth)
for systag in ['emailed', message.conference_name, message.conference_category]:
node.add_system_tag(systag, save=False)
if message.is_spam:
node.add_system_tag('spam', save=False)
node.save()
def prepare_contributors(admins):
return [
{
'user': admin,
'permissions': ['read', 'write', 'admin'],
'visible': False,
}
for admin in admins
]
def upload_attachment(user, node, attachment):
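    # Push a single message attachment into the node's osfstorage provider via a WaterButler upload URL.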
attachment.seek(0)
name = '/' + (attachment.filename or settings.MISSING_FILE_NAME)
content = attachment.read()
upload_url = util.waterbutler_url_for('upload', 'osfstorage', name, node, user=user, _internal=True)
requests.put(
upload_url,
data=content,
)
def upload_attachments(user, node, attachments):
for attachment in attachments:
upload_attachment(user, node, attachment)
| monikagrabowska/osf.io | website/conferences/utils.py | Python | apache-2.0 | 2,561 |
import gdb
import bp
from objdumpfile import ObjDumpFile
import subprocess
class MEMMAPIO:
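    # Addresses of the simulator's memory-mapped I/O registers, laid out as consecutive 32-bit words starting at 0x80000000.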
cycles, cyclesMSB, wasteCycles, wasteCyclesH, \
cyclesSinceReset, cyclesSinceCP, addrOfCP, addrOfRestoreCP, \
resetAfterCycles, do_reset, do_logging, wdt_seed, \
wdt_val, md5_0, md5_1, md5_2, md5_3, md5_4 = range(0x80000000,0x80000000+4*18,4)
def get_pc():
return gdb.parse_and_eval("$pc")
def cpu_reset():
"""Performs a hard reset on the processor"""
gdb.execute("set *{}=1".format(hex(MEMMAPIO.do_reset)));
def cycle_reset():
"""Sets the cyclesSinceReset to 0"""
gdb.execute("set *{}=0".format(hex(MEMMAPIO.cyclesSinceReset)));
def readword(addr):
"""Returns a word from a given memory address"""
output = gdb.execute("x/wx {}".format(addr), False, True)
return int(output.split()[1],16)
def readgword(addr):
"""Returns 2 words from a given memory address"""
output = gdb.execute("x/gwx {}".format(addr), False, True)
return int(output.split()[1],16)
def writeword(addr,val):
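    """Writes a word to the given memory address"""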
gdb.execute("set *{}={}".format(addr, val))
def logging(on):
if on:
writeword(MEMMAPIO.do_logging, 1)
else:
writeword(MEMMAPIO.do_logging, 0)
def start_sim(path, binfile, outf):
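    """Launches the simulator binary at 'path' on 'binfile' with the -g flag, sending its output to outf"""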
return subprocess.Popen([path, "-g", binfile], stdout=outf, stderr=outf)
def exit_handler(exit_event):
print 'exit_handler'
#print "exitcode: {}".format(exit_event.exit_code)
#gdb.execute("quit")
def setup(fname):
"""
Connects to thumbulator, registers our breakpoint handler, inserts our exit breakpoint
"""
cmd = 'file {}'.format(fname)
gdb.execute(cmd)
gdb.execute('target remote :272727')
#gdb.execute("set confirm off")
gdb.execute("set pagination off")
# register breakpoint handler
gdb.events.stop.connect(bp.stop_handler)
gdb.events.exited.connect(exit_handler)
#gdb.events.exited.connect(bp.stop_handler)
#dumpf = "".join(fname.split('.')[:-1])
#obj = ObjDumpFile("{}.lst".format(dumpf))
#addr = obj.get_addresses()
#return bp.HashBP("*{}".format(addr['exit']), True)
def cont():
gdb.execute("c")
def get_hash():
gdb.execute("set *{}=1".format(hex(MEMMAPIO.md5_0)))
    hi = gdb.execute("x/xg {}".format(MEMMAPIO.md5_1), False, True).split()[1]
    lo = gdb.execute("x/xg {}".format(MEMMAPIO.md5_3), False, True).split()[1]
return hi + lo[2:]
def cycles_since_fail():
return readword(MEMMAPIO.cyclesSinceReset)
def total_cycles():
return readgword(MEMMAPIO.cycles)
| impedimentToProgress/Ratchet | thumbulator/bareBench/python/commands.py | Python | mit | 2,442 |
from django.conf.urls import patterns, include, url
urlpatterns = patterns('',
url(r'^auth/signin/$', 'auth.views.signin', name='auth.signin'),
url(r'^auth/signup/$', 'auth.views.signup', name='auth.signup'),
url(r'^auth/signout/$', 'auth.views.signout', name='auth.signout'),
url(r'^auth/forgot/initialized/$', 'auth.views.forgot_initialized', name='auth.forgot_initialized'),
url(r'^auth/forgot/$', 'auth.views.forgot', name='auth.forgot'),
url(r'^auth/', include('django.contrib.auth.urls')),
url(r'^auth/$', 'auth.views.auth', name="auth"),
url('', include('social.apps.django_app.urls', namespace='social')), # social login
url('', include('django.contrib.auth.urls', namespace='auth')),
) | rajeshvaya/aftore.com | src/aftore/auth/urls.py | Python | mit | 731 |
import unittest
import itertools
import pickle
import rpy2.rinterface as rinterface
import sys, os, subprocess, time, tempfile, signal
rinterface.initr()
def onlyAQUAorWindows(function):
def res(self):
platform = rinterface.baseenv.get('.Platform')
platform_gui = [e for i, e in enumerate(platform.do_slot('names')) if e == 'GUI'][0]
platform_ostype = [e for i, e in enumerate(platform.do_slot('names')) if e == 'OS.type'][0]
if (platform_gui != 'AQUA') and (platform_ostype != 'windows'):
self.assertTrue(False) # cannot be tested outside GUI==AQUA or OS.type==windows
return None
else:
return function(self)
class CustomException(Exception):
pass
class EmbeddedRTestCase(unittest.TestCase):
def testConsolePrint(self):
tmp_file = tempfile.NamedTemporaryFile()
stdout = sys.stdout
sys.stdout = tmp_file
try:
rinterface.consolePrint('haha')
except Exception, e:
sys.stdout = stdout
raise e
sys.stdout = stdout
tmp_file.flush()
tmp_file.seek(0)
self.assertEquals('haha', ''.join(tmp_file.readlines()))
tmp_file.close()
def testCallErrorWhenEndedR(self):
if sys.version_info[0] == 2 and sys.version_info[1] < 6:
self.assertTrue(False) # cannot be tested with Python < 2.6
return None
import multiprocessing
def foo(queue):
import rpy2.rinterface as rinterface
rdate = rinterface.baseenv['date']
rinterface.endr(1)
try:
tmp = rdate()
res = (False, None)
except RuntimeError, re:
res = (True, re)
except Exception, e:
res = (False, e)
queue.put(res)
q = multiprocessing.Queue()
p = multiprocessing.Process(target = foo, args = (q,))
p.start()
res = q.get()
p.join()
self.assertTrue(res[0])
def testStr_typeint(self):
t = rinterface.baseenv['letters']
self.assertEquals('STRSXP', rinterface.str_typeint(t.typeof))
t = rinterface.baseenv['pi']
self.assertEquals('REALSXP', rinterface.str_typeint(t.typeof))
def testStr_typeint_invalid(self):
self.assertRaises(LookupError, rinterface.str_typeint, 99)
def testGet_initoptions(self):
options = rinterface.get_initoptions()
self.assertEquals(len(rinterface.initoptions),
len(options))
for o1, o2 in itertools.izip(rinterface.initoptions, options):
self.assertEquals(o1, o2)
def testSet_initoptions(self):
self.assertRaises(RuntimeError, rinterface.set_initoptions,
('aa', '--verbose', '--no-save'))
def testInterruptR(self):
if sys.version_info[0] == 2 and sys.version_info[1] < 6:
self.assertTrue(False) # Test unit currently requires Python >= 2.6
rpy_code = tempfile.NamedTemporaryFile(mode = 'w', suffix = '.py',
delete = False)
rpy_code_str = """
import sys
import rpy2.robjects as ro
def f(x):
pass
ro.rinterface.set_writeconsole(f)
rcode = "i <- 0; "
rcode += "while(TRUE) { "
rcode += "i <- i+1; "
rcode += "Sys.sleep(0.01); "
rcode += "}"
try:
ro.r(rcode)
except Exception, e:
sys.exit(0)
"""
rpy_code.write(rpy_code_str)
rpy_code.close()
child_proc = subprocess.Popen(('python', rpy_code.name))
time.sleep(1) # required for the SIGINT to function
# (appears like a bug w/ subprocess)
# (the exact sleep time migth be machine dependent :( )
child_proc.send_signal(signal.SIGINT)
time.sleep(1) # required for the SIGINT to function
ret_code = child_proc.poll()
self.assertFalse(ret_code is None) # Interruption failed
class CallbacksTestCase(unittest.TestCase):
def tearDown(self):
rinterface.set_writeconsole(rinterface.consolePrint)
rinterface.set_readconsole(rinterface.consoleRead)
        rinterface.set_flushconsole(rinterface.consoleFlush)
rinterface.set_choosefile(rinterface.chooseFile)
def testSetWriteConsole(self):
buf = []
def f(x):
buf.append(x)
rinterface.set_writeconsole(f)
self.assertEquals(rinterface.get_writeconsole(), f)
code = rinterface.SexpVector(["3", ], rinterface.STRSXP)
rinterface.baseenv["print"](code)
self.assertEquals('[1] "3"\n', str.join('', buf))
def testWriteConsoleWithError(self):
def f(x):
raise CustomException("Doesn't work.")
rinterface.set_writeconsole(f)
tmp_file = tempfile.NamedTemporaryFile()
stderr = sys.stderr
sys.stderr = tmp_file
try:
code = rinterface.SexpVector(["3", ], rinterface.STRSXP)
rinterface.baseenv["print"](code)
except Exception, e:
sys.stderr = stderr
raise e
sys.stderr = stderr
tmp_file.flush()
tmp_file.seek(0)
errorstring = ''.join(tmp_file.readlines())
self.assertTrue(errorstring.startswith('Traceback'))
tmp_file.close()
@onlyAQUAorWindows
def testSetFlushConsole(self):
flush = {'count': 0}
def f():
flush['count'] = flush['count'] + 1
rinterface.set_flushconsole(f)
self.assertEquals(rinterface.get_flushconsole(), f)
rinterface.baseenv.get("flush.console")()
self.assertEquals(1, flush['count'])
        rinterface.set_flushconsole(rinterface.consoleFlush)
@onlyAQUAorWindows
def testFlushConsoleWithError(self):
def f(prompt):
raise Exception("Doesn't work.")
rinterface.set_flushconsole(f)
tmp_file = tempfile.NamedTemporaryFile()
stderr = sys.stderr
sys.stderr = tmp_file
try:
res = rinterface.baseenv.get("flush.console")()
except Exception, e:
sys.stderr = stderr
raise e
sys.stderr = stderr
tmp_file.flush()
tmp_file.seek(0)
errorstring = ''.join(tmp_file.readlines())
self.assertTrue(errorstring.startswith('Traceback'))
tmp_file.close()
def testSetReadConsole(self):
yes = "yes\n"
def sayyes(prompt):
return yes
rinterface.set_readconsole(sayyes)
self.assertEquals(rinterface.get_readconsole(), sayyes)
res = rinterface.baseenv["readline"]()
self.assertEquals(yes.strip(), res[0])
rinterface.set_readconsole(rinterface.consoleRead)
def testReadConsoleWithError(self):
def f(prompt):
raise Exception("Doesn't work.")
rinterface.set_readconsole(f)
tmp_file = tempfile.NamedTemporaryFile()
stderr = sys.stderr
sys.stderr = tmp_file
try:
res = rinterface.baseenv["readline"]()
except Exception, e:
sys.stderr = stderr
raise e
sys.stderr = stderr
tmp_file.flush()
tmp_file.seek(0)
errorstring = ''.join(tmp_file.readlines())
self.assertTrue(errorstring.startswith('Traceback'))
tmp_file.close()
def testSetShowMessage(self):
def f(message):
return "foo"
rinterface.set_showmessage(f)
#FIXME: incomplete test
def testShowMessageWithError(self):
def f(prompt):
raise Exception("Doesn't work.")
rinterface.set_showmessage(f)
#FIXME: incomplete test
def testSetChooseFile(self):
me = "me"
def chooseMe(prompt):
return me
rinterface.set_choosefile(chooseMe)
self.assertEquals(rinterface.get_choosefile(), chooseMe)
res = rinterface.baseenv["file.choose"]()
self.assertEquals(me, res[0])
rinterface.set_choosefile(rinterface.chooseFile)
def testChooseFileWithError(self):
def noconsole(x):
pass
rinterface.set_writeconsole(noconsole) # reverted by the tearDown method
def f(prompt):
raise Exception("Doesn't work.")
rinterface.set_choosefile(f)
tmp_file = tempfile.NamedTemporaryFile()
stderr = sys.stderr
sys.stderr = tmp_file
try:
res = rinterface.baseenv["file.choose"]()
except rinterface.RRuntimeError, rre:
pass
except Exception, e:
sys.stderr = stderr
raise e
sys.stderr = stderr
tmp_file.flush()
tmp_file.seek(0)
errorstring = ''.join(tmp_file.readlines())
self.assertTrue(errorstring.startswith('Traceback'))
tmp_file.close()
def testSetShowFiles(self):
sf = []
def f(fileheaders, wtitle, fdel, pager):
sf.append(wtitle)
for tf in fileheaders:
sf.append(tf)
rinterface.set_showfiles(f)
file_path = rinterface.baseenv["file.path"]
r_home = rinterface.baseenv["R.home"]
filename = file_path(r_home(rinterface.StrSexpVector(("doc", ))),
rinterface.StrSexpVector(("COPYRIGHTS", )))
res = rinterface.baseenv["file.show"](filename)
self.assertEquals(filename[0], sf[1][1])
self.assertEquals('R Information', sf[0])
def testShowFilesWithError(self):
def f(fileheaders, wtitle, fdel, pager):
raise Exception("Doesn't work")
rinterface.set_showfiles(f)
file_path = rinterface.baseenv["file.path"]
r_home = rinterface.baseenv["R.home"]
filename = file_path(r_home(rinterface.StrSexpVector(("doc", ))),
rinterface.StrSexpVector(("COPYRIGHTS", )))
tmp_file = tempfile.NamedTemporaryFile()
stderr = sys.stderr
sys.stderr = tmp_file
try:
res = rinterface.baseenv["file.show"](filename)
except rinterface.RRuntimeError, rre:
pass
except Exception, e:
sys.stderr = stderr
raise e
sys.stderr = stderr
tmp_file.flush()
tmp_file.seek(0)
errorstring = ''.join(tmp_file.readlines())
self.assertTrue(errorstring.startswith('Traceback'))
tmp_file.close()
def testSetCleanUp(self):
orig_cleanup = rinterface.get_cleanup()
def f(saveact, status, runlast):
return False
rinterface.set_cleanup(f)
rinterface.set_cleanup(orig_cleanup)
def testCleanUp(self):
orig_cleanup = rinterface.get_cleanup()
def f(saveact, status, runlast):
return None
r_quit = rinterface.baseenv['q']
rinterface.set_cleanup(f)
self.assertRaises(rinterface.RRuntimeError, r_quit)
rinterface.set_cleanup(orig_cleanup)
class ObjectDispatchTestCase(unittest.TestCase):
def testObjectDispatchLang(self):
formula = rinterface.globalenv.get('formula')
obj = formula(rinterface.StrSexpVector(['y ~ x', ]))
self.assertTrue(isinstance(obj, rinterface.SexpVector))
self.assertEquals(rinterface.LANGSXP, obj.typeof)
def testObjectDispatchVector(self):
letters = rinterface.globalenv.get('letters')
self.assertTrue(isinstance(letters, rinterface.SexpVector))
def testObjectDispatchClosure(self):
#import pdb; pdb.set_trace()
help = rinterface.globalenv.get('sum')
self.assertTrue(isinstance(help, rinterface.SexpClosure))
def testObjectDispatchRawVector(self):
raw = rinterface.baseenv.get('raw')
rawvec = raw(rinterface.IntSexpVector((10, )))
self.assertEquals(rinterface.RAWSXP, rawvec.typeof)
class SerializeTestCase(unittest.TestCase):
def testUnserialize(self):
x = rinterface.IntSexpVector([1,2,3])
x_serialized = x.__getstate__()
x_again = rinterface.unserialize(x_serialized, x.typeof)
identical = rinterface.baseenv["identical"]
self.assertFalse(x.rsame(x_again))
self.assertTrue(identical(x, x_again)[0])
def testPickle(self):
x = rinterface.IntSexpVector([1,2,3])
f = tempfile.NamedTemporaryFile()
pickle.dump(x, f)
f.flush()
f.seek(0)
x_again = pickle.load(f)
f.close()
identical = rinterface.baseenv["identical"]
self.assertTrue(identical(x, x_again)[0])
def suite():
suite = unittest.TestLoader().loadTestsFromTestCase(EmbeddedRTestCase)
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(CallbacksTestCase))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ObjectDispatchTestCase))
return suite
if __name__ == '__main__':
tr = unittest.TextTestRunner(verbosity = 2)
tr.run(suite())
| lbouma/Cyclopath | pyserver/bin/rpy2/rinterface/tests/test_EmbeddedR.py | Python | apache-2.0 | 12,996 |
# Copyright (c) 2011 Jeff Garzik
#
# Previous copyright, from python-jsonrpc/jsonrpc/proxy.py:
#
# Copyright (c) 2007 Jan-Klaas Kollhof
#
# This file is part of jsonrpc.
#
# jsonrpc is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""HTTP proxy for opening RPC connection to bitcoind.
AuthServiceProxy has the following improvements over python-jsonrpc's
ServiceProxy class:
- HTTP connections persist for the life of the AuthServiceProxy object
(if server supports HTTP/1.1)
- sends protocol 'version', per JSON-RPC 1.1
- sends proper, incrementing 'id'
- sends Basic HTTP authentication headers
- parses all JSON numbers that look like floats as Decimal
- uses standard Python json lib
"""
import base64
import decimal
from http import HTTPStatus
import http.client
import json
import logging
import os
import socket
import time
import urllib.parse
HTTP_TIMEOUT = 30
USER_AGENT = "AuthServiceProxy/0.1"
log = logging.getLogger("VCoreRPC")
class JSONRPCException(Exception):
def __init__(self, rpc_error, http_status=None):
try:
errmsg = '%(message)s (%(code)i)' % rpc_error
except (KeyError, TypeError):
errmsg = ''
super().__init__(errmsg)
self.error = rpc_error
self.http_status = http_status
def EncodeDecimal(o):
if isinstance(o, decimal.Decimal):
return str(o)
raise TypeError(repr(o) + " is not JSON serializable")
class AuthServiceProxy():
__id_count = 0
# ensure_ascii: escape unicode as \uXXXX, passed to json.dumps
def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True):
self.__service_url = service_url
self._service_name = service_name
self.ensure_ascii = ensure_ascii # can be toggled on the fly by tests
self.__url = urllib.parse.urlparse(service_url)
user = None if self.__url.username is None else self.__url.username.encode('utf8')
passwd = None if self.__url.password is None else self.__url.password.encode('utf8')
authpair = user + b':' + passwd
self.__auth_header = b'Basic ' + base64.b64encode(authpair)
self.timeout = timeout
self._set_conn(connection)
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
if self._service_name is not None:
name = "%s.%s" % (self._service_name, name)
return AuthServiceProxy(self.__service_url, name, connection=self.__conn)
def _request(self, method, path, postdata):
'''
Do a HTTP request, with retry if we get disconnected (e.g. due to a timeout).
This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5.
'''
headers = {'Host': self.__url.hostname,
'User-Agent': USER_AGENT,
'Authorization': self.__auth_header,
'Content-type': 'application/json'}
if os.name == 'nt':
# Windows somehow does not like to re-use connections
# TODO: Find out why the connection would disconnect occasionally and make it reusable on Windows
self._set_conn()
try:
self.__conn.request(method, path, postdata, headers)
return self._get_response()
except http.client.BadStatusLine as e:
if e.line == "''": # if connection was closed, try again
self.__conn.close()
self.__conn.request(method, path, postdata, headers)
return self._get_response()
else:
raise
except (BrokenPipeError, ConnectionResetError):
# Python 3.5+ raises BrokenPipeError instead of BadStatusLine when the connection was reset
# ConnectionResetError happens on FreeBSD with Python 3.4
self.__conn.close()
self.__conn.request(method, path, postdata, headers)
return self._get_response()
def get_request(self, *args, **argsn):
AuthServiceProxy.__id_count += 1
log.debug("-{}-> {} {}".format(
AuthServiceProxy.__id_count,
self._service_name,
json.dumps(args or argsn, default=EncodeDecimal, ensure_ascii=self.ensure_ascii),
))
if args and argsn:
raise ValueError('Cannot handle both named and positional arguments')
return {'version': '1.1',
'method': self._service_name,
'params': args or argsn,
'id': AuthServiceProxy.__id_count}
def __call__(self, *args, **argsn):
postdata = json.dumps(self.get_request(*args, **argsn), default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
response, status = self._request('POST', self.__url.path, postdata.encode('utf-8'))
if response['error'] is not None:
raise JSONRPCException(response['error'], status)
elif 'result' not in response:
raise JSONRPCException({
'code': -343, 'message': 'missing JSON-RPC result'}, status)
elif status != HTTPStatus.OK:
raise JSONRPCException({
'code': -342, 'message': 'non-200 HTTP status code but no JSON-RPC error'}, status)
else:
return response['result']
def batch(self, rpc_call_list):
postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
log.debug("--> " + postdata)
response, status = self._request('POST', self.__url.path, postdata.encode('utf-8'))
if status != HTTPStatus.OK:
raise JSONRPCException({
'code': -342, 'message': 'non-200 HTTP status code but no JSON-RPC error'}, status)
return response
def _get_response(self):
req_start_time = time.time()
try:
http_response = self.__conn.getresponse()
except socket.timeout:
raise JSONRPCException({
'code': -344,
'message': '%r RPC took longer than %f seconds. Consider '
'using larger timeout for calls that take '
'longer to return.' % (self._service_name,
self.__conn.timeout)})
if http_response is None:
raise JSONRPCException({
'code': -342, 'message': 'missing HTTP response from server'})
content_type = http_response.getheader('Content-Type')
if content_type != 'application/json':
raise JSONRPCException(
{'code': -342, 'message': 'non-JSON HTTP response with \'%i %s\' from server' % (http_response.status, http_response.reason)},
http_response.status)
responsedata = http_response.read().decode('utf8')
response = json.loads(responsedata, parse_float=decimal.Decimal)
elapsed = time.time() - req_start_time
if "error" in response and response["error"] is None:
log.debug("<-%s- [%.6f] %s" % (response["id"], elapsed, json.dumps(response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
else:
log.debug("<-- [%.6f] %s" % (elapsed, responsedata))
return response, http_response.status
def __truediv__(self, relative_uri):
return AuthServiceProxy("{}/{}".format(self.__service_url, relative_uri), self._service_name, connection=self.__conn)
def _set_conn(self, connection=None):
port = 80 if self.__url.port is None else self.__url.port
if connection:
self.__conn = connection
self.timeout = connection.timeout
elif self.__url.scheme == 'https':
self.__conn = http.client.HTTPSConnection(self.__url.hostname, port, timeout=self.timeout)
else:
self.__conn = http.client.HTTPConnection(self.__url.hostname, port, timeout=self.timeout)
| gjhiggins/vcoincore | test/functional/test_framework/authproxy.py | Python | mit | 8,665 |
from openerp import models, fields, api
from openerp.tools.translate import _
import logging
#from fingerprint import Fingerprint
from dateutil import relativedelta
from datetime import datetime as dt
from dateutil import parser
import xlsxwriter
import StringIO
from io import BytesIO
import base64
import hashlib
import xmltodict
from math import modf
from lxml import etree
#from xmljson import badgerfish as bf
from xml.etree.ElementTree import fromstring
from json import dumps
import pywaves as pw
import requests
import base58
import rethinkdb as r
from subprocess import call
import os
import ast
import json
from openerp.exceptions import UserError
from web3 import Web3, HTTPProvider, IPCProvider
import hashlib
_logger = logging.getLogger(__name__)
class sign_sale_order(models.Model):
_inherit = 'sale.order'
signature_status = fields.Boolean('Sign')
signature_hash = fields.Char('Signature Hash')
gas_for_signature = fields.Float('Gas for signature',compute='_gas_for_signature')
gas_limit = fields.Float('Gas limit',compute='_gas_limit')
signature_timestamp = fields.Char('Signature timestamp')
result_of_check = fields.Char(default='Not checked')
@api.one
def getDocumentMD5(self):
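        """Return the MD5 hex digest of the order's incoterm field, used as the document checksum."""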
return hashlib.md5(str(self.incoterm)).hexdigest()
@api.one
def get_ethereum_addres(self):
ethereum_address = self.env['setting.connect'].search([('platforma','=','ethereum')])
result_ethereum_dic = {}
if ethereum_address:
result_ethereum_dic.update({'ethereum_address':ethereum_address[0].ethereum_pk,
'ethereum_interface': ethereum_address[0].ethereum_address,
'address_node':ethereum_address[0].ethereum_node_address})
return result_ethereum_dic
def _gas_for_signature(self):
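        # Estimate the gas needed to store the document hash in the Ethereum contract;
        # falls back to 0 when no Ethereum settings are configured or the estimate fails.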
ethereum_setting = {}
        if not self.get_ethereum_addres()[0]:
result_of_gas_estimate = 0
else:
date_of_synchronization = dt.now()
ethereum_setting = self.get_ethereum_addres()
ethereum_setting = ethereum_setting[0]
web3 = Web3(HTTPProvider(ethereum_setting['address_node']))
abi_json = ethereum_setting['ethereum_interface']
ethereum_contract_address = ethereum_setting['ethereum_address']
contract = web3.eth.contract(abi = json.loads(abi_json), address=ethereum_contract_address)
hash_of_synchronaze = '"'+base58.b58encode(str(date_of_synchronization))+'"'
md5 = self.getDocumentMD5()
md5_for_solidity = '"'+md5[0]+'"'
print hash_of_synchronaze
try:
result_of_gas_estimate = contract.estimateGas().setDocumentHash(str(hash_of_synchronaze),md5_for_solidity)
except:
result_of_gas_estimate = 0
self.gas_for_signature = result_of_gas_estimate
return result_of_gas_estimate
def _gas_limit(self):
ethereum_setting = {}
        if not self.get_ethereum_addres()[0]:
result_of_gas_limit = 0
else:
ethereum_setting = self.get_ethereum_addres()
ethereum_setting = ethereum_setting[0]
web3 = Web3(HTTPProvider(ethereum_setting['address_node']))
abi_json = ethereum_setting['ethereum_interface']
ethereum_contract_address = ethereum_setting['ethereum_address']
contract = web3.eth.contract(abi = json.loads(abi_json), address=ethereum_contract_address)
result_of_gas_limit = contract.call().getGasLimit()
self.gas_limit = result_of_gas_limit
return result_of_gas_limit
def signature_action(self):
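        # Store the document hash and timestamp in the Ethereum contract, record the
        # signature in journal.signature, and hand an XML summary to BigchainDB via
        # the external putbigchaindb.py script.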
ethereum_setting = {}
date_of_synchronization = dt.now()
ethereum_setting = {}
ethereum_setting = self.get_ethereum_addres()
ethereum_setting = ethereum_setting[0]
web3 = Web3(HTTPProvider(ethereum_setting['address_node']))
abi_json = ethereum_setting['ethereum_interface']
ethereum_contract_address = ethereum_setting['ethereum_address']
contract = web3.eth.contract(abi = json.loads(abi_json), address=ethereum_contract_address)
hash_of_synchronaze = '"'+base58.b58encode(str(date_of_synchronization))+'"'
print hash_of_synchronaze
md5 = self.getDocumentMD5()
md5_for_solidity = '"'+md5[0]+'"'
TransactionHashEthereum = contract.transact().setDocumentHash(str(hash_of_synchronaze),str(md5_for_solidity))
self.signature_timestamp = str(date_of_synchronization)
self.signature_hash = TransactionHashEthereum
self.signature_status = True
self.env['journal.signature'].create({'name':self.name,
'checksum':md5[0],
'hash_of_signature':TransactionHashEthereum,
'timestamp_of_document':self.signature_timestamp,
'date_of_signature':date_of_synchronization})
root = etree.Element("data")
sale_order_name = etree.SubElement(root,'name')
sale_order_name.text=self.name
sale_order_hash = etree.SubElement(root,'transaction_hash')
sale_order_hash.text=TransactionHashEthereum
sale_order_md5 = etree.SubElement(root,'md5')
sale_order_md5.text=md5[0]
xml_result = etree.tostring(root, pretty_print=False)
#xml_result = xml_result.replace('"','\\"')
#-------------------------------------------- write xml to temp file
file_to_save_with_path = '/tmp/'+self.name+str(date_of_synchronization)
temp_file = open(file_to_save_with_path,'w')
temp_file.write(xml_result)
temp_file.close()
string = '/usr/bin/putbigchaindb.py --xml="'+file_to_save_with_path+'"'
os.system(string)
def check_signature_action(self):
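        # Read the MD5 stored in the contract for this signature transaction and
        # compare it with the current document checksum.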
date_of_synchronization = dt.now()
ethereum_setting = self.get_ethereum_addres()
ethereum_setting = ethereum_setting[0]
web3 = Web3(HTTPProvider(ethereum_setting['address_node']))
abi_json = ethereum_setting['ethereum_interface']
ethereum_contract_address = ethereum_setting['ethereum_address']
contract = web3.eth.contract(abi = json.loads(abi_json), address=ethereum_contract_address)
get_transact = web3.eth.getTransaction(self.signature_hash)
timestamp = str(contract.call(get_transact).getDocumentHash().replace('"',''))
md5 = self.getDocumentMD5()
md5_from_contract = contract.call(get_transact).getDocumentMD5()
if str(md5_from_contract).replace('"', '') == md5[0]:
self.result_of_check = 'OK'
else:
self.result_of_check = 'Error Checksum'
class JournalOfSignature(models.Model):
_name = 'journal.signature'
name = fields.Char('Document Number')
hash_of_signature = fields.Char('Hash of signature')
checksum = fields.Char('Check sum of Document')
timestamp_of_document = fields.Char('Timestamp')
date_of_signature = fields.Date('Date of signature')
| stanta/darfchain | darfchain/models/sale_order.py | Python | gpl-3.0 | 7,313 |
import flask
from flask_mongoengine import MongoEngine
from tests import FlaskMongoEngineTestCase
class DummyEncoder(flask.json.JSONEncoder):
'''
An example encoder which a user may create and override
    the app's json_encoder with.
This class is a NO-OP, but used to test proper inheritance.
'''
class JSONAppTestCase(FlaskMongoEngineTestCase):
def dictContains(self, superset, subset):
for k, v in subset.items():
if not superset[k] == v:
return False
return True
def assertDictContains(self, superset, subset):
return self.assertTrue(self.dictContains(superset, subset))
def setUp(self):
super(JSONAppTestCase, self).setUp()
self.app.config['MONGODB_DB'] = 'testing'
self.app.config['TESTING'] = True
self.app.json_encoder = DummyEncoder
db = MongoEngine()
db.init_app(self.app)
self.db = db
def test_inheritance(self):
self.assertTrue(issubclass(self.app.json_encoder, DummyEncoder))
json_encoder_name = self.app.json_encoder.__name__
        # Since the class is dynamically derived, must compare class names
# rather than class objects.
self.assertEqual(json_encoder_name, 'MongoEngineJSONEncoder')
| losintikfos/flask-mongoengine | tests/test_json.py | Python | bsd-3-clause | 1,287 |
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/arcstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *arc_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("arc-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| ArcticCore/arcticcoin | share/qt/extract_strings_qt.py | Python | mit | 1,848 |
###
# Copyright 2008-2018 Diamond Light Source Ltd.
# This file is part of Diffcalc.
#
# Diffcalc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Diffcalc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Diffcalc. If not, see <http://www.gnu.org/licenses/>.
###
import platform
from math import pi, sqrt, acos, cos
try:
from numpy import matrix
except ImportError:
from numjy import matrix
DEBUG = False
try:
from gda.device.scannable.scannablegroup import \
ScannableMotionBase
except ImportError:
from diffcalc.gdasupport.minigda.scannable import \
ScannableMotionBase
from diffcalc.util import DiffcalcException, bound, SMALL, dot3
class _DynamicDocstringMetaclass(type):
def _get_doc(self):
return Qtrans.dynamic_docstring
__doc__ = property(_get_doc) # @ReservedAssignment
class Qtrans(ScannableMotionBase):
if platform.system() != 'Java':
__metaclass__ = _DynamicDocstringMetaclass # TODO: Removed to fix Jython
dynamic_docstring = 'qtrans scannable'
def _get_doc(self):
return Qtrans.dynamic_docstring
__doc__ = property(_get_doc) # @ReservedAssignment
def __init__(self, name, diffractometerObject, diffcalcObject):
self.diffhw = diffractometerObject
self._diffcalc = diffcalcObject
self.setName(name)
self.setInputNames([name])
self.setOutputFormat(['%7.5f'])
self.dynamic_class_doc = 'qtrans scannable'
def asynchronousMoveTo(self, newpos):
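        # Convert the requested scattering-vector projection into an hkl offset from
        # the reference vector and move the diffractometer to the matching angles.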
pos = self.diffhw.getPosition() # a tuple
(hkl_pos , _) = self._diffcalc.angles_to_hkl(pos)
nref_hkl = [i[0] for i in self._diffcalc._ub.ubcalc.n_hkl.tolist()]
pol, az_nref, sc = self._diffcalc._ub.ubcalc.calc_offset_for_hkl(hkl_pos, nref_hkl)
if pol < SMALL:
az_nref = 0
sc_nref_hkl = [sc * v for v in nref_hkl]
_ubm = self._diffcalc._ub.ubcalc._get_UB()
qvec = _ubm * matrix(hkl_pos).T
qvec_rlu = sqrt(dot3(qvec, qvec)) * self._diffcalc._ub.ubcalc.get_hkl_plane_distance(nref_hkl) / (2.*pi)
try:
newpol = acos(bound(newpos / qvec_rlu))
except AssertionError:
raise DiffcalcException("Scattering vector projection value of %.5f r.l.u. unreachable." % newpos)
try:
hkl_offset = self._diffcalc._ub.ubcalc.calc_hkl_offset(*sc_nref_hkl, pol=newpol, az=az_nref)
(pos, _) = self._diffcalc.hkl_to_angles(*hkl_offset)
except DiffcalcException, e:
if DEBUG:
raise
else:
raise DiffcalcException(e.message)
self.diffhw.asynchronousMoveTo(pos)
def getPosition(self):
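        # Return the current projection of the scattering vector onto the reference vector, in r.l.u.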
pos = self.diffhw.getPosition() # a tuple
(hkl_pos , _) = self._diffcalc.angles_to_hkl(pos)
nref_hkl = [i[0] for i in self._diffcalc._ub.ubcalc.n_hkl.tolist()]
pol = self._diffcalc._ub.ubcalc.calc_offset_for_hkl(hkl_pos, nref_hkl)[0]
_ubm = self._diffcalc._ub.ubcalc._get_UB()
qvec = _ubm * matrix(hkl_pos).T
sc = sqrt(dot3(qvec, qvec)) * self._diffcalc._ub.ubcalc.get_hkl_plane_distance(nref_hkl) / (2.*pi)
res = sc * cos(pol)
return res
def isBusy(self):
return self.diffhw.isBusy()
def waitWhileBusy(self):
return self.diffhw.waitWhileBusy()
def simulateMoveTo(self, newpos):
pos = self.diffhw.getPosition() # a tuple
(hkl_pos , _) = self._diffcalc.angles_to_hkl(pos)
nref_hkl = [i[0] for i in self._diffcalc._ub.ubcalc.n_hkl.tolist()]
pol, az_nref, sc = self._diffcalc._ub.ubcalc.calc_offset_for_hkl(hkl_pos, nref_hkl)
if pol < SMALL:
az_nref = 0
sc_nref_hkl = [sc * v for v in nref_hkl]
_ubm = self._diffcalc._ub.ubcalc._get_UB()
qvec = _ubm * matrix(hkl_pos).T
qvec_rlu = sqrt(dot3(qvec, qvec)) * self._diffcalc._ub.ubcalc.get_hkl_plane_distance(nref_hkl) / (2.*pi)
try:
newpol = acos(bound(newpos / qvec_rlu))
except AssertionError:
raise DiffcalcException("Scattering vector projection value of %.5f r.l.u. unreachable." % newpos)
try:
hkl_offset = self._diffcalc._ub.ubcalc.calc_hkl_offset(*sc_nref_hkl, pol=newpol, az=az_nref)
(pos, params) = self._diffcalc.hkl_to_angles(*hkl_offset)
except DiffcalcException, e:
if DEBUG:
raise
else:
raise DiffcalcException(e.message)
width = max(len(k) for k in (params.keys() + list(self.diffhw.getInputNames())))
fmt = ' %' + str(width) + 's : % 9.4f'
lines = ['simulated hkl: %9.4f %.4f %.4f' % (hkl_offset[0],hkl_offset[1],hkl_offset[2]),
self.diffhw.getName() + ' would move to:']
for idx, name in enumerate(self.diffhw.getInputNames()):
lines.append(fmt % (name, pos[idx]))
lines[-1] = lines[-1] + '\n'
for k in sorted(params):
lines.append(fmt % (k, params[k]))
return '\n'.join(lines)
| DiamondLightSource/diffcalc | diffcalc/gdasupport/scannable/qtrans.py | Python | gpl-3.0 | 5,532 |
##
#
# Copyright 2013 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
import sys
sys.path.append('../utils')
import time
import eureka
import jobs
import os
# the S3 prefix where the tests are located
GENIE_TEST_PREFIX = os.getenv("GENIE_TEST_PREFIX")
# get the serviceUrl from the eureka client
serviceUrl = eureka.EurekaClient().getServiceBaseUrl() + '/genie/v0/jobs'
def testJsonSubmitjob():
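    # Submits a Hive job with an explicit hiveVersion override and returns the Genie job ID.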
print "Running testJsonSubmitjob "
payload = '''
{
"jobInfo":
{
"jobName": "HIVE-VERSION-TEST",
"description": "This is a test",
"userName" : "genietest",
"groupName" : "hadoop",
"userAgent" : "laptop",
"jobType": "hive",
"configuration": "prod",
"schedule": "adHoc",
"hiveVersion": "0.8.1.7",
"cmdArgs": "-f hive.q",
"disableLogArchival": "true",
"fileDependencies":"''' + GENIE_TEST_PREFIX + '''/hive.q"
}
}
'''
print payload
print "\n"
return jobs.submitJob(serviceUrl, payload)
# driver method for all tests
if __name__ == "__main__":
print "Running unit tests:\n"
jobID = testJsonSubmitjob()
print "\n"
while True:
print jobs.getJobInfo(serviceUrl, jobID)
print "\n"
status = jobs.getJobStatus(serviceUrl, jobID)
print status
print "\n"
if (status != 'RUNNING') and (status != 'INIT'):
print "Final status: ", status
print "Job has terminated - exiting"
break
time.sleep(5)
| korrelate/genie | genie-web/src/test/python/jobs/hiveVersionOverrideTest.py | Python | apache-2.0 | 2,176 |
#!/usr/bin/env python
import rospy
from cv_bridge import CvBridge
import cv, cv2
import numpy
from sensor_msgs.msg import Image
import sys
bridge = CvBridge()
pub = rospy.Publisher("/image_out", Image)
shift_by = 128
def image_callback(image):
""" Shifts bit(s) from image and displays the result. """
image_cv = bridge.imgmsg_to_cv(image)
image_cv2 = numpy.asarray(image_cv)
image_cv2 = cv2.blur(image_cv2, (5, 5))
image_cv2 = image_cv2 // shift_by * shift_by
image.data = bridge.cv_to_imgmsg(cv.fromarray(image_cv2),
encoding=image.encoding).data
pub.publish(image)
if __name__ == "__main__":
rospy.init_node("bit_shifter")
shift_by = int(sys.argv[1])
rospy.loginfo('Shifting by a factor of {0}'.format(shift_by))
rospy.Subscriber("/camera/rgb/image_color", Image, image_callback)
rospy.spin()
| OSUrobotics/privacy-interfaces | filtering/text_filters/scripts/bit_shifter.py | Python | mit | 888 |
#!/usr/bin/python
# coding: utf-8
import commands
import re
import datetime
import os.path
now = datetime.datetime.today()
timestamp = now.strftime("%Y-%m-%d %H:%M:%S")
# recording state
recording_size = os.path.getsize("../chinachu/data/recording.json")
recording_state = "standby"
if 2 < recording_size:
# when not recording, this file is "[]"
recording_state = "recording"
# HDD state
hdparm = commands.getoutput("sudo hdparm -C /dev/disk/by-uuid/e297668b-f29c-494a-8836-0d40aedd5c37")
hdd_state = re.search(r"drive state is\:[ ]+(.+)", hdparm).group(1).strip()
# CPU temperature
sensors = commands.getoutput("sudo sensors")
cpu_temp = re.search(r"CPUTIN\:[ ]+\+([0-9.]+)", sensors).group(1).strip()
# Drive temperature
# hddtemp = commands.getoutput("sudo hddtemp /dev/sdb")
# hdd_temp_groups = re.search(r"\: ([0-9.]+)", hddtemp)
# if hdd_temp_groups:
# hdd_temp = hdd_temp_groups.group(1).strip()
# else:
# hdd_temp = ""
# USBRH
usb_temp = ""
usb_humi = ""
usbrh = commands.getoutput("sudo usbrh")
match = re.search(r"([0-9\-.]+) ([0-9\-.]+)", usbrh)
if match:
usb_temp = match.group(1).strip()
usb_humi = match.group(2).strip()
print timestamp + "," + recording_state + "," + hdd_state + "," + cpu_temp + "," + usb_temp + "," + usb_humi
| remonbonbon/rec-stats | cron.py | Python | mit | 1,259 |
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2006-2008 Lukáš Lalinský
# Copyright (C) 2014 Sophist-UK
# Copyright (C) 2014, 2018, 2020-2021 Laurent Monin
# Copyright (C) 2016-2018 Sambhav Kothari
# Copyright (C) 2018 Vishal Choudhary
# Copyright (C) 2019-2021 Philipp Wolfer
# Copyright (C) 2021 Bob Swift
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import uuid
from PyQt5 import (
QtCore,
QtGui,
QtWidgets,
)
from picard import log
from picard.config import (
Option,
get_config,
)
from picard.const import DOCS_BASE_URL
from picard.const.sys import (
IS_MACOS,
IS_WIN,
)
from picard.util import (
restore_method,
webbrowser2,
)
if IS_MACOS:
FONT_FAMILY_MONOSPACE = 'Menlo'
elif IS_WIN:
FONT_FAMILY_MONOSPACE = 'Consolas'
else:
FONT_FAMILY_MONOSPACE = 'Monospace'
class PreserveGeometry:
defaultsize = None
def __init__(self):
Option.add_if_missing("persist", self.opt_name(), QtCore.QByteArray())
Option.add_if_missing("persist", self.splitters_name(), {})
if getattr(self, 'finished', None):
self.finished.connect(self.save_geometry)
def opt_name(self):
return 'geometry_' + self.__class__.__name__
def splitters_name(self):
return 'splitters_' + self.__class__.__name__
def _get_lineage(self, widget):
"""Try to develop a unique lineage / ancestry to identify the specified widget.
Args:
widget (QtWidget): Widget to process.
Returns:
generator: full ancestry for the specified widget.
"""
parent = widget.parent()
if parent:
yield from self._get_lineage(parent)
yield widget.objectName() if widget.objectName() else widget.__class__.__name__
def _get_name(self, widget):
"""Return the name of the widget.
Args:
widget (QtWidget): Widget to process.
Returns:
str: The name of the widget or the lineage if there is no name assigned.
"""
name = widget.objectName()
if not name:
name = '.'.join(self._get_lineage(widget))
log.debug("Splitter does not have objectName(): %s" % name)
return name
@property
def _get_splitters(self):
try:
return {
self._get_name(splitter): splitter
for splitter in self.findChildren(QtWidgets.QSplitter)
}
except AttributeError:
return {}
@restore_method
def restore_geometry(self):
config = get_config()
geometry = config.persist[self.opt_name()]
if not geometry.isNull():
self.restoreGeometry(geometry)
elif self.defaultsize:
self.resize(self.defaultsize)
splitters = config.persist[self.splitters_name()]
seen = set()
for name, splitter in self._get_splitters.items():
if name in splitters:
splitter.restoreState(splitters[name])
seen.add(name)
# remove unused saved states that don't match any existing splitter names
for name in set(splitters) - seen:
del config.persist[self.splitters_name()][name]
def save_geometry(self):
config = get_config()
config.persist[self.opt_name()] = self.saveGeometry()
config.persist[self.splitters_name()] = {
name: bytearray(splitter.saveState())
for name, splitter in self._get_splitters.items()
}
class SingletonDialog:
_instance = None
@classmethod
def get_instance(cls, *args, **kwargs):
if not cls._instance:
cls._instance = cls(*args, **kwargs)
cls._instance.finished.connect(cls._on_dialog_finished)
return cls._instance
@classmethod
def show_instance(cls, *args, **kwargs):
instance = cls.get_instance(*args, **kwargs)
# Get the current parent
if hasattr(instance, 'parent'):
if callable(instance.parent):
parent = instance.parent()
else:
parent = instance.parent
else:
parent = None
# Update parent if changed
if 'parent' in kwargs and parent != kwargs['parent']:
instance.setParent(kwargs['parent'])
instance.show()
instance.raise_()
instance.activateWindow()
return instance
@classmethod
def _on_dialog_finished(cls):
cls._instance = None
class PicardDialog(QtWidgets.QDialog, PreserveGeometry):
help_url = None
flags = QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowTitleHint | QtCore.Qt.WindowCloseButtonHint
ready_for_display = QtCore.pyqtSignal()
def __init__(self, parent=None):
super().__init__(parent, self.flags)
self.__shown = False
self.ready_for_display.connect(self.restore_geometry)
def keyPressEvent(self, event):
if event.matches(QtGui.QKeySequence.Close):
self.close()
elif event.matches(QtGui.QKeySequence.HelpContents) and self.help_url:
self.show_help()
else:
super().keyPressEvent(event)
def showEvent(self, event):
if not self.__shown:
self.ready_for_display.emit()
self.__shown = True
return super().showEvent(event)
def show_help(self):
if self.help_url:
url = self.help_url
if url.startswith('/'):
url = DOCS_BASE_URL + url
webbrowser2.open(url)
# With py3, QObjects are no longer hashable unless they have
# an explicit __hash__ implemented.
# See: http://python.6.x6.nabble.com/QTreeWidgetItem-is-not-hashable-in-Py3-td5212216.html
class HashableTreeWidgetItem(QtWidgets.QTreeWidgetItem):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.id = uuid.uuid4()
def __eq__(self, other):
return self.id == other.id
def __hash__(self):
return hash(str(self.id))
class HashableListWidgetItem(QtWidgets.QListWidgetItem):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.id = uuid.uuid4()
def __eq__(self, other):
return self.id == other.id
def __hash__(self):
return hash(str(self.id))
| musicbrainz/picard | picard/ui/__init__.py | Python | gpl-2.0 | 7,063 |
import engine
import re
from sqlalchemy.sql import select
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import MetaData, Table
from sqlalchemy import Column, Integer, String, Text
Base = declarative_base()
class Message(Base):
__tablename__ = 'messages'
DASHES = '-'
id = Column(Integer, primary_key=True)
recipient = Column(String(255))
sender = Column(String(255))
who_from = Column(String(255))
subject = Column(String(255))
body_plain = Column(Text)
stripped_text = Column(Text)
timestamp = Column(Integer)
signature = Column(String(255))
message_headers = Column(Text)
def __init__(self, attributes):
underscorize = re.compile(self.DASHES, re.MULTILINE)
for key in attributes.keys():
setattr(self, underscorize.sub('_', key), attributes[key])
def __repr__(self):
return "<Message('%s','%s', '%s')>" % (self.id, self.who_from, self.timestamp)
def as_json(self):
return {
"id": self.id,
"recipient": self.recipient,
"sender": self.sender,
'who_from': self.who_from,
'subject': self.subject,
'body_plain': self.body_plain,
'stripped_text': self.stripped_text,
'timestamp': self.timestamp,
'signature': self.signature,
'message_headers': self.message_headers
}
def latest():
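    """Fetch every row of the messages table and return it as a list of Message objects."""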
conn = engine.build_engine().connect()
results = conn.execute(select([" * FROM messages"])).fetchall()
return [build_message(r) for r in results]
def build_message(result):
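    """Map a raw database row, by column position, onto a Message instance."""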
return Message({
"id": int(result[0]),
"recipient": result[1],
"sender": result[2],
'who_from': result[3],
'subject': result[4],
'body_plain': result[5],
'stripped_text': result[6],
'timestamp': result[7],
'signature': result[8],
'message_headers': result[9]
})
if __name__ == "__main__":
e = engine.build_engine()
metadata = MetaData(bind=e)
messages_table = Table('messages', metadata,
Column('id', Integer, primary_key=True),
Column('recipient', String(255)),
Column('sender', String(255)),
Column('who_from', String(255)),
Column('subject', String(255)),
Column('body_plain', Text),
Column('stripped_text', Text),
Column('timestamp', Integer),
Column('signature', String(255)),
Column('message_headers', Text),
)
metadata.create_all()
| parkr/steve | message.py | Python | mit | 2,527 |
from __future__ import division, print_function, unicode_literals
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../..'))
from cocos.layer import *
from cocos.text import *
from cocos.actions import *
import pyglet
from pyglet.gl import *
from status import status
class BackgroundLayer( Layer ):
def __init__(self):
super( BackgroundLayer, self ).__init__()
self.img = pyglet.resource.image('background.png')
def draw( self ):
glPushMatrix()
self.transform()
self.img.blit(0,0)
glPopMatrix()
class ScoreLayer( Layer ):
def __init__(self):
w,h = director.get_window_size()
super( ScoreLayer, self).__init__()
# transparent layer
self.add( ColorLayer(32,32,32,32, width=w, height=48),z=-1 )
self.position = (0,h-48)
self.score= Label('Score:', font_size=36,
font_name='Edit Undo Line BRK',
color=(255,255,255,255),
anchor_x='left',
anchor_y='bottom')
self.score.position=(0,0)
self.add( self.score)
self.lines= Label('Lines:', font_size=36,
font_name='Edit Undo Line BRK',
color=(255,255,255,255),
anchor_x='left',
anchor_y='bottom')
self.lines.position=(235,0)
self.add( self.lines)
self.lvl= Label('Lvl:', font_size=36,
font_name='Edit Undo Line BRK',
color=(255,255,255,255),
anchor_x='left',
anchor_y='bottom')
self.lvl.position=(450,0)
self.add( self.lvl)
def draw(self):
super( ScoreLayer, self).draw()
self.score.element.text = 'Score:%d' % status.score
self.lines.element.text = 'Lines:%d' % max(0, (status.level.lines - status.lines))
lvl = status.level_idx or 0
self.lvl.element.text = 'Lvl:%d' % lvl
if status.next_piece:
status.next_piece.draw()
class MessageLayer( Layer ):
def show_message( self, msg, callback=None ):
w,h = director.get_window_size()
self.msg = Label( msg,
font_size=52,
font_name='Edit Undo Line BRK',
anchor_y='center',
anchor_x='center' )
self.msg.position=(w//2.0, h)
self.add( self.msg )
actions = Accelerate(MoveBy( (0,-h/2.0), duration=0.5)) + \
Delay(1) + \
Accelerate(MoveBy( (0,-h/2.0), duration=0.5)) + \
Hide()
if callback:
actions += CallFunc( callback )
self.msg.do( actions )
class HUD( Layer ):
def __init__( self ):
super( HUD, self).__init__()
self.add( ScoreLayer() )
self.add( MessageLayer(), name='msg' )
def show_message( self, msg, callback = None ):
self.get('msg').show_message( msg, callback )
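# Usage sketch (illustrative, not part of the sample): the HUD is meant to sit
# on top of the gameplay layers inside a Scene. It assumes the game's resources
# ('background.png', the 'Edit Undo Line BRK' font) and the `status` module are
# initialised by the real entry point before anything is drawn.
def _build_scene_with_hud():
    from cocos.scene import Scene
    hud = HUD()
    hud.show_message('GET READY')
    return Scene(BackgroundLayer(), hud)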
| shujunqiao/cocos2d-python | samples/tetrico/HUD.py | Python | bsd-3-clause | 3,045 |
import psycopg2
from flask import Blueprint, current_app, render_template, session, request, redirect, url_for
groups_app = Blueprint('groups_app', __name__)
@groups_app.route('/create_group')
def create_group():
if not session.get('user_id'):
return redirect(url_for('home_page'))
with psycopg2.connect(current_app.config['dsn']) as conn:
crs = conn.cursor()
crs.execute("select * from users where ID in (select followed_id from user_follow where follower_id = %s)", (session['user_id'],))
conn.commit()
data = crs.fetchall()
return render_template('listfollowed.html', data=data)
@groups_app.route('/addtogroup', methods = ['POST'])
def addtogroup():
name = request.form['name']
desc = request.form['desc']
members = request.form.getlist('members')
with psycopg2.connect(current_app.config['dsn']) as conn:
crs = conn.cursor()
crs.execute("insert into user_groups (group_name, gp_path, group_exp) values (%s, %s, %s) returning group_id", (name, "/", desc))
conn.commit()
data = crs.fetchone()
id = data[0]
for m in members:
crs.execute("insert into group_members(group_id, user_id, time, member_status, role) values (%s, %s, now(), 'active', 'admin')", (id, m))
conn.commit()
return redirect(url_for('groups_app.show_group', group_id = id))
@groups_app.route('/show_group/<group_id>')
def show_group(group_id):
with psycopg2.connect(current_app.config['dsn']) as conn:
crs = conn.cursor()
crs.execute("select u.username, u.id from group_members as g inner join users as u on u.id = g.user_id where group_id = %s", (group_id, ))
memberdata = crs.fetchall()
crs.execute("select group_name, gp_path, group_exp from user_groups where group_id = %s", (group_id,))
data = crs.fetchone()
conn.commit()
    return render_template('groupinfo.html', data=data, memberdata=memberdata)  # visitors to this route see the template rendered with the data pulled from the database
@groups_app.route('/allgroups')
def allgroups():
with psycopg2.connect(current_app.config['dsn']) as conn:
crs = conn.cursor()
crs.execute("select group_name, group_exp, group_id from user_groups")
data = crs.fetchall()
crs.execute("select u.username, u.id from group_members as g inner join users as u on u.id = g.user_id")
memberdata = crs.fetchall()
return render_template('allgroups.html',data=data,memberdata=memberdata)
@groups_app.route('/delete_member/<id>')
def delete_member(id):
with psycopg2.connect(current_app.config['dsn']) as conn:
crs = conn.cursor()
        crs.execute("delete from group_members where user_id = %s", (id, ))
conn.commit()
return render_template('message.html', message="Successfully removed.")
@groups_app.route('/delete_group/<id>')
def delete_group(id):
with psycopg2.connect(current_app.config['dsn']) as conn:
crs = conn.cursor()
crs.execute("delete from user_groups where group_id = %s", (id, ))
conn.commit()
return redirect(url_for('groups_app.allgroups'))
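# Wiring sketch (not part of the original module): a blueprint only serves
# requests once it is registered on an application object; the DSN below is a
# placeholder and must point at a real PostgreSQL database.
def _create_demo_app():
    from flask import Flask
    demo_app = Flask(__name__)
    demo_app.config['dsn'] = 'dbname=demo user=demo password=demo host=localhost'
    demo_app.register_blueprint(groups_app)
    return demo_app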
@groups_app.route('/updateform')
def updateform():
return render_template('update_group.html')
@groups_app.route('/update_group',methods=["POST"])
def update_group():
old_name = request.form['oldname']
new_name = request.form['name']
desc = request.form['desc']
with psycopg2.connect(current_app.config['dsn']) as conn:
crs = conn.cursor()
crs.execute("update user_groups set group_name=%s, group_exp=%s where group_name = %s", (new_name, desc, old_name, ))
    return redirect(url_for('groups_app.allgroups'))
| itucsdb1621/itucsdb1621 | groups.py | Python | gpl-3.0 | 3,749
# -*- coding: utf-8 -*-
from unittest import TestSuite, TestLoader
from flask import url_for
from flask.ext.security import url_for_security
from lxml.html import fromstring
from spkrepo.ext import db
from spkrepo.tests.common import BaseTestCase, BuildFactory, UserFactory
class IndexTestCase(BaseTestCase):
def test_get_anonymous(self):
response = self.client.get(url_for('frontend.index'))
self.assert200(response)
self.assertIn('Login', response.data.decode(response.charset))
self.assertIn('Register', response.data.decode(response.charset))
def test_get_logged_user(self):
with self.logged_user():
response = self.client.get(url_for('frontend.index'))
self.assert200(response)
self.assertIn('Logout', response.data.decode(response.charset))
self.assertIn('Profile', response.data.decode(response.charset))
class PackagesTestCase(BaseTestCase):
def test_get_active_stable(self):
build = BuildFactory(version__report_url=None, active=True)
db.session.commit()
response = self.client.get(url_for('frontend.packages'))
self.assert200(response)
self.assertIn(build.version.displaynames['enu'].displayname, response.data.decode(response.charset))
self.assertNotIn('beta', response.data.decode(response.charset))
def test_get_active_not_stable(self):
build = BuildFactory(active=True)
db.session.commit()
response = self.client.get(url_for('frontend.packages'))
self.assert200(response)
self.assertIn(build.version.displaynames['enu'].displayname, response.data.decode(response.charset))
self.assertIn('beta', response.data.decode(response.charset))
def test_get_not_active_not_stable(self):
build = BuildFactory(active=False)
db.session.commit()
response = self.client.get(url_for('frontend.packages'))
self.assert200(response)
self.assertNotIn(build.version.displaynames['enu'].displayname, response.data.decode(response.charset))
self.assertNotIn('beta', response.data.decode(response.charset))
def test_get_not_active_stable(self):
        build = BuildFactory(version__report_url=None, active=False)
db.session.commit()
response = self.client.get(url_for('frontend.packages'))
self.assert200(response)
self.assertNotIn(build.version.displaynames['enu'].displayname, response.data.decode(response.charset))
self.assertNotIn('beta', response.data.decode(response.charset))
class PackageTestCase(BaseTestCase):
def test_get(self):
build = BuildFactory(version__package__author=UserFactory(), version__report_url=None, active=True)
db.session.commit()
response = self.client.get(url_for('frontend.package', name=build.version.package.name))
self.assert200(response)
for a in build.architectures:
self.assertIn(a.code, response.data.decode(response.charset))
self.assertIn(build.version.package.author.username, response.data.decode(response.charset))
self.assertIn(build.version.displaynames['enu'].displayname, response.data.decode(response.charset))
self.assertIn(build.version.descriptions['enu'].description, response.data.decode(response.charset))
def test_get_no_package(self):
response = self.client.get(url_for('frontend.package', name='no-package'))
self.assert404(response)
class ProfileTestCase(BaseTestCase):
def test_get_anonymous(self):
self.assert302(self.client.get(url_for('frontend.profile')))
def test_get_user(self):
with self.logged_user():
response = self.client.get(url_for('frontend.profile'))
self.assert200(response)
self.assertNotIn('API key', response.data.decode(response.charset))
def test_get_developer(self):
with self.logged_user('developer'):
response = self.client.get(url_for('frontend.profile'))
self.assert200(response)
self.assertIn('API key', response.data.decode(response.charset))
def test_get_no_api_key_by_default(self):
with self.logged_user('developer', api_key=None):
response = self.client.get(url_for('frontend.profile'))
html = fromstring(response.data.decode(response.charset))
self.assertTrue(html.forms[0].fields['api_key'] == '')
def test_post_generate_api_key_developer(self):
with self.logged_user('developer', api_key=None):
response = self.client.post(url_for('frontend.profile'), data=dict(),
follow_redirects=True)
self.assert200(response)
html = fromstring(response.data.decode(response.charset))
self.assertTrue(html.forms[0].fields['api_key'] != '')
def test_post_generate_api_key_not_developer(self):
with self.logged_user(api_key=None):
response = self.client.post(url_for('frontend.profile'), data=dict())
self.assert200(response)
class RegisterTestCase(BaseTestCase):
def test_unique_user_username(self):
data = dict(username='test', email='[email protected]', password='password', password_confirm='password')
self.client.post(url_for_security('register'), data=data)
response = self.client.post(url_for_security('register'), data=data)
self.assertIn('Username already taken', response.data.decode(response.charset))
def suite():
suite = TestSuite()
suite.addTest(TestLoader().loadTestsFromTestCase(IndexTestCase))
suite.addTest(TestLoader().loadTestsFromTestCase(PackagesTestCase))
suite.addTest(TestLoader().loadTestsFromTestCase(PackageTestCase))
suite.addTest(TestLoader().loadTestsFromTestCase(ProfileTestCase))
suite.addTest(TestLoader().loadTestsFromTestCase(RegisterTestCase))
return suite
| Dr-Bean/spkrepo | spkrepo/tests/test_frontend.py | Python | mit | 5,912 |
#!/usr/bin/env python
from subprocess import check_output
from netInfo import Information_Gathering
info = Information_Gathering()
iface = info.IFACE()
def getIP(iface=iface):
ifconfig = check_output(["ifconfig", "%s"%iface])
ip, mac = ifconfig.split()[6], ifconfig.split()[4]
return ip.split(":")[1], mac
| securecurebt5/ARPySnitch | mylocalip.py | Python | gpl-3.0 | 315 |
import wget
import os
import tensorflow as tf
# The URLs where the MNIST data can be downloaded.
_DATA_URL = 'http://yann.lecun.com/exdb/mnist/'
_TRAIN_DATA_FILENAME = 'train-images-idx3-ubyte.gz'
_TRAIN_LABELS_FILENAME = 'train-labels-idx1-ubyte.gz'
_TEST_DATA_FILENAME = 't10k-images-idx3-ubyte.gz'
_TEST_LABELS_FILENAME = 't10k-labels-idx1-ubyte.gz'
dataset_dir = '~/datasets/mnist'
#save_dir = os.getcwd()
#os.chdir(dataset_dir)
def _download_dataset_mnist(dataset_dir):
"""Downloads MNIST locally.
Args:
dataset_dir: The directory where the temporary files are stored.
"""
dataset_dir = os.path.expandvars(dataset_dir)
dataset_dir = os.path.expanduser(dataset_dir)
if not os.path.exists(dataset_dir):
os.mkdir(dataset_dir)
for filename in [_TRAIN_DATA_FILENAME,
_TRAIN_LABELS_FILENAME,
_TEST_DATA_FILENAME,
_TEST_LABELS_FILENAME]:
filepath = os.path.join(dataset_dir, filename)
if not os.path.exists(filepath):
print('Downloading file %s...' % filename)
wget.download(_DATA_URL + filename, out=dataset_dir)
print()
with tf.gfile.GFile(filepath) as f:
size = f.size()
print('Successfully downloaded', filename, size, 'bytes.')
else:
            print('%s file is already downloaded' % filename)
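# Follow-up sketch (illustrative, assumes numpy is installed): one common way to
# read the gzipped IDX image file downloaded above; the 16-byte offset skips the
# IDX header (magic number plus three dimension fields).
def _load_mnist_images(gz_path):
    import gzip
    import numpy as np
    with gzip.open(gz_path, 'rb') as f:
        raw = f.read()
    return np.frombuffer(raw, dtype=np.uint8, offset=16).reshape(-1, 28, 28)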
def main():
_download_dataset_mnist(dataset_dir)
#restore the dir
#os.chdir(save_dir)
if __name__ == '__main__':
    main()
| ybao2016/tf-slim-model | download_data_wget.py | Python | apache-2.0 | 1,531
################################################################################
# $Id: output.py 2552 2007-02-08 21:40:46Z b4rt $
# $Date: 2007-02-08 15:40:46 -0600 (Thu, 08 Feb 2007) $
# $Revision: 2552 $
################################################################################
# #
# LICENSE #
# #
# This program is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License (GPL) #
# as published by the Free Software Foundation; either version 2 #
# of the License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# To read the license please visit http://www.gnu.org/copyleft/gpl.html #
# #
# #
################################################################################
# standard-imports
import sys
import time
################################################################################
""" ------------------------------------------------------------------------ """
""" getPrefix """
""" ------------------------------------------------------------------------ """
def getPrefix():
return time.strftime('[%Y/%m/%d - %H:%M:%S]') + " "
""" ------------------------------------------------------------------------ """
""" getOutput """
""" ------------------------------------------------------------------------ """
def getOutput(message):
return getPrefix() + message + "\n"
""" ------------------------------------------------------------------------ """
""" printMessage """
""" ------------------------------------------------------------------------ """
def printMessage(message):
sys.stdout.write(getOutput(message))
sys.stdout.flush()
""" ------------------------------------------------------------------------ """
""" printError """
""" ------------------------------------------------------------------------ """
def printError(message):
sys.stderr.write(getOutput(message))
sys.stderr.flush()
""" ------------------------------------------------------------------------ """
""" printException """
""" ------------------------------------------------------------------------ """
def printException():
print getPrefix(), sys.exc_info()
sys.stdout.flush()
| sulaweyo/torrentflux-b4rt-php7 | html/bin/clients/fluazu/fluazu/output.py | Python | gpl-2.0 | 3,408 |
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Class representing instrumentation test apk and jar."""
import os
from devil.android import apk_helper
from pylib.instrumentation import test_jar
class TestPackage(test_jar.TestJar):
def __init__(self, apk_path, jar_path, test_support_apk_path):
test_jar.TestJar.__init__(self, jar_path)
if not os.path.exists(apk_path):
raise Exception('%s not found, please build it' % apk_path)
self._apk_path = apk_path
self._apk_name = os.path.splitext(os.path.basename(apk_path))[0]
self._package_name = apk_helper.GetPackageName(self._apk_path)
self._test_support_apk_path = test_support_apk_path
def GetApkPath(self):
"""Returns the absolute path to the APK."""
return self._apk_path
def GetApkName(self):
"""Returns the name of the apk without the suffix."""
return self._apk_name
def GetPackageName(self):
"""Returns the package name of this APK."""
return self._package_name
# Override.
def Install(self, device):
device.Install(self.GetApkPath())
if (self._test_support_apk_path and
os.path.exists(self._test_support_apk_path)):
device.Install(self._test_support_apk_path)
| Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/build/android/pylib/instrumentation/test_package.py | Python | mit | 1,335 |
#!/usr/bin/python3
# requirements: btrfs on /dev/sdc1 with qgroups 0/257, 1/0 and 2/0
from storage import *
from storageitu import *
set_logger(get_logfile_logger())
environment = Environment(False)
storage = Storage(environment)
storage.probe()
staging = storage.get_staging()
print(staging)
blk_device = BlkDevice.find_by_name(staging, "/dev/sdc1")
btrfs = to_btrfs(blk_device.get_blk_filesystem())
qgroup1 = btrfs.find_btrfs_qgroup_by_id(BtrfsQgroupId(0, 257))
qgroup2 = btrfs.find_btrfs_qgroup_by_id(BtrfsQgroupId(1, 0))
qgroup3 = btrfs.find_btrfs_qgroup_by_id(BtrfsQgroupId(2, 0))
if not qgroup2.is_assigned(qgroup1):
qgroup2.assign(qgroup1)
else:
qgroup2.unassign(qgroup1)
if not qgroup3.is_assigned(qgroup2):
qgroup3.assign(qgroup2)
else:
qgroup3.unassign(qgroup2)
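# Refactoring sketch (illustrative): both if/else blocks above toggle a child's
# membership in a parent qgroup and could be expressed once as:
def _toggle_assignment(parent_qgroup, child_qgroup):
    if not parent_qgroup.is_assigned(child_qgroup):
        parent_qgroup.assign(child_qgroup)
    else:
        parent_qgroup.unassign(child_qgroup)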
print(staging)
commit(storage)
| aschnell/libstorage-ng | integration-tests/filesystems/btrfs/quota/assign-qgroup.py | Python | gpl-2.0 | 836 |
#!/usr/bin/env python
from __future__ import with_statement
import sys
from distutils.core import setup, Extension, Command
from distutils.command.build_ext import build_ext
from distutils.errors import CCompilerError, DistutilsExecError, \
DistutilsPlatformError
IS_PYPY = hasattr(sys, 'pypy_translation_info')
VERSION = '3.1.2'
DESCRIPTION = "Simple, fast, extensible JSON encoder/decoder for Python"
with open('README.rst', 'r') as f:
LONG_DESCRIPTION = f.read()
CLASSIFIERS = filter(None, map(str.strip,
"""
Development Status :: 5 - Production/Stable
Intended Audience :: Developers
License :: OSI Approved :: MIT License
License :: OSI Approved :: Academic Free License (AFL)
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.5
Programming Language :: Python :: 2.6
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.3
Programming Language :: Python :: Implementation :: CPython
Programming Language :: Python :: Implementation :: PyPy
Topic :: Software Development :: Libraries :: Python Modules
""".splitlines()))
if sys.platform == 'win32' and sys.version_info > (2, 6):
# 2.6's distutils.msvc9compiler can raise an IOError when failing to
# find the compiler
# It can also raise ValueError http://bugs.python.org/issue7511
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError,
IOError, ValueError)
else:
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
class BuildFailed(Exception):
pass
class ve_build_ext(build_ext):
# This class allows C extension building to fail.
def run(self):
try:
build_ext.run(self)
except DistutilsPlatformError:
raise BuildFailed()
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except ext_errors:
raise BuildFailed()
class TestCommand(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import sys, subprocess
raise SystemExit(
subprocess.call([sys.executable,
# Turn on deprecation warnings
'-Wd',
'simplejson/tests/__init__.py']))
def run_setup(with_binary):
cmdclass = dict(test=TestCommand)
if with_binary:
kw = dict(
ext_modules = [
Extension("simplejson._speedups", ["simplejson/_speedups.c"]),
],
cmdclass=dict(cmdclass, build_ext=ve_build_ext),
)
else:
kw = dict(cmdclass=cmdclass)
setup(
name="simplejson",
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
author="Bob Ippolito",
author_email="[email protected]",
url="http://github.com/simplejson/simplejson",
license="MIT License",
packages=['simplejson', 'simplejson.tests'],
platforms=['any'],
**kw)
try:
run_setup(not IS_PYPY)
except BuildFailed:
BUILD_EXT_WARNING = ("WARNING: The C extension could not be compiled, "
"speedups are not enabled.")
print('*' * 75)
print(BUILD_EXT_WARNING)
print("Failure information, if any, is above.")
print("I'm retrying the build without the C extension now.")
print('*' * 75)
run_setup(False)
print('*' * 75)
print(BUILD_EXT_WARNING)
print("Plain-Python installation succeeded.")
print('*' * 75)
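# Post-install sketch (illustrative, not part of the original script): a quick
# way to check after installation whether the optional C extension was built.
def _speedups_available():
    try:
        import simplejson._speedups  # noqa
        return True
    except ImportError:
        return False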
| dbbhattacharya/kitsune | vendor/packages/simplejson/setup.py | Python | bsd-3-clause | 3,691 |
from django.conf.urls import patterns, include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from duelify_app.views import logout_page, discussions,\
discussion_add_edit, register_page, RegisterSuccess, ChooseCategoryView, CategoryCreate, CategoryUpdate, CategoryDelete,\
filter_discussions, friends_accept, topics_discuss, voteup_discussion,\
feedback, side_login, main_login, login_invited, score_reset, faq,\
punch_edit
from django.views.generic.list import ListView
from duelify_app.models import Ring, Category
from django.views.generic.base import TemplateView
from duelify_app.sitemap import Sitemap
js_info_dict = {
'packages': ('duelify_app',),
}
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
sitemaps = {
'discussions':Sitemap,
}
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'duelify.views.home', name='home'),
# url(r'^duelify/', include('duelify.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Authentication
url(r'', include('social_auth.urls')),
(r'^peyman/', include(admin.site.urls)),
(r'^side_login/$', side_login),
(r'^login/$', main_login),
(r'^logout/$', logout_page),
(r'^register/$', register_page),
(r'^register/success/$', RegisterSuccess.as_view(template_name='registration/register_success.html')),
# Misc
(r'^i18n/$', include('django.conf.urls.i18n')),
(r'^jsi18n/$', 'django.views.i18n.javascript_catalog', js_info_dict),
(r'^feedback/$', feedback),
(r'^sitemap\.xml$', 'django.contrib.sitemaps.views.sitemap', {'sitemaps': sitemaps}),
(r'^faq/$', faq),
(r'^tinymce/', include('tinymce.urls')),
#Password reset
(r'^password_reset/$','django.contrib.auth.views.password_reset'),
(r'^password_reset_done/$','django.contrib.auth.views.password_reset_done'),
(r'^password_reset_confirm/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$','django.contrib.auth.views.password_reset_confirm'),
(r'^password_reset_complete/$','django.contrib.auth.views.password_reset_complete'),
(r'^$', discussions),
#(r'^discussion/(?P<discussion_id>\d+)/$', discussion_display),
#(r'^discussion/delete/(?P<discussion_id>\d+)/$', discussion_delete),
#(r'^discussion/edit/(?P<discussion_id>\d+)/$', discussion_add_edit),
#(r'^discussion/add/', DiscussionAddEdit.as_view()),
(r'^topics/add/$', discussion_add_edit),
url(r'^discussion/edit/(?P<ring_id>\d+)/(?P<slug>[-\w\d]+)/$', discussion_add_edit, name='edit-ring'),
url(r'^discussion/edit/(?P<punch_id>\d+)/$', punch_edit, name='edit-punch'),
url(r'^topics/discuss/(?P<ring_id>\d+)/(?P<slug>[-\w\d]+)/$', topics_discuss, name='discuss-topic'),
(r'^vote-up/discussion/(?P<punch_id>\d+)/$', voteup_discussion),
#(r'^topics/search/', discussion_search),
# url(r'^topics/search/$', ListView.as_view(
# queryset=Ring.objects.order_by('-datetime'),
# context_object_name='ring_list',
# template_name='discussions.html'), name='topic-search'),
#url(r'^topics/search/$', ChooseCategoryView.as_view(), name='topic-search'),
url(r'^topics/search/$', filter_discussions, name='topic-search'),
url(r'^topics/filter/$', ChooseCategoryView.as_view(), name='topic-filter'),
url(r'^categories/$', ListView.as_view(model=Category, context_object_name='categories', template_name='categories.html'), name='category-list'),
# url(r'^category/add/$', CategoryCreate.as_view(), name='author-add'),
# url(r'^category/edit/(?P<pk>\d+)/$', CategoryUpdate.as_view(), name='author-edit'),
# url(r'^category/delete/(?P<pk>\d+)/$', CategoryDelete.as_view(), name='author-delete'),
#(r'^duel/invite/$', duel_invite),
(r'^duel/accept/(\w+)/$', friends_accept),
#url(r'^register-invite/$', register_invite, name='register-invite'),
url(r'^new-users-invited/$', login_invited, name='new-users-invited'),
url(r'^login-invited/$', login_invited, name='login-invited'),
url(r'^score-reset/$', score_reset, name='score-reset'),
url(r'^signup-error/$', TemplateView.as_view(template_name="error.html"), name='signup-error'),
)
urlpatterns += staticfiles_urlpatterns()
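# Reverse-lookup sketch (illustrative): named patterns above can be resolved in
# code; this assumes Ring instances expose `id` and `slug` attributes matching
# the 'discuss-topic' pattern.
def _discuss_url_for(ring):
    from django.core.urlresolvers import reverse
    return reverse('discuss-topic', args=[ring.id, ring.slug])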
| houmie/duelify | duelify/urls.py | Python | gpl-2.0 | 4,489 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './Helpviewer/CookieJar/CookieDetailsDialog.ui'
#
# Created: Tue Nov 18 17:53:58 2014
# by: PyQt5 UI code generator 5.3.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_CookieDetailsDialog(object):
def setupUi(self, CookieDetailsDialog):
CookieDetailsDialog.setObjectName("CookieDetailsDialog")
CookieDetailsDialog.resize(400, 300)
CookieDetailsDialog.setSizeGripEnabled(True)
self.verticalLayout = QtWidgets.QVBoxLayout(CookieDetailsDialog)
self.verticalLayout.setObjectName("verticalLayout")
self.formLayout = QtWidgets.QFormLayout()
self.formLayout.setObjectName("formLayout")
self.label = QtWidgets.QLabel(CookieDetailsDialog)
self.label.setObjectName("label")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label)
self.domainEdit = QtWidgets.QLineEdit(CookieDetailsDialog)
self.domainEdit.setReadOnly(True)
self.domainEdit.setObjectName("domainEdit")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.domainEdit)
self.label_2 = QtWidgets.QLabel(CookieDetailsDialog)
self.label_2.setObjectName("label_2")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_2)
self.nameEdit = QtWidgets.QLineEdit(CookieDetailsDialog)
self.nameEdit.setReadOnly(True)
self.nameEdit.setObjectName("nameEdit")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.nameEdit)
self.label_3 = QtWidgets.QLabel(CookieDetailsDialog)
self.label_3.setObjectName("label_3")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_3)
self.pathEdit = QtWidgets.QLineEdit(CookieDetailsDialog)
self.pathEdit.setReadOnly(True)
self.pathEdit.setObjectName("pathEdit")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.pathEdit)
self.label_6 = QtWidgets.QLabel(CookieDetailsDialog)
self.label_6.setObjectName("label_6")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_6)
self.secureCheckBox = QtWidgets.QCheckBox(CookieDetailsDialog)
self.secureCheckBox.setText("")
self.secureCheckBox.setCheckable(False)
self.secureCheckBox.setObjectName("secureCheckBox")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.secureCheckBox)
self.label_4 = QtWidgets.QLabel(CookieDetailsDialog)
self.label_4.setObjectName("label_4")
self.formLayout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label_4)
self.expirationEdit = QtWidgets.QLineEdit(CookieDetailsDialog)
self.expirationEdit.setReadOnly(True)
self.expirationEdit.setObjectName("expirationEdit")
self.formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.expirationEdit)
self.label_5 = QtWidgets.QLabel(CookieDetailsDialog)
self.label_5.setObjectName("label_5")
self.formLayout.setWidget(5, QtWidgets.QFormLayout.LabelRole, self.label_5)
self.valueEdit = QtWidgets.QPlainTextEdit(CookieDetailsDialog)
self.valueEdit.setReadOnly(True)
self.valueEdit.setObjectName("valueEdit")
self.formLayout.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.valueEdit)
self.verticalLayout.addLayout(self.formLayout)
self.buttonBox = QtWidgets.QDialogButtonBox(CookieDetailsDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Close)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayout.addWidget(self.buttonBox)
self.retranslateUi(CookieDetailsDialog)
self.buttonBox.accepted.connect(CookieDetailsDialog.accept)
self.buttonBox.rejected.connect(CookieDetailsDialog.reject)
QtCore.QMetaObject.connectSlotsByName(CookieDetailsDialog)
CookieDetailsDialog.setTabOrder(self.domainEdit, self.nameEdit)
CookieDetailsDialog.setTabOrder(self.nameEdit, self.pathEdit)
CookieDetailsDialog.setTabOrder(self.pathEdit, self.secureCheckBox)
CookieDetailsDialog.setTabOrder(self.secureCheckBox, self.expirationEdit)
CookieDetailsDialog.setTabOrder(self.expirationEdit, self.valueEdit)
CookieDetailsDialog.setTabOrder(self.valueEdit, self.buttonBox)
def retranslateUi(self, CookieDetailsDialog):
_translate = QtCore.QCoreApplication.translate
CookieDetailsDialog.setWindowTitle(_translate("CookieDetailsDialog", "Cookie Details"))
self.label.setText(_translate("CookieDetailsDialog", "Domain:"))
self.label_2.setText(_translate("CookieDetailsDialog", "Name:"))
self.label_3.setText(_translate("CookieDetailsDialog", "Path:"))
self.label_6.setText(_translate("CookieDetailsDialog", "Secure:"))
self.label_4.setText(_translate("CookieDetailsDialog", "Expires:"))
self.label_5.setText(_translate("CookieDetailsDialog", "Contents:"))
| davy39/eric | Helpviewer/CookieJar/Ui_CookieDetailsDialog.py | Python | gpl-3.0 | 5,194 |
#!/usr/bin/env python
'''
Multithreaded video processing sample.
Usage:
video_threaded.py {<video device number>|<video file name>}
Shows how python threading capabilities can be used
to organize parallel captured frame processing pipeline
for smoother playback.
Keyboard shortcuts:
ESC - exit
space - switch between multi and single threaded processing
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2
from multiprocessing.pool import ThreadPool
from collections import deque
from common import clock, draw_str, StatValue
import video
class DummyTask:
def __init__(self, data):
self.data = data
def ready(self):
return True
def get(self):
return self.data
if __name__ == '__main__':
import sys
print(__doc__)
try:
fn = sys.argv[1]
except:
fn = 0
cap = video.create_capture(fn)
def process_frame(frame, t0):
# some intensive computation...
frame = cv2.medianBlur(frame, 19)
frame = cv2.medianBlur(frame, 19)
return frame, t0
threadn = cv2.getNumberOfCPUs()
pool = ThreadPool(processes = threadn)
pending = deque()
threaded_mode = True
latency = StatValue()
frame_interval = StatValue()
last_frame_time = clock()
while True:
while len(pending) > 0 and pending[0].ready():
res, t0 = pending.popleft().get()
latency.update(clock() - t0)
draw_str(res, (20, 20), "threaded : " + str(threaded_mode))
draw_str(res, (20, 40), "latency : %.1f ms" % (latency.value*1000))
draw_str(res, (20, 60), "frame interval : %.1f ms" % (frame_interval.value*1000))
cv2.imshow('threaded video', res)
if len(pending) < threadn:
ret, frame = cap.read()
t = clock()
frame_interval.update(t - last_frame_time)
last_frame_time = t
if threaded_mode:
task = pool.apply_async(process_frame, (frame.copy(), t))
else:
task = DummyTask(process_frame(frame, t))
pending.append(task)
ch = 0xFF & cv2.waitKey(1)
if ch == ord(' '):
threaded_mode = not threaded_mode
if ch == 27:
break
cv2.destroyAllWindows()
| DamianPilot382/Rubiks-Cube-Solver | opencv/sources/samples/python/video_threaded.py | Python | apache-2.0 | 2,365 |
from flask import g
from .opml_import import opml_to_dict
from server import app, init_db
from server.database.models import Feed
import sys
from urllib.request import urlopen, URLError
feeds_dir = app.root_path + "/test_resources/feeds/"
app.config['DATABASE_PATH'] = 'sqlite:///server/test_resources/posts.db'
if len(sys.argv) != 2:
sys.exit("Usage: {name} opml_file".format(name=sys.argv[0]))
feeds = opml_to_dict(sys.argv[1])
with app.app_context():
init_db()
session = g.db
i = 0
for feed in feeds:
try:
response = urlopen(feed["url"])
filename = feeds_dir + "feed{}.xml".format(i)
with open(filename, "wb+") as f:
f.write(response.read())
session.add(Feed(feed['title'], "file://" + filename))
i += 1
except URLError:
print("{} doesn't seem to exist, skipping".format(feed["url"]))
session.commit()
| flacerdk/smoke-signal | utils/create_local_feeds.py | Python | mit | 935 |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 8 15:10:34 2016
@author: mhurst
"""
#import modules
import matplotlib
#matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
#setup figure
fig = plt.figure(1,figsize=(6,6))
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
# choose colour map
ColourMap = cm.hot
# get filename
FileName = "../driver_files/scalby_8cm_testX.dat"
# open the file
f = open(FileName,'r')
# get the lines and find out how many lines
Lines = f.readlines()
NoLines = len(Lines)
#Get header info and setup X coord
Header = Lines[0].strip().split(" ")
NXNodes = float(Header[0])
PlatformWidth = float(Header[1])
MaxTime = float(Lines[1].strip().split(" ")[0])
for i in range(1,NoLines,4):
#Get data
X = np.array(Lines[i].strip().split(" ")[1:], dtype="float64")
ZPlatform = np.array(Lines[i+1].strip().split(" ")[1:], dtype="float64")
ZBeach = np.array(Lines[i+2].strip().split(" ")[1:], dtype="float64")
N = np.array(Lines[i+3].strip().split(" ")[1:], dtype="float64")
Time = float(Lines[i].strip().split(" ")[0])
#mask for NDVs
mask = ZBeach != -9999
Zbeach = ZBeach[mask]
Zplat = ZPlatform[mask]
Xplot = X[mask]
Nplot = N[mask]
Colour = Time/MaxTime
ax1.plot(Xplot,Zbeach,'--',c=ColourMap(Colour))
ax1.plot(Xplot,Zplat,'-',c=ColourMap(Colour))
ax2.plot(Xplot,Nplot,'-',c=ColourMap(Colour))
ax2.set_xlabel("Distance (m)")
ax2.set_ylabel(r"$^{10}$Be Concentration")
ax1.set_ylabel("Elevation (m)")
#limit x axis to 250m
#plt.xlim(0,250)
plt.savefig("test_output_SY_8cm_testX.png", dpi=300)
plt.show()
| mdhurst1/RoBoCoP_CRN | plotting_functions/plot_RockyCoastCRN_output.py | Python | gpl-3.0 | 1,667
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'WorkFlowStep.fields_list'
db.alter_column(u'calc_workflowstep', 'fields_list', self.gf('dbarray.fields.CharArrayField')(max_length=100, null=True))
def backwards(self, orm):
# Changing field 'WorkFlowStep.fields_list'
db.alter_column(u'calc_workflowstep', 'fields_list', self.gf('dbarray.fields.CharArrayField')(max_length=10, null=True))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'calc.experimentindexpage': {
'Meta': {'object_name': 'ExperimentIndexPage', '_ormbases': [u'wagtailcore.Page']},
'intro': ('wagtail.wagtailcore.fields.RichTextField', [], {'blank': 'True'}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['wagtailcore.Page']", 'unique': 'True', 'primary_key': 'True'})
},
u'calc.experimentpage': {
'Meta': {'object_name': 'ExperimentPage', '_ormbases': [u'wagtailcore.Page']},
'body': ('wagtail.wagtailcore.fields.RichTextField', [], {'blank': 'True'}),
'intro': ('wagtail.wagtailcore.fields.RichTextField', [], {'blank': 'True'}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['wagtailcore.Page']", 'unique': 'True', 'primary_key': 'True'})
},
u'calc.experimentrelatedlink': {
'Meta': {'ordering': "['sort_order']", 'object_name': 'ExperimentRelatedLink'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link_document': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtaildocs.Document']"}),
'link_page': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtailcore.Page']"}),
'page': ('modelcluster.fields.ParentalKey', [], {'related_name': "'related_links'", 'to': u"orm['calc.ExperimentPage']"}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'calc.workflowindexpage': {
'Meta': {'object_name': 'WorkflowIndexPage', '_ormbases': [u'wagtailcore.Page']},
'intro': ('wagtail.wagtailcore.fields.RichTextField', [], {'blank': 'True'}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['wagtailcore.Page']", 'unique': 'True', 'primary_key': 'True'})
},
u'calc.workflowpage': {
'Meta': {'object_name': 'WorkflowPage', '_ormbases': [u'wagtailcore.Page']},
'example_file': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtaildocs.Document']"}),
'intro': ('wagtail.wagtailcore.fields.RichTextField', [], {'blank': 'True'}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['wagtailcore.Page']", 'unique': 'True', 'primary_key': 'True'})
},
u'calc.workflowstep': {
'Meta': {'ordering': "['sort_order']", 'object_name': 'WorkFlowStep'},
'example_input': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtaildocs.Document']"}),
'fields_list': ('dbarray.fields.CharArrayField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intro': ('wagtail.wagtailcore.fields.RichTextField', [], {'blank': 'True'}),
'page': ('modelcluster.fields.ParentalKey', [], {'blank': 'True', 'related_name': "'workflow_steps'", 'null': 'True', 'to': u"orm['calc.WorkflowPage']"}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'wagtailcore.page': {
'Meta': {'object_name': 'Page'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pages'", 'to': u"orm['contenttypes.ContentType']"}),
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'expire_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'go_live_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'has_unpublished_changes': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'live': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_pages'", 'null': 'True', 'to': u"orm['auth.User']"}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'search_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'seo_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'show_in_menus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'wagtaildocs.document': {
'Meta': {'object_name': 'Document'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uploaded_by_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
}
}
    complete_apps = ['calc']
| thesgc/shergar | shergar/calc/migrations/0003_auto__chg_field_workflowstep_fields_list.py | Python | mit | 9,991
import re
import requests
from flask import Flask
from flask import request
from flask import Response
from werkzeug import LocalProxy
from ws4py.client.geventclient import WebSocketClient
app = Flask(__name__)
app.debug = True
PROXY_DOMAIN = "127.0.0.1:8888"
PROXY_FORMAT = u"http://%s/%s" % (PROXY_DOMAIN, u"%s")
PROXY_REWRITE_REGEX = re.compile(
r'((?:src|action|[^_]href|project-url|kernel-url|baseurl)'
'\s*[=:]\s*["\']?)/',
re.IGNORECASE
)
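# Rewrite sketch (illustrative): the pattern above prefixes root-relative URLs
# found in attributes such as src="/..." or href="/..." with /proxy/.
def _rewrite_demo():
    sample = '<script src="/static/main.js"></script><a href="/tree">tree</a>'
    return PROXY_REWRITE_REGEX.sub(r'\1/proxy/', sample)
    # -> '<script src="/proxy/static/main.js"></script><a href="/proxy/tree">tree</a>'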
websocket = LocalProxy(lambda: request.environ.get('wsgi.websocket', None))
websockets = {}
class WebSocketProxy(WebSocketClient):
def __init__(self, to, *args, **kwargs):
self.to = to
print(("Proxy to", self.to))
super(WebSocketProxy, self).__init__(*args, **kwargs)
def opened(self):
m = self.to.receive()
print("<= %d %s" % (len(m), str(m)))
self.send(m)
def closed(self, code, reason):
print(("Closed down", code, reason))
def received_message(self, m):
print("=> %d %s" % (len(m), str(m)))
self.to.send(m)
methods = ["GET", "POST", "PUT", "DELETE", "HEAD", "OPTIONS", "PATCH",
"CONNECT"]
@app.route('/proxy/', defaults={'url': ''}, methods=methods)
@app.route('/proxy/<path:url>', methods=methods)
def proxy(url):
with app.test_request_context():
if websocket:
while True:
data = websocket.receive()
websocket_url = 'ws://{}/{}'.format(PROXY_DOMAIN, url)
if websocket_url not in websockets:
client = WebSocketClient(websocket_url,
protocols=['http-only', 'chat'])
websockets[websocket_url] = client
else:
client = websockets[websocket_url]
client.connect()
if data:
client.send(data)
client_data = client.receive()
if client_data:
websocket.send(client_data)
return Response()
if request.method == "GET":
url_ending = "%s?%s" % (url, request.query_string)
url = PROXY_FORMAT % url_ending
resp = requests.get(url)
elif request.method == "POST":
if url == 'kernels':
url_ending = "%s?%s" % (url, request.query_string)
url = PROXY_FORMAT % url_ending
else:
url = PROXY_FORMAT % url
resp = requests.post(url, request.data)
else:
url = PROXY_FORMAT % url
        resp = requests.request(request.method, url, data=request.data)
content = resp.content
if content:
content = PROXY_REWRITE_REGEX.sub(r'\1/proxy/', content)
headers = resp.headers
if "content-type" in headers:
mimetype = headers["content-type"].split(";")[0].split(",")[0]
else:
mimetype = None
response = Response(
content,
headers=dict(headers),
mimetype=mimetype,
status=resp.status_code
)
return response
proxy.provide_automatic_options = False
if __name__ == '__main__':
app.run()
| versae/hidra | hidra/hidra.py | Python | mit | 3,120 |
# Testing if feature collisions are detected accross recursive features
expected_results = {
"K64F": {
"desc": "test recursive feature collisions",
"exception_msg": "Configuration conflict. The feature UVISOR both added and removed."
}
}
| arostm/mbed-os | tools/test/config_test/test25/test_data.py | Python | apache-2.0 | 264 |
# coding=utf-8
def get_remote_addr(request):
addr = request.META.get('REMOTE_ADDR')
if not addr:
addr = request.META.get('HTTP_X_FORWARDED_FOR').split(',')[-1:][0].strip()
return addr
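# Behaviour sketch (illustrative, assumes Django settings are configured): with
# REMOTE_ADDR missing, the last X-Forwarded-For entry is returned.
def _remote_addr_demo():
    from django.test import RequestFactory
    request = RequestFactory().get('/', HTTP_X_FORWARDED_FOR='10.0.0.1, 203.0.113.7')
    request.META.pop('REMOTE_ADDR', None)
    return get_remote_addr(request)  # expected: '203.0.113.7'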
| manazag/hopper.pw | hopperpw/main/utils.py | Python | bsd-3-clause | 206 |
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors import KDTree
from sklearn.preprocessing import normalize
from umap import distances as dist
from umap.umap_ import (
nearest_neighbors,
smooth_knn_dist,
)
# ===================================================
# Nearest Neighbour Test cases
# ===================================================
# nearest_neighbours metric parameter validation
# -----------------------------------------------
def test_nn_bad_metric(nn_data):
with pytest.raises(ValueError):
nearest_neighbors(nn_data, 10, 42, {}, False, np.random)
def test_nn_bad_metric_sparse_data(sparse_nn_data):
with pytest.raises(ValueError):
nearest_neighbors(
sparse_nn_data,
10,
"seuclidean",
{},
False,
np.random,
)
# -------------------------------------------------
# Utility functions for Nearest Neighbour
# -------------------------------------------------
def knn(indices, nn_data): # pragma: no cover
tree = KDTree(nn_data)
true_indices = tree.query(nn_data, 10, return_distance=False)
num_correct = 0.0
for i in range(nn_data.shape[0]):
num_correct += np.sum(np.in1d(true_indices[i], indices[i]))
return num_correct / (nn_data.shape[0] * 10)
def smooth_knn(nn_data, local_connectivity=1.0):
knn_indices, knn_dists, _ = nearest_neighbors(
nn_data, 10, "euclidean", {}, False, np.random
)
sigmas, rhos = smooth_knn_dist(
knn_dists, 10.0, local_connectivity=local_connectivity
)
shifted_dists = knn_dists - rhos[:, np.newaxis]
shifted_dists[shifted_dists < 0.0] = 0.0
vals = np.exp(-(shifted_dists / sigmas[:, np.newaxis]))
norms = np.sum(vals, axis=1)
return norms
@pytest.mark.skip()
def test_nn_descent_neighbor_accuracy(nn_data): # pragma: no cover
knn_indices, knn_dists, _ = nearest_neighbors(
nn_data, 10, "euclidean", {}, False, np.random
)
percent_correct = knn(knn_indices, nn_data)
assert (
percent_correct >= 0.85
    ), "NN-descent did not get 85% accuracy on nearest neighbors"
@pytest.mark.skip()
def test_nn_descent_neighbor_accuracy_low_memory(nn_data): # pragma: no cover
knn_indices, knn_dists, _ = nearest_neighbors(
nn_data, 10, "euclidean", {}, False, np.random, low_memory=True
)
percent_correct = knn(knn_indices, nn_data)
assert (
percent_correct >= 0.89
), "NN-descent did not get 89% accuracy on nearest neighbors"
@pytest.mark.skip()
def test_angular_nn_descent_neighbor_accuracy(nn_data): # pragma: no cover
knn_indices, knn_dists, _ = nearest_neighbors(
nn_data, 10, "cosine", {}, True, np.random
)
angular_data = normalize(nn_data, norm="l2")
percent_correct = knn(knn_indices, angular_data)
assert (
percent_correct >= 0.85
    ), "NN-descent did not get 85% accuracy on nearest neighbors"
@pytest.mark.skip()
def test_sparse_nn_descent_neighbor_accuracy(sparse_nn_data): # pragma: no cover
knn_indices, knn_dists, _ = nearest_neighbors(
sparse_nn_data, 20, "euclidean", {}, False, np.random
)
percent_correct = knn(knn_indices, sparse_nn_data.todense())
assert (
percent_correct >= 0.75
    ), "Sparse NN-descent did not get 75% accuracy on nearest neighbors"
@pytest.mark.skip()
def test_sparse_nn_descent_neighbor_accuracy_low_memory(
sparse_nn_data,
): # pragma: no cover
knn_indices, knn_dists, _ = nearest_neighbors(
sparse_nn_data, 20, "euclidean", {}, False, np.random, low_memory=True
)
percent_correct = knn(knn_indices, sparse_nn_data.todense())
assert (
percent_correct >= 0.85
    ), "Sparse NN-descent did not get 85% accuracy on nearest neighbors"
@pytest.mark.skip()
def test_nn_descent_neighbor_accuracy_callable_metric(nn_data): # pragma: no cover
knn_indices, knn_dists, _ = nearest_neighbors(
nn_data, 10, dist.euclidean, {}, False, np.random
)
percent_correct = knn(knn_indices, nn_data)
assert (
percent_correct >= 0.95
), "NN-descent did not get 95% accuracy on nearest neighbors with callable metric"
@pytest.mark.skip()
def test_sparse_angular_nn_descent_neighbor_accuracy(
sparse_nn_data,
): # pragma: no cover
knn_indices, knn_dists, _ = nearest_neighbors(
sparse_nn_data, 20, "cosine", {}, True, np.random
)
angular_data = normalize(sparse_nn_data, norm="l2").toarray()
percent_correct = knn(knn_indices, angular_data)
assert (
percent_correct >= 0.90
), "Sparse NN-descent did not get 90% accuracy on nearest neighbors"
def test_smooth_knn_dist_l1norms(nn_data):
norms = smooth_knn(nn_data)
assert_array_almost_equal(
norms,
1.0 + np.log2(10) * np.ones(norms.shape[0]),
decimal=3,
err_msg="Smooth knn-dists does not give expected" "norms",
)
def test_smooth_knn_dist_l1norms_w_connectivity(nn_data):
norms = smooth_knn(nn_data, local_connectivity=1.75)
assert_array_almost_equal(
norms,
1.0 + np.log2(10) * np.ones(norms.shape[0]),
decimal=3,
err_msg="Smooth knn-dists does not give expected"
"norms for local_connectivity=1.75",
)
| lmcinnes/umap | umap/tests/test_umap_nn.py | Python | bsd-3-clause | 5,348 |
'''
Collection of tests to verify presets parsing correctness
'''
from nose.tools import eq_, raises
from presets.presetManager import Preset
from presets.presetManager import PresetMissingFieldException
from presets.presetManager import PresetFieldTypeException
from presets.presetManager import PresetException
def test_creation():
preset = {
"id": "id_test",
"properties": []
}
p = Preset(preset)
eq_(p.id, preset['id'])
@raises(PresetMissingFieldException)
def test_field_id_missing():
''' id is not optional'''
preset = {
"properties": []
}
Preset(preset)
@raises(PresetException)
def test_field_id_empty():
''' id is not optional'''
preset = {
"id": "",
"properties": []
}
Preset(preset)
@raises(PresetFieldTypeException)
def test_field_id_type():
''' id must be a string '''
preset = {
"id": {},
"properties": []
}
Preset(preset)
def test_field_allowUpload():
    ''' allow_upload accepts boolean values'''
preset = {
"id": "id_test",
"properties": [],
"allow_upload": True
}
p = Preset(preset)
eq_(p.allow_upload, preset['allow_upload'])
preset['allow_upload'] = False
p = Preset(preset)
eq_(p.allow_upload, preset['allow_upload'])
def test_field_allowUpload_default():
preset = {
"id": "id_test",
"properties": []
}
p = Preset(preset)
eq_(p.allow_upload, True)
@raises(PresetFieldTypeException)
def test_field_allowUpload_type():
''' allow_upload must be bool'''
preset = {
"id": "id_test",
"properties": [],
"allow_upload": "test"
}
Preset(preset)
@raises(PresetMissingFieldException)
def test_field_properties_missing():
''' properties is not optional'''
preset = {
"id": "id_test"
}
Preset(preset)
@raises(PresetFieldTypeException)
def test_field_properties_type():
''' properties must be dict'''
preset = {
"id": "id_test",
"properties": "asd"
}
Preset(preset)
def test_properties_empty():
preset = {
"id": "id_test",
"properties": []
}
p = Preset(preset)
eq_(len(p.properties), 0)
@raises(PresetException)
def test_id_empty():
preset = {
"id": "",
"properties": []
}
Preset(preset)
@raises(PresetException)
def test_properties_id_empty():
preset = {
"id": "id_test",
"properties": [{"id": ""}]
}
Preset(preset)
@raises(PresetException)
def test_properties_duplicate_id():
preset = {
"id": "id_test",
"properties": [ {"id": "1_p"},
{"id": "1_p"} ]
}
Preset(preset)
def test_properties_num():
preset = {
"id": "id_test",
"properties": [ {"id": "1_p"},
{"id": "2_p"},
{"id": "3_p"}
]
}
p = Preset(preset)
eq_(len(p.properties),3)
for i,prop in enumerate(preset['properties']):
eq_(prop['id'], p.properties[i].id)
def test_properties_all():
preset = {
"id": "id_test",
"properties": [ { "id": "prop_test",
"description": "description test",
"required": True,
"type" : "string" } ]
}
p = Preset(preset)
eq_(len(p.properties),1)
eq_(p.properties[0].id, preset['properties'][0]['id'])
eq_(p.properties[0].description, preset['properties'][0]['description'])
eq_(p.properties[0].required, preset['properties'][0]['required'])
eq_(p.properties[0].type, preset['properties'][0]['type'])
def test_properties_defaults():
preset = {
"id" : "id_test",
"properties": [ {"id": "prop_test"} ]
}
p = Preset(preset)
eq_(len(p.properties),1)
eq_(p.properties[0].id, preset['properties'][0]['id'])
eq_(p.properties[0].required, False)
eq_(p.properties[0].type, "string")
@raises(PresetException)
def test_properties_type():
'''test type value not valid'''
preset = {
"id" : "id_test",
"properties": [{ "id": "prop_test",
"type" : "### non_esiste ###" }]
}
Preset(preset)
@raises(PresetMissingFieldException)
def test_properties_type_enum_missing_values():
preset = {
"id": "id_test",
"properties": [{ "id": "prop_test",
"type" : "enum"
}]
}
Preset(preset)
@raises(PresetFieldTypeException)
def test_properties_type_enum_values_type():
preset = {
"id": "id_test",
"properties": [{ "id": "prop_test",
"type" : "enum",
"values": "errorrrre"
}]
}
Preset(preset)
def test_properties_type_enum_values():
preset = {
"id": "id_test",
"properties": [{ "id": "prop_test",
"type": "enum",
"values": ['uno','due','dieci']
}]
}
p = Preset(preset)
eq_(len(p.properties),1)
eq_(p.properties[0].id, preset['properties'][0]['id'])
eq_(p.properties[0].type, preset['properties'][0]['type'])
eq_(p.properties[0].values, preset['properties'][0]['values'])
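# Reference sketch (illustrative): a preset combining the fields exercised by
# the tests above; per that contract it should construct without raising.
def _example_preset():
    return Preset({
        "id": "book",
        "allow_upload": True,
        "properties": [
            {"id": "title", "required": True, "type": "string"},
            {"id": "language", "type": "enum", "values": ["en", "it"]},
        ],
    })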
| leophys/libreant | presets/test/test_preset_parsing.py | Python | agpl-3.0 | 5,337 |
from setuptools import setup, find_packages
version = "5.2.0"
with open("requirements.txt", "r") as f:
install_requires = f.readlines()
setup(
name='erpnext',
version=version,
description='Open Source ERP',
author='Frappe Technologies',
author_email='[email protected]',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires
)
| netfirms/erpnext | setup.py | Python | agpl-3.0 | 415 |
from django.contrib.admin import (
HORIZONTAL, VERTICAL, AdminSite, ModelAdmin, StackedInline, TabularInline,
autodiscover, site,
)
from django.contrib.gis.admin.options import GeoModelAdmin, OSMGeoAdmin
from django.contrib.gis.admin.widgets import OpenLayersWidget
__all__ = [
'autodiscover', 'site', 'AdminSite', 'ModelAdmin', 'StackedInline',
'TabularInline', 'HORIZONTAL', 'VERTICAL', 'GeoModelAdmin', 'OSMGeoAdmin',
'OpenLayersWidget',
]
| huang4fstudio/django | django/contrib/gis/admin/__init__.py | Python | bsd-3-clause | 464 |
import re
import sys
import copy
import types
import inspect
import keyword
import builtins
import functools
import _thread
__all__ = ['dataclass',
'field',
'Field',
'FrozenInstanceError',
'InitVar',
'MISSING',
# Helper functions.
'fields',
'asdict',
'astuple',
'make_dataclass',
'replace',
'is_dataclass',
]
# Conditions for adding methods. The boxes indicate what action the
# dataclass decorator takes. For all of these tables, when I talk
# about init=, repr=, eq=, order=, unsafe_hash=, or frozen=, I'm
# referring to the arguments to the @dataclass decorator. When
# checking if a dunder method already exists, I mean check for an
# entry in the class's __dict__. I never check to see if an attribute
# is defined in a base class.
# Key:
# +=========+=========================================+
# + Value | Meaning |
# +=========+=========================================+
# | <blank> | No action: no method is added. |
# +---------+-----------------------------------------+
# | add | Generated method is added. |
# +---------+-----------------------------------------+
# | raise | TypeError is raised. |
# +---------+-----------------------------------------+
# | None | Attribute is set to None. |
# +=========+=========================================+
# __init__
#
# +--- init= parameter
# |
# v | | |
# | no | yes | <--- class has __init__ in __dict__?
# +=======+=======+=======+
# | False | | |
# +-------+-------+-------+
# | True | add | | <- the default
# +=======+=======+=======+
# __repr__
#
# +--- repr= parameter
# |
# v | | |
# | no | yes | <--- class has __repr__ in __dict__?
# +=======+=======+=======+
# | False | | |
# +-------+-------+-------+
# | True | add | | <- the default
# +=======+=======+=======+
# __setattr__
# __delattr__
#
# +--- frozen= parameter
# |
# v | | |
# | no | yes | <--- class has __setattr__ or __delattr__ in __dict__?
# +=======+=======+=======+
# | False | | | <- the default
# +-------+-------+-------+
# | True | add | raise |
# +=======+=======+=======+
# Raise because not adding these methods would break the "frozen-ness"
# of the class.
# __eq__
#
# +--- eq= parameter
# |
# v | | |
# | no | yes | <--- class has __eq__ in __dict__?
# +=======+=======+=======+
# | False | | |
# +-------+-------+-------+
# | True | add | | <- the default
# +=======+=======+=======+
# __lt__
# __le__
# __gt__
# __ge__
#
# +--- order= parameter
# |
# v | | |
# | no | yes | <--- class has any comparison method in __dict__?
# +=======+=======+=======+
# | False | | | <- the default
# +-------+-------+-------+
# | True | add | raise |
# +=======+=======+=======+
# Raise because to allow this case would interfere with using
# functools.total_ordering.
# __hash__
# +------------------- unsafe_hash= parameter
# | +----------- eq= parameter
# | | +--- frozen= parameter
# | | |
# v v v | | |
# | no | yes | <--- class has explicitly defined __hash__
# +=======+=======+=======+========+========+
# | False | False | False | | | No __eq__, use the base class __hash__
# +-------+-------+-------+--------+--------+
# | False | False | True | | | No __eq__, use the base class __hash__
# +-------+-------+-------+--------+--------+
# | False | True | False | None | | <-- the default, not hashable
# +-------+-------+-------+--------+--------+
# | False | True | True | add | | Frozen, so hashable, allows override
# +-------+-------+-------+--------+--------+
# | True | False | False | add | raise | Has no __eq__, but hashable
# +-------+-------+-------+--------+--------+
# | True | False | True | add | raise | Has no __eq__, but hashable
# +-------+-------+-------+--------+--------+
# | True | True | False | add | raise | Not frozen, but hashable
# +-------+-------+-------+--------+--------+
# | True | True | True | add | raise | Frozen, so hashable
# +=======+=======+=======+========+========+
# For boxes that are blank, __hash__ is untouched and therefore
# inherited from the base class. If the base is object, then
# id-based hashing is used.
#
# Note that a class may already have __hash__=None if it specified an
# __eq__ method in the class body (not one that was created by
# @dataclass).
#
# See _hash_action (below) for a coded version of this table.
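# Illustrative sketch of two rows of the table above (a hedged example, kept as
# comments only and not executed as part of this module):
#
#     @dataclass                    # eq=True, frozen=False -> __hash__ set to None
#     class P:
#         x: int
#     hash(P(1))                    # TypeError: unhashable type: 'P'
#
#     @dataclass(frozen=True)       # eq=True, frozen=True -> hash generated from fields
#     class Q:
#         x: int
#     hash(Q(1))                    # works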
# Raised when an attempt is made to modify a frozen class.
class FrozenInstanceError(AttributeError): pass
# A sentinel object for default values to signal that a default
# factory will be used. This is given a nice repr() which will appear
# in the function signature of dataclasses' constructors.
class _HAS_DEFAULT_FACTORY_CLASS:
def __repr__(self):
return '<factory>'
_HAS_DEFAULT_FACTORY = _HAS_DEFAULT_FACTORY_CLASS()
# A sentinel object to detect if a parameter is supplied or not. Use
# a class to give it a better repr.
class _MISSING_TYPE:
pass
MISSING = _MISSING_TYPE()
# Since most per-field metadata will be unused, create an empty
# read-only proxy that can be shared among all fields.
_EMPTY_METADATA = types.MappingProxyType({})
# Markers for the various kinds of fields and pseudo-fields.
class _FIELD_BASE:
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
_FIELD = _FIELD_BASE('_FIELD')
_FIELD_CLASSVAR = _FIELD_BASE('_FIELD_CLASSVAR')
_FIELD_INITVAR = _FIELD_BASE('_FIELD_INITVAR')
# The name of an attribute on the class where we store the Field
# objects. Also used to check if a class is a Data Class.
_FIELDS = '__dataclass_fields__'
# The name of an attribute on the class that stores the parameters to
# @dataclass.
_PARAMS = '__dataclass_params__'
# The name of the function, that if it exists, is called at the end of
# __init__.
_POST_INIT_NAME = '__post_init__'
# String regex that string annotations for ClassVar or InitVar must match.
# Allows "identifier.identifier[" or "identifier[".
# https://bugs.python.org/issue33453 for details.
_MODULE_IDENTIFIER_RE = re.compile(r'^(?:\s*(\w+)\s*\.)?\s*(\w+)')
class _InitVarMeta(type):
def __getitem__(self, params):
return self
class InitVar(metaclass=_InitVarMeta):
pass
# Instances of Field are only ever created from within this module,
# and only from the field() function, although Field instances are
# exposed externally as (conceptually) read-only objects.
#
# name and type are filled in after the fact, not in __init__.
# They're not known at the time this class is instantiated, but it's
# convenient if they're available later.
#
# When cls._FIELDS is filled in with a list of Field objects, the name
# and type fields will have been populated.
class Field:
__slots__ = ('name',
'type',
'default',
'default_factory',
'repr',
'hash',
'init',
'compare',
'metadata',
'_field_type', # Private: not to be used by user code.
)
def __init__(self, default, default_factory, init, repr, hash, compare,
metadata):
self.name = None
self.type = None
self.default = default
self.default_factory = default_factory
self.init = init
self.repr = repr
self.hash = hash
self.compare = compare
self.metadata = (_EMPTY_METADATA
if metadata is None or len(metadata) == 0 else
types.MappingProxyType(metadata))
self._field_type = None
def __repr__(self):
return ('Field('
f'name={self.name!r},'
f'type={self.type!r},'
f'default={self.default!r},'
f'default_factory={self.default_factory!r},'
f'init={self.init!r},'
f'repr={self.repr!r},'
f'hash={self.hash!r},'
f'compare={self.compare!r},'
f'metadata={self.metadata!r},'
f'_field_type={self._field_type}'
')')
# This is used to support the PEP 487 __set_name__ protocol in the
# case where we're using a field that contains a descriptor as a
# default value. For details on __set_name__, see
# https://www.python.org/dev/peps/pep-0487/#implementation-details.
#
# Note that in _process_class, this Field object is overwritten
# with the default value, so the end result is a descriptor that
# had __set_name__ called on it at the right time.
def __set_name__(self, owner, name):
func = getattr(type(self.default), '__set_name__', None)
if func:
# There is a __set_name__ method on the descriptor, call
# it.
func(self.default, owner, name)
class _DataclassParams:
__slots__ = ('init',
'repr',
'eq',
'order',
'unsafe_hash',
'frozen',
)
def __init__(self, init, repr, eq, order, unsafe_hash, frozen):
self.init = init
self.repr = repr
self.eq = eq
self.order = order
self.unsafe_hash = unsafe_hash
self.frozen = frozen
def __repr__(self):
return ('_DataclassParams('
f'init={self.init!r},'
f'repr={self.repr!r},'
f'eq={self.eq!r},'
f'order={self.order!r},'
f'unsafe_hash={self.unsafe_hash!r},'
f'frozen={self.frozen!r}'
')')
# This function is used instead of exposing Field creation directly,
# so that a type checker can be told (via overloads) that this is a
# function whose type depends on its parameters.
def field(*, default=MISSING, default_factory=MISSING, init=True, repr=True,
hash=None, compare=True, metadata=None):
"""Return an object to identify dataclass fields.
default is the default value of the field. default_factory is a
0-argument function called to initialize a field's value. If init
is True, the field will be a parameter to the class's __init__()
function. If repr is True, the field will be included in the
object's repr(). If hash is True, the field will be included in
the object's hash(). If compare is True, the field will be used
in comparison functions. metadata, if specified, must be a
mapping which is stored but not otherwise examined by dataclass.
It is an error to specify both default and default_factory.
"""
if default is not MISSING and default_factory is not MISSING:
raise ValueError('cannot specify both default and default_factory')
return Field(default, default_factory, init, repr, hash, compare,
metadata)
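# Illustrative sketch of field() usage (a hedged, commented-out example, not
# part of this module):
#
#     @dataclass
#     class Order:
#         items: list = field(default_factory=list)        # fresh list per instance
#         note: str = field(default='', repr=False)         # excluded from repr()
#         tag: int = field(default=0, metadata={'k': 'v'})  # metadata stored, not interpreted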
def _tuple_str(obj_name, fields):
# Return a string representing each field of obj_name as a tuple
# member. So, if fields is ['x', 'y'] and obj_name is "self",
# return "(self.x,self.y)".
# Special case for the 0-tuple.
if not fields:
return '()'
# Note the trailing comma, needed if this turns out to be a 1-tuple.
return f'({",".join([f"{obj_name}.{f.name}" for f in fields])},)'
# This function's logic is copied from "recursive_repr" function in
# reprlib module to avoid dependency.
def _recursive_repr(user_function):
# Decorator to make a repr function return "..." for a recursive
# call.
repr_running = set()
@functools.wraps(user_function)
def wrapper(self):
key = id(self), _thread.get_ident()
if key in repr_running:
return '...'
repr_running.add(key)
try:
result = user_function(self)
finally:
repr_running.discard(key)
return result
return wrapper
def _create_fn(name, args, body, *, globals=None, locals=None,
return_type=MISSING):
# Note that we mutate locals when exec() is called. Caller
# beware! The only callers are internal to this module, so no
# worries about external callers.
if locals is None:
locals = {}
# __builtins__ may be the "builtins" module or
# the value of its "__dict__",
# so make sure "__builtins__" is the module.
if globals is not None and '__builtins__' not in globals:
globals['__builtins__'] = builtins
return_annotation = ''
if return_type is not MISSING:
locals['_return_type'] = return_type
return_annotation = '->_return_type'
args = ','.join(args)
body = '\n'.join(f' {b}' for b in body)
# Compute the text of the entire function.
txt = f'def {name}({args}){return_annotation}:\n{body}'
exec(txt, globals, locals)
return locals[name]
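# Illustrative sketch (commented out): for name='__eq__', args=('self', 'other')
# and the body produced by _cmp_fn below for a single field 'x', the text
# passed to exec() above is roughly:
#
#     def __eq__(self,other):
#      if other.__class__ is self.__class__:
#       return (self.x,)==(other.x,)
#      return NotImplemented
#
# (each body line gains one leading space from the join above; there is no
# return annotation because return_type is MISSING in that call).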
def _field_assign(frozen, name, value, self_name):
# If we're a frozen class, then assign to our fields in __init__
# via object.__setattr__. Otherwise, just use a simple
# assignment.
#
# self_name is what "self" is called in this function: don't
# hard-code "self", since that might be a field name.
if frozen:
return f'__builtins__.object.__setattr__({self_name},{name!r},{value})'
return f'{self_name}.{name}={value}'
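# Illustrative sketch (commented out): for a field named 'x' whose __init__
# parameter is used directly, the line returned above is
#     self.x=x                                         when frozen is False
#     __builtins__.object.__setattr__(self,'x',x)      when frozen is True
# assuming self_name is 'self'.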
def _field_init(f, frozen, globals, self_name):
# Return the text of the line in the body of __init__ that will
# initialize this field.
default_name = f'_dflt_{f.name}'
if f.default_factory is not MISSING:
if f.init:
# This field has a default factory. If a parameter is
# given, use it. If not, call the factory.
globals[default_name] = f.default_factory
value = (f'{default_name}() '
f'if {f.name} is _HAS_DEFAULT_FACTORY '
f'else {f.name}')
else:
# This is a field that's not in the __init__ params, but
# has a default factory function. It needs to be
# initialized here by calling the factory function,
# because there's no other way to initialize it.
# For a field initialized with a default=defaultvalue, the
# class dict just has the default value
# (cls.fieldname=defaultvalue). But that won't work for a
# default factory, the factory must be called in __init__
# and we must assign that to self.fieldname. We can't
# fall back to the class dict's value, both because it's
# not set, and because it might be different per-class
# (which, after all, is why we have a factory function!).
globals[default_name] = f.default_factory
value = f'{default_name}()'
else:
# No default factory.
if f.init:
if f.default is MISSING:
# There's no default, just do an assignment.
value = f.name
elif f.default is not MISSING:
globals[default_name] = f.default
value = f.name
else:
# This field does not need initialization. Signify that
# to the caller by returning None.
return None
# Only test this now, so that we can create variables for the
# default. However, return None to signify that we're not going
# to actually do the assignment statement for InitVars.
if f._field_type is _FIELD_INITVAR:
return None
# Now, actually generate the field assignment.
return _field_assign(frozen, f.name, value, self_name)
def _init_param(f):
# Return the __init__ parameter string for this field. For
# example, the equivalent of 'x:int=3' (except instead of 'int',
# reference a variable set to int, and instead of '3', reference a
# variable set to 3).
if f.default is MISSING and f.default_factory is MISSING:
# There's no default, and no default_factory, just output the
# variable name and type.
default = ''
elif f.default is not MISSING:
# There's a default, this will be the name that's used to look
# it up.
default = f'=_dflt_{f.name}'
elif f.default_factory is not MISSING:
# There's a factory function. Set a marker.
default = '=_HAS_DEFAULT_FACTORY'
return f'{f.name}:_type_{f.name}{default}'
def _init_fn(fields, frozen, has_post_init, self_name):
# fields contains both real fields and InitVar pseudo-fields.
# Make sure we don't have fields without defaults following fields
# with defaults. This actually would be caught when exec-ing the
# function source code, but catching it here gives a better error
# message, and future-proofs us in case we build up the function
# using ast.
seen_default = False
for f in fields:
# Only consider fields in the __init__ call.
if f.init:
if not (f.default is MISSING and f.default_factory is MISSING):
seen_default = True
elif seen_default:
raise TypeError(f'non-default argument {f.name!r} '
'follows default argument')
globals = {'MISSING': MISSING,
'_HAS_DEFAULT_FACTORY': _HAS_DEFAULT_FACTORY}
body_lines = []
for f in fields:
line = _field_init(f, frozen, globals, self_name)
# line is None means that this field doesn't require
# initialization (it's a pseudo-field). Just skip it.
if line:
body_lines.append(line)
# Does this class have a post-init function?
if has_post_init:
params_str = ','.join(f.name for f in fields
if f._field_type is _FIELD_INITVAR)
body_lines.append(f'{self_name}.{_POST_INIT_NAME}({params_str})')
# If no body lines, use 'pass'.
if not body_lines:
body_lines = ['pass']
locals = {f'_type_{f.name}': f.type for f in fields}
return _create_fn('__init__',
[self_name] + [_init_param(f) for f in fields if f.init],
body_lines,
locals=locals,
globals=globals,
return_type=None)
def _repr_fn(fields):
fn = _create_fn('__repr__',
('self',),
['return self.__class__.__qualname__ + f"(' +
', '.join([f"{f.name}={{self.{f.name}!r}}"
for f in fields]) +
')"'])
return _recursive_repr(fn)
def _frozen_get_del_attr(cls, fields):
# XXX: globals is modified on the first call to _create_fn, then
# the modified version is used in the second call. Is this okay?
globals = {'cls': cls,
'FrozenInstanceError': FrozenInstanceError}
if fields:
fields_str = '(' + ','.join(repr(f.name) for f in fields) + ',)'
else:
# Special case for the zero-length tuple.
fields_str = '()'
return (_create_fn('__setattr__',
('self', 'name', 'value'),
(f'if type(self) is cls or name in {fields_str}:',
' raise FrozenInstanceError(f"cannot assign to field {name!r}")',
f'super(cls, self).__setattr__(name, value)'),
globals=globals),
_create_fn('__delattr__',
('self', 'name'),
(f'if type(self) is cls or name in {fields_str}:',
' raise FrozenInstanceError(f"cannot delete field {name!r}")',
f'super(cls, self).__delattr__(name)'),
globals=globals),
)
def _cmp_fn(name, op, self_tuple, other_tuple):
# Create a comparison function. If the fields in the object are
# named 'x' and 'y', then self_tuple is the string
# '(self.x,self.y)' and other_tuple is the string
# '(other.x,other.y)'.
return _create_fn(name,
('self', 'other'),
[ 'if other.__class__ is self.__class__:',
f' return {self_tuple}{op}{other_tuple}',
'return NotImplemented'])
def _hash_fn(fields):
self_tuple = _tuple_str('self', fields)
return _create_fn('__hash__',
('self',),
[f'return hash({self_tuple})'])
def _is_classvar(a_type, typing):
# This test uses a typing internal class, but it's the best way to
# test if this is a ClassVar.
return (a_type is typing.ClassVar
or (type(a_type) is typing._GenericAlias
and a_type.__origin__ is typing.ClassVar))
def _is_initvar(a_type, dataclasses):
# The module we're checking against is the module we're
# currently in (dataclasses.py).
return a_type is dataclasses.InitVar
def _is_type(annotation, cls, a_module, a_type, is_type_predicate):
# Given a type annotation string, does it refer to a_type in
# a_module? For example, when checking that annotation denotes a
# ClassVar, then a_module is typing, and a_type is
# typing.ClassVar.
# It's possible to look up a_module given a_type, but it involves
# looking in sys.modules (again!), and seems like a waste since
# the caller already knows a_module.
# - annotation is a string type annotation
# - cls is the class that this annotation was found in
# - a_module is the module we want to match
# - a_type is the type in that module we want to match
# - is_type_predicate is a function called with (obj, a_module)
# that determines if obj is of the desired type.
# Since this test does not do a local namespace lookup (and
# instead only a module (global) lookup), there are some things it
# gets wrong.
# With string annotations, cv0 will be detected as a ClassVar:
# CV = ClassVar
# @dataclass
# class C0:
# cv0: CV
# But in this example cv1 will not be detected as a ClassVar:
# @dataclass
# class C1:
# CV = ClassVar
# cv1: CV
# In C1, the code in this function (_is_type) will look up "CV" in
# the module and not find it, so it will not consider cv1 as a
# ClassVar. This is a fairly obscure corner case, and the best
# way to fix it would be to eval() the string "CV" with the
# correct global and local namespaces. However that would involve
# a eval() penalty for every single field of every dataclass
# that's defined. It was judged not worth it.
match = _MODULE_IDENTIFIER_RE.match(annotation)
if match:
ns = None
module_name = match.group(1)
if not module_name:
# No module name, assume the class's module did
# "from dataclasses import InitVar".
ns = sys.modules.get(cls.__module__).__dict__
else:
# Look up module_name in the class's module.
module = sys.modules.get(cls.__module__)
if module and module.__dict__.get(module_name) is a_module:
ns = sys.modules.get(a_type.__module__).__dict__
if ns and is_type_predicate(ns.get(match.group(2)), a_module):
return True
return False
def _get_field(cls, a_name, a_type):
# Return a Field object for this field name and type. ClassVars
# and InitVars are also returned, but marked as such (see
# f._field_type).
# If the default value isn't derived from Field, then it's only a
# normal default value. Convert it to a Field().
default = getattr(cls, a_name, MISSING)
if isinstance(default, Field):
f = default
else:
if isinstance(default, types.MemberDescriptorType):
# This is a field in __slots__, so it has no default value.
default = MISSING
f = field(default=default)
# Only at this point do we know the name and the type. Set them.
f.name = a_name
f.type = a_type
# Assume it's a normal field until proven otherwise. We're next
# going to decide if it's a ClassVar or InitVar, everything else
# is just a normal field.
f._field_type = _FIELD
# In addition to checking for actual types here, also check for
# string annotations. get_type_hints() won't always work for us
# (see https://github.com/python/typing/issues/508 for example),
    # plus it's expensive and would require an eval for every string
# annotation. So, make a best effort to see if this is a ClassVar
# or InitVar using regex's and checking that the thing referenced
# is actually of the correct type.
# For the complete discussion, see https://bugs.python.org/issue33453
# If typing has not been imported, then it's impossible for any
# annotation to be a ClassVar. So, only look for ClassVar if
# typing has been imported by any module (not necessarily cls's
# module).
typing = sys.modules.get('typing')
if typing:
if (_is_classvar(a_type, typing)
or (isinstance(f.type, str)
and _is_type(f.type, cls, typing, typing.ClassVar,
_is_classvar))):
f._field_type = _FIELD_CLASSVAR
# If the type is InitVar, or if it's a matching string annotation,
# then it's an InitVar.
if f._field_type is _FIELD:
# The module we're checking against is the module we're
# currently in (dataclasses.py).
dataclasses = sys.modules[__name__]
if (_is_initvar(a_type, dataclasses)
or (isinstance(f.type, str)
and _is_type(f.type, cls, dataclasses, dataclasses.InitVar,
_is_initvar))):
f._field_type = _FIELD_INITVAR
# Validations for individual fields. This is delayed until now,
# instead of in the Field() constructor, since only here do we
# know the field name, which allows for better error reporting.
# Special restrictions for ClassVar and InitVar.
if f._field_type in (_FIELD_CLASSVAR, _FIELD_INITVAR):
if f.default_factory is not MISSING:
raise TypeError(f'field {f.name} cannot have a '
'default factory')
# Should I check for other field settings? default_factory
# seems the most serious to check for. Maybe add others. For
# example, how about init=False (or really,
# init=<not-the-default-init-value>)? It makes no sense for
# ClassVar and InitVar to specify init=<anything>.
# For real fields, disallow mutable defaults for known types.
if f._field_type is _FIELD and isinstance(f.default, (list, dict, set)):
raise ValueError(f'mutable default {type(f.default)} for field '
f'{f.name} is not allowed: use default_factory')
return f
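# Illustrative sketch of the mutable-default check above (hedged, commented-out
# example only):
#
#     @dataclass
#     class Bad:
#         items: list = []       # ValueError: mutable default ... use default_factory
#
#     @dataclass
#     class Good:
#         items: list = field(default_factory=list)   # one new list per instance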
def _set_new_attribute(cls, name, value):
# Never overwrites an existing attribute. Returns True if the
# attribute already exists.
if name in cls.__dict__:
return True
setattr(cls, name, value)
return False
# Decide if/how we're going to create a hash function. Key is
# (unsafe_hash, eq, frozen, does-hash-exist). Value is the action to
# take. The common case is to do nothing, so instead of providing a
# function that is a no-op, use None to signify that.
def _hash_set_none(cls, fields):
return None
def _hash_add(cls, fields):
flds = [f for f in fields if (f.compare if f.hash is None else f.hash)]
return _hash_fn(flds)
def _hash_exception(cls, fields):
# Raise an exception.
raise TypeError(f'Cannot overwrite attribute __hash__ '
f'in class {cls.__name__}')
#
# +-------------------------------------- unsafe_hash?
# | +------------------------------- eq?
# | | +------------------------ frozen?
# | | | +---------------- has-explicit-hash?
# | | | |
# | | | | +------- action
# | | | | |
# v v v v v
_hash_action = {(False, False, False, False): None,
(False, False, False, True ): None,
(False, False, True, False): None,
(False, False, True, True ): None,
(False, True, False, False): _hash_set_none,
(False, True, False, True ): None,
(False, True, True, False): _hash_add,
(False, True, True, True ): None,
(True, False, False, False): _hash_add,
(True, False, False, True ): _hash_exception,
(True, False, True, False): _hash_add,
(True, False, True, True ): _hash_exception,
(True, True, False, False): _hash_add,
(True, True, False, True ): _hash_exception,
(True, True, True, False): _hash_add,
(True, True, True, True ): _hash_exception,
}
# See https://bugs.python.org/issue32929#msg312829 for an if-statement
# version of this table.
def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen):
# Now that dicts retain insertion order, there's no reason to use
# an ordered dict. I am leveraging that ordering here, because
# derived class fields overwrite base class fields, but the order
# is defined by the base class, which is found first.
fields = {}
setattr(cls, _PARAMS, _DataclassParams(init, repr, eq, order,
unsafe_hash, frozen))
# Find our base classes in reverse MRO order, and exclude
# ourselves. In reversed order so that more derived classes
# override earlier field definitions in base classes. As long as
# we're iterating over them, see if any are frozen.
any_frozen_base = False
has_dataclass_bases = False
for b in cls.__mro__[-1:0:-1]:
# Only process classes that have been processed by our
# decorator. That is, they have a _FIELDS attribute.
base_fields = getattr(b, _FIELDS, None)
if base_fields:
has_dataclass_bases = True
for f in base_fields.values():
fields[f.name] = f
if getattr(b, _PARAMS).frozen:
any_frozen_base = True
# Annotations that are defined in this class (not in base
# classes). If __annotations__ isn't present, then this class
# adds no new annotations. We use this to compute fields that are
# added by this class.
#
# Fields are found from cls_annotations, which is guaranteed to be
# ordered. Default values are from class attributes, if a field
# has a default. If the default value is a Field(), then it
# contains additional info beyond (and possibly including) the
# actual default value. Pseudo-fields ClassVars and InitVars are
# included, despite the fact that they're not real fields. That's
# dealt with later.
cls_annotations = cls.__dict__.get('__annotations__', {})
# Now find fields in our class. While doing so, validate some
# things, and set the default values (as class attributes) where
# we can.
cls_fields = [_get_field(cls, name, type)
for name, type in cls_annotations.items()]
for f in cls_fields:
fields[f.name] = f
# If the class attribute (which is the default value for this
# field) exists and is of type 'Field', replace it with the
# real default. This is so that normal class introspection
# sees a real default value, not a Field.
if isinstance(getattr(cls, f.name, None), Field):
if f.default is MISSING:
# If there's no default, delete the class attribute.
# This happens if we specify field(repr=False), for
# example (that is, we specified a field object, but
# no default value). Also if we're using a default
# factory. The class attribute should not be set at
# all in the post-processed class.
delattr(cls, f.name)
else:
setattr(cls, f.name, f.default)
# Do we have any Field members that don't also have annotations?
for name, value in cls.__dict__.items():
if isinstance(value, Field) and not name in cls_annotations:
raise TypeError(f'{name!r} is a field but has no type annotation')
# Check rules that apply if we are derived from any dataclasses.
if has_dataclass_bases:
# Raise an exception if any of our bases are frozen, but we're not.
if any_frozen_base and not frozen:
raise TypeError('cannot inherit non-frozen dataclass from a '
'frozen one')
# Raise an exception if we're frozen, but none of our bases are.
if not any_frozen_base and frozen:
raise TypeError('cannot inherit frozen dataclass from a '
'non-frozen one')
# Remember all of the fields on our class (including bases). This
# also marks this class as being a dataclass.
setattr(cls, _FIELDS, fields)
# Was this class defined with an explicit __hash__? Note that if
# __eq__ is defined in this class, then python will automatically
# set __hash__ to None. This is a heuristic, as it's possible
# that such a __hash__ == None was not auto-generated, but it
    # is close enough.
class_hash = cls.__dict__.get('__hash__', MISSING)
has_explicit_hash = not (class_hash is MISSING or
(class_hash is None and '__eq__' in cls.__dict__))
# If we're generating ordering methods, we must be generating the
# eq methods.
if order and not eq:
raise ValueError('eq must be true if order is true')
if init:
# Does this class have a post-init function?
has_post_init = hasattr(cls, _POST_INIT_NAME)
# Include InitVars and regular fields (so, not ClassVars).
flds = [f for f in fields.values()
if f._field_type in (_FIELD, _FIELD_INITVAR)]
_set_new_attribute(cls, '__init__',
_init_fn(flds,
frozen,
has_post_init,
# The name to use for the "self"
# param in __init__. Use "self"
# if possible.
'__dataclass_self__' if 'self' in fields
else 'self',
))
# Get the fields as a list, and include only real fields. This is
# used in all of the following methods.
field_list = [f for f in fields.values() if f._field_type is _FIELD]
if repr:
flds = [f for f in field_list if f.repr]
_set_new_attribute(cls, '__repr__', _repr_fn(flds))
if eq:
        # Create __eq__ method.  There's no need for a __ne__ method,
# since python will call __eq__ and negate it.
flds = [f for f in field_list if f.compare]
self_tuple = _tuple_str('self', flds)
other_tuple = _tuple_str('other', flds)
_set_new_attribute(cls, '__eq__',
_cmp_fn('__eq__', '==',
self_tuple, other_tuple))
if order:
# Create and set the ordering methods.
flds = [f for f in field_list if f.compare]
self_tuple = _tuple_str('self', flds)
other_tuple = _tuple_str('other', flds)
for name, op in [('__lt__', '<'),
('__le__', '<='),
('__gt__', '>'),
('__ge__', '>='),
]:
if _set_new_attribute(cls, name,
_cmp_fn(name, op, self_tuple, other_tuple)):
raise TypeError(f'Cannot overwrite attribute {name} '
f'in class {cls.__name__}. Consider using '
'functools.total_ordering')
if frozen:
for fn in _frozen_get_del_attr(cls, field_list):
if _set_new_attribute(cls, fn.__name__, fn):
raise TypeError(f'Cannot overwrite attribute {fn.__name__} '
f'in class {cls.__name__}')
# Decide if/how we're going to create a hash function.
hash_action = _hash_action[bool(unsafe_hash),
bool(eq),
bool(frozen),
has_explicit_hash]
if hash_action:
# No need to call _set_new_attribute here, since by the time
# we're here the overwriting is unconditional.
cls.__hash__ = hash_action(cls, field_list)
if not getattr(cls, '__doc__'):
# Create a class doc-string.
cls.__doc__ = (cls.__name__ +
str(inspect.signature(cls)).replace(' -> None', ''))
return cls
# _cls should never be specified by keyword, so start it with an
# underscore. The presence of _cls is used to detect if this
# decorator is being called with parameters or not.
def dataclass(_cls=None, *, init=True, repr=True, eq=True, order=False,
unsafe_hash=False, frozen=False):
"""Returns the same class as was passed in, with dunder methods
added based on the fields defined in the class.
Examines PEP 526 __annotations__ to determine fields.
If init is true, an __init__() method is added to the class. If
repr is true, a __repr__() method is added. If order is true, rich
comparison dunder methods are added. If unsafe_hash is true, a
__hash__() method function is added. If frozen is true, fields may
not be assigned to after instance creation.
"""
def wrap(cls):
return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen)
# See if we're being called as @dataclass or @dataclass().
if _cls is None:
# We're called with parens.
return wrap
# We're called as @dataclass without parens.
return wrap(_cls)
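# Illustrative sketch of the two call forms handled above (commented out):
#
#     @dataclass                  # no parens: _cls is the class, wrap(_cls) runs now
#     class Point:
#         x: int
#         y: int = 0
#
#     @dataclass(order=True)      # with parens: _cls is None, wrap is returned
#     class Version:
#         major: int
#         minor: int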
def fields(class_or_instance):
"""Return a tuple describing the fields of this dataclass.
Accepts a dataclass or an instance of one. Tuple elements are of
type Field.
"""
# Might it be worth caching this, per class?
try:
fields = getattr(class_or_instance, _FIELDS)
except AttributeError:
raise TypeError('must be called with a dataclass type or instance')
# Exclude pseudo-fields. Note that fields is sorted by insertion
# order, so the order of the tuple is as the fields were defined.
return tuple(f for f in fields.values() if f._field_type is _FIELD)
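# Illustrative sketch (commented out): for the Point class sketched above,
#     [f.name for f in fields(Point)]   ->   ['x', 'y']
# pseudo-fields (ClassVar and InitVar entries) are filtered out here.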
def _is_dataclass_instance(obj):
"""Returns True if obj is an instance of a dataclass."""
return not isinstance(obj, type) and hasattr(obj, _FIELDS)
def is_dataclass(obj):
"""Returns True if obj is a dataclass or an instance of a
dataclass."""
return hasattr(obj, _FIELDS)
def asdict(obj, *, dict_factory=dict):
"""Return the fields of a dataclass instance as a new dictionary mapping
field names to field values.
Example usage:
@dataclass
class C:
x: int
y: int
c = C(1, 2)
assert asdict(c) == {'x': 1, 'y': 2}
If given, 'dict_factory' will be used instead of built-in dict.
The function applies recursively to field values that are
dataclass instances. This will also look into built-in containers:
tuples, lists, and dicts.
"""
if not _is_dataclass_instance(obj):
raise TypeError("asdict() should be called on dataclass instances")
return _asdict_inner(obj, dict_factory)
def _asdict_inner(obj, dict_factory):
if _is_dataclass_instance(obj):
result = []
for f in fields(obj):
value = _asdict_inner(getattr(obj, f.name), dict_factory)
result.append((f.name, value))
return dict_factory(result)
elif isinstance(obj, tuple) and hasattr(obj, '_fields'):
# obj is a namedtuple. Recurse into it, but the returned
# object is another namedtuple of the same type. This is
# similar to how other list- or tuple-derived classes are
# treated (see below), but we just need to create them
# differently because a namedtuple's __init__ needs to be
# called differently (see bpo-34363).
# I'm not using namedtuple's _asdict()
# method, because:
        #  - it does not recurse into the namedtuple fields and
# convert them to dicts (using dict_factory).
        #  - I don't actually want to return a dict here.  The main
# use case here is json.dumps, and it handles converting
# namedtuples to lists. Admittedly we're losing some
# information here when we produce a json list instead of a
# dict. Note that if we returned dicts here instead of
# namedtuples, we could no longer call asdict() on a data
# structure where a namedtuple was used as a dict key.
return type(obj)(*[_asdict_inner(v, dict_factory) for v in obj])
elif isinstance(obj, (list, tuple)):
# Assume we can create an object of this type by passing in a
# generator (which is not true for namedtuples, handled
# above).
return type(obj)(_asdict_inner(v, dict_factory) for v in obj)
elif isinstance(obj, dict):
return type(obj)((_asdict_inner(k, dict_factory),
_asdict_inner(v, dict_factory))
for k, v in obj.items())
else:
return copy.deepcopy(obj)
def astuple(obj, *, tuple_factory=tuple):
"""Return the fields of a dataclass instance as a new tuple of field values.
Example usage::
@dataclass
class C:
x: int
y: int
c = C(1, 2)
assert astuple(c) == (1, 2)
If given, 'tuple_factory' will be used instead of built-in tuple.
The function applies recursively to field values that are
dataclass instances. This will also look into built-in containers:
tuples, lists, and dicts.
"""
if not _is_dataclass_instance(obj):
raise TypeError("astuple() should be called on dataclass instances")
return _astuple_inner(obj, tuple_factory)
def _astuple_inner(obj, tuple_factory):
if _is_dataclass_instance(obj):
result = []
for f in fields(obj):
value = _astuple_inner(getattr(obj, f.name), tuple_factory)
result.append(value)
return tuple_factory(result)
elif isinstance(obj, tuple) and hasattr(obj, '_fields'):
# obj is a namedtuple. Recurse into it, but the returned
# object is another namedtuple of the same type. This is
# similar to how other list- or tuple-derived classes are
# treated (see below), but we just need to create them
# differently because a namedtuple's __init__ needs to be
# called differently (see bpo-34363).
return type(obj)(*[_astuple_inner(v, tuple_factory) for v in obj])
elif isinstance(obj, (list, tuple)):
# Assume we can create an object of this type by passing in a
# generator (which is not true for namedtuples, handled
# above).
return type(obj)(_astuple_inner(v, tuple_factory) for v in obj)
elif isinstance(obj, dict):
return type(obj)((_astuple_inner(k, tuple_factory), _astuple_inner(v, tuple_factory))
for k, v in obj.items())
else:
return copy.deepcopy(obj)
def make_dataclass(cls_name, fields, *, bases=(), namespace=None, init=True,
repr=True, eq=True, order=False, unsafe_hash=False,
frozen=False):
"""Return a new dynamically created dataclass.
The dataclass name will be 'cls_name'. 'fields' is an iterable
of either (name), (name, type) or (name, type, Field) objects. If type is
omitted, use the string 'typing.Any'. Field objects are created by
the equivalent of calling 'field(name, type [, Field-info])'.
C = make_dataclass('C', ['x', ('y', int), ('z', int, field(init=False))], bases=(Base,))
is equivalent to:
@dataclass
class C(Base):
x: 'typing.Any'
y: int
z: int = field(init=False)
For the bases and namespace parameters, see the builtin type() function.
The parameters init, repr, eq, order, unsafe_hash, and frozen are passed to
dataclass().
"""
if namespace is None:
namespace = {}
else:
# Copy namespace since we're going to mutate it.
namespace = namespace.copy()
# While we're looking through the field names, validate that they
# are identifiers, are not keywords, and not duplicates.
seen = set()
anns = {}
for item in fields:
if isinstance(item, str):
name = item
tp = 'typing.Any'
elif len(item) == 2:
name, tp, = item
elif len(item) == 3:
name, tp, spec = item
namespace[name] = spec
else:
raise TypeError(f'Invalid field: {item!r}')
if not isinstance(name, str) or not name.isidentifier():
            raise TypeError(f'Field names must be valid identifiers: {name!r}')
if keyword.iskeyword(name):
raise TypeError(f'Field names must not be keywords: {name!r}')
if name in seen:
raise TypeError(f'Field name duplicated: {name!r}')
seen.add(name)
anns[name] = tp
namespace['__annotations__'] = anns
# We use `types.new_class()` instead of simply `type()` to allow dynamic creation
    # of generic dataclasses.
cls = types.new_class(cls_name, bases, {}, lambda ns: ns.update(namespace))
return dataclass(cls, init=init, repr=repr, eq=eq, order=order,
unsafe_hash=unsafe_hash, frozen=frozen)
def replace(obj, **changes):
"""Return a new object replacing specified fields with new values.
This is especially useful for frozen classes. Example usage:
@dataclass(frozen=True)
class C:
x: int
y: int
c = C(1, 2)
c1 = replace(c, x=3)
assert c1.x == 3 and c1.y == 2
"""
# We're going to mutate 'changes', but that's okay because it's a
# new dict, even if called with 'replace(obj, **my_changes)'.
if not _is_dataclass_instance(obj):
raise TypeError("replace() should be called on dataclass instances")
# It's an error to have init=False fields in 'changes'.
# If a field is not in 'changes', read its value from the provided obj.
for f in getattr(obj, _FIELDS).values():
# Only consider normal fields or InitVars.
if f._field_type is _FIELD_CLASSVAR:
continue
if not f.init:
# Error if this field is specified in changes.
if f.name in changes:
raise ValueError(f'field {f.name} is declared with '
'init=False, it cannot be specified with '
'replace()')
continue
if f.name not in changes:
if f._field_type is _FIELD_INITVAR:
raise ValueError(f"InitVar {f.name!r} "
'must be specified with replace()')
changes[f.name] = getattr(obj, f.name)
# Create the new object, which calls __init__() and
# __post_init__() (if defined), using all of the init fields we've
# added and/or left in 'changes'. If there are values supplied in
# changes that aren't fields, this will correctly raise a
# TypeError.
return obj.__class__(**changes)
| FFMG/myoddweb.piger | monitor/api/python/Python-3.7.2/Lib/dataclasses.py | Python | gpl-2.0 | 48,530 |
###########################################################
#
# Copyright (c) 2005-2013, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
from pyasm.common import jsonloads, Environment, Common
from pyasm.web import DivWdg, SpanWdg, HtmlElement
from tactic.ui.common import BaseRefreshWdg
from pyasm.search import Search, SearchType
from pyasm.command import Command
from pyasm.web import Table
from pyasm.widget import TextWdg, IconWdg, ThumbWdg, TextWdg, TextAreaWdg
from pyasm.security import Sudo
from pyasm.biz import Project
from tactic.ui.widget import ActionButtonWdg, IconButtonWdg
from tactic.ui.input import TextInputWdg
from tactic.ui.common import BaseTableElementWdg
__all__ = ['ChatWdg', 'ChatSessionWdg', 'ChatCmd', 'SubscriptionWdg', 'SubscriptionBarWdg', 'MessageWdg', 'FormatMessageWdg', 'MessageTableElementWdg']
class MessageTableElementWdg(BaseTableElementWdg):
def get_display(self):
sobject = self.get_current_sobject()
msg = FormatMessageWdg()
msg.set_sobject(sobject)
return msg
class FormatMessageWdg(BaseRefreshWdg):
''' formatted message for user-friendly display'''
def get_preview_wdg(cls, subscription, category='', message_code=''):
size = 60
if subscription:
category = subscription.get_value("category")
message_code = subscription.get_value("message_code")
if category == 'sobject':
sobject = Search.get_by_search_key(message_code)
thumb = DivWdg()
thumb_wdg = ThumbWdg()
thumb.add(thumb_wdg)
thumb_wdg.set_sobject(sobject)
thumb_wdg.set_icon_size(size)
search_code = sobject.get_code()
thumb.add_behavior( {
'type': 'click_up',
'search_key': message_code,
'search_code': search_code,
'cbjs_action': '''
var class_name = 'tactic.ui.tools.SObjectDetailWdg';
var kwargs = {
search_key: bvr.search_key
}
spt.tab.set_main_body_tab();
var title = "Detail ["+bvr.search_code+"]";
spt.app_busy.show("Loading " + bvr.search_code);
spt.tab.add_new(bvr.search_code, title, class_name, kwargs);
spt.app_busy.hide();
'''
} )
elif category == 'chat':
thumb = DivWdg()
thumb.add_style("width: %s" % size)
thumb.add_style("height: %s" % (size*3/4))
thumb.add_border()
thumb.add_style('text-align: center')
thumb.add_class("hand")
message = Search.get_by_code("sthpw/message", message_code)
login_code = message.get_value("login")
login = Search.get_by_code("sthpw/login", login_code)
thumb_wdg = ThumbWdg()
thumb.add(thumb_wdg)
thumb_wdg.set_sobject(login)
thumb_wdg.set_icon_size(size)
if subscription:
key = subscription.get_value("message_code")
thumb.add_behavior( {
'type': 'click_up',
'key': key,
'cbjs_action': '''
var class_name = 'tactic.ui.app.ChatSessionWdg';
var kwargs = {
'key': bvr.key,
}
spt.panel.load_popup("Chat: " + bvr.key, class_name, kwargs);
'''
} )
else:
if not category:
category = "default"
preview_text = "No Preview Available"
thumb = DivWdg()
thumb.add_style("width: %s" % size)
thumb.add_style("height: %s" % (size*3/4))
thumb.add_border()
thumb.add_color("background", "background")
#thumb.add("<br/>")
thumb.add(preview_text)
thumb.add_style('text-align: center')
thumb.add_class("hand")
thumb.add_style("margin: 3px")
return thumb
get_preview_wdg = classmethod(get_preview_wdg)
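    # Note: the assignment above is the pre-decorator spelling of a classmethod;
    # it is equivalent to decorating the function with @classmethod, e.g.
    #     @classmethod
    #     def get_preview_wdg(cls, subscription, category='', message_code=''): ...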
def get_display(self):
# self.sobjects is preferred, otherwise use
# search_key.
search_key = self.kwargs.get('search_key')
message = None
if not self.sobjects and search_key:
message = Search.get_by_search_key(search_key)
elif self.sobjects:
message = self.sobjects[0]
if not message:
return DivWdg()
if message.get_search_type() == 'sthpw/message':
message_code = message.get_value("code")
else:
message_code = message.get_value("message_code")
category = message.get_value("category")
table = Table()
table.add_row()
td = table.add_cell()
subscription = self.kwargs.get('subscription')
show_preview = self.kwargs.get('show_preview')
if show_preview in ['',None]:
show_preview = True
show_preview_category_list = ['sobject','chat']
if (category in show_preview_category_list and show_preview not in ['False','false',False]) or show_preview in ["True" ,"true",True]:
td.add( self.get_preview_wdg(subscription, category=category, message_code=message_code ))
message_value = message.get_value("message")
message_login = message.get_value("login")
#TODO: implement short_format even for closing html tags properly while truncating
short_format = self.kwargs.get('short_format') in ['true', True]
if message_value.startswith('{') and message_value.endswith('}'):
#message_value = message_value.replace(r"\\", "\\");
message_value = jsonloads(message_value)
# that doesn't support delete
if category == "sobject":
update_data = message_value.get("update_data")
sobject_data = message_value.get("sobject")
sobject_code = sobject_data.get('code')
search_type = message_value.get("search_type")
if search_type == "sthpw/note":
description = "<b>Note added:</b><br/>%s" % update_data.get("note")
elif search_type == "sthpw/task":
description = "<b>Task modified:</b><br/>%s" % update_data.get("process")
elif search_type == "sthpw/snapshot":
sobject = message_value.get("sobject")
description = "<b>Files checked in:</b><br/>%s" % sobject.get("process")
else:
display = []
if update_data:
for key, val in update_data.items():
display.append('%s – %s'%(key, val))
else:
if message_value.get('mode') == 'retire':
display.append('Retired')
base_search_type = Project.extract_base_search_type(search_type)
description = DivWdg()
title = DivWdg("<b>%s</b> - %s modified by %s:"%(base_search_type, sobject_code, message_login))
title.add_style('margin-bottom: 6px')
content = DivWdg()
content.add_style('padding-left: 2px')
content.add('<br>'.join(display))
description.add(title)
description.add(content)
elif category == 'progress':
description = DivWdg()
message = message_value.get('message')
message_div = DivWdg()
message_div.add(message)
description.add(message_div)
percent = message_value.get('progress')
if not percent:
percent = 0.0
progress = HtmlElement('progress')
progress.add_attr('value', percent)
progress.add_attr('max', '100')
progress.add_styles('''width: 280px; border-radius: 8px; box-shadow: 0 1px 2px rgba(0, 0, 0, 0.75) inset''')
progress_size = self.kwargs.get("progress_size")
if progress_size == "large":
progress.add_styles("height: 16px; margin-top: 9px;")
else:
progress.add_styles("height: 5px; margin-top: 4px;")
description.add(progress)
else:
message = message_value.get('message')
if message:
description = message
else:
description = message_value.get("description")
else:
if category == "chat":
login = message.get("login")
timestamp = message.get("timestamp")
message_value = message.get("message")
message_value = message_value.replace("\n", "<br/>")
description = '''
<b>%s</b><br/>
%s
''' % (login, message_value)
else:
description = message_value
div = DivWdg()
div.add(description)
table.add_cell(div)
return table
class ChatWdg(BaseRefreshWdg):
def get_display(self):
        top = self.top
self.set_as_panel(top)
top.add_class("spt_chat_top")
inner = DivWdg()
top.add(inner)
inner.add_behavior( {
'type': 'load',
'cbjs_action': MessageWdg.get_onload_js()
} )
search = Search("sthpw/subscription")
search.add_filter("category", "chat")
search.add_user_filter()
chats = search.get_sobjects()
keys = [x.get_value("message_code") for x in chats]
"""
chat_list_div = DivWdg()
inner.add(chat_list_div)
for i, chat in enumerate(chats):
chat_div = DivWdg()
chat_list_div.add(chat_div)
chat_div.add_style("padding: 5px")
chat_div.add_class("hand")
# find all the users with the same chat
key = chat.get_value("message_code")
#chat_div.add(key)
chat_div.add("#%s: " % i)
search = Search("sthpw/subscription")
search.add_filter("message_code", key)
subscriptions = search.get_sobjects()
users = [x.get_value("login") for x in subscriptions]
chat_div.add(", ".join(users))
chat_div.add_behavior( {
'type': 'click_up',
'key': key,
'cbjs_action': '''
var class_name = 'tactic.ui.app.ChatSessionWdg';
var kwargs = {
'key': bvr.key,
}
spt.panel.load_popup("Chat: " + bvr.key, class_name, kwargs);
'''
} )
chat_div.add_behavior( {
'type': 'mouseover',
'cbjs_action': '''
bvr.src_el.setStyle("color", "#214e75");
'''
} )
chat_div.add_behavior( {
'type': 'mouseout',
'cbjs_action': '''
bvr.src_el.setStyle("color", "");
'''
} )
"""
#keys = self.kwargs.get("keys")
#if not keys:
# return
inner.add( self.get_add_chat_wdg() )
inner.add("<br/>")
from tactic.ui.container import TabWdg
tab = TabWdg(
show_add=False,
show_remove=False
)
inner.add(tab)
for key in keys:
search = Search("sthpw/subscription")
search.add_filter("message_code", key)
subscriptions = search.get_sobjects()
users = [x.get_value("login") for x in subscriptions]
users = ", ".join(users)
session_div = DivWdg()
session_div.set_name(users)
session_div.add_style("width: 100%")
#inner.add(session_div)
tab.add(session_div)
session = ChatSessionWdg(key=key)
session_div.add(session)
inner.add("<br clear='all'/>")
if self.kwargs.get("is_refresh") == 'true':
return inner
else:
return top
def get_add_chat_wdg(self):
div = DivWdg()
div.add_border()
div.add_style("padding: 20px")
div.add_class("spt_add_chat_top")
table = Table()
table.add_style("width: auto")
div.add(table)
table.add_row()
text = TextInputWdg(title="user", icon="USER_ADD")
table.add_cell(text)
text.add_class("spt_add_chat_user")
add_button = ActionButtonWdg(title="Start Chat")
table.add_cell(add_button)
add_button.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
var top = bvr.src_el.getParent(".spt_add_chat_top");
var el = top.getElement(".spt_add_chat_user");
var user = el.value;
if (!user) {
alert("Specify a valid user to chat with");
return;
}
// new chat
var server = TacticServerStub.get();
var category = "chat";
var class_name = 'tactic.ui.app.ChatCmd';
var kwargs = {
users: [user]
}
server.execute_cmd(class_name, kwargs);
spt.panel.refresh(bvr.src_el);
'''
} )
return div
class ChatCmd(Command):
def execute(self):
login = Environment.get_user_name()
users = self.kwargs.get("users")
everyone = [login]
everyone.extend(users)
# find out if there already is a subscription between this user
# and others
search = Search("sthpw/subscription")
search.add_filter("login", login)
search.add_filter("category", "chat")
login_subscriptions = search.get_sobjects()
keys = [x.get_value("message_code") for x in login_subscriptions]
create = True
# find the subscriptions for each user with the same keys
for user in users:
search = Search("sthpw/subscription")
search.add_filters("message_code", keys)
search.add_filter("login", user)
user_subscriptions = search.get_sobjects()
if user_subscriptions:
create = False
# create a new subscription
if create:
key = Common.generate_random_key()
message = SearchType.create("sthpw/message")
message.set_value("code", key)
message.set_value("login", login)
message.set_value("category", "chat")
message.set_value("message", "Welcome!!!")
message.commit()
# create a subscription for each person
for person in everyone:
subscription = SearchType.create("sthpw/subscription")
subscription.set_value("message_code", key)
subscription.set_value("login", person)
subscription.set_value("category", "chat")
subscription.commit()
class ChatSessionWdg(BaseRefreshWdg):
def get_display(self):
top = self.top
self.set_as_panel(top)
inner = DivWdg()
top.add(inner)
inner.add_behavior( {
'type': 'load',
'cbjs_action': MessageWdg.get_onload_js()
} )
inner.add_style("min-width: 400px")
key = self.kwargs.get("key")
interval = True
top.add( self.get_chat_wdg(key, interval) )
return top
def get_chat_wdg(self, key, interval=False):
div = DivWdg()
div.add_class("spt_chat_session_top")
div.add_color("background", "background")
title_wdg = DivWdg()
div.add(title_wdg)
title_wdg.add_color("background", "background3")
title_wdg.add_style("padding: 5px")
title_wdg.add_style("font-weight: bold")
title_wdg.add_border()
icon = IconButtonWdg(title="Remove Chat", icon=IconWdg.DELETE)
icon.add_style("float: right")
icon.add_style("margin-top: -5px")
title_wdg.add(icon)
icon.add_behavior( {
'type': 'click_up',
'key': key,
'cbjs_action': '''
var server = TacticServerStub.get();
var top = bvr.src_el.getParent(".spt_chat_session_top");
spt.behavior.destroy_element(top);
'''
} )
current_user = Environment.get_user_name()
sudo = Sudo()
try:
logins = Search.eval("@SOBJECT(sthpw/subscription['message_code','%s'].sthpw/login)" % key)
finally:
sudo.exit()
for login in logins:
if login.get_value("login") == current_user:
continue
thumb = ThumbWdg()
thumb.set_icon_size(45)
thumb.set_sobject(login)
thumb.add_style("float: left")
thumb.add_style("margin: -5px 10px 0px -5px")
title_wdg.add(thumb)
title_wdg.add(login.get_value("display_name"))
title_wdg.add("<br clear='all'/>")
history_div = DivWdg()
div.add(history_div)
history_div.add_class("spt_chat_history")
history_div.add_style("width: auto")
history_div.add_style("height: auto")
history_div.add_style("max-height: 400px")
history_div.add_style("padding: 5px")
history_div.add_class("spt_resizable")
history_div.add_border()
history_div.add_style("overflow-y: auto")
#history_div.add_style("font-size: 0.9em")
search = Search("sthpw/message_log")
search.add_filter("message_code", key)
search.add_order_by("timestamp")
message_logs = search.get_sobjects()
        last_login = None
        last_date = None
for message_log in message_logs:
login = message_log.get("login")
message = message_log.get("message")
timestamp = message_log.get_datetime_value("timestamp")
#timestamp = timestamp.strftime("%b %d, %Y - %H:%M")
timestamp_str = timestamp.strftime("%H:%M")
date_str = timestamp.strftime("%b %d, %Y")
if login != last_login:
table = Table()
history_div.add(table)
table.add_row()
table.add_style("width: 100%")
table.add_style("margin-top: 15px")
login_sobj = Search.get_by_code("sthpw/login", login)
thumb_div = DivWdg()
td = table.add_cell()
td.add_style("vertical-align: top")
td.add_style("width: 75px")
thumb_div = DivWdg()
td.add(thumb_div)
thumb_div.add_style("overflow: hidden")
thumb = ThumbWdg()
thumb_div.add(thumb)
thumb.set_sobject(login_sobj)
thumb.set_icon_size(60)
display_name = login_sobj.get("display_name")
td = table.add_cell()
td.add_style("padding-top: 3px")
name_div = DivWdg()
td.add(name_div)
name_div.add_style("color", "#214e75")
name_div.add_style("font-size", "1.3em")
name_div.add(display_name)
msg = "";
msg += "<table style='margin-top: 5px; font-size: 1.0em; width: 100%'>";
if date_str != last_date:
msg += "<tr><td colspan='2' style='text-align: right'><br/><b style='font-size: 1.0em'>"+date_str+"</b></td></tr>";
last_login = None
msg += "<tr><td>"
msg += message.replace("\n",'<br/>')
msg += "</td><td style='vertical-align: top; text-align: right; margin-bottom: 5px; width: 75px; vertical-align: top; opacity: 0.7;'>";
msg += timestamp_str;
msg += "</td></tr></table>";
td.add(msg)
last_login = login
last_date = date_str
history_div.add_behavior( {
'type': 'load',
'cbjs_action': '''
bvr.src_el.scrollTop = bvr.src_el.scrollHeight;
'''
} )
if message_logs:
last_message = message_logs[-1].get("message")
last_login = message_logs[-1].get("login")
else:
last_message = ""
last_login = ""
div.add_attr("spt_last_message", last_message)
div.add_attr("spt_last_login", last_login)
if interval:
div.add_behavior( {
'type': 'load',
'key': key,
'cbjs_action': r'''
var text_el = bvr.src_el.getElement(".spt_chat_text");
var history_el = bvr.src_el.getElement(".spt_chat_history");
var callback = function(message) {
//history_el.setStyle("background", "red");
var login = message.login;
var timestamp = message.timestamp;
if (timestamp) {
var parts = timestamp.split(" ");
parts = parts[1].split(".");
timestamp = parts[0];
}
else {
timestamp = "";
}
var tmp = message.message || "";
var last_message = bvr.src_el.getAttribute("spt_last_message");
var last_login = bvr.src_el.getAttribute("spt_last_login");
if (tmp == last_message && login == last_login) {
return;
}
bvr.src_el.setAttribute("spt_last_message", tmp);
bvr.src_el.setAttribute("spt_last_login", login);
var msg = "";
msg += "<table style='margin-top: 5px; font-size: 1.0em; width: 100%'><tr><td>";
if (login != last_login) {
msg += "<b>"+login+"</b><br/>";
}
msg += tmp.replace(/\n/g,'<br/>');
msg += "</td><td style='text-align: right; margin-bottom: 5px; width: 75px; vertical-align: top'>";
msg += timestamp;
msg += "</td></tr></table>";
if (msg == history_el.last_msg) {
return;
}
history_el.innerHTML = history_el.innerHTML + msg;
// remember last message
history_el.last_msg = msg;
history_el.scrollTop = history_el.scrollHeight;
}
spt.message.set_interval(bvr.key, callback, 3000, bvr.src_el);
'''
} )
text = TextAreaWdg("chat")
div.add(text)
text.add_class("spt_chat_text")
text.add_style("width: 100%")
text.add_style("padding: 5px")
#text.add_style("margin-top: -1px")
text.add_style("margin-top: 5px")
text.add_behavior( {
'type': 'load',
'cbjs_action': '''
bvr.src_el.addEvent("keydown", function(e) {
var keys = ['tab','keys(control+enter)', 'enter'];
var key = e.key;
var input = bvr.src_el
if (keys.indexOf(key) > -1) e.stop();
if (key == 'tab') {
}
else if (key == 'enter') {
if (e.control == false) {
//pass;
}
else {
// TODO: check if it's multi-line first
//... use ctrl-ENTER for new-line, regular ENTER (RETURN) accepts value
//var tvals = parse_selected_text(input);
//input.value = tvals[0] + "\\n" + tvals[1];
//spt.set_cursor_position( input, tvals[0].length + 1 );
}
}
} )
'''
} )
button = ActionButtonWdg(title="Send")
div.add(button)
button.add_style("float: right")
button.add_style("margin: 5px")
button.add_behavior( {
'type': 'click_up',
'key': key,
'cbjs_action': '''
var top = bvr.src_el.getParent(".spt_chat_session_top");
var text_el = top.getElement(".spt_chat_text");
var message = text_el.value;
if (!message) {
return;
}
var history_el = top.getElement(".spt_chat_history");
var category = "chat";
var server = TacticServerStub.get();
var key = bvr.key;
var last_message = server.log_message(key, message, {category:category, status:"in_progress"});
text_el.value = "";
'''
} )
div.add("<br clear='all'/>")
return div
class SubscriptionWdg(BaseRefreshWdg):
def get_subscriptions(self, category, mode="new"):
search = Search("sthpw/subscription")
search.add_user_filter()
if category:
search.add_filter("category", category)
mode = "nnn"
if mode == "new":
search.add_op("begin")
search.add_filter("last_cleared", '"message"."timestamp"', quoted=False, op="<")
search.add_filter("last_cleared", "NULL", quoted=False, op="is")
search.add_op("or")
#project_code = Project.get_project_code()
#search.add_filter("project_code", project_code )
# use an inner join because if there are no messages, we don't
# want the subscription
search.add_order_by("message.timestamp", direction="desc", join="INNER")
# don't show user message except when category is certain values
user = Environment.get_user_name()
search.add_op("begin")
search.add_filter("login", user, op="!=", table="message")
search.add_filters("category", ["script","default","sobject","progress"], table="message")
search.add_op("or")
else:
search.add_order_by("message.timestamp", direction="desc")
subscriptions = search.get_sobjects()
return subscriptions
def set_refresh(self, inner, interval, panel_cls='spt_subscription_top', mode='timeout'):
''' @param:
mode - timeout or interval
'''
inner.add_behavior( {
'type': 'load',
'interval': interval,
'panel_cls': panel_cls,
'mode': mode,
'cbjs_action': '''
var top = bvr.src_el.getParent("."+bvr.panel_cls);
var run = function() {
var dialog = bvr.panel_cls == 'spt_subscription_bar_top' ? top.getElement(".spt_dialog_top"): top.getParent(".spt_dialog_top") ;
if (dialog && dialog.getStyle("display") == "none") {
top.setAttribute("spt_dialog_open", "false");
}
else {
top.setAttribute("spt_dialog_open", "true");
}
// skip redraw of activator if dialog is shown to prevent refreshing
// or redraw of dialog if dialog is hidden to save resources
if (dialog && spt.is_shown(dialog) && bvr.panel_cls=='spt_subscription_bar_top') {
}
else if (dialog && spt.is_hidden(dialog) && bvr.panel_cls=='spt_subscription_top') {
}
else {
spt.panel.refresh(top, {async: true});
}
}
var timeout_id;
if (bvr.mode == 'timeout')
timeout_id = setTimeout(function() {run()}, bvr.interval);
else
timeout_id = setInterval(function() {run()}, bvr.interval);
bvr.src_el.timeout_id = timeout_id;
'''
} )
inner.add_behavior( {
'type': 'unload',
'cbjs_action': '''
if (bvr.src_el.timeout_id)
clearInterval(bvr.src_el.timeout_id);
'''
} )
def get_display(self):
top = self.top
self.set_as_panel(top)
top.add_class("spt_subscription_top")
interval = self.kwargs.get("interval")
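# refresh interval is given in seconds; convert to milliseconds for the client-side timer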
if not interval:
interval = 30 * 1000
else:
interval = int(interval) * 1000
inner = DivWdg()
top.add(inner)
self.set_refresh(inner,interval)
inner.add_style("min-width: %spx"%SubscriptionBarWdg.WIDTH)
inner.add_style("min-height: 300px")
#mode = "all"
mode = "new"
categories = ['chat','sobject','script','progress']
#categories = [None]
#categories = ['feedback']
has_entries = False
for category in categories:
category_wdg = self.get_category_wdg(category, mode)
if category_wdg:
inner.add(category_wdg)
has_entries = True
if not has_entries:
no_entries = DivWdg()
inner.add(no_entries)
no_entries.add_style("padding: 50px")
no_entries.add_style("width: %spx"%(SubscriptionBarWdg.WIDTH-50))
no_entries.add_style("height: 100px")
no_entries.add_style("margin: 100px auto")
no_entries.add_style("text-align: center")
no_entries.add_border()
no_entries.add_color("background", "background3")
no_entries.add("No messages")
if self.kwargs.get("is_refresh") == 'true':
return inner
else:
return top
def get_category_wdg(self, category, mode="new"):
subscriptions = self.get_subscriptions(category, mode)
if not subscriptions:
return
div = DivWdg()
div.add_style("width: 100%")
title_div = DivWdg()
div.add(title_div)
title_div.add_style("padding: 10px")
title_div.add_border()
title_div.add_color("background", "background3")
title = category or "Subscriptions"
title_div.add("%s " % title)
summary_div = SpanWdg()
title_div.add(summary_div)
summary_div.add_style("font-size: 0.8em")
summary_div.add_style("opacity: 0.5")
search_keys = [x.get_search_key() for x in subscriptions]
button = ActionButtonWdg(title="Clear All")
button.add_style("float: right")
button.add_style("padding: 2px")
button_div = DivWdg(button)
button_div.add_style('min-height: 26px')
div.add(button_div)
button.add_behavior( {
'type': 'click_up',
'search_keys': search_keys,
'cbjs_action': '''
var server = TacticServerStub.get();
for (var i = 0; i < bvr.search_keys.length; i++) {
var search_key = bvr.search_keys[i];
server.update(search_key, {'last_cleared':'NOW'});
spt.panel.refresh(bvr.src_el);
}
'''
} )
# types of subscriptions
table_div = DivWdg()
table_div.add_styles('overflow-y: auto; max-height: 500px; width: 100%')
div.add(table_div)
table = Table()
table.add_style('width: 100%')
table.add_border()
table.add_color("background", "background3")
table_div.add(table)
ss = []
for subscription in subscriptions:
table.add_row()
td = table.add_cell()
message_code = subscription.get_value("message_code")
search = Search("sthpw/message")
search.add_filter("code", message_code)
message = search.get_sobject()
# show the thumb
if not message:
if mode == "all":
td = table.add_cell(FormatMessageWdg.get_preview_wdg(subscription))
td = table.add_cell()
td.add("No Messages")
continue
size = 60
show_preview = self.kwargs.get('show_preview')
if show_preview in ['',None]:
show_preview = True
msg_element = FormatMessageWdg(subscription=subscription, short_format='true',show_preview=show_preview)
# this is optional
msg_element.set_sobject(message)
description = msg_element.get_buffer_display()
#td = table.add_cell()
history_icon = IconButtonWdg(title="Subscription History", icon=IconWdg.HISTORY)
#td.add(icon)
message_code = subscription.get_value("message_code")
history_icon.add_behavior( {
'type': 'click_up',
'message_code': message_code,
'cbjs_action': '''
var class_name = 'tactic.ui.panel.FastTableLayoutWdg';
var message_code = bvr.message_code;
var kwargs = {
search_type: 'sthpw/message_log',
show_shelf: false,
expression: "@SOBJECT(sthpw/message_log['message_code','"+message_code+"'])",
view: 'history'
};
spt.tab.set_main_body_tab();
spt.tab.add_new("Message History", "Message History", class_name, kwargs);
'''
} )
# description can take up 70%
td = table.add_cell()
td.add_style("width: %spx"%(SubscriptionBarWdg.WIDTH*0.7))
desc_div = DivWdg()
td.add(desc_div)
desc_div.add(description)
desc_div.add_style("padding: 0px 20px")
td = table.add_cell()
#td.add(message.get_value("status"))
#td = table.add_cell()
timestamp = message.get_datetime_value("timestamp")
if timestamp:
timestamp_str = timestamp.strftime("%b %d, %Y - %H:%M")
else:
timestamp_str = ""
show_timestamp = self.kwargs.get('show_timestamp')
if show_timestamp in ['',None]:
show_timestamp = True
if show_timestamp in ["True","true",True]:
td.add(timestamp_str)
#td = table.add_cell()
#td.add(subscription.get_value("last_cleared"))
td = table.add_cell()
show_message_history = self.kwargs.get('show_message_history')
if show_message_history in ['',None]:
show_message_history = True
if show_message_history in ["True","true",True]:
td.add(history_icon)
td.add(HtmlElement.br(2))
td.add_style('width: 30px')
icon = IconButtonWdg(title="Unsubscribe", icon=IconWdg.DELETE)
icon.add_style('bottom: 14px')
subscription_key = subscription.get_search_key()
icon.add_behavior( {
'type': 'click_up',
'search_key': subscription_key,
'message_code': message_code,
'cbjs_action': '''
if (!confirm("Unsubscribe from [" + bvr.message_code + "]?")) {
return;
}
var top = bvr.src_el.getParent(".spt_subscription_top");
var server = TacticServerStub.get();
server.delete_sobject(bvr.search_key);
spt.panel.refresh(top);
'''
} )
show_unsubscribe = self.kwargs.get('show_unsubscribe')
if show_unsubscribe in ['',None]:
show_unsubscribe = False
if show_unsubscribe in ["True","true",True]:
td.add(icon)
ss.append(subscription)
num_sobjects = len(ss)
if not num_sobjects:
return None
summary_div.add("(%s changes)" % num_sobjects)
#from tactic.ui.panel import FastTableLayoutWdg
#table = FastTableLayoutWdg(search_type="sthpw/subscription",show_shelf=False)
#div.add(table)
#table.set_sobjects(ss)
return div
class SubscriptionBarWdg(SubscriptionWdg):
ARGS_KEYS = {
'mode': {
'description': "tab|dialog|popup - Determine how the details should open",
'type': 'SelectWdg',
'values': 'tab|dialog|popup'
},
'interval': {
'description': "Determine how many seconds it takes to refresh",
'type': 'TextWdg'
},
'dialog_open': {
'description': "Determine if the dialog opens initially",
'type': 'SelectWdg',
'values': 'true|false'
}
}
# this is referenced in SubcriptionWdg as well
WIDTH = 500
def get_display(self):
top = self.top
top.add_class("spt_subscription_bar_top")
self.set_as_panel(top)
top.add_style("width: 40px")
top.add_style("height: 20px")
#top.add_class("hand")
interval = self.kwargs.get("interval")
if not interval:
interval = 10 * 1000
else:
interval = int(interval) * 1000
inner = DivWdg()
top.add(inner)
self.set_refresh(inner, interval, panel_cls='spt_subscription_bar_top', mode='interval')
mode = self.kwargs.get("mode")
if not mode:
mode = "tab"
dialog_open = self.kwargs.get("dialog_open")
if dialog_open in [True, 'true']:
dialog_open = True
else:
dialog_open = False
subscription_kwargs ={}
subscription_kwargs_list = ['icon','show_preview','show_message_history','show_unsubscribe','show_timestamp','interval']
for key in self.kwargs:
if key in subscription_kwargs_list:
subscription_kwargs[key]= self.kwargs.get(key)
mode = "dialog"
if mode == "dialog":
from tactic.ui.container import DialogWdg
dialog = DialogWdg(display=dialog_open, show_title=False, show_pointer=False )
inner.add(dialog)
dialog.set_as_activator(inner)
subscription_wdg = SubscriptionWdg(**subscription_kwargs)
dialog.add(subscription_wdg)
subscription_wdg.add_style("width: %spx"%(self.WIDTH+50))
subscription_wdg.add_color("background", "background")
subscription_wdg.add_style("max-height: 500px")
subscription_wdg.add_style("min-height: 300px")
elif mode == "popup":
top.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
var class_name = 'tactic.ui.app.SubscriptionWdg';
var kwargs = {};
spt.panel.load_popup("Subscriptions", class_name, kwargs);
'''
} )
else:
top.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
spt.tab.set_main_body_tab();
var class_name = 'tactic.ui.app.SubscriptionWdg';
var kwargs = {};
spt.tab.add_new("Subscriptions", "Subscriptions", class_name, kwargs);
'''
} )
color = inner.get_color("border")
inner.add_style("border-style: solid")
inner.add_style("border-size: 1px")
inner.add_style("border-color: transparent")
inner.set_round_corners(5)
inner.add_style("padding: 2px")
inner.add_behavior( {
'type': 'mouseenter',
'color': color,
'cbjs_action': '''
bvr.src_el.setStyle("border", "solid 1px "+bvr.color);
'''
} )
inner.add_behavior( {
'type': 'mouseleave',
'cbjs_action': '''
bvr.src_el.setStyle("border-color", "transparent");
'''
} )
category = None
subscriptions = self.get_subscriptions(category)
#if not subscriptions:
# inner.add_style("display: none")
num = len(subscriptions)
# the word message takes up too much space
"""
if num <= 1:
msg = "%s message" % num
else:
msg = "%s messages" % num
"""
if num > 0:
msg = num
else:
msg = ''
try:
icon_display = self.kwargs.get('icon')
except:
icon_display = "STAR"
if icon_display is None:
icon_display = "STAR"
icon = IconWdg(msg, icon_display)
icon.add_style('float: left')
inner.add(icon)
msg_div = DivWdg(msg)
msg_div.add_style('padding-top: 1px')
#msg_div.add_style('border-width: 1px')
#msg_div.add_styles('border-radius: 50%; width: 18px; height: 18px; background: white')
inner.add(msg_div)
if self.kwargs.get("is_refresh") == 'true':
return inner
else:
return top
class MessageWdg(BaseRefreshWdg):
def get_display(self):
div = DivWdg()
div.add_class("spt_message_top")
div.add("<h1>Message</h1>")
outer = DivWdg()
div.add(outer)
outer.add_style("width: 250px")
outer.add_border()
progress = DivWdg()
outer.add(progress)
progress.add_class("spt_message_progress")
progress.add_style("background", "#AAD")
progress.add_style("width: 0%")
progress.add_style("height: 20px")
div.add("<img src='/context/icons/common/indicator_snake.gif'/>")
text = TextWdg("complete")
div.add(text)
text.add_class("spt_message_text")
div.add_behavior( {
'type': 'load',
'cbjs_action': self.get_onload_js()
} )
div.add_behavior( {
'type': 'load',
'cbjs_action': '''
var key = spt.message.generate_key();
// create a subscription
var server = TacticServerStub.get();
login = spt.Environment.get().get_user();
server.insert("sthpw/subscription", {'message_code':key, login: login, category: "script"} );
var server = TacticServerStub.get();
var x = function() {};
server.execute_python_script("message/action", {key:key}, {on_complete: x});
var el = bvr.src_el.getElement(".spt_message_text");
var progress_el = bvr.src_el.getElement(".spt_message_progress");
var callback = function(message) {
if (message.status == "complete") {
el.value = "OK DONE FINISHED"
width = "100"
} else {
var value = JSON.parse(message.message);
el.value = value.progress;
width = value.progress;
}
progress_el.setStyle("width", width+"%");
}
spt.message.set_interval(key, callback, 1000, bvr.src_el);
'''
} )
div.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
spt.message.stop_all_intervals();
'''
} )
return div
def get_onload_js(cls):
return r'''
if (spt.message) {
return;
}
spt.message = {}
spt.message.intervals = {};
spt.message.elements = {};
spt.message.set_interval = function(key, callback, interval, element) {
var f = function(message) {
try {
console.log(message);
if (message) {
callback(message);
}
else {
console.log("WARNING: message is undefined for key ["+key+"]");
//spt.message.stop_interval(key);
return;
}
}
catch(e) {
spt.message.stop_interval(key);
alert(e);
}
if (message.status == "complete") {
spt.message.stop_interval(key);
}
}
// stop this interval if it already started/registered
spt.message.stop_interval(key);
var interval_id = setInterval( function() {
spt.message.async_poll(key, f);
} , interval );
spt.message.intervals[key] = interval_id;
if (element) {
var id = element.getAttribute("id");
if (!id) {
element.setAttribute("id", key);
spt.message.elements[key] = key;
}
else {
spt.message.elements[key] = id;
}
element.addClass("spt_notify_destroyed");
}
else
spt.message.elements[key] = null;
}
spt.message.stop_interval = function(key) {
if (!spt.message.intervals[key]) {
return;
}
clearInterval(spt.message.intervals[key]);
delete spt.message.intervals[key];
delete spt.message.elements[key];
}
spt.message.stop_all_intervals = function() {
for (var key in spt.message.intervals) {
spt.message.stop_interval(key);
}
}
spt.message.generate_key = function(length) {
if (!length) {
length = 20;
}
var text = "";
var possible = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
for( var i=0; i < length; i++ ) {
text += possible.charAt(Math.floor(Math.random() * possible.length));
}
return text;
}
spt.message.poll = function(key) {
var server = TacticServerStub.get();
var messages = server.eval("@SOBJECT(sthpw/message['code','"+key+"'])");
var message = messages[0];
return message;
}
spt.message.async_poll = function(key, callback) {
// before polling, check that the element still exists
var el_id = spt.message.elements[key];
var el = document.id(el_id);
if (!el || el.hasClass("spt_destroyed")) {
spt.message.stop_interval(key);
return;
}
if (! el.isVisible() ) {
return;
}
var server = TacticServerStub.get();
var expr = "@SOBJECT(sthpw/message['code','"+key+"'])";
server.eval(expr, {single:true,cbjs_action:callback});
}
// TEST pooling of queries from different "apps"
spt.message.keys = {};
spt.message.register_key = function(key) {
spt.message.keys[key] = true;
}
spt.message.async_polls = function(keys, callback) {
// before polling, check that the element still exists
/*
var el = spt.message.elements[key];
if (el && el.parentNode == null) {
spt.message.stop_interval(key);
return;
}
*/
var keys_string = keys.join("|");
var server = TacticServerStub.get();
var expr = "@SOBJECT(sthpw/message['code','in','"+keys_string+"'])";
server.async_eval(expr, {single:false,cbjs_action:callback});
}
'''
get_onload_js = classmethod(get_onload_js)
| Southpaw-TACTIC/TACTIC | src/tactic/ui/app/message_wdg.py | Python | epl-1.0 | 47,704 |
# -*- coding: utf-8 -*-
from config.config import ARTICLE_PER_PAGE
from exception import Unauthorized
from helper.model_control import get_board, get_article_page
from helper.permission import is_anybody, can_write
from helper.resource import YuzukiResource
from helper.template import render_template
class Board(YuzukiResource):
def render_GET(self, request):
name = request.get_argument("name")
if not (name == "notice" or is_anybody(request)):
raise Unauthorized()
page = request.get_argument_int("page", 1)
board = get_board(request, name)
articles = get_article_page(request, board, page)
total_article_count = board.article_count
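# ceiling division: a trailing partial page still needs a page of its own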
page_total = total_article_count / ARTICLE_PER_PAGE
if total_article_count % ARTICLE_PER_PAGE != 0:
page_total = total_article_count / ARTICLE_PER_PAGE + 1
context = {
"items": articles,
"board": board,
"page": page,
"page_total": page_total,
"can_write": can_write(request, board),
}
return render_template("board.html", request, context)
| TintypeMolly/Yuzuki | resource/board.py | Python | mit | 1,155 |
# coding: utf-8
import functools
import hashlib
import math
import numpy as np
import pandas
import neural_network.neural_network as nn
from neural_network.Dropout import Dropout
from neural_network.EarlyStop import EarlyStop
from neural_network.L2Decay import L2Decay
SUBSET_NUM = 5
def preprocess_train(data_raw: pandas.DataFrame) -> (np.ndarray, np.ndarray):
x, y, urn = [], [], []
for person in data_raw.get_values():
urn.append(person)
np.random.shuffle(urn)
for person in urn:
y.append(person[1])
vec = [1 if person[2] == 1 else 0, 1 if person[2] == 2 else 0, 1 if person[4] == 'male' else 0]
if math.isnan(person[5]):
vec.append(0)
vec.append(0)
else:
vec.append(person[5])
vec.append(1)
vec.append(person[6])
vec.append(person[7])
vec.append(person[9])
vec.append(1 if person[11] == 'C' else 0)
vec.append(1 if person[11] == 'Q' else 0)
x.append(vec)
return np.array(x).T, np.array(y).reshape((1, -1))
def preprocess_test(data_raw: pandas.DataFrame) -> np.ndarray:
data = []
for person in data_raw.get_values():
vec = [1 if person[1] == 1 else 0, 1 if person[1] == 2 else 0, 1 if person[3] == 'male' else 0]
if math.isnan(person[4]):
vec.append(0)
vec.append(0)
else:
vec.append(person[4])
vec.append(1)
vec.append(person[5])
vec.append(person[6])
vec.append(person[8])
vec.append(1 if person[10] == 'C' else 0)
vec.append(1 if person[10] == 'Q' else 0)
data.append(vec)
return np.array(data).T
def normalize(x: np.ndarray, mean: np.ndarray = None, std: np.ndarray = None) -> np.ndarray:
if mean is None:
mean = np.mean(x, axis=1, keepdims=True)
if std is None:
std = np.std(x, axis=1, keepdims=True)
return (x - mean) / std
def run(x: np.ndarray, y: np.ndarray, *, distribution: str, dev_type: str, hidden_units: int, iter_num: int,
friction: float, learning_rate: float, dropout_rate: float = None, early_stop_config: dict = None,
l2_decay_factor: float = None) -> (
float, float):
train_cost, validation_cost, train_acc, validation_acc = 0.0, 0.0, 0.0, 0.0
x = normalize(x)
for cv_id in range(SUBSET_NUM):
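# cross-validation split: every SUBSET_NUM-th sample (offset by cv_id) forms the validation fold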
idx_train = np.ones(x.shape[1], np.bool)
idx_train[np.arange(cv_id, y.shape[1], SUBSET_NUM)] = 0
idx_validate = np.logical_not(idx_train)
x_train, y_train = x[:, idx_train], y[:, idx_train]
x_validate, y_validate = x[:, idx_validate], y[:, idx_validate]
layer_dims = (x_train.shape[0], hidden_units, y_train.shape[0])
if dropout_rate is not None:
dropout = Dropout(rate=dropout_rate, layer_dims=layer_dims)
else:
dropout = None
if early_stop_config is not None:
early_stop = EarlyStop(x_validate, y_validate,
interval=early_stop_config["interval"], half_life=early_stop_config["half_life"],
threshold=early_stop_config["threshold"])
else:
early_stop = None
if l2_decay_factor is not None:
l2_decay = L2Decay(factor=l2_decay_factor)
else:
l2_decay = None
w, b = nn.init(layer_dims, distribution=distribution, dev_type=dev_type, dropout=dropout)
w, b = nn.optimize(w, b, x_train, y_train,
iter_num=iter_num, friction=friction, learning_rate=learning_rate,
dropout=dropout, early_stop=early_stop, l2_decay=l2_decay)
y_train_p = nn.forward_propagation(w, b, x_train, training=False, dropout=dropout)[-1]
y_validate_p = nn.forward_propagation(w, b, x_validate, training=False, dropout=dropout)[-1]
train_cost = train_cost + nn.cost(y_train, y_train_p)
validation_cost = validation_cost + nn.cost(y_validate, y_validate_p)
train_acc = train_acc + np.sum(np.logical_xor(y_train, y_train_p >= 0.5)) / y_train.shape[1]
validation_acc = validation_acc + np.sum(np.logical_xor(y_validate, y_validate_p >= 0.5)) / y_validate.shape[1]
train_acc, validation_acc = 1.0 - train_acc / SUBSET_NUM, 1.0 - validation_acc / SUBSET_NUM
print(train_acc, validation_acc)
return train_cost / SUBSET_NUM, validation_cost / SUBSET_NUM
def run_test(x: np.ndarray, y: np.ndarray, x_test, *,
distribution: str, dev_type: str, hidden_units: int, iter_num: int, friction: float,
learning_rate: float, dropout_rate: float = None, early_stop_config: dict = None,
l2_decay_factor: float = None) -> np.ndarray:
mean = np.mean(np.concatenate((x, x_test), axis=1), axis=1, keepdims=True)
std = np.std(np.concatenate((x, x_test), axis=1), axis=1, keepdims=True)
x, x_test = normalize(x, mean, std), normalize(x_test, mean, std)
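# normalize train and test together so both use the same mean and standard deviation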
layer_dims = (x.shape[0], hidden_units, y.shape[0])
if dropout_rate is not None:
dropout = Dropout(rate=dropout_rate, layer_dims=layer_dims)
else:
dropout = None
if early_stop_config is not None:
idx_train = np.ones(x.shape[1], np.bool)
idx_train[np.random.choice(x.shape[1], x.shape[1] // SUBSET_NUM, replace=False)] = 0
idx_validate = np.logical_not(idx_train)
x_train, y_train = x[:, idx_train], y[:, idx_train]
x_validate, y_validate = x[:, idx_validate], y[:, idx_validate]
early_stop = EarlyStop(x_validate, y_validate,
interval=early_stop_config["interval"], half_life=early_stop_config["half_life"],
threshold=early_stop_config["threshold"])
else:
x_train, y_train = x, y
early_stop = None
if l2_decay_factor is not None:
l2_decay = L2Decay(factor=l2_decay_factor)
else:
l2_decay = None
w, b = nn.init(layer_dims, distribution=distribution, dev_type=dev_type, dropout=dropout)
nn.optimize(w, b, x_train, y_train,
iter_num=iter_num, friction=friction, learning_rate=learning_rate, dropout=dropout,
early_stop=early_stop, l2_decay=l2_decay)
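# train again on the full training set for the number of epochs early stopping found best
# (assumes early_stop_config was supplied; otherwise early_stop is None and best_epoch does not exist)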
w, b = nn.optimize(w, b, x, y,
iter_num=early_stop.best_epoch, friction=friction, learning_rate=learning_rate, dropout=dropout,
early_stop=early_stop, l2_decay=l2_decay)
y_test_p = nn.forward_propagation(w, b, x_test, training=False, dropout=dropout)[-1]
return y_test_p
def check_output_status(file_name: str, param_list_hash: str) -> int:
try:
with open(file_name, "r", encoding="utf-8") as f:
if f.readline().split(',')[0] == param_list_hash:
cnt = 0
while len(f.readline().split(',')) >= 2:
cnt = cnt + 1
return cnt
else:
return -1
except IOError:
return -1
def restart(file_name: str, param_list_hash: str, run_list: tuple) -> None:
with open(file_name, "w", encoding="utf-8") as f:
f.write(param_list_hash + ',\n')
f.flush()
for fun in run_list:
train_cost, validation_cost = fun()
f.write(str(train_cost) + ',' + str(validation_cost) + ',\n')
f.flush()
def resume(file_name: str, run_list: tuple, start_pos: int) -> None:
run_list = run_list[start_pos:]
with open(file_name, "a", encoding="utf-8") as f:
for fun in run_list:  # run_list was already sliced to the remaining runs above
train_cost, validation_cost = fun()
f.write(str(train_cost) + ',' + str(validation_cost) + ',\n')
f.flush()
def ensemble(run_list: tuple) -> np.ndarray:
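# average the predicted probabilities of all models and threshold at 0.5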
return np.mean([fun() for fun in run_list], axis=0) >= 0.5
def main():
x_train, y_train = preprocess_train(pandas.read_csv("train.csv"))
# param_list = (("UNIFORM", "FAN_IN", 20, 1000, 0.1, 0.1, None, (5, 15, 0.005)),)
param_list = (("UNIFORM", "FAN_IN", 16, 1000, 0.1, 0.7, None, (5, 25, 0.0), 4.096),
("UNIFORM", "FAN_IN", 20, 1000, 0.1, 0.7, 0.5, (5, 25, 0.0), 0.016),
("UNIFORM", "FAN_IN", 20, 1000, 0.1, 0.7, 0.5, (5, 25, 0.0), 0.064),
("UNIFORM", "FAN_IN", 20, 1000, 0.1, 0.7, 0.5, (5, 25, 0.0), 0.128),
("UNIFORM", "FAN_IN", 20, 1000, 0.1, 0.7, 0.5, (5, 25, 0.0), 1.024))
# print(param_list)
param_list_hash = hashlib.sha256(str(param_list).encode('utf-8')).hexdigest()
output_status = check_output_status("output.csv", param_list_hash)
run_list = tuple(map(lambda params: functools.partial(run, x=x_train, y=y_train,
distribution=params[0],
dev_type=params[1],
hidden_units=params[2],
iter_num=params[3],
friction=params[4],
learning_rate=params[5],
dropout_rate=params[6],
early_stop_config=None if params[7] is None else
{"interval": params[7][0],
"half_life": params[7][1],
"threshold": params[7][2]},
l2_decay_factor=params[8]),
param_list))
if output_status == -1:
restart("output.csv", param_list_hash, run_list)
else:
resume("output.csv", run_list, output_status)
def test():
x_train, y_train = preprocess_train(pandas.read_csv("train.csv"))
x_test = preprocess_test(pandas.read_csv("test.csv"))
param_list = (("UNIFORM", "FAN_IN", 16, 1000, 0.1, 0.7, None, (5, 25, 0.0), 4.096),
("UNIFORM", "FAN_IN", 20, 1000, 0.1, 0.7, 0.5, (5, 25, 0.0), 0.016),
("UNIFORM", "FAN_IN", 20, 1000, 0.1, 0.7, 0.5, (5, 25, 0.0), 0.064),
("UNIFORM", "FAN_IN", 20, 1000, 0.1, 0.7, 0.5, (5, 25, 0.0), 0.128),
("UNIFORM", "FAN_IN", 20, 1000, 0.1, 0.7, 0.5, (5, 25, 0.0), 1.024))
run_list = tuple(map(lambda params: functools.partial(run_test, x=x_train, y=y_train, x_test=x_test,
distribution=params[0],
dev_type=params[1],
hidden_units=params[2],
iter_num=params[3],
friction=params[4],
learning_rate=params[5],
dropout_rate=params[6],
early_stop_config=None if params[7] is None else
{"interval": params[7][0],
"half_life": params[7][1],
"threshold": params[7][2]},
l2_decay_factor=params[8]),
param_list))
y_test = ensemble(run_list)
pandas.DataFrame(y_test.astype(int).T,
index=np.arange(y_train.shape[1] + 1, y_train.shape[1] + y_test.shape[1] + 1),
columns=("Survived",)).to_csv("submission.csv", index_label="PassengerId")
# main()
test()
| gonglinyuan/titanic | main.py | Python | gpl-3.0 | 11,931 |
"""
Create slides for a slideshow
TODO: would be nice if it did not give eog focus.
"""
import os
from PIL import Image, ImageDraw, ImageFont
FONT = '/usr/share/fonts/TTF/Vera.ttf'
FONTSIZE = 36
WIDTH = 1024
HEIGHT = 768
class SlideShow:
def __init__(self):
self.pos = 0
self.cache = 'show'
self.font = ImageFont.truetype(FONT, FONTSIZE)
def interpret(self, msg):
""" Load input """
slides = msg.get('slides', [])
self.cache = msg.get('folder', self.cache)
self.gallery = msg.get('gallery', '..')
with open(self.cache + '/slides.txt', 'w') as logfile:
for ix, item in enumerate(slides):
image = self.prepare_image(item)
filename = self.cache_image(item, image)
text = item.get('caption', '')
# do not write text for heading images
if item.get('heading'):
text = ''
if text:
with open(filename + '.txt', 'w') as caption:
caption.write(text)
print('%s,%d' % (filename, item.get('time', 0)), file=logfile)
def prepare_image(self, slide):
image = slide.get('image')
caption = slide.get('caption')
if caption is None:
# use image name, without the suffic
caption = os.path.splitext(image)[0]
# convert _ to ' '
caption = caption.replace('_', ' ')
# save the caption
slide['caption'] = caption
# create image
image_file = self.create_image(image, caption)
return image_file
def create_image(self, image_file, caption):
""" Create an image with a caption """
suffix = 'png'
if image_file:
img = Image.open(os.path.join(self.gallery, image_file))
width, height = img.size
ratio = width/WIDTH
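# scale to the target WIDTH while preserving the aspect ratio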
img = img.resize((int(width // ratio),
int(height // ratio)),
Image.ANTIALIAS)
else:
img = Image.new('RGB', (WIDTH, HEIGHT), 'black')
# add_caption draws onto img in place and returns the same object
image = self.add_caption(img, caption)
return image
def cache_image(self, item, image):
#name = "%s/slide%d.png" % (self.cache, ix)
caption = item.get('image')
if caption is None:
caption = item.get('caption').split('\n')[0]
caption = caption.split('/')[-1]
caption = caption.replace(' ', '_')
name = "%s/%s.png" % (self.cache, caption)
# PIL opens and writes the file itself; pre-opening the path in text mode is unnecessary
image.save(name)
return name
def add_caption(self, image, caption, colour=None):
""" Add a caption to the image """
if colour is None:
colour = "white"
width, height = image.size
draw = ImageDraw.Draw(image)
draw.font = self.font
draw.text((width // 10, height//20), caption,
fill=colour)
return image
| openbermuda/ripl | ripl/caption.py | Python | gpl-3.0 | 3,205 |
"""Test module for export class."""
import pytest
from export.models import ExportDocument
from home.models.profile import Profile
def test_export(export_document):
"""Test the object."""
if not isinstance(export_document, ExportDocument):
raise AssertionError()
@pytest.mark.usefixtures('db')
def test_run_export(export_document, profile):
"""Run the export method and see what happens."""
if not isinstance(profile, Profile):
raise AssertionError()
if not isinstance(export_document, ExportDocument):
raise AssertionError()
| executive-consultants-of-los-angeles/rsum | rsum/export/tests/test_export.py | Python | unlicense | 576 |
import time
import datetime
import traceback
import multiprocessing
import urllib2
import xml.sax
import redis
import random
import pymongo
import re
import requests
import dateutil.parser
import isodate
import urlparse
from django.conf import settings
from django.db import IntegrityError
from django.core.cache import cache
from apps.reader.models import UserSubscription
from apps.rss_feeds.models import Feed, MStory
from apps.rss_feeds.page_importer import PageImporter
from apps.rss_feeds.icon_importer import IconImporter
from apps.notifications.tasks import QueueNotifications, MUserFeedNotification
from apps.push.models import PushSubscription
from apps.statistics.models import MAnalyticsFetcher, MStatistics
from utils import feedparser
from utils.story_functions import pre_process_story, strip_tags, linkify
from utils import log as logging
from utils.feed_functions import timelimit, TimeoutError
from qurl import qurl
from BeautifulSoup import BeautifulSoup
from django.utils import feedgenerator
from django.utils.html import linebreaks
from django.utils.encoding import smart_unicode
from utils import json_functions as json
from celery.exceptions import SoftTimeLimitExceeded
from utils.twitter_fetcher import TwitterFetcher
from utils.json_fetcher import JSONFetcher
# from utils.feed_functions import mail_feed_error_to_admin
# Refresh feed code adapted from Feedjack.
# http://feedjack.googlecode.com
FEED_OK, FEED_SAME, FEED_ERRPARSE, FEED_ERRHTTP, FEED_ERREXC = range(5)
class FetchFeed:
def __init__(self, feed_id, options):
self.feed = Feed.get_by_id(feed_id)
self.options = options
self.fpf = None
self.raw_feed = None
@timelimit(30)
def fetch(self):
"""
Uses requests to download the feed, parsing it in feedparser. Will be storified later.
"""
start = time.time()
identity = self.get_identity()
log_msg = u'%2s ---> [%-30s] ~FYFetching feed (~FB%d~FY), last update: %s' % (identity,
self.feed.log_title[:30],
self.feed.id,
datetime.datetime.now() - self.feed.last_update)
logging.debug(log_msg)
etag = self.feed.etag
modified = self.feed.last_modified.utctimetuple()[:7] if self.feed.last_modified else None
address = self.feed.feed_address
if (self.options.get('force') or random.random() <= .01):
self.options['force'] = True
modified = None
etag = None
address = qurl(address, add={"_": random.randint(0, 10000)})
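# add a throwaway query parameter so a forced fetch bypasses any intermediate caches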
logging.debug(u' ---> [%-30s] ~FBForcing fetch: %s' % (
self.feed.log_title[:30], address))
elif (not self.feed.fetched_once or not self.feed.known_good):
modified = None
etag = None
if self.options.get('feed_xml'):
logging.debug(u' ---> [%-30s] ~FM~BKFeed has been fat pinged. Ignoring fat: %s' % (
self.feed.log_title[:30], len(self.options.get('feed_xml'))))
if self.options.get('fpf'):
self.fpf = self.options.get('fpf')
logging.debug(u' ---> [%-30s] ~FM~BKFeed fetched in real-time with fat ping.' % (
self.feed.log_title[:30]))
return FEED_OK, self.fpf
if 'youtube.com' in address:
try:
youtube_feed = self.fetch_youtube(address)
except (requests.adapters.ConnectionError):
youtube_feed = None
if not youtube_feed:
logging.debug(u' ***> [%-30s] ~FRYouTube fetch failed: %s.' %
(self.feed.log_title[:30], address))
return FEED_ERRHTTP, None
self.fpf = feedparser.parse(youtube_feed)
elif re.match('(https?)?://twitter.com/\w+/?$', qurl(address, remove=['_'])):
twitter_feed = self.fetch_twitter(address)
if not twitter_feed:
logging.debug(u' ***> [%-30s] ~FRTwitter fetch failed: %s' %
(self.feed.log_title[:30], address))
return FEED_ERRHTTP, None
self.fpf = feedparser.parse(twitter_feed)
if not self.fpf:
try:
headers = self.feed.fetch_headers()
if etag:
headers['If-None-Match'] = etag
if modified:
# format into an RFC 1123-compliant timestamp. We can't use
# time.strftime() since the %a and %b directives can be affected
# by the current locale, but RFC 2616 states that dates must be
# in English.
short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
modified_header = '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5])
headers['If-Modified-Since'] = modified_header
if etag or modified:
headers['A-IM'] = 'feed'
raw_feed = requests.get(address, headers=headers)
if raw_feed.status_code >= 400:
logging.debug(" ***> [%-30s] ~FRFeed fetch was %s status code, trying fake user agent: %s" % (self.feed.log_title[:30], raw_feed.status_code, raw_feed.headers))
raw_feed = requests.get(address, headers=self.feed.fetch_headers(fake=True))
if raw_feed.content and 'application/json' in raw_feed.headers.get('Content-Type', ""):
# JSON Feed
json_feed = self.fetch_json_feed(address, raw_feed)
if not json_feed:
logging.debug(u' ***> [%-30s] ~FRJSON fetch failed: %s' %
(self.feed.log_title[:30], address))
return FEED_ERRHTTP, None
self.fpf = feedparser.parse(json_feed)
elif raw_feed.content and raw_feed.status_code < 400:
response_headers = raw_feed.headers
response_headers['Content-Location'] = raw_feed.url
self.raw_feed = smart_unicode(raw_feed.content)
self.fpf = feedparser.parse(self.raw_feed,
response_headers=response_headers)
if self.options.get('debug', False):
logging.debug(" ---> [%-30s] ~FBFeed fetch status %s: %s length / %s" % (self.feed.log_title[:30], raw_feed.status_code, len(smart_unicode(raw_feed.content)), raw_feed.headers))
except Exception, e:
logging.debug(" ***> [%-30s] ~FRFeed failed to fetch with request, trying feedparser: %s" % (self.feed.log_title[:30], unicode(e)[:100]))
if not self.fpf or self.options.get('force_fp', False):
try:
self.fpf = feedparser.parse(address,
agent=self.feed.user_agent,
etag=etag,
modified=modified)
except (TypeError, ValueError, KeyError, EOFError, MemoryError), e:
logging.debug(u' ***> [%-30s] ~FRFeed fetch error: %s' %
(self.feed.log_title[:30], e))
pass
if not self.fpf:
try:
logging.debug(u' ***> [%-30s] ~FRTurning off headers...' %
(self.feed.log_title[:30]))
self.fpf = feedparser.parse(address, agent=self.feed.user_agent)
except (TypeError, ValueError, KeyError, EOFError, MemoryError), e:
logging.debug(u' ***> [%-30s] ~FRFetch failed: %s.' %
(self.feed.log_title[:30], e))
return FEED_ERRHTTP, None
logging.debug(u' ---> [%-30s] ~FYFeed fetch in ~FM%.4ss' % (
self.feed.log_title[:30], time.time() - start))
return FEED_OK, self.fpf
def get_identity(self):
identity = "X"
current_process = multiprocessing.current_process()
if current_process._identity:
identity = current_process._identity[0]
return identity
def fetch_twitter(self, address=None):
twitter_fetcher = TwitterFetcher(self.feed, self.options)
return twitter_fetcher.fetch(address)
def fetch_json_feed(self, address, headers):
json_fetcher = JSONFetcher(self.feed, self.options)
return json_fetcher.fetch(address, headers)
def fetch_youtube(self, address):
username = None
channel_id = None
list_id = None
if 'gdata.youtube.com' in address:
try:
username_groups = re.search('gdata.youtube.com/feeds/\w+/users/(\w+)/', address)
if not username_groups:
return
username = username_groups.group(1)
except IndexError:
return
elif 'youtube.com/feeds/videos.xml?user=' in address:
try:
username = urlparse.parse_qs(urlparse.urlparse(address).query)['user'][0]
except IndexError:
return
elif 'youtube.com/feeds/videos.xml?channel_id=' in address:
try:
channel_id = urlparse.parse_qs(urlparse.urlparse(address).query)['channel_id'][0]
except (IndexError, KeyError):
return
elif 'youtube.com/playlist' in address:
try:
list_id = urlparse.parse_qs(urlparse.urlparse(address).query)['list'][0]
except IndexError:
return
elif 'youtube.com/feeds/videos.xml?playlist_id' in address:
try:
list_id = urlparse.parse_qs(urlparse.urlparse(address).query)['playlist_id'][0]
except IndexError:
return
if channel_id:
video_ids_xml = requests.get("https://www.youtube.com/feeds/videos.xml?channel_id=%s" % channel_id)
channel_json = requests.get("https://www.googleapis.com/youtube/v3/channels?part=snippet&id=%s&key=%s" %
(channel_id, settings.YOUTUBE_API_KEY))
channel = json.decode(channel_json.content)
try:
username = channel['items'][0]['snippet']['title']
description = channel['items'][0]['snippet']['description']
except (IndexError, KeyError):
return
elif list_id:
playlist_json = requests.get("https://www.googleapis.com/youtube/v3/playlists?part=snippet&id=%s&key=%s" %
(list_id, settings.YOUTUBE_API_KEY))
playlist = json.decode(playlist_json.content)
try:
username = playlist['items'][0]['snippet']['title']
description = playlist['items'][0]['snippet']['description']
except (IndexError, KeyError):
return
channel_url = "https://www.youtube.com/playlist?list=%s" % list_id
elif username:
video_ids_xml = requests.get("https://www.youtube.com/feeds/videos.xml?user=%s" % username)
description = "YouTube videos uploaded by %s" % username
else:
return
if list_id:
playlist_json = requests.get("https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&playlistId=%s&key=%s" %
(list_id, settings.YOUTUBE_API_KEY))
playlist = json.decode(playlist_json.content)
try:
video_ids = [video['snippet']['resourceId']['videoId'] for video in playlist['items']]
except (IndexError, KeyError):
return
else:
if video_ids_xml.status_code != 200:
return
video_ids_soup = BeautifulSoup(video_ids_xml.content)
channel_url = video_ids_soup.find('author').find('uri').getText()
video_ids = []
for video_id in video_ids_soup.findAll('yt:videoid'):
video_ids.append(video_id.getText())
videos_json = requests.get("https://www.googleapis.com/youtube/v3/videos?part=contentDetails%%2Csnippet&id=%s&key=%s" %
(','.join(video_ids), settings.YOUTUBE_API_KEY))
videos = json.decode(videos_json.content)
if 'error' in videos:
logging.debug(" ***> ~FRYoutube returned an error: ~FM~SB%s" % (videos))
return
data = {}
data['title'] = ("%s's YouTube Videos" % username if 'Uploads' not in username else username)
data['link'] = channel_url
data['description'] = description
data['lastBuildDate'] = datetime.datetime.utcnow()
data['generator'] = 'NewsBlur YouTube API v3 Decrapifier - %s' % settings.NEWSBLUR_URL
data['docs'] = None
data['feed_url'] = address
rss = feedgenerator.Atom1Feed(**data)
for video in videos['items']:
thumbnail = video['snippet']['thumbnails'].get('maxres')
if not thumbnail:
thumbnail = video['snippet']['thumbnails'].get('high')
if not thumbnail:
thumbnail = video['snippet']['thumbnails'].get('medium')
duration_sec = isodate.parse_duration(video['contentDetails']['duration']).seconds
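# format the ISO 8601 duration as H:MM:SS, or M:SS for videos under an hour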
if duration_sec >= 3600:
hours = (duration_sec / 3600)
minutes = (duration_sec - (hours*3600)) / 60
seconds = duration_sec - (hours*3600) - (minutes*60)
duration = "%s:%s:%s" % (hours, '{0:02d}'.format(minutes), '{0:02d}'.format(seconds))
else:
minutes = duration_sec / 60
seconds = duration_sec - (minutes*60)
duration = "%s:%s" % ('{0:02d}'.format(minutes), '{0:02d}'.format(seconds))
content = """<div class="NB-youtube-player"><iframe allowfullscreen="true" src="%s?iv_load_policy=3"></iframe></div>
<div class="NB-youtube-stats"><small>
<b>From:</b> <a href="%s">%s</a><br />
<b>Duration:</b> %s<br />
</small></div><hr>
<div class="NB-youtube-description">%s</div>
<img src="%s" style="display:none" />""" % (
("https://www.youtube.com/embed/" + video['id']),
channel_url, username,
duration,
linkify(linebreaks(video['snippet']['description'])),
thumbnail['url'] if thumbnail else "",
)
link = "http://www.youtube.com/watch?v=%s" % video['id']
story_data = {
'title': video['snippet']['title'],
'link': link,
'description': content,
'author_name': username,
'categories': [],
'unique_id': "tag:youtube.com,2008:video:%s" % video['id'],
'pubdate': dateutil.parser.parse(video['snippet']['publishedAt']),
}
rss.add_item(**story_data)
return rss.writeString('utf-8')
class ProcessFeed:
def __init__(self, feed_id, fpf, options, raw_feed=None):
self.feed_id = feed_id
self.options = options
self.fpf = fpf
self.raw_feed = raw_feed
def refresh_feed(self):
self.feed = Feed.get_by_id(self.feed_id)
if self.feed_id != self.feed.pk:
logging.debug(" ***> Feed has changed: from %s to %s" % (self.feed_id, self.feed.pk))
self.feed_id = self.feed.pk
def process(self):
""" Downloads and parses a feed.
"""
start = time.time()
self.refresh_feed()
ret_values = dict(new=0, updated=0, same=0, error=0)
if hasattr(self.fpf, 'status'):
if self.options['verbose']:
if self.fpf.bozo and self.fpf.status != 304:
logging.debug(u' ---> [%-30s] ~FRBOZO exception: %s ~SB(%s entries)' % (
self.feed.log_title[:30],
self.fpf.bozo_exception,
len(self.fpf.entries)))
if self.fpf.status == 304:
self.feed = self.feed.save()
self.feed.save_feed_history(304, "Not modified")
return FEED_SAME, ret_values
# 302 and 307: Temporary redirect: ignore
# 301 and 308: Permanent redirect: save it (after 10 tries)
if self.fpf.status == 301 or self.fpf.status == 308:
if self.fpf.href.endswith('feedburner.com/atom.xml'):
return FEED_ERRHTTP, ret_values
redirects, non_redirects = self.feed.count_redirects_in_history('feed')
self.feed.save_feed_history(self.fpf.status, "HTTP Redirect (%d to go)" % (10-len(redirects)))
if len(redirects) >= 10 or len(non_redirects) == 0:
address = self.fpf.href
if self.options['force'] and address:
address = qurl(address, remove=['_'])
self.feed.feed_address = address
if not self.feed.known_good:
self.feed.fetched_once = True
logging.debug(" ---> [%-30s] ~SB~SK~FRFeed is %s'ing. Refetching..." % (self.feed.log_title[:30], self.fpf.status))
self.feed = self.feed.schedule_feed_fetch_immediately()
if not self.fpf.entries:
self.feed = self.feed.save()
self.feed.save_feed_history(self.fpf.status, "HTTP Redirect")
return FEED_ERRHTTP, ret_values
if self.fpf.status >= 400:
logging.debug(" ---> [%-30s] ~SB~FRHTTP Status code: %s. Checking address..." % (self.feed.log_title[:30], self.fpf.status))
fixed_feed = None
if not self.feed.known_good:
fixed_feed, feed = self.feed.check_feed_link_for_feed_address()
if not fixed_feed:
self.feed.save_feed_history(self.fpf.status, "HTTP Error")
else:
self.feed = feed
self.feed = self.feed.save()
return FEED_ERRHTTP, ret_values
if not self.fpf:
logging.debug(" ---> [%-30s] ~SB~FRFeed is Non-XML. No feedparser feed either!" % (self.feed.log_title[:30]))
self.feed.save_feed_history(551, "Broken feed")
return FEED_ERRHTTP, ret_values
if self.fpf and not self.fpf.entries:
if self.fpf.bozo and isinstance(self.fpf.bozo_exception, feedparser.NonXMLContentType):
logging.debug(" ---> [%-30s] ~SB~FRFeed is Non-XML. %s entries. Checking address..." % (self.feed.log_title[:30], len(self.fpf.entries)))
fixed_feed = None
if not self.feed.known_good:
fixed_feed, feed = self.feed.check_feed_link_for_feed_address()
if not fixed_feed:
self.feed.save_feed_history(552, 'Non-xml feed', self.fpf.bozo_exception)
else:
self.feed = feed
self.feed = self.feed.save()
return FEED_ERRPARSE, ret_values
elif self.fpf.bozo and isinstance(self.fpf.bozo_exception, xml.sax._exceptions.SAXException):
logging.debug(" ---> [%-30s] ~SB~FRFeed has SAX/XML parsing issues. %s entries. Checking address..." % (self.feed.log_title[:30], len(self.fpf.entries)))
fixed_feed = None
if not self.feed.known_good:
fixed_feed, feed = self.feed.check_feed_link_for_feed_address()
if not fixed_feed:
self.feed.save_feed_history(553, 'Not an RSS feed', self.fpf.bozo_exception)
else:
self.feed = feed
self.feed = self.feed.save()
return FEED_ERRPARSE, ret_values
# the feed has changed (or it is the first time we parse it)
# saving the etag and last_modified fields
original_etag = self.feed.etag
self.feed.etag = self.fpf.get('etag')
if self.feed.etag:
self.feed.etag = self.feed.etag[:255]
# some times this is None (it never should) *sigh*
if self.feed.etag is None:
self.feed.etag = ''
if self.feed.etag != original_etag:
self.feed.save(update_fields=['etag'])
original_last_modified = self.feed.last_modified
if hasattr(self.fpf, 'modified') and self.fpf.modified:
try:
self.feed.last_modified = datetime.datetime.strptime(self.fpf.modified, '%a, %d %b %Y %H:%M:%S %Z')
except Exception, e:
self.feed.last_modified = None
logging.debug("Broken mtime %s: %s" % (self.feed.last_modified, e))
pass
if self.feed.last_modified != original_last_modified:
self.feed.save(update_fields=['last_modified'])
self.fpf.entries = self.fpf.entries[:100]
original_title = self.feed.feed_title
if self.fpf.feed.get('title'):
self.feed.feed_title = strip_tags(self.fpf.feed.get('title'))
if self.feed.feed_title != original_title:
self.feed.save(update_fields=['feed_title'])
tagline = self.fpf.feed.get('tagline', self.feed.data.feed_tagline)
if tagline:
original_tagline = self.feed.data.feed_tagline
self.feed.data.feed_tagline = smart_unicode(tagline)
if self.feed.data.feed_tagline != original_tagline:
self.feed.data.save(update_fields=['feed_tagline'])
if not self.feed.feed_link_locked:
new_feed_link = self.fpf.feed.get('link') or self.fpf.feed.get('id') or self.feed.feed_link
if self.options['force'] and new_feed_link:
new_feed_link = qurl(new_feed_link, remove=['_'])
if new_feed_link != self.feed.feed_link:
logging.debug(" ---> [%-30s] ~SB~FRFeed's page is different: %s to %s" % (self.feed.log_title[:30], self.feed.feed_link, new_feed_link))
redirects, non_redirects = self.feed.count_redirects_in_history('page')
self.feed.save_page_history(301, "HTTP Redirect (%s to go)" % (10-len(redirects)))
if len(redirects) >= 10 or len(non_redirects) == 0:
self.feed.feed_link = new_feed_link
self.feed.save(update_fields=['feed_link'])
# Determine if stories aren't valid and replace broken guids
guids_seen = set()
permalinks_seen = set()
for entry in self.fpf.entries:
guids_seen.add(entry.get('guid'))
permalinks_seen.add(Feed.get_permalink(entry))
guid_difference = len(guids_seen) != len(self.fpf.entries)
single_guid = len(guids_seen) == 1
replace_guids = single_guid and guid_difference
permalink_difference = len(permalinks_seen) != len(self.fpf.entries)
single_permalink = len(permalinks_seen) == 1
replace_permalinks = single_permalink and permalink_difference
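# a feed where every entry shares a single guid (or a single permalink) is broken;
# fall back to permalinks, then timestamps, as story identifiers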
# Compare new stories to existing stories, adding and updating
start_date = datetime.datetime.utcnow()
story_hashes = []
stories = []
for entry in self.fpf.entries:
story = pre_process_story(entry, self.fpf.encoding)
if story.get('published') < start_date:
start_date = story.get('published')
if replace_guids:
if replace_permalinks:
new_story_guid = unicode(story.get('published'))
if self.options['verbose']:
logging.debug(u' ---> [%-30s] ~FBReplacing guid (%s) with timestamp: %s' % (
self.feed.log_title[:30],
story.get('guid'), new_story_guid))
story['guid'] = new_story_guid
else:
new_story_guid = Feed.get_permalink(story)
if self.options['verbose']:
logging.debug(u' ---> [%-30s] ~FBReplacing guid (%s) with permalink: %s' % (
self.feed.log_title[:30],
story.get('guid'), new_story_guid))
story['guid'] = new_story_guid
story['story_hash'] = MStory.feed_guid_hash_unsaved(self.feed.pk, story.get('guid'))
stories.append(story)
story_hashes.append(story.get('story_hash'))
original_story_hash_count = len(story_hashes)
story_hashes_in_unread_cutoff = self.feed.story_hashes_in_unread_cutoff[:original_story_hash_count]
story_hashes.extend(story_hashes_in_unread_cutoff)
story_hashes = list(set(story_hashes))
if self.options['verbose'] or settings.DEBUG:
logging.debug(u' ---> [%-30s] ~FBFound ~SB%s~SN guids, adding ~SB%s~SN/%s guids from db' % (
self.feed.log_title[:30],
original_story_hash_count, len(story_hashes)-original_story_hash_count,
len(story_hashes_in_unread_cutoff)))
existing_stories = dict((s.story_hash, s) for s in MStory.objects(
story_hash__in=story_hashes,
# story_date__gte=start_date,
# story_feed_id=self.feed.pk
))
# if len(existing_stories) == 0:
# existing_stories = dict((s.story_hash, s) for s in MStory.objects(
# story_date__gte=start_date,
# story_feed_id=self.feed.pk
# ))
ret_values = self.feed.add_update_stories(stories, existing_stories,
verbose=self.options['verbose'],
updates_off=self.options['updates_off'])
# PubSubHubbub
if (hasattr(self.fpf, 'feed') and
hasattr(self.fpf.feed, 'links') and self.fpf.feed.links):
hub_url = None
self_url = self.feed.feed_address
for link in self.fpf.feed.links:
if link['rel'] == 'hub' and not hub_url:
hub_url = link['href']
elif link['rel'] == 'self':
self_url = link['href']
push_expired = False
if self.feed.is_push:
try:
push_expired = self.feed.push.lease_expires < datetime.datetime.now()
except PushSubscription.DoesNotExist:
self.feed.is_push = False
if (hub_url and self_url and not settings.DEBUG and
self.feed.active_subscribers > 0 and
(push_expired or not self.feed.is_push or self.options.get('force'))):
logging.debug(u' ---> [%-30s] ~BB~FW%sSubscribing to PuSH hub: %s' % (
self.feed.log_title[:30],
"~SKRe-~SN" if push_expired else "", hub_url))
try:
PushSubscription.objects.subscribe(self_url, feed=self.feed, hub=hub_url)
except TimeoutError:
logging.debug(u' ---> [%-30s] ~BB~FW~FRTimed out~FW subscribing to PuSH hub: %s' % (
self.feed.log_title[:30], hub_url))
elif (self.feed.is_push and
(self.feed.active_subscribers <= 0 or not hub_url)):
logging.debug(u' ---> [%-30s] ~BB~FWTurning off PuSH, no hub found' % (
self.feed.log_title[:30]))
self.feed.is_push = False
self.feed = self.feed.save()
# Push notifications
if ret_values['new'] > 0 and MUserFeedNotification.feed_has_users(self.feed.pk) > 0:
QueueNotifications.delay(self.feed.pk, ret_values['new'])
# All Done
logging.debug(u' ---> [%-30s] ~FYParsed Feed: %snew=%s~SN~FY %sup=%s~SN same=%s%s~SN %serr=%s~SN~FY total=~SB%s' % (
self.feed.log_title[:30],
'~FG~SB' if ret_values['new'] else '', ret_values['new'],
'~FY~SB' if ret_values['updated'] else '', ret_values['updated'],
'~SB' if ret_values['same'] else '', ret_values['same'],
'~FR~SB' if ret_values['error'] else '', ret_values['error'],
len(self.fpf.entries)))
self.feed.update_all_statistics(has_new_stories=bool(ret_values['new']), force=self.options['force'])
fetch_date = datetime.datetime.now()
if ret_values['new']:
if not getattr(settings, 'TEST_DEBUG', False):
self.feed.trim_feed()
self.feed.expire_redis()
if MStatistics.get('raw_feed', None) == self.feed.pk:
self.feed.save_raw_feed(self.raw_feed, fetch_date)
self.feed.save_feed_history(200, "OK", date=fetch_date)
if self.options['verbose']:
logging.debug(u' ---> [%-30s] ~FBTIME: feed parse in ~FM%.4ss' % (
self.feed.log_title[:30], time.time() - start))
return FEED_OK, ret_values
class Dispatcher:
def __init__(self, options, num_threads):
self.options = options
self.feed_stats = {
FEED_OK:0,
FEED_SAME:0,
FEED_ERRPARSE:0,
FEED_ERRHTTP:0,
FEED_ERREXC:0}
self.feed_trans = {
FEED_OK:'ok',
FEED_SAME:'unchanged',
FEED_ERRPARSE:'cant_parse',
FEED_ERRHTTP:'http_error',
FEED_ERREXC:'exception'}
self.feed_keys = sorted(self.feed_trans.keys())
self.num_threads = num_threads
self.time_start = datetime.datetime.utcnow()
self.workers = []
def refresh_feed(self, feed_id):
"""Update feed, since it may have changed"""
return Feed.get_by_id(feed_id)
def process_feed_wrapper(self, feed_queue):
delta = None
current_process = multiprocessing.current_process()
identity = "X"
feed = None
if current_process._identity:
identity = current_process._identity[0]
for feed_id in feed_queue:
start_duration = time.time()
feed_fetch_duration = None
feed_process_duration = None
page_duration = None
icon_duration = None
feed_code = None
ret_entries = None
start_time = time.time()
ret_feed = FEED_ERREXC
try:
feed = self.refresh_feed(feed_id)
skip = False
if self.options.get('fake'):
skip = True
weight = "-"
quick = "-"
rand = "-"
elif (self.options.get('quick') and not self.options['force'] and
feed.known_good and feed.fetched_once and not feed.is_push):
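# in quick mode, probabilistically skip feeds weighted by stories per month and subscriber count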
weight = feed.stories_last_month * feed.num_subscribers
random_weight = random.randint(1, max(weight, 1))
quick = float(self.options.get('quick', 0))
rand = random.random()
if random_weight < 1000 and rand < quick:
skip = True
elif False and feed.feed_address.startswith("http://news.google.com/news"):
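# dead branch: the leading False means Google News feeds are never skipped here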
skip = True
weight = "-"
quick = "-"
rand = "-"
if skip:
logging.debug(' ---> [%-30s] ~BGFaking fetch, skipping (%s/month, %s subs, %s < %s)...' % (
feed.log_title[:30],
weight,
feed.num_subscribers,
rand, quick))
continue
ffeed = FetchFeed(feed_id, self.options)
ret_feed, fetched_feed = ffeed.fetch()
feed_fetch_duration = time.time() - start_duration
raw_feed = ffeed.raw_feed
if ((fetched_feed and ret_feed == FEED_OK) or self.options['force']):
pfeed = ProcessFeed(feed_id, fetched_feed, self.options, raw_feed=raw_feed)
ret_feed, ret_entries = pfeed.process()
feed = pfeed.feed
feed_process_duration = time.time() - start_duration
if (ret_entries and ret_entries['new']) or self.options['force']:
start = time.time()
if not feed.known_good or not feed.fetched_once:
feed.known_good = True
feed.fetched_once = True
feed = feed.save()
if self.options['force'] or random.random() <= 0.02:
logging.debug(' ---> [%-30s] ~FBPerforming feed cleanup...' % (feed.log_title[:30],))
start_cleanup = time.time()
feed.sync_redis()
logging.debug(' ---> [%-30s] ~FBDone with feed cleanup. Took ~SB%.4s~SN sec.' % (feed.log_title[:30], time.time() - start_cleanup))
try:
self.count_unreads_for_subscribers(feed)
except TimeoutError:
logging.debug(' ---> [%-30s] Unread count took too long...' % (feed.log_title[:30],))
if self.options['verbose']:
logging.debug(u' ---> [%-30s] ~FBTIME: unread count in ~FM%.4ss' % (
feed.log_title[:30], time.time() - start))
except urllib2.HTTPError, e:
body = e.fp.read()  # read the response body once; a second read would return an empty string
logging.debug(' ---> [%-30s] ~FRFeed throws HTTP error: ~SB%s' % (unicode(feed_id)[:30], body))
feed_code = e.code
feed.save_feed_history(feed_code, e.msg, body)
fetched_feed = None
except Feed.DoesNotExist, e:
logging.debug(' ---> [%-30s] ~FRFeed is now gone...' % (unicode(feed_id)[:30]))
continue
except SoftTimeLimitExceeded, e:
logging.debug(" ---> [%-30s] ~BR~FWTime limit hit!~SB~FR Moving on to next feed..." % feed)
ret_feed = FEED_ERREXC
fetched_feed = None
feed_code = 559
feed.save_feed_history(feed_code, 'Timeout', e)
except TimeoutError, e:
logging.debug(' ---> [%-30s] ~FRFeed fetch timed out...' % (feed.log_title[:30]))
feed_code = 505
feed.save_feed_history(feed_code, 'Timeout', e)
fetched_feed = None
except Exception, e:
logging.debug('[%d] ! -------------------------' % (feed_id,))
tb = traceback.format_exc()
logging.error(tb)
logging.debug('[%d] ! -------------------------' % (feed_id,))
ret_feed = FEED_ERREXC
feed = Feed.get_by_id(getattr(feed, 'pk', feed_id))
if not feed: continue
feed.save_feed_history(500, "Error", tb)
feed_code = 500
fetched_feed = None
# mail_feed_error_to_admin(feed, e, local_vars=locals())
if (not settings.DEBUG and hasattr(settings, 'RAVEN_CLIENT') and
settings.RAVEN_CLIENT):
settings.RAVEN_CLIENT.captureException()
if not feed_code:
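# Map the fetch outcome onto a pseudo-HTTP status code for the analytics
# record: FEED_OK -> 200, FEED_SAME -> 304, FEED_ERRHTTP -> 400,
# FEED_ERREXC -> 500, FEED_ERRPARSE -> 550. Codes already set by the
# exception handlers above (e.g. 505/559 timeouts) are kept as-is.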
if ret_feed == FEED_OK:
feed_code = 200
elif ret_feed == FEED_SAME:
feed_code = 304
elif ret_feed == FEED_ERRHTTP:
feed_code = 400
if ret_feed == FEED_ERREXC:
feed_code = 500
elif ret_feed == FEED_ERRPARSE:
feed_code = 550
if not feed: continue
feed = self.refresh_feed(feed.pk)
if not feed: continue
if ((self.options['force']) or
(random.random() > .9) or
(fetched_feed and
feed.feed_link and
feed.has_page and
(ret_feed == FEED_OK or
(ret_feed == FEED_SAME and feed.stories_last_month > 10)))):
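# Fetch the original page when the run is forced, for a ~10% random sample,
# or when the feed fetch succeeded (or was unchanged but reasonably active)
# and the feed actually has a page worth scraping.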
logging.debug(u' ---> [%-30s] ~FYFetching page: %s' % (feed.log_title[:30], feed.feed_link))
page_importer = PageImporter(feed)
try:
page_data = page_importer.fetch_page()
page_duration = time.time() - start_duration
except SoftTimeLimitExceeded, e:
logging.debug(" ---> [%-30s] ~BR~FWTime limit hit!~SB~FR Moving on to next feed..." % feed)
page_data = None
feed.save_feed_history(557, 'Timeout', e)
except TimeoutError, e:
logging.debug(' ---> [%-30s] ~FRPage fetch timed out...' % (feed.log_title[:30]))
page_data = None
feed.save_page_history(555, 'Timeout', '')
except Exception, e:
logging.debug('[%d] ! -------------------------' % (feed_id,))
tb = traceback.format_exc()
logging.error(tb)
logging.debug('[%d] ! -------------------------' % (feed_id,))
feed.save_page_history(550, "Page Error", tb)
fetched_feed = None
page_data = None
# mail_feed_error_to_admin(feed, e, local_vars=locals())
if (not settings.DEBUG and hasattr(settings, 'RAVEN_CLIENT') and
settings.RAVEN_CLIENT):
settings.RAVEN_CLIENT.captureException()
feed = self.refresh_feed(feed.pk)
logging.debug(u' ---> [%-30s] ~FYFetching icon: %s' % (feed.log_title[:30], feed.feed_link))
force = self.options['force']
if random.random() > .99:
force = True
icon_importer = IconImporter(feed, page_data=page_data, force=force)
try:
icon_importer.save()
icon_duration = time.time() - start_duration
except SoftTimeLimitExceeded, e:
logging.debug(" ---> [%-30s] ~BR~FWTime limit hit!~SB~FR Moving on to next feed..." % feed)
feed.save_feed_history(558, 'Timeout', e)
except TimeoutError, e:
logging.debug(' ---> [%-30s] ~FRIcon fetch timed out...' % (feed.log_title[:30]))
feed.save_page_history(556, 'Timeout', '')
except Exception, e:
logging.debug('[%d] ! -------------------------' % (feed_id,))
tb = traceback.format_exc()
logging.error(tb)
logging.debug('[%d] ! -------------------------' % (feed_id,))
# feed.save_feed_history(560, "Icon Error", tb)
# mail_feed_error_to_admin(feed, e, local_vars=locals())
if (not settings.DEBUG and hasattr(settings, 'RAVEN_CLIENT') and
settings.RAVEN_CLIENT):
settings.RAVEN_CLIENT.captureException()
else:
logging.debug(u' ---> [%-30s] ~FBSkipping page fetch: (%s on %s stories) %s' % (feed.log_title[:30], self.feed_trans[ret_feed], feed.stories_last_month, '' if feed.has_page else ' [HAS NO PAGE]'))
feed = self.refresh_feed(feed.pk)
delta = time.time() - start_time
feed.last_load_time = round(delta)
feed.fetched_once = True
try:
feed = feed.save(update_fields=['last_load_time', 'fetched_once'])
except IntegrityError:
logging.debug(" ***> [%-30s] ~FRIntegrityError on feed: %s" % (feed.log_title[:30], feed.feed_address,))
if ret_entries and ret_entries['new']:
self.publish_to_subscribers(feed, ret_entries['new'])
done_msg = (u'%2s ---> [%-30s] ~FYProcessed in ~FM~SB%.4ss~FY~SN (~FB%s~FY) [%s]' % (
identity, feed.log_title[:30], delta,
feed.pk, self.feed_trans[ret_feed],))
logging.debug(done_msg)
total_duration = time.time() - start_duration
MAnalyticsFetcher.add(feed_id=feed.pk, feed_fetch=feed_fetch_duration,
feed_process=feed_process_duration,
page=page_duration, icon=icon_duration,
total=total_duration, feed_code=feed_code)
self.feed_stats[ret_feed] += 1
if len(feed_queue) == 1:
return feed
# time_taken = datetime.datetime.utcnow() - self.time_start
def publish_to_subscribers(self, feed, new_count):
try:
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
listeners_count = r.publish(str(feed.pk), 'story:new_count:%s' % new_count)
if listeners_count:
logging.debug(" ---> [%-30s] ~FMPublished to %s subscribers" % (feed.log_title[:30], listeners_count))
except redis.ConnectionError:
logging.debug(" ***> [%-30s] ~BMRedis is unavailable for real-time." % (feed.log_title[:30],))
def count_unreads_for_subscribers(self, feed):
user_subs = UserSubscription.objects.filter(feed=feed,
active=True,
user__profile__last_seen_on__gte=feed.unread_cutoff)\
.order_by('-last_read_date')
if not user_subs.count():
return
for sub in user_subs:
if not sub.needs_unread_recalc:
sub.needs_unread_recalc = True
sub.save()
if self.options['compute_scores']:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
stories = MStory.objects(story_feed_id=feed.pk,
story_date__gte=feed.unread_cutoff)
stories = Feed.format_stories(stories, feed.pk)
story_hashes = r.zrangebyscore('zF:%s' % feed.pk, int(feed.unread_cutoff.strftime('%s')),
int(time.time() + 60*60*24))
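# zF:<feed_id> is a sorted set of story hashes scored by timestamp; the range
# spans from the unread cutoff to one day in the future. Hashes missing from
# the story query above are re-read from the primary below so the score
# computation sees a complete story list.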
missing_story_hashes = set(story_hashes) - set([s['story_hash'] for s in stories])
if missing_story_hashes:
missing_stories = MStory.objects(story_feed_id=feed.pk,
story_hash__in=missing_story_hashes)\
.read_preference(pymongo.ReadPreference.PRIMARY)
missing_stories = Feed.format_stories(missing_stories, feed.pk)
stories = missing_stories + stories
logging.debug(u' ---> [%-30s] ~FYFound ~SB~FC%s(of %s)/%s~FY~SN un-secondaried stories while computing scores' % (feed.log_title[:30], len(missing_stories), len(missing_story_hashes), len(stories)))
cache.set("S:%s" % feed.pk, stories, 60)
logging.debug(u' ---> [%-30s] ~FYComputing scores: ~SB%s stories~SN with ~SB%s subscribers ~SN(%s/%s/%s)' % (
feed.log_title[:30], len(stories), user_subs.count(),
feed.num_subscribers, feed.active_subscribers, feed.premium_subscribers))
self.calculate_feed_scores_with_stories(user_subs, stories)
elif self.options.get('mongodb_replication_lag'):
logging.debug(u' ---> [%-30s] ~BR~FYSkipping computing scores: ~SB%s seconds~SN of mongodb lag' % (
feed.log_title[:30], self.options.get('mongodb_replication_lag')))
@timelimit(10)
def calculate_feed_scores_with_stories(self, user_subs, stories):
for sub in user_subs:
silent = False if self.options['verbose'] >= 2 else True
sub.calculate_feed_scores(silent=silent, stories=stories)
def add_jobs(self, feeds_queue, feeds_count=1):
""" adds a feed processing job to the pool
"""
self.feeds_queue = feeds_queue
self.feeds_count = feeds_count
def run_jobs(self):
if self.options['single_threaded']:
return self.process_feed_wrapper(self.feeds_queue[0])
else:
for i in range(self.num_threads):
feed_queue = self.feeds_queue[i]
self.workers.append(multiprocessing.Process(target=self.process_feed_wrapper,
args=(feed_queue,)))
for i in range(self.num_threads):
self.workers[i].start()
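# Hypothetical driver sketch (names are illustrative, not defined here): the
# caller is expected to split feed ids into num_threads queues, then:
#   dispatcher.add_jobs(queues, feeds_count=len(feed_ids))
#   dispatcher.run_jobs()
# In single-threaded mode only the first queue is processed in-process.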
| mihaip/NewsBlur | utils/feed_fetcher.py | Python | mit | 46,750 |
# -*- encoding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for pyspark.sql; additional tests are implemented as doctests in
individual modules.
"""
import os
import sys
import subprocess
import pydoc
import shutil
import tempfile
import pickle
import functools
import time
import datetime
import array
import ctypes
import warnings
import py4j
from contextlib import contextmanager
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
from pyspark.util import _exception_message
_pandas_requirement_message = None
try:
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
except ImportError as e:
# If Pandas version requirement is not satisfied, skip related tests.
_pandas_requirement_message = _exception_message(e)
_pyarrow_requirement_message = None
try:
from pyspark.sql.utils import require_minimum_pyarrow_version
require_minimum_pyarrow_version()
except ImportError as e:
# If Arrow version requirement is not satisfied, skip related tests.
_pyarrow_requirement_message = _exception_message(e)
_test_not_compiled_message = None
try:
from pyspark.sql.utils import require_test_compiled
require_test_compiled()
except Exception as e:
_test_not_compiled_message = _exception_message(e)
_have_pandas = _pandas_requirement_message is None
_have_pyarrow = _pyarrow_requirement_message is None
_test_compiled = _test_not_compiled_message is None
from pyspark import SparkContext
from pyspark.sql import SparkSession, SQLContext, HiveContext, Column, Row
from pyspark.sql.types import *
from pyspark.sql.types import UserDefinedType, _infer_type, _make_type_verifier
from pyspark.sql.types import _array_signed_int_typecode_ctype_mappings, _array_type_mappings
from pyspark.sql.types import _array_unsigned_int_typecode_ctype_mappings
from pyspark.sql.types import _merge_type
from pyspark.tests import QuietTest, ReusedPySparkTestCase, PySparkTestCase, SparkSubmitTests
from pyspark.sql.functions import UserDefinedFunction, sha2, lit
from pyspark.sql.window import Window
from pyspark.sql.utils import AnalysisException, ParseException, IllegalArgumentException
class UTCOffsetTimezone(datetime.tzinfo):
"""
Specifies timezone in UTC offset
"""
def __init__(self, offset=0):
self.ZERO = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
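# Illustrative use (sketch): datetime.datetime(2015, 1, 1, tzinfo=UTCOffsetTimezone(2))
# builds a timezone-aware value fixed at UTC+2; both utcoffset() and dst()
# return the same constant offset.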
class ExamplePointUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return 'pyspark.sql.tests'
@classmethod
def scalaUDT(cls):
return 'org.apache.spark.sql.test.ExamplePointUDT'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return ExamplePoint(datum[0], datum[1])
class ExamplePoint:
"""
An example class to demonstrate UDT in Scala, Java, and Python.
"""
__UDT__ = ExamplePointUDT()
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "ExamplePoint(%s,%s)" % (self.x, self.y)
def __str__(self):
return "(%s,%s)" % (self.x, self.y)
def __eq__(self, other):
return isinstance(other, self.__class__) and \
other.x == self.x and other.y == self.y
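# Round-trip sketch: ExamplePointUDT().serialize(ExamplePoint(1.0, 2.0)) yields
# [1.0, 2.0], and deserialize([1.0, 2.0]) rebuilds an equal ExamplePoint; this
# is how the UDT is stored as its array<double> sqlType under the hood.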
class PythonOnlyUDT(UserDefinedType):
"""
User-defined type (UDT) for PythonOnlyPoint; unlike ExamplePointUDT it has no Scala counterpart.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return '__main__'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return PythonOnlyPoint(datum[0], datum[1])
@staticmethod
def foo():
pass
@property
def props(self):
return {}
class PythonOnlyPoint(ExamplePoint):
"""
An example class to demonstrate a UDT implemented in Python only.
"""
__UDT__ = PythonOnlyUDT()
class MyObject(object):
def __init__(self, key, value):
self.key = key
self.value = value
class SQLTestUtils(object):
"""
This util assumes the instance of this to have 'spark' attribute, having a spark session.
It is usually used with the 'ReusedSQLTestCase' class, but it can also be used
standalone as long as the implementing class provides a 'spark' attribute.
"""
@contextmanager
def sql_conf(self, pairs):
"""
A convenient context manager to test configuration-specific logic. It sets each
configuration `key` to the given `value` and restores the previous value on exit.
"""
assert isinstance(pairs, dict), "pairs should be a dictionary."
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
keys = pairs.keys()
new_values = pairs.values()
old_values = [self.spark.conf.get(key, None) for key in keys]
for key, new_value in zip(keys, new_values):
self.spark.conf.set(key, new_value)
try:
yield
finally:
for key, old_value in zip(keys, old_values):
if old_value is None:
self.spark.conf.unset(key)
else:
self.spark.conf.set(key, old_value)
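# Usage sketch (the key/value below are only examples):
#   with self.sql_conf({"spark.sql.shuffle.partitions": "4"}):
#       ...  # code that needs the temporary setting
# On exit the previous value is restored, or the key is unset if it had none.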
class ReusedSQLTestCase(ReusedPySparkTestCase, SQLTestUtils):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.spark = SparkSession(cls.sc)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
cls.spark.stop()
def assertPandasEqual(self, expected, result):
msg = ("DataFrames are not equal: " +
"\n\nExpected:\n%s\n%s" % (expected, expected.dtypes) +
"\n\nResult:\n%s\n%s" % (result, result.dtypes))
self.assertTrue(expected.equals(result), msg=msg)
class DataTypeTests(unittest.TestCase):
# regression test for SPARK-6055
def test_data_type_eq(self):
lt = LongType()
lt2 = pickle.loads(pickle.dumps(LongType()))
self.assertEqual(lt, lt2)
# regression test for SPARK-7978
def test_decimal_type(self):
t1 = DecimalType()
t2 = DecimalType(10, 2)
self.assertTrue(t2 is not t1)
self.assertNotEqual(t1, t2)
t3 = DecimalType(8)
self.assertNotEqual(t2, t3)
# regression test for SPARK-10392
def test_datetype_equal_zero(self):
dt = DateType()
self.assertEqual(dt.fromInternal(0), datetime.date(1970, 1, 1))
# regression test for SPARK-17035
def test_timestamp_microsecond(self):
tst = TimestampType()
self.assertEqual(tst.toInternal(datetime.datetime.max) % 1000000, 999999)
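# datetime.datetime.max has microsecond=999999; toInternal() encodes the value
# as microseconds since the epoch, so modulo 1,000,000 recovers exactly that
# microsecond component.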
def test_empty_row(self):
row = Row()
self.assertEqual(len(row), 0)
def test_struct_field_type_name(self):
struct_field = StructField("a", IntegerType())
self.assertRaises(TypeError, struct_field.typeName)
class SQLTests(ReusedSQLTestCase):
@classmethod
def setUpClass(cls):
ReusedSQLTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.spark.createDataFrame(cls.testData)
@classmethod
def tearDownClass(cls):
ReusedSQLTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_sqlcontext_reuses_sparksession(self):
sqlContext1 = SQLContext(self.sc)
sqlContext2 = SQLContext(self.sc)
self.assertTrue(sqlContext1.sparkSession is sqlContext2.sparkSession)
def tearDown(self):
super(SQLTests, self).tearDown()
# tear down test_bucketed_write state
self.spark.sql("DROP TABLE IF EXISTS pyspark_bucket")
def test_row_should_be_read_only(self):
row = Row(a=1, b=2)
self.assertEqual(1, row.a)
def foo():
row.a = 3
self.assertRaises(Exception, foo)
row2 = self.spark.range(10).first()
self.assertEqual(0, row2.id)
def foo2():
row2.id = 2
self.assertRaises(Exception, foo2)
def test_range(self):
self.assertEqual(self.spark.range(1, 1).count(), 0)
self.assertEqual(self.spark.range(1, 0, -1).count(), 1)
self.assertEqual(self.spark.range(0, 1 << 40, 1 << 39).count(), 2)
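# The end bound is exclusive, so range(0, 2**40, 2**39) holds exactly two
# elements (0 and 2**39), hence the expected count of 2.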
self.assertEqual(self.spark.range(-2).count(), 0)
self.assertEqual(self.spark.range(3).count(), 3)
def test_duplicated_column_names(self):
df = self.spark.createDataFrame([(1, 2)], ["c", "c"])
row = df.select('*').first()
self.assertEqual(1, row[0])
self.assertEqual(2, row[1])
self.assertEqual("Row(c=1, c=2)", str(row))
# Cannot access columns
self.assertRaises(AnalysisException, lambda: df.select(df[0]).first())
self.assertRaises(AnalysisException, lambda: df.select(df.c).first())
self.assertRaises(AnalysisException, lambda: df.select(df["c"]).first())
def test_column_name_encoding(self):
"""Ensure that created columns has `str` type consistently."""
columns = self.spark.createDataFrame([('Alice', 1)], ['name', u'age']).columns
self.assertEqual(columns, ['name', 'age'])
self.assertTrue(isinstance(columns[0], str))
self.assertTrue(isinstance(columns[1], str))
def test_explode(self):
from pyspark.sql.functions import explode, explode_outer, posexplode_outer
d = [
Row(a=1, intlist=[1, 2, 3], mapfield={"a": "b"}),
Row(a=1, intlist=[], mapfield={}),
Row(a=1, intlist=None, mapfield=None),
]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
result = data.select(explode(data.intlist).alias("a")).select("a").collect()
self.assertEqual(result[0][0], 1)
self.assertEqual(result[1][0], 2)
self.assertEqual(result[2][0], 3)
result = data.select(explode(data.mapfield).alias("a", "b")).select("a", "b").collect()
self.assertEqual(result[0][0], "a")
self.assertEqual(result[0][1], "b")
result = [tuple(x) for x in data.select(posexplode_outer("intlist")).collect()]
self.assertEqual(result, [(0, 1), (1, 2), (2, 3), (None, None), (None, None)])
result = [tuple(x) for x in data.select(posexplode_outer("mapfield")).collect()]
self.assertEqual(result, [(0, 'a', 'b'), (None, None, None), (None, None, None)])
result = [x[0] for x in data.select(explode_outer("intlist")).collect()]
self.assertEqual(result, [1, 2, 3, None, None])
result = [tuple(x) for x in data.select(explode_outer("mapfield")).collect()]
self.assertEqual(result, [('a', 'b'), (None, None), (None, None)])
def test_and_in_expression(self):
self.assertEqual(4, self.df.filter((self.df.key <= 10) & (self.df.value <= "2")).count())
self.assertRaises(ValueError, lambda: (self.df.key <= 10) and (self.df.value <= "2"))
self.assertEqual(14, self.df.filter((self.df.key <= 3) | (self.df.value < "2")).count())
self.assertRaises(ValueError, lambda: self.df.key <= 3 or self.df.value < "2")
self.assertEqual(99, self.df.filter(~(self.df.key == 1)).count())
self.assertRaises(ValueError, lambda: not self.df.key == 1)
def test_udf_with_callable(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
class PlusFour:
def __call__(self, col):
if col is not None:
return col + 4
call = PlusFour()
pudf = UserDefinedFunction(call, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf_with_partial_function(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
def some_func(col, param):
if col is not None:
return col + param
pfunc = functools.partial(some_func, param=4)
pudf = UserDefinedFunction(pfunc, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf(self):
self.spark.catalog.registerFunction("twoArgs", lambda x, y: len(x) + y, IntegerType())
[row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], 5)
# This is to check if a deprecated 'SQLContext.registerFunction' can call its alias.
sqlContext = self.spark._wrapped
sqlContext.registerFunction("oneArg", lambda x: len(x), IntegerType())
[row] = sqlContext.sql("SELECT oneArg('test')").collect()
self.assertEqual(row[0], 4)
def test_udf2(self):
self.spark.catalog.registerFunction("strlen", lambda string: len(string), IntegerType())
self.spark.createDataFrame(self.sc.parallelize([Row(a="test")]))\
.createOrReplaceTempView("test")
[res] = self.spark.sql("SELECT strlen(a) FROM test WHERE strlen(a) > 1").collect()
self.assertEqual(4, res[0])
def test_udf3(self):
two_args = self.spark.catalog.registerFunction(
"twoArgs", UserDefinedFunction(lambda x, y: len(x) + y))
self.assertEqual(two_args.deterministic, True)
[row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], u'5')
def test_udf_registration_return_type_none(self):
two_args = self.spark.catalog.registerFunction(
"twoArgs", UserDefinedFunction(lambda x, y: len(x) + y, "integer"), None)
self.assertEqual(two_args.deterministic, True)
[row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], 5)
def test_udf_registration_return_type_not_none(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(TypeError, "Invalid returnType"):
self.spark.catalog.registerFunction(
"f", UserDefinedFunction(lambda x, y: len(x) + y, StringType()), StringType())
def test_nondeterministic_udf(self):
# Test that nondeterministic UDFs are evaluated only once in chained UDF evaluations
from pyspark.sql.functions import udf
import random
udf_random_col = udf(lambda: int(100 * random.random()), IntegerType()).asNondeterministic()
self.assertEqual(udf_random_col.deterministic, False)
df = self.spark.createDataFrame([Row(1)]).select(udf_random_col().alias('RAND'))
udf_add_ten = udf(lambda rand: rand + 10, IntegerType())
[row] = df.withColumn('RAND_PLUS_TEN', udf_add_ten('RAND')).collect()
self.assertEqual(row[0] + 10, row[1])
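# If the nondeterministic UDF were re-evaluated for RAND_PLUS_TEN, the columns
# would almost never differ by exactly 10; the equality therefore shows the
# random value was computed once and reused.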
def test_nondeterministic_udf2(self):
import random
from pyspark.sql.functions import udf
random_udf = udf(lambda: random.randint(6, 6), IntegerType()).asNondeterministic()
self.assertEqual(random_udf.deterministic, False)
random_udf1 = self.spark.catalog.registerFunction("randInt", random_udf)
self.assertEqual(random_udf1.deterministic, False)
[row] = self.spark.sql("SELECT randInt()").collect()
self.assertEqual(row[0], 6)
[row] = self.spark.range(1).select(random_udf1()).collect()
self.assertEqual(row[0], 6)
[row] = self.spark.range(1).select(random_udf()).collect()
self.assertEqual(row[0], 6)
# render_doc() reproduces the help() exception without printing output
pydoc.render_doc(udf(lambda: random.randint(6, 6), IntegerType()))
pydoc.render_doc(random_udf)
pydoc.render_doc(random_udf1)
pydoc.render_doc(udf(lambda x: x).asNondeterministic)
def test_nondeterministic_udf3(self):
# regression test for SPARK-23233
from pyspark.sql.functions import udf
f = udf(lambda x: x)
# Here we cache the JVM UDF instance.
self.spark.range(1).select(f("id"))
# This should reset the cache to set the deterministic status correctly.
f = f.asNondeterministic()
# Check the deterministic status of udf.
df = self.spark.range(1).select(f("id"))
deterministic = df._jdf.logicalPlan().projectList().head().deterministic()
self.assertFalse(deterministic)
def test_nondeterministic_udf_in_aggregate(self):
from pyspark.sql.functions import udf, sum
import random
udf_random_col = udf(lambda: int(100 * random.random()), 'int').asNondeterministic()
df = self.spark.range(10)
with QuietTest(self.sc):
with self.assertRaisesRegexp(AnalysisException, "nondeterministic"):
df.groupby('id').agg(sum(udf_random_col())).collect()
with self.assertRaisesRegexp(AnalysisException, "nondeterministic"):
df.agg(sum(udf_random_col())).collect()
def test_chained_udf(self):
self.spark.catalog.registerFunction("double", lambda x: x + x, IntegerType())
[row] = self.spark.sql("SELECT double(1)").collect()
self.assertEqual(row[0], 2)
[row] = self.spark.sql("SELECT double(double(1))").collect()
self.assertEqual(row[0], 4)
[row] = self.spark.sql("SELECT double(double(1) + 1)").collect()
self.assertEqual(row[0], 6)
def test_single_udf_with_repeated_argument(self):
# regression test for SPARK-20685
self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
row = self.spark.sql("SELECT add(1, 1)").first()
self.assertEqual(tuple(row), (2, ))
def test_multiple_udfs(self):
self.spark.catalog.registerFunction("double", lambda x: x * 2, IntegerType())
[row] = self.spark.sql("SELECT double(1), double(2)").collect()
self.assertEqual(tuple(row), (2, 4))
[row] = self.spark.sql("SELECT double(double(1)), double(double(2) + 2)").collect()
self.assertEqual(tuple(row), (4, 12))
self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
[row] = self.spark.sql("SELECT double(add(1, 2)), add(double(2), 1)").collect()
self.assertEqual(tuple(row), (6, 5))
def test_udf_in_filter_on_top_of_outer_join(self):
from pyspark.sql.functions import udf
left = self.spark.createDataFrame([Row(a=1)])
right = self.spark.createDataFrame([Row(a=1)])
df = left.join(right, on='a', how='left_outer')
df = df.withColumn('b', udf(lambda x: 'x')(df.a))
self.assertEqual(df.filter('b = "x"').collect(), [Row(a=1, b='x')])
def test_udf_in_filter_on_top_of_join(self):
# regression test for SPARK-18589
from pyspark.sql.functions import udf
left = self.spark.createDataFrame([Row(a=1)])
right = self.spark.createDataFrame([Row(b=1)])
f = udf(lambda a, b: a == b, BooleanType())
df = left.crossJoin(right).filter(f("a", "b"))
self.assertEqual(df.collect(), [Row(a=1, b=1)])
def test_udf_without_arguments(self):
self.spark.catalog.registerFunction("foo", lambda: "bar")
[row] = self.spark.sql("SELECT foo()").collect()
self.assertEqual(row[0], "bar")
def test_udf_with_array_type(self):
d = [Row(l=list(range(3)), d={"key": list(range(5))})]
rdd = self.sc.parallelize(d)
self.spark.createDataFrame(rdd).createOrReplaceTempView("test")
self.spark.catalog.registerFunction("copylist", lambda l: list(l), ArrayType(IntegerType()))
self.spark.catalog.registerFunction("maplen", lambda d: len(d), IntegerType())
[(l1, l2)] = self.spark.sql("select copylist(l), maplen(d) from test").collect()
self.assertEqual(list(range(3)), l1)
self.assertEqual(1, l2)
def test_broadcast_in_udf(self):
bar = {"a": "aa", "b": "bb", "c": "abc"}
foo = self.sc.broadcast(bar)
self.spark.catalog.registerFunction("MYUDF", lambda x: foo.value[x] if x else '')
[res] = self.spark.sql("SELECT MYUDF('c')").collect()
self.assertEqual("abc", res[0])
[res] = self.spark.sql("SELECT MYUDF('')").collect()
self.assertEqual("", res[0])
def test_udf_with_filter_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a < 2, BooleanType())
sel = df.select(col("key"), col("value")).filter((my_filter(col("key"))) & (df.value < "2"))
self.assertEqual(sel.collect(), [Row(key=1, value='1')])
def test_udf_with_aggregate_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col, sum
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a == 1, BooleanType())
sel = df.select(col("key")).distinct().filter(my_filter(col("key")))
self.assertEqual(sel.collect(), [Row(key=1)])
my_copy = udf(lambda x: x, IntegerType())
my_add = udf(lambda a, b: int(a + b), IntegerType())
my_strlen = udf(lambda x: len(x), IntegerType())
sel = df.groupBy(my_copy(col("key")).alias("k"))\
.agg(sum(my_strlen(col("value"))).alias("s"))\
.select(my_add(col("k"), col("s")).alias("t"))
self.assertEqual(sel.collect(), [Row(t=4), Row(t=3)])
def test_udf_in_generate(self):
from pyspark.sql.functions import udf, explode
df = self.spark.range(5)
f = udf(lambda x: list(range(x)), ArrayType(LongType()))
row = df.select(explode(f(*df))).groupBy().sum().first()
self.assertEqual(row[0], 10)
df = self.spark.range(3)
res = df.select("id", explode(f(df.id))).collect()
self.assertEqual(res[0][0], 1)
self.assertEqual(res[0][1], 0)
self.assertEqual(res[1][0], 2)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 2)
self.assertEqual(res[2][1], 1)
range_udf = udf(lambda value: list(range(value - 1, value + 1)), ArrayType(IntegerType()))
res = df.select("id", explode(range_udf(df.id))).collect()
self.assertEqual(res[0][0], 0)
self.assertEqual(res[0][1], -1)
self.assertEqual(res[1][0], 0)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 1)
self.assertEqual(res[2][1], 0)
self.assertEqual(res[3][0], 1)
self.assertEqual(res[3][1], 1)
def test_udf_with_order_by_and_limit(self):
from pyspark.sql.functions import udf
my_copy = udf(lambda x: x, IntegerType())
df = self.spark.range(10).orderBy("id")
res = df.select(df.id, my_copy(df.id).alias("copy")).limit(1)
res.explain(True)
self.assertEqual(res.collect(), [Row(id=0, copy=0)])
def test_udf_registration_returns_udf(self):
df = self.spark.range(10)
add_three = self.spark.udf.register("add_three", lambda x: x + 3, IntegerType())
self.assertListEqual(
df.selectExpr("add_three(id) AS plus_three").collect(),
df.select(add_three("id").alias("plus_three")).collect()
)
# This is to check if a 'SQLContext.udf' can call its alias.
sqlContext = self.spark._wrapped
add_four = sqlContext.udf.register("add_four", lambda x: x + 4, IntegerType())
self.assertListEqual(
df.selectExpr("add_four(id) AS plus_four").collect(),
df.select(add_four("id").alias("plus_four")).collect()
)
def test_non_existed_udf(self):
spark = self.spark
self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udf",
lambda: spark.udf.registerJavaFunction("udf1", "non_existed_udf"))
# This is to check if a deprecated 'SQLContext.registerJavaFunction' can call its alias.
sqlContext = spark._wrapped
self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udf",
lambda: sqlContext.registerJavaFunction("udf1", "non_existed_udf"))
def test_non_existed_udaf(self):
spark = self.spark
self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udaf",
lambda: spark.udf.registerJavaUDAF("udaf1", "non_existed_udaf"))
def test_linesep_text(self):
df = self.spark.read.text("python/test_support/sql/ages_newlines.csv", lineSep=",")
expected = [Row(value=u'Joe'), Row(value=u'20'), Row(value=u'"Hi'),
Row(value=u'\nI am Jeo"\nTom'), Row(value=u'30'),
Row(value=u'"My name is Tom"\nHyukjin'), Row(value=u'25'),
Row(value=u'"I am Hyukjin\n\nI love Spark!"\n')]
self.assertEqual(df.collect(), expected)
tpath = tempfile.mkdtemp()
shutil.rmtree(tpath)
try:
df.write.text(tpath, lineSep="!")
expected = [Row(value=u'Joe!20!"Hi!'), Row(value=u'I am Jeo"'),
Row(value=u'Tom!30!"My name is Tom"'),
Row(value=u'Hyukjin!25!"I am Hyukjin'),
Row(value=u''), Row(value=u'I love Spark!"'),
Row(value=u'!')]
readback = self.spark.read.text(tpath)
self.assertEqual(readback.collect(), expected)
finally:
shutil.rmtree(tpath)
def test_multiline_json(self):
people1 = self.spark.read.json("python/test_support/sql/people.json")
people_array = self.spark.read.json("python/test_support/sql/people_array.json",
multiLine=True)
self.assertEqual(people1.collect(), people_array.collect())
def test_encoding_json(self):
people_array = self.spark.read\
.json("python/test_support/sql/people_array_utf16le.json",
multiLine=True, encoding="UTF-16LE")
expected = [Row(age=30, name=u'Andy'), Row(age=19, name=u'Justin')]
self.assertEqual(people_array.collect(), expected)
def test_linesep_json(self):
df = self.spark.read.json("python/test_support/sql/people.json", lineSep=",")
expected = [Row(_corrupt_record=None, name=u'Michael'),
Row(_corrupt_record=u' "age":30}\n{"name":"Justin"', name=None),
Row(_corrupt_record=u' "age":19}\n', name=None)]
self.assertEqual(df.collect(), expected)
tpath = tempfile.mkdtemp()
shutil.rmtree(tpath)
try:
df = self.spark.read.json("python/test_support/sql/people.json")
df.write.json(tpath, lineSep="!!")
readback = self.spark.read.json(tpath, lineSep="!!")
self.assertEqual(readback.collect(), df.collect())
finally:
shutil.rmtree(tpath)
def test_multiline_csv(self):
ages_newlines = self.spark.read.csv(
"python/test_support/sql/ages_newlines.csv", multiLine=True)
expected = [Row(_c0=u'Joe', _c1=u'20', _c2=u'Hi,\nI am Jeo'),
Row(_c0=u'Tom', _c1=u'30', _c2=u'My name is Tom'),
Row(_c0=u'Hyukjin', _c1=u'25', _c2=u'I am Hyukjin\n\nI love Spark!')]
self.assertEqual(ages_newlines.collect(), expected)
def test_ignorewhitespace_csv(self):
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.spark.createDataFrame([[" a", "b ", " c "]]).write.csv(
tmpPath,
ignoreLeadingWhiteSpace=False,
ignoreTrailingWhiteSpace=False)
expected = [Row(value=u' a,b , c ')]
readback = self.spark.read.text(tmpPath)
self.assertEqual(readback.collect(), expected)
shutil.rmtree(tmpPath)
def test_read_multiple_orc_file(self):
df = self.spark.read.orc(["python/test_support/sql/orc_partitioned/b=0/c=0",
"python/test_support/sql/orc_partitioned/b=1/c=1"])
self.assertEqual(2, df.count())
def test_udf_with_input_file_name(self):
from pyspark.sql.functions import udf, input_file_name
sourceFile = udf(lambda path: path, StringType())
filePath = "python/test_support/sql/people1.json"
row = self.spark.read.json(filePath).select(sourceFile(input_file_name())).first()
self.assertTrue(row[0].find("people1.json") != -1)
def test_udf_with_input_file_name_for_hadooprdd(self):
from pyspark.sql.functions import udf, input_file_name
def filename(path):
return path
sameText = udf(filename, StringType())
rdd = self.sc.textFile('python/test_support/sql/people.json')
df = self.spark.read.json(rdd).select(input_file_name().alias('file'))
row = df.select(sameText(df['file'])).first()
self.assertTrue(row[0].find("people.json") != -1)
rdd2 = self.sc.newAPIHadoopFile(
'python/test_support/sql/people.json',
'org.apache.hadoop.mapreduce.lib.input.TextInputFormat',
'org.apache.hadoop.io.LongWritable',
'org.apache.hadoop.io.Text')
df2 = self.spark.read.json(rdd2).select(input_file_name().alias('file'))
row2 = df2.select(sameText(df2['file'])).first()
self.assertTrue(row2[0].find("people.json") != -1)
def test_udf_defers_judf_initialization(self):
# This is kept separate from UDFInitializationTests to avoid context
# initialization when the udf is called.
from pyspark.sql.functions import UserDefinedFunction
f = UserDefinedFunction(lambda x: x, StringType())
self.assertIsNone(
f._judf_placeholder,
"judf should not be initialized before the first call."
)
self.assertIsInstance(f("foo"), Column, "UDF call should return a Column.")
self.assertIsNotNone(
f._judf_placeholder,
"judf should be initialized after UDF has been called."
)
def test_udf_with_string_return_type(self):
from pyspark.sql.functions import UserDefinedFunction
add_one = UserDefinedFunction(lambda x: x + 1, "integer")
make_pair = UserDefinedFunction(lambda x: (-x, x), "struct<x:integer,y:integer>")
make_array = UserDefinedFunction(
lambda x: [float(x) for x in range(x, x + 3)], "array<double>")
expected = (2, Row(x=-1, y=1), [1.0, 2.0, 3.0])
actual = (self.spark.range(1, 2).toDF("x")
.select(add_one("x"), make_pair("x"), make_array("x"))
.first())
self.assertTupleEqual(expected, actual)
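# The string return types above ("integer", "struct<x:integer,y:integer>",
# "array<double>") are DDL-formatted type names, accepted as a shorthand for
# the corresponding DataType objects.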
def test_udf_shouldnt_accept_noncallable_object(self):
from pyspark.sql.functions import UserDefinedFunction
non_callable = None
self.assertRaises(TypeError, UserDefinedFunction, non_callable, StringType())
def test_udf_with_decorator(self):
from pyspark.sql.functions import lit, udf
from pyspark.sql.types import IntegerType, DoubleType
@udf(IntegerType())
def add_one(x):
if x is not None:
return x + 1
@udf(returnType=DoubleType())
def add_two(x):
if x is not None:
return float(x + 2)
@udf
def to_upper(x):
if x is not None:
return x.upper()
@udf()
def to_lower(x):
if x is not None:
return x.lower()
@udf
def substr(x, start, end):
if x is not None:
return x[start:end]
@udf("long")
def trunc(x):
return int(x)
@udf(returnType="double")
def as_double(x):
return float(x)
df = (
self.spark
.createDataFrame(
[(1, "Foo", "foobar", 3.0)], ("one", "Foo", "foobar", "float"))
.select(
add_one("one"), add_two("one"),
to_upper("Foo"), to_lower("Foo"),
substr("foobar", lit(0), lit(3)),
trunc("float"), as_double("one")))
self.assertListEqual(
[tpe for _, tpe in df.dtypes],
["int", "double", "string", "string", "string", "bigint", "double"]
)
self.assertListEqual(
list(df.first()),
[2, 3.0, "FOO", "foo", "foo", 3, 1.0]
)
def test_udf_wrapper(self):
from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType
def f(x):
"""Identity"""
return x
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
class F(object):
"""Identity"""
def __call__(self, x):
return x
f = F()
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
f = functools.partial(f, x=1)
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
def test_validate_column_types(self):
from pyspark.sql.functions import udf, to_json
from pyspark.sql.column import _to_java_column
self.assertTrue("Column" in _to_java_column("a").getClass().toString())
self.assertTrue("Column" in _to_java_column(u"a").getClass().toString())
self.assertTrue("Column" in _to_java_column(self.spark.range(1).id).getClass().toString())
self.assertRaisesRegexp(
TypeError,
"Invalid argument, not a string or column",
lambda: _to_java_column(1))
class A():
pass
self.assertRaises(TypeError, lambda: _to_java_column(A()))
self.assertRaises(TypeError, lambda: _to_java_column([]))
self.assertRaisesRegexp(
TypeError,
"Invalid argument, not a string or column",
lambda: udf(lambda x: x)(None))
self.assertRaises(TypeError, lambda: to_json(1))
def test_basic_functions(self):
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
df.count()
df.collect()
df.schema
# cache and checkpoint
self.assertFalse(df.is_cached)
df.persist()
df.unpersist(True)
df.cache()
self.assertTrue(df.is_cached)
self.assertEqual(2, df.count())
df.createOrReplaceTempView("temp")
df = self.spark.sql("select foo from temp")
df.count()
df.collect()
def test_apply_schema_to_row(self):
df = self.spark.read.json(self.sc.parallelize(["""{"a":2}"""]))
df2 = self.spark.createDataFrame(df.rdd.map(lambda x: x), df.schema)
self.assertEqual(df.collect(), df2.collect())
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_infer_schema_to_local(self):
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
df = self.spark.createDataFrame(input)
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_apply_schema_to_dict_and_rows(self):
schema = StructType().add("b", StringType()).add("a", IntegerType())
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
for verify in [False, True]:
df = self.spark.createDataFrame(input, schema, verifySchema=verify)
df2 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(10, df3.count())
input = [Row(a=x, b=str(x)) for x in range(10)]
df4 = self.spark.createDataFrame(input, schema, verifySchema=verify)
self.assertEqual(10, df4.count())
def test_create_dataframe_schema_mismatch(self):
input = [Row(a=1)]
rdd = self.sc.parallelize(range(3)).map(lambda i: Row(a=i))
schema = StructType([StructField("a", IntegerType()), StructField("b", StringType())])
df = self.spark.createDataFrame(rdd, schema)
self.assertRaises(Exception, lambda: df.show())
def test_serialize_nested_array_and_map(self):
d = [Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
row = df.head()
self.assertEqual(1, len(row.l))
self.assertEqual(1, row.l[0].a)
self.assertEqual("2", row.d["key"].d)
l = df.rdd.map(lambda x: x.l).first()
self.assertEqual(1, len(l))
self.assertEqual('s', l[0].b)
d = df.rdd.map(lambda x: x.d).first()
self.assertEqual(1, len(d))
self.assertEqual(1.0, d["key"].c)
row = df.rdd.map(lambda x: x.d["key"]).first()
self.assertEqual(1.0, row.c)
self.assertEqual("2", row.d)
def test_infer_schema(self):
d = [Row(l=[], d={}, s=None),
Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")}, s="")]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
self.assertEqual([], df.rdd.map(lambda r: r.l).first())
self.assertEqual([None, ""], df.rdd.map(lambda r: r.s).collect())
df.createOrReplaceTempView("test")
result = self.spark.sql("SELECT l[0].a from test where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
self.assertEqual({}, df2.rdd.map(lambda r: r.d).first())
self.assertEqual([None, ""], df2.rdd.map(lambda r: r.s).collect())
df2.createOrReplaceTempView("test2")
result = self.spark.sql("SELECT l[0].a from test2 where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
def test_infer_schema_not_enough_names(self):
df = self.spark.createDataFrame([["a", "b"]], ["col1"])
self.assertEqual(df.columns, ['col1', '_2'])
def test_infer_schema_fails(self):
with self.assertRaisesRegexp(TypeError, 'field a'):
self.spark.createDataFrame(self.spark.sparkContext.parallelize([[1, 1], ["x", 1]]),
schema=["a", "b"], samplingRatio=0.99)
def test_infer_nested_schema(self):
NestedRow = Row("f1", "f2")
nestedRdd1 = self.sc.parallelize([NestedRow([1, 2], {"row1": 1.0}),
NestedRow([2, 3], {"row2": 2.0})])
df = self.spark.createDataFrame(nestedRdd1)
self.assertEqual(Row(f1=[1, 2], f2={u'row1': 1.0}), df.collect()[0])
nestedRdd2 = self.sc.parallelize([NestedRow([[1, 2], [2, 3]], [1, 2]),
NestedRow([[2, 3], [3, 4]], [2, 3])])
df = self.spark.createDataFrame(nestedRdd2)
self.assertEqual(Row(f1=[[1, 2], [2, 3]], f2=[1, 2]), df.collect()[0])
from collections import namedtuple
CustomRow = namedtuple('CustomRow', 'field1 field2')
rdd = self.sc.parallelize([CustomRow(field1=1, field2="row1"),
CustomRow(field1=2, field2="row2"),
CustomRow(field1=3, field2="row3")])
df = self.spark.createDataFrame(rdd)
self.assertEqual(Row(field1=1, field2=u'row1'), df.first())
def test_create_dataframe_from_dict_respects_schema(self):
df = self.spark.createDataFrame([{'a': 1}], ["b"])
self.assertEqual(df.columns, ['b'])
def test_create_dataframe_from_objects(self):
data = [MyObject(1, "1"), MyObject(2, "2")]
df = self.spark.createDataFrame(data)
self.assertEqual(df.dtypes, [("key", "bigint"), ("value", "string")])
self.assertEqual(df.first(), Row(key=1, value="1"))
def test_select_null_literal(self):
df = self.spark.sql("select null as col")
self.assertEqual(Row(col=None), df.first())
def test_apply_schema(self):
from datetime import date, datetime
rdd = self.sc.parallelize([(127, -128, -32768, 32767, 2147483647, 1.0,
date(2010, 1, 1), datetime(2010, 1, 1, 1, 1, 1),
{"a": 1}, (2,), [1, 2, 3], None)])
schema = StructType([
StructField("byte1", ByteType(), False),
StructField("byte2", ByteType(), False),
StructField("short1", ShortType(), False),
StructField("short2", ShortType(), False),
StructField("int1", IntegerType(), False),
StructField("float1", FloatType(), False),
StructField("date1", DateType(), False),
StructField("time1", TimestampType(), False),
StructField("map1", MapType(StringType(), IntegerType(), False), False),
StructField("struct1", StructType([StructField("b", ShortType(), False)]), False),
StructField("list1", ArrayType(ByteType(), False), False),
StructField("null1", DoubleType(), True)])
df = self.spark.createDataFrame(rdd, schema)
results = df.rdd.map(lambda x: (x.byte1, x.byte2, x.short1, x.short2, x.int1, x.float1,
x.date1, x.time1, x.map1["a"], x.struct1.b, x.list1, x.null1))
r = (127, -128, -32768, 32767, 2147483647, 1.0, date(2010, 1, 1),
datetime(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None)
self.assertEqual(r, results.first())
df.createOrReplaceTempView("table2")
r = self.spark.sql("SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, " +
"short1 + 1 AS short1, short2 - 1 AS short2, int1 - 1 AS int1, " +
"float1 + 1.5 as float1 FROM table2").first()
self.assertEqual((126, -127, -32767, 32766, 2147483646, 2.5), tuple(r))
def test_struct_in_map(self):
d = [Row(m={Row(i=1): Row(s="")})]
df = self.sc.parallelize(d).toDF()
k, v = list(df.head().m.items())[0]
self.assertEqual(1, k.i)
self.assertEqual("", v.s)
def test_convert_row_to_dict(self):
row = Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})
self.assertEqual(1, row.asDict()['l'][0].a)
df = self.sc.parallelize([row]).toDF()
df.createOrReplaceTempView("test")
row = self.spark.sql("select l, d from test").head()
self.assertEqual(1, row.asDict()["l"][0].a)
self.assertEqual(1.0, row.asDict()['d']['key'].c)
def test_udt(self):
from pyspark.sql.types import _parse_datatype_json_string, _infer_type, _make_type_verifier
from pyspark.sql.tests import ExamplePointUDT, ExamplePoint
def check_datatype(datatype):
pickled = pickle.loads(pickle.dumps(datatype))
assert datatype == pickled
scala_datatype = self.spark._jsparkSession.parseDataType(datatype.json())
python_datatype = _parse_datatype_json_string(scala_datatype.json())
assert datatype == python_datatype
check_datatype(ExamplePointUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
check_datatype(structtype_with_udt)
p = ExamplePoint(1.0, 2.0)
self.assertEqual(_infer_type(p), ExamplePointUDT())
_make_type_verifier(ExamplePointUDT())(ExamplePoint(1.0, 2.0))
self.assertRaises(ValueError, lambda: _make_type_verifier(ExamplePointUDT())([1.0, 2.0]))
check_datatype(PythonOnlyUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
check_datatype(structtype_with_udt)
p = PythonOnlyPoint(1.0, 2.0)
self.assertEqual(_infer_type(p), PythonOnlyUDT())
_make_type_verifier(PythonOnlyUDT())(PythonOnlyPoint(1.0, 2.0))
self.assertRaises(
ValueError,
lambda: _make_type_verifier(PythonOnlyUDT())([1.0, 2.0]))
def test_simple_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.show()
def test_nested_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", ArrayType(PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, [PythonOnlyPoint(float(i), float(i))]) for i in range(10)],
schema=schema)
df.collect()
schema = StructType().add("key", LongType()).add("val",
MapType(LongType(), PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, {i % 3: PythonOnlyPoint(float(i + 1), float(i + 1))}) for i in range(10)],
schema=schema)
df.collect()
def test_complex_nested_udt_in_df(self):
from pyspark.sql.functions import udf
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.collect()
gd = df.groupby("key").agg({"val": "collect_list"})
gd.collect()
udf = udf(lambda k, v: [(k, v[0])], ArrayType(df.schema))
gd.select(udf(*gd)).collect()
def test_udt_with_none(self):
df = self.spark.range(0, 10, 1, 1)
def myudf(x):
if x > 0:
return PythonOnlyPoint(float(x), float(x))
self.spark.catalog.registerFunction("udf", myudf, PythonOnlyUDT())
rows = [r[0] for r in df.selectExpr("udf(id)").take(2)]
self.assertEqual(rows, [None, PythonOnlyPoint(1, 1)])
def test_nonparam_udf_with_aggregate(self):
import pyspark.sql.functions as f
df = self.spark.createDataFrame([(1, 2), (1, 2)])
f_udf = f.udf(lambda: "const_str")
rows = df.distinct().withColumn("a", f_udf()).collect()
self.assertEqual(rows, [Row(_1=1, _2=2, a=u'const_str')])
def test_infer_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), ExamplePointUDT)
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), PythonOnlyUDT)
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_apply_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = (1.0, ExamplePoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = (1.0, PythonOnlyPoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_udf_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: ExamplePoint(p.x + 1, p.y + 1), ExamplePointUDT())
self.assertEqual(ExamplePoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: PythonOnlyPoint(p.x + 1, p.y + 1), PythonOnlyUDT())
self.assertEqual(PythonOnlyPoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
def test_parquet_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
output_dir = os.path.join(self.tempdir.name, "labeled_point")
df0.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
df0.write.parquet(output_dir, mode='overwrite')
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_union_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row1 = (1.0, ExamplePoint(1.0, 2.0))
row2 = (2.0, ExamplePoint(3.0, 4.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df1 = self.spark.createDataFrame([row1], schema)
df2 = self.spark.createDataFrame([row2], schema)
result = df1.union(df2).orderBy("label").collect()
self.assertEqual(
result,
[
Row(label=1.0, point=ExamplePoint(1.0, 2.0)),
Row(label=2.0, point=ExamplePoint(3.0, 4.0))
]
)
def test_cast_to_string_with_udt(self):
from pyspark.sql.tests import ExamplePointUDT, ExamplePoint
from pyspark.sql.functions import col
row = (ExamplePoint(1.0, 2.0), PythonOnlyPoint(3.0, 4.0))
schema = StructType([StructField("point", ExamplePointUDT(), False),
StructField("pypoint", PythonOnlyUDT(), False)])
df = self.spark.createDataFrame([row], schema)
result = df.select(col('point').cast('string'), col('pypoint').cast('string')).head()
self.assertEqual(result, Row(point=u'(1.0, 2.0)', pypoint=u'[3.0, 4.0]'))
def test_column_operators(self):
ci = self.df.key
cs = self.df.value
c = ci == cs
self.assertTrue(isinstance((- ci - 1 - 2) % 3 * 2.5 / 3.5, Column))
rcc = (1 + ci), (1 - ci), (1 * ci), (1 / ci), (1 % ci), (1 ** ci), (ci ** 1)
self.assertTrue(all(isinstance(c, Column) for c in rcc))
cb = [ci == 5, ci != 0, ci > 3, ci < 4, ci >= 0, ci <= 7]
self.assertTrue(all(isinstance(c, Column) for c in cb))
cbool = (ci & ci), (ci | ci), (~ci)
self.assertTrue(all(isinstance(c, Column) for c in cbool))
css = cs.contains('a'), cs.like('a'), cs.rlike('a'), cs.asc(), cs.desc(),\
cs.startswith('a'), cs.endswith('a'), ci.eqNullSafe(cs)
self.assertTrue(all(isinstance(c, Column) for c in css))
self.assertTrue(isinstance(ci.cast(LongType()), Column))
self.assertRaisesRegexp(ValueError,
"Cannot apply 'in' operator against a column",
lambda: 1 in cs)
def test_column_getitem(self):
from pyspark.sql.functions import col
self.assertIsInstance(col("foo")[1:3], Column)
self.assertIsInstance(col("foo")[0], Column)
self.assertIsInstance(col("foo")["bar"], Column)
self.assertRaises(ValueError, lambda: col("foo")[0:10:2])
def test_column_select(self):
df = self.df
self.assertEqual(self.testData, df.select("*").collect())
self.assertEqual(self.testData, df.select(df.key, df.value).collect())
self.assertEqual([Row(value='1')], df.where(df.key == 1).select(df.value).collect())
def test_freqItems(self):
vals = [Row(a=1, b=-2.0) if i % 2 == 0 else Row(a=i, b=i * 1.0) for i in range(100)]
df = self.sc.parallelize(vals).toDF()
items = df.stat.freqItems(("a", "b"), 0.4).collect()[0]
self.assertTrue(1 in items[0])
self.assertTrue(-2.0 in items[1])
def test_aggregator(self):
df = self.df
g = df.groupBy()
self.assertEqual([99, 100], sorted(g.agg({'key': 'max', 'value': 'count'}).collect()[0]))
self.assertEqual([Row(**{"AVG(key#0)": 49.5})], g.mean().collect())
from pyspark.sql import functions
self.assertEqual((0, u'99'),
tuple(g.agg(functions.first(df.key), functions.last(df.value)).first()))
self.assertTrue(95 < g.agg(functions.approxCountDistinct(df.key)).first()[0])
self.assertEqual(100, g.agg(functions.countDistinct(df.value)).first()[0])
def test_first_last_ignorenulls(self):
from pyspark.sql import functions
df = self.spark.range(0, 100)
df2 = df.select(functions.when(df.id % 3 == 0, None).otherwise(df.id).alias("id"))
df3 = df2.select(functions.first(df2.id, False).alias('a'),
functions.first(df2.id, True).alias('b'),
functions.last(df2.id, False).alias('c'),
functions.last(df2.id, True).alias('d'))
self.assertEqual([Row(a=None, b=1, c=None, d=98)], df3.collect())
def test_approxQuantile(self):
df = self.sc.parallelize([Row(a=i, b=i+10) for i in range(10)]).toDF()
for f in ["a", u"a"]:
aq = df.stat.approxQuantile(f, [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aq, list))
self.assertEqual(len(aq), 3)
self.assertTrue(all(isinstance(q, float) for q in aq))
aqs = df.stat.approxQuantile(["a", u"b"], [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqs, list))
self.assertEqual(len(aqs), 2)
self.assertTrue(isinstance(aqs[0], list))
self.assertEqual(len(aqs[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[0]))
self.assertTrue(isinstance(aqs[1], list))
self.assertEqual(len(aqs[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[1]))
aqt = df.stat.approxQuantile((u"a", "b"), [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqt, list))
self.assertEqual(len(aqt), 2)
self.assertTrue(isinstance(aqt[0], list))
self.assertEqual(len(aqt[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[0]))
self.assertTrue(isinstance(aqt[1], list))
self.assertEqual(len(aqt[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[1]))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(123, [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(("a", 123), [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(["a", 123], [0.1, 0.9], 0.1))
def test_corr(self):
import math
df = self.sc.parallelize([Row(a=i, b=math.sqrt(i)) for i in range(10)]).toDF()
corr = df.stat.corr(u"a", "b")
self.assertTrue(abs(corr - 0.95734012) < 1e-6)
def test_sampleby(self):
df = self.sc.parallelize([Row(a=i, b=(i % 3)) for i in range(10)]).toDF()
sampled = df.stat.sampleBy(u"b", fractions={0: 0.5, 1: 0.5}, seed=0)
self.assertTrue(sampled.count() == 3)
def test_cov(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
cov = df.stat.cov(u"a", "b")
self.assertTrue(abs(cov - 55.0 / 3) < 1e-6)
def test_crosstab(self):
df = self.sc.parallelize([Row(a=i % 3, b=i % 2) for i in range(1, 7)]).toDF()
ct = df.stat.crosstab(u"a", "b").collect()
ct = sorted(ct, key=lambda x: x[0])
for i, row in enumerate(ct):
self.assertEqual(row[0], str(i))
self.assertTrue(row[1], 1)
self.assertTrue(row[2], 1)
def test_math_functions(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
from pyspark.sql import functions
import math
def get_values(l):
return [j[0] for j in l]
def assert_close(a, b):
c = get_values(b)
diff = [abs(v - c[k]) < 1e-6 for k, v in enumerate(a)]
assert sum(diff) == len(a), "values not close enough: %s vs %s" % (a, c)
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos(df.a)).collect())
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos("a")).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df.a)).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df['a'])).collect())
assert_close([math.pow(i, 2 * i) for i in range(10)],
df.select(functions.pow(df.a, df.b)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2.0)).collect())
assert_close([math.hypot(i, 2 * i) for i in range(10)],
df.select(functions.hypot(df.a, df.b)).collect())
def test_rand_functions(self):
df = self.df
from pyspark.sql import functions
rnd = df.select('key', functions.rand()).collect()
for row in rnd:
assert row[1] >= 0.0 and row[1] <= 1.0, "got: %s" % row[1]
rndn = df.select('key', functions.randn(5)).collect()
for row in rndn:
assert row[1] >= -4.0 and row[1] <= 4.0, "got: %s" % row[1]
# If the specified seed is 0, we should use it.
# https://issues.apache.org/jira/browse/SPARK-9691
rnd1 = df.select('key', functions.rand(0)).collect()
rnd2 = df.select('key', functions.rand(0)).collect()
self.assertEqual(sorted(rnd1), sorted(rnd2))
rndn1 = df.select('key', functions.randn(0)).collect()
rndn2 = df.select('key', functions.randn(0)).collect()
self.assertEqual(sorted(rndn1), sorted(rndn2))
def test_string_functions(self):
from pyspark.sql.functions import col, lit
df = self.spark.createDataFrame([['nick']], schema=['name'])
self.assertRaisesRegexp(
TypeError,
"must be the same type",
lambda: df.select(col('name').substr(0, lit(1))))
if sys.version_info.major == 2:
self.assertRaises(
TypeError,
lambda: df.select(col('name').substr(long(0), long(1))))
def test_array_contains_function(self):
from pyspark.sql.functions import array_contains
df = self.spark.createDataFrame([(["1", "2", "3"],), ([],)], ['data'])
actual = df.select(array_contains(df.data, 1).alias('b')).collect()
# The value argument can be implicitly cast to the array's element type.
self.assertEqual([Row(b=True), Row(b=False)], actual)
def test_between_function(self):
df = self.sc.parallelize([
Row(a=1, b=2, c=3),
Row(a=2, b=1, c=3),
Row(a=4, b=1, c=4)]).toDF()
self.assertEqual([Row(a=2, b=1, c=3), Row(a=4, b=1, c=4)],
df.filter(df.a.between(df.b, df.c)).collect())
def test_struct_type(self):
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1.fieldNames(), struct2.names)
self.assertEqual(struct1, struct2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1.fieldNames(), struct2.names)
self.assertNotEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1.fieldNames(), struct2.names)
self.assertEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1.fieldNames(), struct2.names)
self.assertNotEqual(struct1, struct2)
# Catch exception raised during improper construction
self.assertRaises(ValueError, lambda: StructType().add("name"))
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
for field in struct1:
self.assertIsInstance(field, StructField)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertEqual(len(struct1), 2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertIs(struct1["f1"], struct1.fields[0])
self.assertIs(struct1[0], struct1.fields[0])
self.assertEqual(struct1[0:1], StructType(struct1.fields[0:1]))
self.assertRaises(KeyError, lambda: struct1["f9"])
self.assertRaises(IndexError, lambda: struct1[9])
self.assertRaises(TypeError, lambda: struct1[9.9])
def test_parse_datatype_string(self):
from pyspark.sql.types import _all_atomic_types, _parse_datatype_string
for k, t in _all_atomic_types.items():
if t != NullType:
self.assertEqual(t(), _parse_datatype_string(k))
self.assertEqual(IntegerType(), _parse_datatype_string("int"))
self.assertEqual(DecimalType(1, 1), _parse_datatype_string("decimal(1 ,1)"))
self.assertEqual(DecimalType(10, 1), _parse_datatype_string("decimal( 10,1 )"))
self.assertEqual(DecimalType(11, 1), _parse_datatype_string("decimal(11,1)"))
self.assertEqual(
ArrayType(IntegerType()),
_parse_datatype_string("array<int >"))
self.assertEqual(
MapType(IntegerType(), DoubleType()),
_parse_datatype_string("map< int, double >"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("struct<a:int, c:double >"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("a:int, c:double"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("a INT, c DOUBLE"))
def test_metadata_null(self):
schema = StructType([StructField("f1", StringType(), True, None),
StructField("f2", StringType(), True, {'a': None})])
rdd = self.sc.parallelize([["a", "b"], ["c", "d"]])
self.spark.createDataFrame(rdd, schema)
def test_save_and_load(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.json(tmpPath, "overwrite")
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
df.write.save(format="json", mode="overwrite", path=tmpPath,
noUse="this options will not be used in save.")
actual = self.spark.read.load(format="json", path=tmpPath,
noUse="this options will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.spark.read.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
csvpath = os.path.join(tempfile.mkdtemp(), 'data')
df.write.option('quote', None).format('csv').save(csvpath)
shutil.rmtree(tmpPath)
def test_save_and_load_builder(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.mode("overwrite").json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
df.write.mode("overwrite").options(noUse="this options will not be used in save.")\
.option("noUse", "this option will not be used in save.")\
.format("json").save(path=tmpPath)
actual =\
self.spark.read.format("json")\
.load(path=tmpPath, noUse="this options will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.spark.read.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_stream_trigger(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
# Should take at least one arg
try:
df.writeStream.trigger()
except ValueError:
pass
# Should not take multiple args
try:
df.writeStream.trigger(once=True, processingTime='5 seconds')
except ValueError:
pass
# Should not take multiple args
try:
df.writeStream.trigger(processingTime='5 seconds', continuous='1 second')
except ValueError:
pass
# Should take only keyword args
try:
df.writeStream.trigger('5 seconds')
self.fail("Should have thrown an exception")
except TypeError:
pass
def test_stream_read_options(self):
schema = StructType([StructField("data", StringType(), False)])
df = self.spark.readStream\
.format('text')\
.option('path', 'python/test_support/sql/streaming')\
.schema(schema)\
.load()
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_read_options_overwrite(self):
bad_schema = StructType([StructField("test", IntegerType(), False)])
schema = StructType([StructField("data", StringType(), False)])
df = self.spark.readStream.format('csv').option('path', 'python/test_support/sql/fake') \
.schema(bad_schema)\
.load(path='python/test_support/sql/streaming', schema=schema, format='text')
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_save_options(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming') \
.withColumn('id', lit(1))
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream.option('checkpointLocation', chk).queryName('this_query') \
.format('parquet').partitionBy('id').outputMode('append').option('path', out).start()
try:
self.assertEqual(q.name, 'this_query')
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith('.')])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_save_options_overwrite(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
fake1 = os.path.join(tmpPath, 'fake1')
fake2 = os.path.join(tmpPath, 'fake2')
q = df.writeStream.option('checkpointLocation', fake1)\
.format('memory').option('path', fake2) \
.queryName('fake_query').outputMode('append') \
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertEqual(q.name, 'this_query')
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith('.')])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
self.assertFalse(os.path.isdir(fake1)) # should not have been created
self.assertFalse(os.path.isdir(fake2)) # should not have been created
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_status_and_progress(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
def func(x):
time.sleep(1)
return x
from pyspark.sql.functions import col, udf
sleep_udf = udf(func)
# Use "sleep_udf" to delay the progress update so that we can test `lastProgress` when there
# were no updates.
q = df.select(sleep_udf(col("value")).alias('value')).writeStream \
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
# "lastProgress" will return None in most cases. However, as it may be flaky when
# Jenkins is very slow, we don't assert it. If there is something wrong, "lastProgress"
# may throw error with a high chance and make this test flaky, so we should still be
# able to detect broken codes.
q.lastProgress
q.processAllAvailable()
lastProgress = q.lastProgress
recentProgress = q.recentProgress
status = q.status
self.assertEqual(lastProgress['name'], q.name)
self.assertEqual(lastProgress['id'], q.id)
self.assertTrue(any(p == lastProgress for p in recentProgress))
self.assertTrue(
"message" in status and
"isDataAvailable" in status and
"isTriggerActive" in status)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_await_termination(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream\
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertTrue(q.isActive)
try:
q.awaitTermination("hello")
self.fail("Expected a value exception")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = q.awaitTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_exception(self):
sdf = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
sq = sdf.writeStream.format('memory').queryName('query_explain').start()
try:
sq.processAllAvailable()
self.assertEqual(sq.exception(), None)
finally:
sq.stop()
from pyspark.sql.functions import col, udf
from pyspark.sql.utils import StreamingQueryException
bad_udf = udf(lambda x: 1 / 0)
sq = sdf.select(bad_udf(col("value")))\
.writeStream\
.format('memory')\
.queryName('this_query')\
.start()
try:
# Process some data to fail the query
sq.processAllAvailable()
self.fail("bad udf should fail the query")
except StreamingQueryException as e:
# This is expected
self.assertTrue("ZeroDivisionError" in e.desc)
finally:
sq.stop()
self.assertTrue(type(sq.exception()) is StreamingQueryException)
self.assertTrue("ZeroDivisionError" in sq.exception().desc)
def test_query_manager_await_termination(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream\
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertTrue(q.isActive)
try:
self.spark._wrapped.streams.awaitAnyTermination("hello")
self.fail("Expected a value exception")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = self.spark._wrapped.streams.awaitAnyTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.stop()
shutil.rmtree(tmpPath)
class ForeachWriterTester:
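# Helper for the streaming foreach tests below: every open(), process() and close()
# call is recorded as a small JSON file in a temp directory, so the tests can read
# those directories back and assert on what the writer actually saw.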
def __init__(self, spark):
self.spark = spark
def write_open_event(self, partitionId, epochId):
self._write_event(
self.open_events_dir,
{'partition': partitionId, 'epoch': epochId})
def write_process_event(self, row):
self._write_event(self.process_events_dir, {'value': 'text'})
def write_close_event(self, error):
self._write_event(self.close_events_dir, {'error': str(error)})
def write_input_file(self):
self._write_event(self.input_dir, "text")
def open_events(self):
return self._read_events(self.open_events_dir, 'partition INT, epoch INT')
def process_events(self):
return self._read_events(self.process_events_dir, 'value STRING')
def close_events(self):
return self._read_events(self.close_events_dir, 'error STRING')
def run_streaming_query_on_writer(self, writer, num_files):
self._reset()
try:
sdf = self.spark.readStream.format('text').load(self.input_dir)
sq = sdf.writeStream.foreach(writer).start()
for i in range(num_files):
self.write_input_file()
sq.processAllAvailable()
finally:
self.stop_all()
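# Starts a query with an (intentionally invalid) writer and expects it to fail;
# if `msg` is given, the raised exception message must contain it.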
def assert_invalid_writer(self, writer, msg=None):
self._reset()
try:
sdf = self.spark.readStream.format('text').load(self.input_dir)
sq = sdf.writeStream.foreach(writer).start()
self.write_input_file()
sq.processAllAvailable()
self.fail("invalid writer %s did not fail the query" % str(writer)) # not expected
except Exception as e:
if msg:
assert msg in str(e), "%s not in %s" % (msg, str(e))
finally:
self.stop_all()
def stop_all(self):
for q in self.spark._wrapped.streams.active:
q.stop()
def _reset(self):
self.input_dir = tempfile.mkdtemp()
self.open_events_dir = tempfile.mkdtemp()
self.process_events_dir = tempfile.mkdtemp()
self.close_events_dir = tempfile.mkdtemp()
def _read_events(self, dir, json):
rows = self.spark.read.schema(json).json(dir).collect()
dicts = [row.asDict() for row in rows]
return dicts
def _write_event(self, dir, event):
import uuid
with open(os.path.join(dir, str(uuid.uuid4())), 'w') as f:
f.write("%s\n" % str(event))
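# Only the event directories are pickled: the tester is captured by writers that get
# serialized to executors, and the SparkSession itself cannot be pickled.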
def __getstate__(self):
return (self.open_events_dir, self.process_events_dir, self.close_events_dir)
def __setstate__(self, state):
self.open_events_dir, self.process_events_dir, self.close_events_dir = state
def test_streaming_foreach_with_simple_function(self):
tester = self.ForeachWriterTester(self.spark)
def foreach_func(row):
tester.write_process_event(row)
tester.run_streaming_query_on_writer(foreach_func, 2)
self.assertEqual(len(tester.process_events()), 2)
def test_streaming_foreach_with_basic_open_process_close(self):
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def open(self, partitionId, epochId):
tester.write_open_event(partitionId, epochId)
return True
def process(self, row):
tester.write_process_event(row)
def close(self, error):
tester.write_close_event(error)
tester.run_streaming_query_on_writer(ForeachWriter(), 2)
open_events = tester.open_events()
self.assertEqual(len(open_events), 2)
self.assertSetEqual(set([e['epoch'] for e in open_events]), {0, 1})
self.assertEqual(len(tester.process_events()), 2)
close_events = tester.close_events()
self.assertEqual(len(close_events), 2)
self.assertSetEqual(set([e['error'] for e in close_events]), {'None'})
def test_streaming_foreach_with_open_returning_false(self):
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def open(self, partition_id, epoch_id):
tester.write_open_event(partition_id, epoch_id)
return False
def process(self, row):
tester.write_process_event(row)
def close(self, error):
tester.write_close_event(error)
tester.run_streaming_query_on_writer(ForeachWriter(), 2)
self.assertEqual(len(tester.open_events()), 2)
self.assertEqual(len(tester.process_events()), 0) # no row was processed
close_events = tester.close_events()
self.assertEqual(len(close_events), 2)
self.assertSetEqual(set([e['error'] for e in close_events]), {'None'})
def test_streaming_foreach_without_open_method(self):
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def process(self, row):
tester.write_process_event(row)
def close(self, error):
tester.write_close_event(error)
tester.run_streaming_query_on_writer(ForeachWriter(), 2)
self.assertEqual(len(tester.open_events()), 0) # no open events
self.assertEqual(len(tester.process_events()), 2)
self.assertEqual(len(tester.close_events()), 2)
def test_streaming_foreach_without_close_method(self):
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def open(self, partition_id, epoch_id):
tester.write_open_event(partition_id, epoch_id)
return True
def process(self, row):
tester.write_process_event(row)
tester.run_streaming_query_on_writer(ForeachWriter(), 2)
self.assertEqual(len(tester.open_events()), 2)  # open events are still written
self.assertEqual(len(tester.process_events()), 2)
self.assertEqual(len(tester.close_events()), 0)
def test_streaming_foreach_without_open_and_close_methods(self):
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def process(self, row):
tester.write_process_event(row)
tester.run_streaming_query_on_writer(ForeachWriter(), 2)
self.assertEqual(len(tester.open_events()), 0) # no open events
self.assertEqual(len(tester.process_events()), 2)
self.assertEqual(len(tester.close_events()), 0)
def test_streaming_foreach_with_process_throwing_error(self):
from pyspark.sql.utils import StreamingQueryException
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def process(self, row):
raise Exception("test error")
def close(self, error):
tester.write_close_event(error)
try:
tester.run_streaming_query_on_writer(ForeachWriter(), 1)
self.fail("bad writer did not fail the query") # this is not expected
except StreamingQueryException as e:
# TODO: Verify whether original error message is inside the exception
pass
self.assertEqual(len(tester.process_events()), 0) # no row was processed
close_events = tester.close_events()
self.assertEqual(len(close_events), 1)
# TODO: Verify whether original error message is inside the exception
def test_streaming_foreach_with_invalid_writers(self):
tester = self.ForeachWriterTester(self.spark)
def func_with_iterator_input(iter):
for x in iter:
print(x)
tester.assert_invalid_writer(func_with_iterator_input)
class WriterWithoutProcess:
def open(self, partition):
pass
tester.assert_invalid_writer(WriterWithoutProcess(), "does not have a 'process'")
class WriterWithNonCallableProcess():
process = True
tester.assert_invalid_writer(WriterWithNonCallableProcess(),
"'process' in provided object is not callable")
class WriterWithNoParamProcess():
def process(self):
pass
tester.assert_invalid_writer(WriterWithNoParamProcess())
# Abstract class for tests below
class WithProcess():
def process(self, row):
pass
class WriterWithNonCallableOpen(WithProcess):
open = True
tester.assert_invalid_writer(WriterWithNonCallableOpen(),
"'open' in provided object is not callable")
class WriterWithNoParamOpen(WithProcess):
def open(self):
pass
tester.assert_invalid_writer(WriterWithNoParamOpen())
class WriterWithNonCallableClose(WithProcess):
close = True
tester.assert_invalid_writer(WriterWithNonCallableClose(),
"'close' in provided object is not callable")
def test_streaming_foreachBatch(self):
q = None
collected = dict()
def collectBatch(batch_df, batch_id):
collected[batch_id] = batch_df.collect()
try:
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
q = df.writeStream.foreachBatch(collectBatch).start()
q.processAllAvailable()
self.assertTrue(0 in collected)
self.assertEqual(len(collected[0]), 2)
finally:
if q:
q.stop()
def test_streaming_foreachBatch_propagates_python_errors(self):
from pyspark.sql.utils import StreamingQueryException
q = None
def collectBatch(df, id):
raise Exception("this should fail the query")
try:
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
q = df.writeStream.foreachBatch(collectBatch).start()
q.processAllAvailable()
self.fail("Expected a failure")
except StreamingQueryException as e:
self.assertTrue("this should fail" in str(e))
finally:
if q:
q.stop()
def test_help_command(self):
# Regression test for SPARK-5464
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
# render_doc() reproduces the help() exception without printing output
pydoc.render_doc(df)
pydoc.render_doc(df.foo)
pydoc.render_doc(df.take(1))
def test_access_column(self):
df = self.df
self.assertTrue(isinstance(df.key, Column))
self.assertTrue(isinstance(df['key'], Column))
self.assertTrue(isinstance(df[0], Column))
self.assertRaises(IndexError, lambda: df[2])
self.assertRaises(AnalysisException, lambda: df["bad_key"])
self.assertRaises(TypeError, lambda: df[{}])
def test_column_name_with_non_ascii(self):
if sys.version >= '3':
columnName = "数量"
self.assertTrue(isinstance(columnName, str))
else:
columnName = unicode("数量", "utf-8")
self.assertTrue(isinstance(columnName, unicode))
schema = StructType([StructField(columnName, LongType(), True)])
df = self.spark.createDataFrame([(1,)], schema)
self.assertEqual(schema, df.schema)
self.assertEqual("DataFrame[数量: bigint]", str(df))
self.assertEqual([("数量", 'bigint')], df.dtypes)
self.assertEqual(1, df.select("数量").first()[0])
self.assertEqual(1, df.select(df["数量"]).first()[0])
def test_access_nested_types(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.l.getItem(0)).first()[0])
self.assertEqual(1, df.select(df.r.a).first()[0])
self.assertEqual("b", df.select(df.r.getField("b")).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
self.assertEqual("v", df.select(df.d.getItem("k")).first()[0])
def test_field_accessor(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.r["a"]).first()[0])
self.assertEqual(1, df.select(df["r.a"]).first()[0])
self.assertEqual("b", df.select(df.r["b"]).first()[0])
self.assertEqual("b", df.select(df["r.b"]).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
def test_infer_long_type(self):
longrow = [Row(f1='a', f2=100000000000000)]
df = self.sc.parallelize(longrow).toDF()
self.assertEqual(df.schema.fields[1].dataType, LongType())
# saving this as Parquet caused issues as well.
output_dir = os.path.join(self.tempdir.name, "infer_long_type")
df.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
self.assertEqual('a', df1.first().f1)
self.assertEqual(100000000000000, df1.first().f2)
self.assertEqual(_infer_type(1), LongType())
self.assertEqual(_infer_type(2**10), LongType())
self.assertEqual(_infer_type(2**20), LongType())
self.assertEqual(_infer_type(2**31 - 1), LongType())
self.assertEqual(_infer_type(2**31), LongType())
self.assertEqual(_infer_type(2**61), LongType())
self.assertEqual(_infer_type(2**71), LongType())
def test_merge_type(self):
self.assertEqual(_merge_type(LongType(), NullType()), LongType())
self.assertEqual(_merge_type(NullType(), LongType()), LongType())
self.assertEqual(_merge_type(LongType(), LongType()), LongType())
self.assertEqual(_merge_type(
ArrayType(LongType()),
ArrayType(LongType())
), ArrayType(LongType()))
with self.assertRaisesRegexp(TypeError, 'element in array'):
_merge_type(ArrayType(LongType()), ArrayType(DoubleType()))
self.assertEqual(_merge_type(
MapType(StringType(), LongType()),
MapType(StringType(), LongType())
), MapType(StringType(), LongType()))
with self.assertRaisesRegexp(TypeError, 'key of map'):
_merge_type(
MapType(StringType(), LongType()),
MapType(DoubleType(), LongType()))
with self.assertRaisesRegexp(TypeError, 'value of map'):
_merge_type(
MapType(StringType(), LongType()),
MapType(StringType(), DoubleType()))
self.assertEqual(_merge_type(
StructType([StructField("f1", LongType()), StructField("f2", StringType())]),
StructType([StructField("f1", LongType()), StructField("f2", StringType())])
), StructType([StructField("f1", LongType()), StructField("f2", StringType())]))
with self.assertRaisesRegexp(TypeError, 'field f1'):
_merge_type(
StructType([StructField("f1", LongType()), StructField("f2", StringType())]),
StructType([StructField("f1", DoubleType()), StructField("f2", StringType())]))
self.assertEqual(_merge_type(
StructType([StructField("f1", StructType([StructField("f2", LongType())]))]),
StructType([StructField("f1", StructType([StructField("f2", LongType())]))])
), StructType([StructField("f1", StructType([StructField("f2", LongType())]))]))
with self.assertRaisesRegexp(TypeError, 'field f2 in field f1'):
_merge_type(
StructType([StructField("f1", StructType([StructField("f2", LongType())]))]),
StructType([StructField("f1", StructType([StructField("f2", StringType())]))]))
self.assertEqual(_merge_type(
StructType([StructField("f1", ArrayType(LongType())), StructField("f2", StringType())]),
StructType([StructField("f1", ArrayType(LongType())), StructField("f2", StringType())])
), StructType([StructField("f1", ArrayType(LongType())), StructField("f2", StringType())]))
with self.assertRaisesRegexp(TypeError, 'element in array field f1'):
_merge_type(
StructType([
StructField("f1", ArrayType(LongType())),
StructField("f2", StringType())]),
StructType([
StructField("f1", ArrayType(DoubleType())),
StructField("f2", StringType())]))
self.assertEqual(_merge_type(
StructType([
StructField("f1", MapType(StringType(), LongType())),
StructField("f2", StringType())]),
StructType([
StructField("f1", MapType(StringType(), LongType())),
StructField("f2", StringType())])
), StructType([
StructField("f1", MapType(StringType(), LongType())),
StructField("f2", StringType())]))
with self.assertRaisesRegexp(TypeError, 'value of map field f1'):
_merge_type(
StructType([
StructField("f1", MapType(StringType(), LongType())),
StructField("f2", StringType())]),
StructType([
StructField("f1", MapType(StringType(), DoubleType())),
StructField("f2", StringType())]))
self.assertEqual(_merge_type(
StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))]),
StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))])
), StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))]))
with self.assertRaisesRegexp(TypeError, 'key of map element in array field f1'):
_merge_type(
StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))]),
StructType([StructField("f1", ArrayType(MapType(DoubleType(), LongType())))])
)
def test_filter_with_datetime(self):
time = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000)
date = time.date()
row = Row(date=date, time=time)
df = self.spark.createDataFrame([row])
self.assertEqual(1, df.filter(df.date == date).count())
self.assertEqual(1, df.filter(df.time == time).count())
self.assertEqual(0, df.filter(df.date > date).count())
self.assertEqual(0, df.filter(df.time > time).count())
def test_filter_with_datetime_timezone(self):
dt1 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(0))
dt2 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(1))
row = Row(date=dt1)
df = self.spark.createDataFrame([row])
self.assertEqual(0, df.filter(df.date == dt2).count())
self.assertEqual(1, df.filter(df.date > dt2).count())
self.assertEqual(0, df.filter(df.date < dt2).count())
def test_time_with_timezone(self):
day = datetime.date.today()
now = datetime.datetime.now()
ts = time.mktime(now.timetuple())
# class in __main__ is not serializable
from pyspark.sql.tests import UTCOffsetTimezone
utc = UTCOffsetTimezone()
utcnow = datetime.datetime.utcfromtimestamp(ts) # without microseconds
# add microseconds to utcnow (keeping year,month,day,hour,minute,second)
utcnow = datetime.datetime(*(utcnow.timetuple()[:6] + (now.microsecond, utc)))
df = self.spark.createDataFrame([(day, now, utcnow)])
day1, now1, utcnow1 = df.first()
self.assertEqual(day1, day)
self.assertEqual(now, now1)
self.assertEqual(now, utcnow1)
# regression test for SPARK-19561
def test_datetime_at_epoch(self):
epoch = datetime.datetime.fromtimestamp(0)
df = self.spark.createDataFrame([Row(date=epoch)])
first = df.select('date', lit(epoch).alias('lit_date')).first()
self.assertEqual(first['date'], epoch)
self.assertEqual(first['lit_date'], epoch)
def test_dayofweek(self):
from pyspark.sql.functions import dayofweek
dt = datetime.datetime(2017, 11, 6)
df = self.spark.createDataFrame([Row(date=dt)])
row = df.select(dayofweek(df.date)).first()
self.assertEqual(row[0], 2)
def test_decimal(self):
from decimal import Decimal
schema = StructType([StructField("decimal", DecimalType(10, 5))])
df = self.spark.createDataFrame([(Decimal("3.14159"),)], schema)
row = df.select(df.decimal + 1).first()
self.assertEqual(row[0], Decimal("4.14159"))
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.parquet(tmpPath)
df2 = self.spark.read.parquet(tmpPath)
row = df2.first()
self.assertEqual(row[0], Decimal("3.14159"))
def test_dropna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# shouldn't drop a non-null row
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, 80.1)], schema).dropna().count(),
1)
# dropping rows with a single null value
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna().count(),
0)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='any').count(),
0)
# if how = 'all', only drop rows if all values are null
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='all').count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(None, None, None)], schema).dropna(how='all').count(),
0)
# how and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
0)
# threshold
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(thresh=2).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(thresh=2).count(),
0)
# threshold and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 180.9)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
0)
# thresh should take precedence over how
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(
how='any', thresh=2, subset=['name', 'age']).count(),
1)
def test_fillna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True),
StructField("spy", BooleanType(), True)])
# fillna shouldn't change non-null values
row = self.spark.createDataFrame([(u'Alice', 10, 80.1, True)], schema).fillna(50).first()
self.assertEqual(row.age, 10)
# fillna with int
row = self.spark.createDataFrame([(u'Alice', None, None, None)], schema).fillna(50).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.0)
# fillna with double
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(50.1).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.1)
# fillna with bool
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(True).first()
self.assertEqual(row.age, None)
self.assertEqual(row.spy, True)
# fillna with string
row = self.spark.createDataFrame([(None, None, None, None)], schema).fillna("hello").first()
self.assertEqual(row.name, u"hello")
self.assertEqual(row.age, None)
# fillna with subset specified for numeric cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(50, subset=['name', 'age']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, 50)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for string cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna("haha", subset=['name', 'age']).first()
self.assertEqual(row.name, "haha")
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for bool cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(True, subset=['name', 'spy']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, True)
# fillna with dictionary for boolean types
row = self.spark.createDataFrame([Row(a=None), Row(a=True)]).fillna({"a": True}).first()
self.assertEqual(row.a, True)
def test_bitwise_operations(self):
from pyspark.sql import functions
row = Row(a=170, b=75)
df = self.spark.createDataFrame([row])
result = df.select(df.a.bitwiseAND(df.b)).collect()[0].asDict()
self.assertEqual(170 & 75, result['(a & b)'])
result = df.select(df.a.bitwiseOR(df.b)).collect()[0].asDict()
self.assertEqual(170 | 75, result['(a | b)'])
result = df.select(df.a.bitwiseXOR(df.b)).collect()[0].asDict()
self.assertEqual(170 ^ 75, result['(a ^ b)'])
result = df.select(functions.bitwiseNOT(df.b)).collect()[0].asDict()
self.assertEqual(~75, result['~b'])
def test_expr(self):
from pyspark.sql import functions
row = Row(a="length string", b=75)
df = self.spark.createDataFrame([row])
result = df.select(functions.expr("length(a)")).collect()[0].asDict()
self.assertEqual(13, result["length(a)"])
def test_repartitionByRange_dataframe(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
df1 = self.spark.createDataFrame(
[(u'Bob', 27, 66.0), (u'Alice', 10, 10.0), (u'Bob', 10, 66.0)], schema)
df2 = self.spark.createDataFrame(
[(u'Alice', 10, 10.0), (u'Bob', 10, 66.0), (u'Bob', 27, 66.0)], schema)
# test repartitionByRange(numPartitions, *cols)
df3 = df1.repartitionByRange(2, "name", "age")
self.assertEqual(df3.rdd.getNumPartitions(), 2)
self.assertEqual(df3.rdd.first(), df2.rdd.first())
self.assertEqual(df3.rdd.take(3), df2.rdd.take(3))
# test repartitionByRange(numPartitions, *cols)
df4 = df1.repartitionByRange(3, "name", "age")
self.assertEqual(df4.rdd.getNumPartitions(), 3)
self.assertEqual(df4.rdd.first(), df2.rdd.first())
self.assertEqual(df4.rdd.take(3), df2.rdd.take(3))
# test repartitionByRange(*cols)
df5 = df1.repartitionByRange("name", "age")
self.assertEqual(df5.rdd.first(), df2.rdd.first())
self.assertEqual(df5.rdd.take(3), df2.rdd.take(3))
def test_replace(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# replace with int
row = self.spark.createDataFrame([(u'Alice', 10, 10.0)], schema).replace(10, 20).first()
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 20.0)
# replace with double
row = self.spark.createDataFrame(
[(u'Alice', 80, 80.0)], schema).replace(80.0, 82.1).first()
self.assertEqual(row.age, 82)
self.assertEqual(row.height, 82.1)
# replace with string
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(u'Alice', u'Ann').first()
self.assertEqual(row.name, u"Ann")
self.assertEqual(row.age, 10)
# replace with subset specified by a string of a column name w/ actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='age').first()
self.assertEqual(row.age, 20)
# replace with subset specified by a string of a column name w/o actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='height').first()
self.assertEqual(row.age, 10)
# replace with subset specified with one column replaced, another column not in subset
# stays unchanged.
row = self.spark.createDataFrame(
[(u'Alice', 10, 10.0)], schema).replace(10, 20, subset=['name', 'age']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 10.0)
# replace with subset specified but no column will be replaced
row = self.spark.createDataFrame(
[(u'Alice', 10, None)], schema).replace(10, 20, subset=['name', 'height']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 10)
self.assertEqual(row.height, None)
# replace with lists
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace([u'Alice'], [u'Ann']).first()
self.assertTupleEqual(row, (u'Ann', 10, 80.1))
# replace with dict
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}).first()
self.assertTupleEqual(row, (u'Alice', 11, 80.1))
# test backward compatibility with dummy value
dummy_value = 1
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({'Alice': 'Bob'}, dummy_value).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# test dict with mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: -10, 80.1: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', -10, 90.5))
# replace with tuples
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace((u'Alice', ), (u'Bob', )).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# replace multiple columns
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80.0), (20, 90)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.0))
# test for mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80), (20, 90.5)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace({10: 20, 80: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
# replace with boolean
row = (self
.spark.createDataFrame([(u'Alice', 10, 80.0)], schema)
.selectExpr("name = 'Bob'", 'age <= 15')
.replace(False, True).first())
self.assertTupleEqual(row, (True, True))
# replace string with None and then drop None rows
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(u'Alice', None).dropna()
self.assertEqual(row.count(), 0)
# replace with number and None
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace([10, 80], [20, None]).first()
self.assertTupleEqual(row, (u'Alice', 20, None))
# should fail if subset is not list, tuple or None
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}, subset=1).first()
# should fail if to_replace and value have different length
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", "Bob"], ["Eve"]).first()
# should fail when an unexpected type is received
with self.assertRaises(ValueError):
from datetime import datetime
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(datetime.now(), datetime.now()).first()
# should fail if provided mixed type replacements
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", 10], ["Eve", 20]).first()
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({u"Alice": u"Bob", 10: 20}).first()
with self.assertRaisesRegexp(
TypeError,
'value argument is required when to_replace is not a dictionary.'):
self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(["Alice", "Bob"]).first()
def test_capture_analysis_exception(self):
self.assertRaises(AnalysisException, lambda: self.spark.sql("select abc"))
self.assertRaises(AnalysisException, lambda: self.df.selectExpr("a + b"))
def test_capture_parse_exception(self):
self.assertRaises(ParseException, lambda: self.spark.sql("abc"))
def test_capture_illegalargument_exception(self):
self.assertRaisesRegexp(IllegalArgumentException, "Setting negative mapred.reduce.tasks",
lambda: self.spark.sql("SET mapred.reduce.tasks=-1"))
df = self.spark.createDataFrame([(1, 2)], ["a", "b"])
self.assertRaisesRegexp(IllegalArgumentException, "1024 is not in the permitted values",
lambda: df.select(sha2(df.a, 1024)).collect())
try:
df.select(sha2(df.a, 1024)).collect()
except IllegalArgumentException as e:
self.assertRegexpMatches(e.desc, "1024 is not in the permitted values")
self.assertRegexpMatches(e.stackTrace,
"org.apache.spark.sql.functions")
def test_with_column_with_existing_name(self):
keys = self.df.withColumn("key", self.df.key).select("key").collect()
self.assertEqual([r.key for r in keys], list(range(100)))
# regression test for SPARK-10417
def test_column_iterator(self):
def foo():
for x in self.df.key:
break
self.assertRaises(TypeError, foo)
# add test for SPARK-10577 (test broadcast join hint)
def test_functions_broadcast(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
# equijoin - should be converted into broadcast join
plan1 = df1.join(broadcast(df2), "key")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan1.toString().count("BroadcastHashJoin"))
# no join key -- should not be a broadcast join
plan2 = df1.crossJoin(broadcast(df2))._jdf.queryExecution().executedPlan()
self.assertEqual(0, plan2.toString().count("BroadcastHashJoin"))
# planner should not crash without a join
broadcast(df1)._jdf.queryExecution().executedPlan()
def test_generic_hints(self):
from pyspark.sql import DataFrame
df1 = self.spark.range(10e10).toDF("id")
df2 = self.spark.range(10e10).toDF("id")
self.assertIsInstance(df1.hint("broadcast"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", []), DataFrame)
# Dummy rules
self.assertIsInstance(df1.hint("broadcast", "foo", "bar"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", ["foo", "bar"]), DataFrame)
plan = df1.join(df2.hint("broadcast"), "id")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan.toString().count("BroadcastHashJoin"))
def test_sample(self):
self.assertRaisesRegexp(
TypeError,
"should be a bool, float and number",
lambda: self.spark.range(1).sample())
self.assertRaises(
TypeError,
lambda: self.spark.range(1).sample("a"))
self.assertRaises(
TypeError,
lambda: self.spark.range(1).sample(seed="abc"))
self.assertRaises(
IllegalArgumentException,
lambda: self.spark.range(1).sample(-1.0))
def test_toDF_with_schema_string(self):
data = [Row(key=i, value=str(i)) for i in range(100)]
rdd = self.sc.parallelize(data, 5)
df = rdd.toDF("key: int, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:int,value:string>")
self.assertEqual(df.collect(), data)
# different but compatible field types can be used.
df = rdd.toDF("key: string, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:string,value:string>")
self.assertEqual(df.collect(), [Row(key=str(i), value=str(i)) for i in range(100)])
# field names can differ.
df = rdd.toDF(" a: int, b: string ")
self.assertEqual(df.schema.simpleString(), "struct<a:int,b:string>")
self.assertEqual(df.collect(), data)
# number of fields must match.
self.assertRaisesRegexp(Exception, "Length of object",
lambda: rdd.toDF("key: int").collect())
# field types mismatch will cause exception at runtime.
self.assertRaisesRegexp(Exception, "FloatType can not accept",
lambda: rdd.toDF("key: float, value: string").collect())
# flat schema values will be wrapped into row.
df = rdd.map(lambda row: row.key).toDF("int")
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
# users can use DataType directly instead of data type string.
df = rdd.map(lambda row: row.key).toDF(IntegerType())
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
def test_join_without_on(self):
df1 = self.spark.range(1).toDF("a")
df2 = self.spark.range(1).toDF("b")
with self.sql_conf({"spark.sql.crossJoin.enabled": False}):
self.assertRaises(AnalysisException, lambda: df1.join(df2, how="inner").collect())
with self.sql_conf({"spark.sql.crossJoin.enabled": True}):
actual = df1.join(df2, how="inner").collect()
expected = [Row(a=0, b=0)]
self.assertEqual(actual, expected)
# Regression test for invalid join methods when on is None, SPARK-14761
def test_invalid_join_method(self):
df1 = self.spark.createDataFrame([("Alice", 5), ("Bob", 8)], ["name", "age"])
df2 = self.spark.createDataFrame([("Alice", 80), ("Bob", 90)], ["name", "height"])
self.assertRaises(IllegalArgumentException, lambda: df1.join(df2, how="invalid-join-type"))
# Cartesian products require cross join syntax
def test_require_cross(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
# joins without conditions require cross join syntax
self.assertRaises(AnalysisException, lambda: df1.join(df2).collect())
# works with crossJoin
self.assertEqual(1, df1.crossJoin(df2).count())
def test_conf(self):
spark = self.spark
spark.conf.set("bogo", "sipeo")
self.assertEqual(spark.conf.get("bogo"), "sipeo")
spark.conf.set("bogo", "ta")
self.assertEqual(spark.conf.get("bogo"), "ta")
self.assertEqual(spark.conf.get("bogo", "not.read"), "ta")
self.assertEqual(spark.conf.get("not.set", "ta"), "ta")
self.assertRaisesRegexp(Exception, "not.set", lambda: spark.conf.get("not.set"))
spark.conf.unset("bogo")
self.assertEqual(spark.conf.get("bogo", "colombia"), "colombia")
self.assertEqual(spark.conf.get("hyukjin", None), None)
# This returns 'STATIC' because it's the default value of
# 'spark.sql.sources.partitionOverwriteMode', and `defaultValue` in
# `spark.conf.get` is unset.
self.assertEqual(spark.conf.get("spark.sql.sources.partitionOverwriteMode"), "STATIC")
# This returns None because 'spark.sql.sources.partitionOverwriteMode' is unset, but
# `defaultValue` in `spark.conf.get` is set to None.
self.assertEqual(spark.conf.get("spark.sql.sources.partitionOverwriteMode", None), None)
def test_current_database(self):
spark = self.spark
spark.catalog._reset()
self.assertEquals(spark.catalog.currentDatabase(), "default")
spark.sql("CREATE DATABASE some_db")
spark.catalog.setCurrentDatabase("some_db")
self.assertEquals(spark.catalog.currentDatabase(), "some_db")
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.setCurrentDatabase("does_not_exist"))
def test_list_databases(self):
spark = self.spark
spark.catalog._reset()
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(databases, ["default"])
spark.sql("CREATE DATABASE some_db")
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(sorted(databases), ["default", "some_db"])
def test_list_tables(self):
from pyspark.sql.catalog import Table
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
self.assertEquals(spark.catalog.listTables(), [])
self.assertEquals(spark.catalog.listTables("some_db"), [])
spark.createDataFrame([(1, 1)]).createOrReplaceTempView("temp_tab")
spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
spark.sql("CREATE TABLE some_db.tab2 (name STRING, age INT) USING parquet")
tables = sorted(spark.catalog.listTables(), key=lambda t: t.name)
tablesDefault = sorted(spark.catalog.listTables("default"), key=lambda t: t.name)
tablesSomeDb = sorted(spark.catalog.listTables("some_db"), key=lambda t: t.name)
self.assertEquals(tables, tablesDefault)
self.assertEquals(len(tables), 2)
self.assertEquals(len(tablesSomeDb), 2)
self.assertEquals(tables[0], Table(
name="tab1",
database="default",
description=None,
tableType="MANAGED",
isTemporary=False))
self.assertEquals(tables[1], Table(
name="temp_tab",
database=None,
description=None,
tableType="TEMPORARY",
isTemporary=True))
self.assertEquals(tablesSomeDb[0], Table(
name="tab2",
database="some_db",
description=None,
tableType="MANAGED",
isTemporary=False))
self.assertEquals(tablesSomeDb[1], Table(
name="temp_tab",
database=None,
description=None,
tableType="TEMPORARY",
isTemporary=True))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listTables("does_not_exist"))
def test_list_functions(self):
from pyspark.sql.catalog import Function
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
functions = dict((f.name, f) for f in spark.catalog.listFunctions())
functionsDefault = dict((f.name, f) for f in spark.catalog.listFunctions("default"))
self.assertTrue(len(functions) > 200)
self.assertTrue("+" in functions)
self.assertTrue("like" in functions)
self.assertTrue("month" in functions)
self.assertTrue("to_date" in functions)
self.assertTrue("to_timestamp" in functions)
self.assertTrue("to_unix_timestamp" in functions)
self.assertTrue("current_database" in functions)
self.assertEquals(functions["+"], Function(
name="+",
description=None,
className="org.apache.spark.sql.catalyst.expressions.Add",
isTemporary=True))
self.assertEquals(functions, functionsDefault)
spark.catalog.registerFunction("temp_func", lambda x: str(x))
spark.sql("CREATE FUNCTION func1 AS 'org.apache.spark.data.bricks'")
spark.sql("CREATE FUNCTION some_db.func2 AS 'org.apache.spark.data.bricks'")
newFunctions = dict((f.name, f) for f in spark.catalog.listFunctions())
newFunctionsSomeDb = dict((f.name, f) for f in spark.catalog.listFunctions("some_db"))
self.assertTrue(set(functions).issubset(set(newFunctions)))
self.assertTrue(set(functions).issubset(set(newFunctionsSomeDb)))
self.assertTrue("temp_func" in newFunctions)
self.assertTrue("func1" in newFunctions)
self.assertTrue("func2" not in newFunctions)
self.assertTrue("temp_func" in newFunctionsSomeDb)
self.assertTrue("func1" not in newFunctionsSomeDb)
self.assertTrue("func2" in newFunctionsSomeDb)
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listFunctions("does_not_exist"))
def test_list_columns(self):
from pyspark.sql.catalog import Column
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
spark.sql("CREATE TABLE some_db.tab2 (nickname STRING, tolerance FLOAT) USING parquet")
columns = sorted(spark.catalog.listColumns("tab1"), key=lambda c: c.name)
columnsDefault = sorted(spark.catalog.listColumns("tab1", "default"), key=lambda c: c.name)
self.assertEquals(columns, columnsDefault)
self.assertEquals(len(columns), 2)
self.assertEquals(columns[0], Column(
name="age",
description=None,
dataType="int",
nullable=True,
isPartition=False,
isBucket=False))
self.assertEquals(columns[1], Column(
name="name",
description=None,
dataType="string",
nullable=True,
isPartition=False,
isBucket=False))
columns2 = sorted(spark.catalog.listColumns("tab2", "some_db"), key=lambda c: c.name)
self.assertEquals(len(columns2), 2)
self.assertEquals(columns2[0], Column(
name="nickname",
description=None,
dataType="string",
nullable=True,
isPartition=False,
isBucket=False))
self.assertEquals(columns2[1], Column(
name="tolerance",
description=None,
dataType="float",
nullable=True,
isPartition=False,
isBucket=False))
self.assertRaisesRegexp(
AnalysisException,
"tab2",
lambda: spark.catalog.listColumns("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listColumns("does_not_exist"))
def test_cache(self):
spark = self.spark
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab1")
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab2")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab1")
self.assertTrue(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab2")
spark.catalog.uncacheTable("tab1")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertTrue(spark.catalog.isCached("tab2"))
spark.catalog.clearCache()
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.isCached("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.cacheTable("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.uncacheTable("does_not_exist"))
def test_read_text_file_list(self):
df = self.spark.read.text(['python/test_support/sql/text-test.txt',
'python/test_support/sql/text-test.txt'])
count = df.count()
self.assertEquals(count, 4)
def test_BinaryType_serialization(self):
# Pyrolite version <= 4.9 could not serialize BinaryType with Python3 SPARK-17808
# The empty bytearray is test for SPARK-21534.
schema = StructType([StructField('mybytes', BinaryType())])
data = [[bytearray(b'here is my data')],
[bytearray(b'and here is some more')],
[bytearray(b'')]]
df = self.spark.createDataFrame(data, schema=schema)
df.collect()
# test for SPARK-16542
def test_array_types(self):
# This test needs to make sure that the Scala type selected is at least
# as large as the corresponding Python type. This is necessary because Python's
# array types depend on the C implementation on the machine, so there
# is no machine-independent correspondence between Python's array types
# and Scala types.
# See: https://docs.python.org/2/library/array.html
def assertCollectSuccess(typecode, value):
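            # Round-trip a one-element array of the given typecode and check the value survives.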
row = Row(myarray=array.array(typecode, [value]))
df = self.spark.createDataFrame([row])
self.assertEqual(df.first()["myarray"][0], value)
# supported string types
#
        # String types in Python's array module are "u" for Py_UNICODE and "c" for char.
        # "u" will be removed in Python 4, and "c" is not supported in Python 3.
supported_string_types = []
if sys.version_info[0] < 4:
supported_string_types += ['u']
# test unicode
assertCollectSuccess('u', u'a')
if sys.version_info[0] < 3:
supported_string_types += ['c']
# test string
assertCollectSuccess('c', 'a')
# supported float and double
#
# Test max, min, and precision for float and double, assuming IEEE 754
# floating-point format.
supported_fractional_types = ['f', 'd']
assertCollectSuccess('f', ctypes.c_float(1e+38).value)
assertCollectSuccess('f', ctypes.c_float(1e-38).value)
assertCollectSuccess('f', ctypes.c_float(1.123456).value)
assertCollectSuccess('d', sys.float_info.max)
assertCollectSuccess('d', sys.float_info.min)
assertCollectSuccess('d', sys.float_info.epsilon)
# supported signed int types
#
        # The size of C types changes across implementations, so we need to make
        # sure that there is no overflow error on the platform running this test.
supported_signed_int_types = list(
set(_array_signed_int_typecode_ctype_mappings.keys())
.intersection(set(_array_type_mappings.keys())))
for t in supported_signed_int_types:
ctype = _array_signed_int_typecode_ctype_mappings[t]
max_val = 2 ** (ctypes.sizeof(ctype) * 8 - 1)
assertCollectSuccess(t, max_val - 1)
assertCollectSuccess(t, -max_val)
# supported unsigned int types
#
# JVM does not have unsigned types. We need to be very careful to make
# sure that there is no overflow error.
supported_unsigned_int_types = list(
set(_array_unsigned_int_typecode_ctype_mappings.keys())
.intersection(set(_array_type_mappings.keys())))
for t in supported_unsigned_int_types:
ctype = _array_unsigned_int_typecode_ctype_mappings[t]
assertCollectSuccess(t, 2 ** (ctypes.sizeof(ctype) * 8) - 1)
# all supported types
#
# Make sure the types tested above:
# 1. are all supported types
# 2. cover all supported types
supported_types = (supported_string_types +
supported_fractional_types +
supported_signed_int_types +
supported_unsigned_int_types)
self.assertEqual(set(supported_types), set(_array_type_mappings.keys()))
# all unsupported types
#
        # The keys in _array_type_mappings form a complete list of all supported types,
        # and types not in _array_type_mappings are considered unsupported.
        # `array.typecodes` is not available in Python 2.
if sys.version_info[0] < 3:
all_types = set(['c', 'b', 'B', 'u', 'h', 'H', 'i', 'I', 'l', 'L', 'f', 'd'])
else:
all_types = set(array.typecodes)
unsupported_types = all_types - set(supported_types)
# test unsupported types
for t in unsupported_types:
with self.assertRaises(TypeError):
a = array.array(t)
self.spark.createDataFrame([Row(myarray=a)]).collect()
def test_bucketed_write(self):
data = [
(1, "foo", 3.0), (2, "foo", 5.0),
(3, "bar", -1.0), (4, "bar", 6.0),
]
df = self.spark.createDataFrame(data, ["x", "y", "z"])
def count_bucketed_cols(names, table="pyspark_bucket"):
"""Given a sequence of column names and a table name
query the catalog and return number o columns which are
used for bucketing
"""
cols = self.spark.catalog.listColumns(table)
num = len([c for c in cols if c.name in names and c.isBucket])
return num
# Test write with one bucketing column
df.write.bucketBy(3, "x").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x"]), 1)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write two bucketing columns
df.write.bucketBy(3, "x", "y").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x", "y"]), 2)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort
df.write.bucketBy(2, "x").sortBy("z").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x"]), 1)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with a list of columns
df.write.bucketBy(3, ["x", "y"]).mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x", "y"]), 2)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort with a list of columns
(df.write.bucketBy(2, "x")
.sortBy(["y", "z"])
.mode("overwrite").saveAsTable("pyspark_bucket"))
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort with multiple columns
(df.write.bucketBy(2, "x")
.sortBy("y", "z")
.mode("overwrite").saveAsTable("pyspark_bucket"))
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
def _to_pandas(self):
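        # Helper shared by the toPandas tests: build a small typed DataFrame and convert it to pandas.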
from datetime import datetime, date
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", BooleanType()).add("d", FloatType())\
.add("dt", DateType()).add("ts", TimestampType())
data = [
(1, "foo", True, 3.0, date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1)),
(2, "foo", True, 5.0, None, None),
(3, "bar", False, -1.0, date(2012, 3, 3), datetime(2012, 3, 3, 3, 3, 3)),
(4, "bar", False, 6.0, date(2100, 4, 4), datetime(2100, 4, 4, 4, 4, 4)),
]
df = self.spark.createDataFrame(data, schema)
return df.toPandas()
@unittest.skipIf(not _have_pandas, _pandas_requirement_message)
def test_to_pandas(self):
import numpy as np
pdf = self._to_pandas()
types = pdf.dtypes
self.assertEquals(types[0], np.int32)
self.assertEquals(types[1], np.object)
self.assertEquals(types[2], np.bool)
self.assertEquals(types[3], np.float32)
self.assertEquals(types[4], np.object) # datetime.date
self.assertEquals(types[5], 'datetime64[ns]')
@unittest.skipIf(_have_pandas, "Required Pandas was found.")
def test_to_pandas_required_pandas_not_found(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(ImportError, 'Pandas >= .* must be installed'):
self._to_pandas()
@unittest.skipIf(not _have_pandas, _pandas_requirement_message)
def test_to_pandas_avoid_astype(self):
import numpy as np
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", IntegerType())
data = [(1, "foo", 16777220), (None, "bar", None)]
df = self.spark.createDataFrame(data, schema)
types = df.toPandas().dtypes
self.assertEquals(types[0], np.float64) # doesn't convert to np.int32 due to NaN value.
self.assertEquals(types[1], np.object)
self.assertEquals(types[2], np.float64)
def test_create_dataframe_from_array_of_long(self):
import array
data = [Row(longarray=array.array('l', [-9223372036854775808, 0, 9223372036854775807]))]
df = self.spark.createDataFrame(data)
self.assertEqual(df.first(), Row(longarray=[-9223372036854775808, 0, 9223372036854775807]))
@unittest.skipIf(not _have_pandas, _pandas_requirement_message)
def test_create_dataframe_from_pandas_with_timestamp(self):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)],
"d": [pd.Timestamp.now().date()]})
# test types are inferred correctly without specifying schema
df = self.spark.createDataFrame(pdf)
self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType))
self.assertTrue(isinstance(df.schema['d'].dataType, DateType))
        # test that passing a schema still accepts the pdf as input
df = self.spark.createDataFrame(pdf, schema="d date, ts timestamp")
self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType))
self.assertTrue(isinstance(df.schema['d'].dataType, DateType))
@unittest.skipIf(_have_pandas, "Required Pandas was found.")
def test_create_dataframe_required_pandas_not_found(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(
ImportError,
"(Pandas >= .* must be installed|No module named '?pandas'?)"):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)],
"d": [pd.Timestamp.now().date()]})
self.spark.createDataFrame(pdf)
# Regression test for SPARK-23360
@unittest.skipIf(not _have_pandas, _pandas_requirement_message)
def test_create_dateframe_from_pandas_with_dst(self):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({'time': [datetime(2015, 10, 31, 22, 30)]})
df = self.spark.createDataFrame(pdf)
self.assertPandasEqual(pdf, df.toPandas())
orig_env_tz = os.environ.get('TZ', None)
try:
tz = 'America/Los_Angeles'
os.environ['TZ'] = tz
time.tzset()
with self.sql_conf({'spark.sql.session.timeZone': tz}):
df = self.spark.createDataFrame(pdf)
self.assertPandasEqual(pdf, df.toPandas())
finally:
del os.environ['TZ']
if orig_env_tz is not None:
os.environ['TZ'] = orig_env_tz
time.tzset()
def test_sort_with_nulls_order(self):
from pyspark.sql import functions
df = self.spark.createDataFrame(
[('Tom', 80), (None, 60), ('Alice', 50)], ["name", "height"])
self.assertEquals(
df.select(df.name).orderBy(functions.asc_nulls_first('name')).collect(),
[Row(name=None), Row(name=u'Alice'), Row(name=u'Tom')])
self.assertEquals(
df.select(df.name).orderBy(functions.asc_nulls_last('name')).collect(),
[Row(name=u'Alice'), Row(name=u'Tom'), Row(name=None)])
self.assertEquals(
df.select(df.name).orderBy(functions.desc_nulls_first('name')).collect(),
[Row(name=None), Row(name=u'Tom'), Row(name=u'Alice')])
self.assertEquals(
df.select(df.name).orderBy(functions.desc_nulls_last('name')).collect(),
[Row(name=u'Tom'), Row(name=u'Alice'), Row(name=None)])
def test_json_sampling_ratio(self):
rdd = self.spark.sparkContext.range(0, 100, 1, 1) \
.map(lambda x: '{"a":0.1}' if x == 1 else '{"a":%s}' % str(x))
schema = self.spark.read.option('inferSchema', True) \
.option('samplingRatio', 0.5) \
.json(rdd).schema
self.assertEquals(schema, StructType([StructField("a", LongType(), True)]))
def test_csv_sampling_ratio(self):
rdd = self.spark.sparkContext.range(0, 100, 1, 1) \
.map(lambda x: '0.1' if x == 1 else str(x))
schema = self.spark.read.option('inferSchema', True)\
.csv(rdd, samplingRatio=0.5).schema
self.assertEquals(schema, StructType([StructField("_c0", IntegerType(), True)]))
def test_checking_csv_header(self):
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
self.spark.createDataFrame([[1, 1000], [2000, 2]])\
.toDF('f1', 'f2').write.option("header", "true").csv(path)
schema = StructType([
StructField('f2', IntegerType(), nullable=True),
StructField('f1', IntegerType(), nullable=True)])
df = self.spark.read.option('header', 'true').schema(schema)\
.csv(path, enforceSchema=False)
self.assertRaisesRegexp(
Exception,
"CSV header does not conform to the schema",
lambda: df.collect())
finally:
shutil.rmtree(path)
def test_ignore_column_of_all_nulls(self):
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
df = self.spark.createDataFrame([["""{"a":null, "b":1, "c":3.0}"""],
["""{"a":null, "b":null, "c":"string"}"""],
["""{"a":null, "b":null, "c":null}"""]])
df.write.text(path)
schema = StructType([
StructField('b', LongType(), nullable=True),
StructField('c', StringType(), nullable=True)])
readback = self.spark.read.json(path, dropFieldIfAllNull=True)
self.assertEquals(readback.schema, schema)
finally:
shutil.rmtree(path)
# SPARK-24721
@unittest.skipIf(not _test_compiled, _test_not_compiled_message)
def test_datasource_with_udf(self):
from pyspark.sql.functions import udf, lit, col
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
self.spark.range(1).write.mode("overwrite").format('csv').save(path)
filesource_df = self.spark.read.option('inferSchema', True).csv(path).toDF('i')
datasource_df = self.spark.read \
.format("org.apache.spark.sql.sources.SimpleScanSource") \
.option('from', 0).option('to', 1).load().toDF('i')
datasource_v2_df = self.spark.read \
.format("org.apache.spark.sql.sources.v2.SimpleDataSourceV2") \
.load().toDF('i', 'j')
c1 = udf(lambda x: x + 1, 'int')(lit(1))
c2 = udf(lambda x: x + 1, 'int')(col('i'))
f1 = udf(lambda x: False, 'boolean')(lit(1))
f2 = udf(lambda x: False, 'boolean')(col('i'))
for df in [filesource_df, datasource_df, datasource_v2_df]:
result = df.withColumn('c', c1)
expected = df.withColumn('c', lit(2))
self.assertEquals(expected.collect(), result.collect())
for df in [filesource_df, datasource_df, datasource_v2_df]:
result = df.withColumn('c', c2)
expected = df.withColumn('c', col('i') + 1)
self.assertEquals(expected.collect(), result.collect())
for df in [filesource_df, datasource_df, datasource_v2_df]:
for f in [f1, f2]:
result = df.filter(f)
self.assertEquals(0, result.count())
finally:
shutil.rmtree(path)
def test_repr_behaviors(self):
import re
pattern = re.compile(r'^ *\|', re.MULTILINE)
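        # The '|' margin in the expected strings below is stripped before comparison.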
df = self.spark.createDataFrame([(1, "1"), (22222, "22222")], ("key", "value"))
# test when eager evaluation is enabled and _repr_html_ will not be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": True}):
expected1 = """+-----+-----+
|| key|value|
|+-----+-----+
|| 1| 1|
||22222|22222|
|+-----+-----+
|"""
self.assertEquals(re.sub(pattern, '', expected1), df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
expected2 = """+---+-----+
||key|value|
|+---+-----+
|| 1| 1|
||222| 222|
|+---+-----+
|"""
self.assertEquals(re.sub(pattern, '', expected2), df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
expected3 = """+---+-----+
||key|value|
|+---+-----+
|| 1| 1|
|+---+-----+
|only showing top 1 row
|"""
self.assertEquals(re.sub(pattern, '', expected3), df.__repr__())
# test when eager evaluation is enabled and _repr_html_ will be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": True}):
expected1 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|<tr><td>22222</td><td>22222</td></tr>
|</table>
|"""
self.assertEquals(re.sub(pattern, '', expected1), df._repr_html_())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
expected2 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|<tr><td>222</td><td>222</td></tr>
|</table>
|"""
self.assertEquals(re.sub(pattern, '', expected2), df._repr_html_())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
expected3 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|</table>
|only showing top 1 row
|"""
self.assertEquals(re.sub(pattern, '', expected3), df._repr_html_())
# test when eager evaluation is disabled and _repr_html_ will be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": False}):
expected = "DataFrame[key: bigint, value: string]"
self.assertEquals(None, df._repr_html_())
self.assertEquals(expected, df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
self.assertEquals(None, df._repr_html_())
self.assertEquals(expected, df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
self.assertEquals(None, df._repr_html_())
self.assertEquals(expected, df.__repr__())
class HiveSparkSubmitTests(SparkSubmitTests):
@classmethod
def setUpClass(cls):
# get a SparkContext to check for availability of Hive
sc = SparkContext('local[4]', cls.__name__)
cls.hive_available = True
try:
sc._jvm.org.apache.hadoop.hive.conf.HiveConf()
except py4j.protocol.Py4JError:
cls.hive_available = False
except TypeError:
cls.hive_available = False
finally:
# we don't need this SparkContext for the test
sc.stop()
def setUp(self):
super(HiveSparkSubmitTests, self).setUp()
if not self.hive_available:
self.skipTest("Hive is not available.")
def test_hivecontext(self):
        # This test checks that HiveContext is using the Hive metastore (SPARK-16224).
        # It sets a metastore URL and checks if there is a Derby dir created by
        # the Hive metastore. If this Derby dir exists, HiveContext is using
        # the Hive metastore.
metastore_path = os.path.join(tempfile.mkdtemp(), "spark16224_metastore_db")
metastore_URL = "jdbc:derby:;databaseName=" + metastore_path + ";create=true"
hive_site_dir = os.path.join(self.programDir, "conf")
hive_site_file = self.createTempFile("hive-site.xml", ("""
|<configuration>
| <property>
| <name>javax.jdo.option.ConnectionURL</name>
| <value>%s</value>
| </property>
|</configuration>
""" % metastore_URL).lstrip(), "conf")
script = self.createTempFile("test.py", """
|import os
|
|from pyspark.conf import SparkConf
|from pyspark.context import SparkContext
|from pyspark.sql import HiveContext
|
|conf = SparkConf()
|sc = SparkContext(conf=conf)
|hive_context = HiveContext(sc)
|print(hive_context.sql("show databases").collect())
""")
proc = subprocess.Popen(
self.sparkSubmit + ["--master", "local-cluster[1,1,1024]",
"--driver-class-path", hive_site_dir, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("default", out.decode('utf-8'))
self.assertTrue(os.path.exists(metastore_path))
class SQLTests2(ReusedSQLTestCase):
    # We can't include this test in SQLTests because it stops the class's SparkContext, which
    # would cause other tests to fail.
def test_sparksession_with_stopped_sparkcontext(self):
self.sc.stop()
sc = SparkContext('local[4]', self.sc.appName)
spark = SparkSession.builder.getOrCreate()
try:
df = spark.createDataFrame([(1, 2)], ["c", "c"])
df.collect()
finally:
spark.stop()
sc.stop()
class QueryExecutionListenerTests(unittest.TestCase, SQLTestUtils):
    # These tests are separate because they use 'spark.sql.queryExecutionListeners', which is
    # static and immutable. It can't be set or unset, for example, via `spark.conf`.
@classmethod
def setUpClass(cls):
import glob
from pyspark.find_spark_home import _find_spark_home
SPARK_HOME = _find_spark_home()
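        # The test listener class only exists if the Scala test classes have been compiled.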
filename_pattern = (
"sql/core/target/scala-*/test-classes/org/apache/spark/sql/"
"TestQueryExecutionListener.class")
cls.has_listener = bool(glob.glob(os.path.join(SPARK_HOME, filename_pattern)))
if cls.has_listener:
# Note that 'spark.sql.queryExecutionListeners' is a static immutable configuration.
cls.spark = SparkSession.builder \
.master("local[4]") \
.appName(cls.__name__) \
.config(
"spark.sql.queryExecutionListeners",
"org.apache.spark.sql.TestQueryExecutionListener") \
.getOrCreate()
def setUp(self):
if not self.has_listener:
raise self.skipTest(
"'org.apache.spark.sql.TestQueryExecutionListener' is not "
"available. Will skip the related tests.")
@classmethod
def tearDownClass(cls):
if hasattr(cls, "spark"):
cls.spark.stop()
def tearDown(self):
self.spark._jvm.OnSuccessCall.clear()
def test_query_execution_listener_on_collect(self):
self.assertFalse(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should not be called before 'collect'")
self.spark.sql("SELECT * FROM range(1)").collect()
self.assertTrue(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should be called after 'collect'")
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
def test_query_execution_listener_on_collect_with_arrow(self):
with self.sql_conf({"spark.sql.execution.arrow.enabled": True}):
self.assertFalse(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should not be "
"called before 'toPandas'")
self.spark.sql("SELECT * FROM range(1)").toPandas()
self.assertTrue(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should be called after 'toPandas'")
class SparkSessionTests(PySparkTestCase):
    # This test is separate because it's closely related to the session's start and stop.
# See SPARK-23228.
def test_set_jvm_default_session(self):
spark = SparkSession.builder.getOrCreate()
try:
self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isDefined())
finally:
spark.stop()
self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isEmpty())
def test_jvm_default_session_already_set(self):
# Here, we assume there is the default session already set in JVM.
jsession = self.sc._jvm.SparkSession(self.sc._jsc.sc())
self.sc._jvm.SparkSession.setDefaultSession(jsession)
spark = SparkSession.builder.getOrCreate()
try:
self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isDefined())
            # The session should be the same as the existing one.
self.assertTrue(jsession.equals(spark._jvm.SparkSession.getDefaultSession().get()))
finally:
spark.stop()
class UDFInitializationTests(unittest.TestCase):
def tearDown(self):
if SparkSession._instantiatedSession is not None:
SparkSession._instantiatedSession.stop()
if SparkContext._active_spark_context is not None:
SparkContext._active_spark_context.stop()
def test_udf_init_shouldnt_initialize_context(self):
from pyspark.sql.functions import UserDefinedFunction
UserDefinedFunction(lambda x: x, StringType())
self.assertIsNone(
SparkContext._active_spark_context,
"SparkContext shouldn't be initialized when UserDefinedFunction is created."
)
self.assertIsNone(
SparkSession._instantiatedSession,
"SparkSession shouldn't be initialized when UserDefinedFunction is created."
)
class HiveContextSQLTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
cls.hive_available = True
try:
cls.sc._jvm.org.apache.hadoop.hive.conf.HiveConf()
except py4j.protocol.Py4JError:
cls.hive_available = False
except TypeError:
cls.hive_available = False
os.unlink(cls.tempdir.name)
if cls.hive_available:
cls.spark = HiveContext._createForTesting(cls.sc)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.sc.parallelize(cls.testData).toDF()
def setUp(self):
if not self.hive_available:
self.skipTest("Hive is not available.")
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_save_and_load_table(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.saveAsTable("savedJsonTable", "json", "append", path=tmpPath)
actual = self.spark.createExternalTable("externalJsonTable", tmpPath, "json")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE externalJsonTable")
df.write.saveAsTable("savedJsonTable", "json", "overwrite", path=tmpPath)
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.createExternalTable("externalJsonTable", source="json",
schema=schema, path=tmpPath,
noUse="this options will not be used")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
defaultDataSourceName = self.spark.getConf("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
df.write.saveAsTable("savedJsonTable", path=tmpPath, mode="overwrite")
actual = self.spark.createExternalTable("externalJsonTable", path=tmpPath)
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_window_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.partitionBy("value").orderBy("key")
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 1, 1, 1, 1, 1),
("2", 1, 1, 1, 3, 1, 1, 1, 1),
("2", 1, 2, 1, 3, 2, 1, 1, 1),
("2", 2, 2, 2, 3, 3, 3, 2, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_without_partitionBy(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.orderBy("key", df.value)
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 4, 1, 1, 1, 1),
("2", 1, 1, 1, 4, 2, 2, 2, 1),
("2", 1, 2, 1, 4, 3, 2, 2, 2),
("2", 2, 2, 2, 4, 4, 4, 3, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_cumulative_sum(self):
df = self.spark.createDataFrame([("one", 1), ("two", 2)], ["key", "value"])
from pyspark.sql import functions as F
# Test cumulative sum
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values less than JVM's Long.MinValue and make sure we don't overflow
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding - 1, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values greater than JVM's Long.MaxValue and make sure we don't overflow
frame_end = Window.unboundedFollowing + 1
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.currentRow, frame_end)))
rs = sorted(sel.collect())
expected = [("one", 3), ("two", 2)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_collect_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql import functions
self.assertEqual(
sorted(df.select(functions.collect_set(df.key).alias('r')).collect()[0].r),
[1, 2])
self.assertEqual(
sorted(df.select(functions.collect_list(df.key).alias('r')).collect()[0].r),
[1, 1, 1, 2])
self.assertEqual(
sorted(df.select(functions.collect_set(df.value).alias('r')).collect()[0].r),
["1", "2"])
self.assertEqual(
sorted(df.select(functions.collect_list(df.value).alias('r')).collect()[0].r),
["1", "2", "2", "2"])
def test_limit_and_take(self):
df = self.spark.range(1, 1000, numPartitions=10)
def assert_runs_only_one_job_stage_and_task(job_group_name, f):
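            # Run f() in its own job group and check it ran exactly one job, stage and task.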
tracker = self.sc.statusTracker()
self.sc.setJobGroup(job_group_name, description="")
f()
jobs = tracker.getJobIdsForGroup(job_group_name)
self.assertEqual(1, len(jobs))
stages = tracker.getJobInfo(jobs[0]).stageIds
self.assertEqual(1, len(stages))
self.assertEqual(1, tracker.getStageInfo(stages[0]).numTasks)
# Regression test for SPARK-10731: take should delegate to Scala implementation
assert_runs_only_one_job_stage_and_task("take", lambda: df.take(1))
        # Regression test for SPARK-17514: limit(n).collect() should perform the same as take(n)
assert_runs_only_one_job_stage_and_task("collect_limit", lambda: df.limit(1).collect())
def test_datetime_functions(self):
from pyspark.sql import functions
from datetime import date, datetime
df = self.spark.range(1).selectExpr("'2017-01-22' as dateCol")
parse_result = df.select(functions.to_date(functions.col("dateCol"))).first()
self.assertEquals(date(2017, 1, 22), parse_result['to_date(`dateCol`)'])
@unittest.skipIf(sys.version_info < (3, 3), "Unittest < 3.3 doesn't support mocking")
def test_unbounded_frames(self):
from unittest.mock import patch
from pyspark.sql import functions as F
from pyspark.sql import window
import importlib
df = self.spark.range(0, 3)
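        # The helpers check that -sys.maxsize/sys.maxsize map to UNBOUNDED frame boundaries.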
def rows_frame_match():
return "ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rowsBetween(-sys.maxsize, sys.maxsize))
).columns[0]
def range_frame_match():
return "RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rangeBetween(-sys.maxsize, sys.maxsize))
).columns[0]
with patch("sys.maxsize", 2 ** 31 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
with patch("sys.maxsize", 2 ** 63 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
with patch("sys.maxsize", 2 ** 127 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
importlib.reload(window)
class DataTypeVerificationTests(unittest.TestCase):
def test_verify_type_exception_msg(self):
self.assertRaisesRegexp(
ValueError,
"test_name",
lambda: _make_type_verifier(StringType(), nullable=False, name="test_name")(None))
schema = StructType([StructField('a', StructType([StructField('b', IntegerType())]))])
self.assertRaisesRegexp(
TypeError,
"field b in field a",
lambda: _make_type_verifier(schema)([["data"]]))
def test_verify_type_ok_nullable(self):
obj = None
types = [IntegerType(), FloatType(), StringType(), StructType([])]
for data_type in types:
try:
_make_type_verifier(data_type, nullable=True)(obj)
except Exception:
self.fail("verify_type(%s, %s, nullable=True)" % (obj, data_type))
def test_verify_type_not_nullable(self):
import array
import datetime
import decimal
schema = StructType([
StructField('s', StringType(), nullable=False),
StructField('i', IntegerType(), nullable=True)])
class MyObj:
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
# obj, data_type
success_spec = [
# String
("", StringType()),
(u"", StringType()),
(1, StringType()),
(1.0, StringType()),
([], StringType()),
({}, StringType()),
# UDT
(ExamplePoint(1.0, 2.0), ExamplePointUDT()),
# Boolean
(True, BooleanType()),
# Byte
(-(2**7), ByteType()),
(2**7 - 1, ByteType()),
# Short
(-(2**15), ShortType()),
(2**15 - 1, ShortType()),
# Integer
(-(2**31), IntegerType()),
(2**31 - 1, IntegerType()),
# Long
(2**64, LongType()),
# Float & Double
(1.0, FloatType()),
(1.0, DoubleType()),
# Decimal
(decimal.Decimal("1.0"), DecimalType()),
# Binary
(bytearray([1, 2]), BinaryType()),
# Date/Timestamp
(datetime.date(2000, 1, 2), DateType()),
(datetime.datetime(2000, 1, 2, 3, 4), DateType()),
(datetime.datetime(2000, 1, 2, 3, 4), TimestampType()),
# Array
([], ArrayType(IntegerType())),
(["1", None], ArrayType(StringType(), containsNull=True)),
([1, 2], ArrayType(IntegerType())),
((1, 2), ArrayType(IntegerType())),
(array.array('h', [1, 2]), ArrayType(IntegerType())),
# Map
({}, MapType(StringType(), IntegerType())),
({"a": 1}, MapType(StringType(), IntegerType())),
({"a": None}, MapType(StringType(), IntegerType(), valueContainsNull=True)),
# Struct
({"s": "a", "i": 1}, schema),
({"s": "a", "i": None}, schema),
({"s": "a"}, schema),
({"s": "a", "f": 1.0}, schema),
(Row(s="a", i=1), schema),
(Row(s="a", i=None), schema),
(Row(s="a", i=1, f=1.0), schema),
(["a", 1], schema),
(["a", None], schema),
(("a", 1), schema),
(MyObj(s="a", i=1), schema),
(MyObj(s="a", i=None), schema),
(MyObj(s="a"), schema),
]
# obj, data_type, exception class
failure_spec = [
# String (match anything but None)
(None, StringType(), ValueError),
# UDT
(ExamplePoint(1.0, 2.0), PythonOnlyUDT(), ValueError),
# Boolean
(1, BooleanType(), TypeError),
("True", BooleanType(), TypeError),
([1], BooleanType(), TypeError),
# Byte
(-(2**7) - 1, ByteType(), ValueError),
(2**7, ByteType(), ValueError),
("1", ByteType(), TypeError),
(1.0, ByteType(), TypeError),
# Short
(-(2**15) - 1, ShortType(), ValueError),
(2**15, ShortType(), ValueError),
# Integer
(-(2**31) - 1, IntegerType(), ValueError),
(2**31, IntegerType(), ValueError),
# Float & Double
(1, FloatType(), TypeError),
(1, DoubleType(), TypeError),
# Decimal
(1.0, DecimalType(), TypeError),
(1, DecimalType(), TypeError),
("1.0", DecimalType(), TypeError),
# Binary
(1, BinaryType(), TypeError),
# Date/Timestamp
("2000-01-02", DateType(), TypeError),
(946811040, TimestampType(), TypeError),
# Array
(["1", None], ArrayType(StringType(), containsNull=False), ValueError),
([1, "2"], ArrayType(IntegerType()), TypeError),
# Map
({"a": 1}, MapType(IntegerType(), IntegerType()), TypeError),
({"a": "1"}, MapType(StringType(), IntegerType()), TypeError),
({"a": None}, MapType(StringType(), IntegerType(), valueContainsNull=False),
ValueError),
# Struct
({"s": "a", "i": "1"}, schema, TypeError),
(Row(s="a"), schema, ValueError), # Row can't have missing field
(Row(s="a", i="1"), schema, TypeError),
(["a"], schema, ValueError),
(["a", "1"], schema, TypeError),
(MyObj(s="a", i="1"), schema, TypeError),
(MyObj(s=None, i="1"), schema, ValueError),
]
# Check success cases
for obj, data_type in success_spec:
try:
_make_type_verifier(data_type, nullable=False)(obj)
except Exception:
self.fail("verify_type(%s, %s, nullable=False)" % (obj, data_type))
# Check failure cases
for obj, data_type, exp in failure_spec:
msg = "verify_type(%s, %s, nullable=False) == %s" % (obj, data_type, exp)
with self.assertRaises(exp, msg=msg):
_make_type_verifier(data_type, nullable=False)(obj)
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class ArrowTests(ReusedSQLTestCase):
@classmethod
def setUpClass(cls):
from datetime import date, datetime
from decimal import Decimal
from distutils.version import LooseVersion
import pyarrow as pa
ReusedSQLTestCase.setUpClass()
# Synchronize default timezone between Python and Java
cls.tz_prev = os.environ.get("TZ", None) # save current tz if set
tz = "America/Los_Angeles"
os.environ["TZ"] = tz
time.tzset()
cls.spark.conf.set("spark.sql.session.timeZone", tz)
cls.spark.conf.set("spark.sql.execution.arrow.enabled", "true")
# Disable fallback by default to easily detect the failures.
cls.spark.conf.set("spark.sql.execution.arrow.fallback.enabled", "false")
cls.schema = StructType([
StructField("1_str_t", StringType(), True),
StructField("2_int_t", IntegerType(), True),
StructField("3_long_t", LongType(), True),
StructField("4_float_t", FloatType(), True),
StructField("5_double_t", DoubleType(), True),
StructField("6_decimal_t", DecimalType(38, 18), True),
StructField("7_date_t", DateType(), True),
StructField("8_timestamp_t", TimestampType(), True)])
cls.data = [(u"a", 1, 10, 0.2, 2.0, Decimal("2.0"),
date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1)),
(u"b", 2, 20, 0.4, 4.0, Decimal("4.0"),
date(2012, 2, 2), datetime(2012, 2, 2, 2, 2, 2)),
(u"c", 3, 30, 0.8, 6.0, Decimal("6.0"),
date(2100, 3, 3), datetime(2100, 3, 3, 3, 3, 3))]
# TODO: remove version check once minimum pyarrow version is 0.10.0
if LooseVersion("0.10.0") <= LooseVersion(pa.__version__):
cls.schema.add(StructField("9_binary_t", BinaryType(), True))
cls.data[0] = cls.data[0] + (bytearray(b"a"),)
cls.data[1] = cls.data[1] + (bytearray(b"bb"),)
cls.data[2] = cls.data[2] + (bytearray(b"ccc"),)
@classmethod
def tearDownClass(cls):
del os.environ["TZ"]
if cls.tz_prev is not None:
os.environ["TZ"] = cls.tz_prev
time.tzset()
ReusedSQLTestCase.tearDownClass()
def create_pandas_data_frame(self):
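        # Build a pandas DataFrame equivalent to self.data/self.schema for comparisons.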
import pandas as pd
import numpy as np
data_dict = {}
for j, name in enumerate(self.schema.names):
data_dict[name] = [self.data[i][j] for i in range(len(self.data))]
# need to convert these to numpy types first
data_dict["2_int_t"] = np.int32(data_dict["2_int_t"])
data_dict["4_float_t"] = np.float32(data_dict["4_float_t"])
return pd.DataFrame(data=data_dict)
def test_toPandas_fallback_enabled(self):
import pandas as pd
with self.sql_conf({"spark.sql.execution.arrow.fallback.enabled": True}):
schema = StructType([StructField("map", MapType(StringType(), IntegerType()), True)])
df = self.spark.createDataFrame([({u'a': 1},)], schema=schema)
with QuietTest(self.sc):
with warnings.catch_warnings(record=True) as warns:
pdf = df.toPandas()
# Catch and check the last UserWarning.
user_warns = [
warn.message for warn in warns if isinstance(warn.message, UserWarning)]
self.assertTrue(len(user_warns) > 0)
self.assertTrue(
"Attempting non-optimization" in _exception_message(user_warns[-1]))
self.assertPandasEqual(pdf, pd.DataFrame({u'map': [{u'a': 1}]}))
def test_toPandas_fallback_disabled(self):
from distutils.version import LooseVersion
import pyarrow as pa
schema = StructType([StructField("map", MapType(StringType(), IntegerType()), True)])
df = self.spark.createDataFrame([(None,)], schema=schema)
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'Unsupported type'):
df.toPandas()
# TODO: remove BinaryType check once minimum pyarrow version is 0.10.0
if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
schema = StructType([StructField("binary", BinaryType(), True)])
df = self.spark.createDataFrame([(None,)], schema=schema)
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'Unsupported type.*BinaryType'):
df.toPandas()
def test_null_conversion(self):
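        # Prepend an all-None row and check each column reports exactly one null in pandas.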
df_null = self.spark.createDataFrame([tuple([None for _ in range(len(self.data[0]))])] +
self.data)
pdf = df_null.toPandas()
null_counts = pdf.isnull().sum().tolist()
self.assertTrue(all([c == 1 for c in null_counts]))
def _toPandas_arrow_toggle(self, df):
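        # Collect once with Arrow disabled and once with the class default (Arrow enabled).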
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
pdf = df.toPandas()
pdf_arrow = df.toPandas()
return pdf, pdf_arrow
def test_toPandas_arrow_toggle(self):
df = self.spark.createDataFrame(self.data, schema=self.schema)
pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
expected = self.create_pandas_data_frame()
self.assertPandasEqual(expected, pdf)
self.assertPandasEqual(expected, pdf_arrow)
def test_toPandas_respect_session_timezone(self):
df = self.spark.createDataFrame(self.data, schema=self.schema)
timezone = "America/New_York"
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": False,
"spark.sql.session.timeZone": timezone}):
pdf_la, pdf_arrow_la = self._toPandas_arrow_toggle(df)
self.assertPandasEqual(pdf_arrow_la, pdf_la)
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": True,
"spark.sql.session.timeZone": timezone}):
pdf_ny, pdf_arrow_ny = self._toPandas_arrow_toggle(df)
self.assertPandasEqual(pdf_arrow_ny, pdf_ny)
self.assertFalse(pdf_ny.equals(pdf_la))
from pyspark.sql.types import _check_series_convert_timestamps_local_tz
pdf_la_corrected = pdf_la.copy()
for field in self.schema:
if isinstance(field.dataType, TimestampType):
pdf_la_corrected[field.name] = _check_series_convert_timestamps_local_tz(
pdf_la_corrected[field.name], timezone)
self.assertPandasEqual(pdf_ny, pdf_la_corrected)
def test_pandas_round_trip(self):
pdf = self.create_pandas_data_frame()
df = self.spark.createDataFrame(self.data, schema=self.schema)
pdf_arrow = df.toPandas()
self.assertPandasEqual(pdf_arrow, pdf)
def test_filtered_frame(self):
df = self.spark.range(3).toDF("i")
pdf = df.filter("i < 0").toPandas()
self.assertEqual(len(pdf.columns), 1)
self.assertEqual(pdf.columns[0], "i")
self.assertTrue(pdf.empty)
def _createDataFrame_toggle(self, pdf, schema=None):
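        # Build the DataFrame once with Arrow disabled and once with it enabled (the class default).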
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
df_no_arrow = self.spark.createDataFrame(pdf, schema=schema)
df_arrow = self.spark.createDataFrame(pdf, schema=schema)
return df_no_arrow, df_arrow
def test_createDataFrame_toggle(self):
pdf = self.create_pandas_data_frame()
df_no_arrow, df_arrow = self._createDataFrame_toggle(pdf, schema=self.schema)
self.assertEquals(df_no_arrow.collect(), df_arrow.collect())
def test_createDataFrame_respect_session_timezone(self):
from datetime import timedelta
pdf = self.create_pandas_data_frame()
timezone = "America/New_York"
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": False,
"spark.sql.session.timeZone": timezone}):
df_no_arrow_la, df_arrow_la = self._createDataFrame_toggle(pdf, schema=self.schema)
result_la = df_no_arrow_la.collect()
result_arrow_la = df_arrow_la.collect()
self.assertEqual(result_la, result_arrow_la)
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": True,
"spark.sql.session.timeZone": timezone}):
df_no_arrow_ny, df_arrow_ny = self._createDataFrame_toggle(pdf, schema=self.schema)
result_ny = df_no_arrow_ny.collect()
result_arrow_ny = df_arrow_ny.collect()
self.assertEqual(result_ny, result_arrow_ny)
self.assertNotEqual(result_ny, result_la)
            # Correct result_la for the 3 hour difference between Los Angeles and New York
result_la_corrected = [Row(**{k: v - timedelta(hours=3) if k == '8_timestamp_t' else v
for k, v in row.asDict().items()})
for row in result_la]
self.assertEqual(result_ny, result_la_corrected)
def test_createDataFrame_with_schema(self):
pdf = self.create_pandas_data_frame()
df = self.spark.createDataFrame(pdf, schema=self.schema)
self.assertEquals(self.schema, df.schema)
pdf_arrow = df.toPandas()
self.assertPandasEqual(pdf_arrow, pdf)
def test_createDataFrame_with_incorrect_schema(self):
pdf = self.create_pandas_data_frame()
fields = list(self.schema)
fields[0], fields[7] = fields[7], fields[0] # swap str with timestamp
wrong_schema = StructType(fields)
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, ".*No cast.*string.*timestamp.*"):
self.spark.createDataFrame(pdf, schema=wrong_schema)
def test_createDataFrame_with_names(self):
pdf = self.create_pandas_data_frame()
new_names = list(map(str, range(len(self.schema.fieldNames()))))
# Test that schema as a list of column names gets applied
df = self.spark.createDataFrame(pdf, schema=list(new_names))
self.assertEquals(df.schema.fieldNames(), new_names)
# Test that schema as tuple of column names gets applied
df = self.spark.createDataFrame(pdf, schema=tuple(new_names))
self.assertEquals(df.schema.fieldNames(), new_names)
def test_createDataFrame_column_name_encoding(self):
import pandas as pd
pdf = pd.DataFrame({u'a': [1]})
columns = self.spark.createDataFrame(pdf).columns
self.assertTrue(isinstance(columns[0], str))
self.assertEquals(columns[0], 'a')
columns = self.spark.createDataFrame(pdf, [u'b']).columns
self.assertTrue(isinstance(columns[0], str))
self.assertEquals(columns[0], 'b')
def test_createDataFrame_with_single_data_type(self):
import pandas as pd
with QuietTest(self.sc):
with self.assertRaisesRegexp(ValueError, ".*IntegerType.*not supported.*"):
self.spark.createDataFrame(pd.DataFrame({"a": [1]}), schema="int")
def test_createDataFrame_does_not_modify_input(self):
import pandas as pd
        # Some series get converted for Spark to consume; this makes sure the input is unchanged
pdf = self.create_pandas_data_frame()
# Use a nanosecond value to make sure it is not truncated
pdf.ix[0, '8_timestamp_t'] = pd.Timestamp(1)
        # Integers with nulls will get NaNs filled with 0 and will be cast
pdf.ix[1, '2_int_t'] = None
pdf_copy = pdf.copy(deep=True)
self.spark.createDataFrame(pdf, schema=self.schema)
self.assertTrue(pdf.equals(pdf_copy))
def test_schema_conversion_roundtrip(self):
from pyspark.sql.types import from_arrow_schema, to_arrow_schema
arrow_schema = to_arrow_schema(self.schema)
schema_rt = from_arrow_schema(arrow_schema)
self.assertEquals(self.schema, schema_rt)
def test_createDataFrame_with_array_type(self):
import pandas as pd
pdf = pd.DataFrame({"a": [[1, 2], [3, 4]], "b": [[u"x", u"y"], [u"y", u"z"]]})
df, df_arrow = self._createDataFrame_toggle(pdf)
result = df.collect()
result_arrow = df_arrow.collect()
expected = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)]
for r in range(len(expected)):
for e in range(len(expected[r])):
self.assertTrue(expected[r][e] == result_arrow[r][e] and
result[r][e] == result_arrow[r][e])
def test_toPandas_with_array_type(self):
expected = [([1, 2], [u"x", u"y"]), ([3, 4], [u"y", u"z"])]
array_schema = StructType([StructField("a", ArrayType(IntegerType())),
StructField("b", ArrayType(StringType()))])
df = self.spark.createDataFrame(expected, schema=array_schema)
pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
result = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)]
result_arrow = [tuple(list(e) for e in rec) for rec in pdf_arrow.to_records(index=False)]
for r in range(len(expected)):
for e in range(len(expected[r])):
self.assertTrue(expected[r][e] == result_arrow[r][e] and
result[r][e] == result_arrow[r][e])
def test_createDataFrame_with_int_col_names(self):
import numpy as np
import pandas as pd
pdf = pd.DataFrame(np.random.rand(4, 2))
df, df_arrow = self._createDataFrame_toggle(pdf)
pdf_col_names = [str(c) for c in pdf.columns]
self.assertEqual(pdf_col_names, df.columns)
self.assertEqual(pdf_col_names, df_arrow.columns)
def test_createDataFrame_fallback_enabled(self):
import pandas as pd
with QuietTest(self.sc):
with self.sql_conf({"spark.sql.execution.arrow.fallback.enabled": True}):
with warnings.catch_warnings(record=True) as warns:
df = self.spark.createDataFrame(
pd.DataFrame([[{u'a': 1}]]), "a: map<string, int>")
# Catch and check the last UserWarning.
user_warns = [
warn.message for warn in warns if isinstance(warn.message, UserWarning)]
self.assertTrue(len(user_warns) > 0)
self.assertTrue(
"Attempting non-optimization" in _exception_message(user_warns[-1]))
self.assertEqual(df.collect(), [Row(a={u'a': 1})])
def test_createDataFrame_fallback_disabled(self):
from distutils.version import LooseVersion
import pandas as pd
import pyarrow as pa
with QuietTest(self.sc):
with self.assertRaisesRegexp(TypeError, 'Unsupported type'):
self.spark.createDataFrame(
pd.DataFrame([[{u'a': 1}]]), "a: map<string, int>")
# TODO: remove BinaryType check once minimum pyarrow version is 0.10.0
if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
with QuietTest(self.sc):
with self.assertRaisesRegexp(TypeError, 'Unsupported type.*BinaryType'):
self.spark.createDataFrame(
pd.DataFrame([[{'a': b'aaa'}]]), "a: binary")
# Regression test for SPARK-23314
def test_timestamp_dst(self):
import pandas as pd
# Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am
dt = [datetime.datetime(2015, 11, 1, 0, 30),
datetime.datetime(2015, 11, 1, 1, 30),
datetime.datetime(2015, 11, 1, 2, 30)]
pdf = pd.DataFrame({'time': dt})
df_from_python = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
df_from_pandas = self.spark.createDataFrame(pdf)
self.assertPandasEqual(pdf, df_from_python.toPandas())
self.assertPandasEqual(pdf, df_from_pandas.toPandas())
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class PandasUDFTests(ReusedSQLTestCase):
def test_pandas_udf_basic(self):
from pyspark.rdd import PythonEvalType
from pyspark.sql.functions import pandas_udf, PandasUDFType
udf = pandas_udf(lambda x: x, DoubleType())
self.assertEqual(udf.returnType, DoubleType())
self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
udf = pandas_udf(lambda x: x, DoubleType(), PandasUDFType.SCALAR)
self.assertEqual(udf.returnType, DoubleType())
self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
udf = pandas_udf(lambda x: x, 'double', PandasUDFType.SCALAR)
self.assertEqual(udf.returnType, DoubleType())
self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
udf = pandas_udf(lambda x: x, StructType([StructField("v", DoubleType())]),
PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
udf = pandas_udf(lambda x: x, 'v double', PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
udf = pandas_udf(lambda x: x, 'v double',
functionType=PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
udf = pandas_udf(lambda x: x, returnType='v double',
functionType=PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
def test_pandas_udf_decorator(self):
from pyspark.rdd import PythonEvalType
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql.types import StructType, StructField, DoubleType
@pandas_udf(DoubleType())
def foo(x):
return x
self.assertEqual(foo.returnType, DoubleType())
self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
@pandas_udf(returnType=DoubleType())
def foo(x):
return x
self.assertEqual(foo.returnType, DoubleType())
self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
schema = StructType([StructField("v", DoubleType())])
@pandas_udf(schema, PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
@pandas_udf('v double', PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
@pandas_udf(schema, functionType=PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
@pandas_udf(returnType='double', functionType=PandasUDFType.SCALAR)
def foo(x):
return x
self.assertEqual(foo.returnType, DoubleType())
self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
@pandas_udf(returnType=schema, functionType=PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
def test_udf_wrong_arg(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
with QuietTest(self.sc):
with self.assertRaises(ParseException):
@pandas_udf('blah')
def foo(x):
return x
with self.assertRaisesRegexp(ValueError, 'Invalid returnType.*None'):
@pandas_udf(functionType=PandasUDFType.SCALAR)
def foo(x):
return x
with self.assertRaisesRegexp(ValueError, 'Invalid functionType'):
@pandas_udf('double', 100)
def foo(x):
return x
with self.assertRaisesRegexp(ValueError, '0-arg pandas_udfs.*not.*supported'):
pandas_udf(lambda: 1, LongType(), PandasUDFType.SCALAR)
with self.assertRaisesRegexp(ValueError, '0-arg pandas_udfs.*not.*supported'):
@pandas_udf(LongType(), PandasUDFType.SCALAR)
def zero_with_type():
return 1
with self.assertRaisesRegexp(TypeError, 'Invalid returnType'):
@pandas_udf(returnType=PandasUDFType.GROUPED_MAP)
def foo(df):
return df
with self.assertRaisesRegexp(TypeError, 'Invalid returnType'):
@pandas_udf(returnType='double', functionType=PandasUDFType.GROUPED_MAP)
def foo(df):
return df
with self.assertRaisesRegexp(ValueError, 'Invalid function'):
@pandas_udf(returnType='k int, v double', functionType=PandasUDFType.GROUPED_MAP)
def foo(k, v, w):
return k
def test_stopiteration_in_udf(self):
from pyspark.sql.functions import udf, pandas_udf, PandasUDFType
from py4j.protocol import Py4JJavaError
def foo(x):
raise StopIteration()
def foofoo(x, y):
raise StopIteration()
exc_message = "Caught StopIteration thrown from user's code; failing the task"
df = self.spark.range(0, 100)
# plain udf (test for SPARK-23754)
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.withColumn('v', udf(foo)('id')).collect
)
# pandas scalar udf
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.withColumn(
'v', pandas_udf(foo, 'double', PandasUDFType.SCALAR)('id')
).collect
)
# pandas grouped map
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.groupBy('id').apply(
pandas_udf(foo, df.schema, PandasUDFType.GROUPED_MAP)
).collect
)
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.groupBy('id').apply(
pandas_udf(foofoo, df.schema, PandasUDFType.GROUPED_MAP)
).collect
)
# pandas grouped agg
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.groupBy('id').agg(
pandas_udf(foo, 'double', PandasUDFType.GROUPED_AGG)('id')
).collect
)
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class ScalarPandasUDFTests(ReusedSQLTestCase):
@classmethod
def setUpClass(cls):
ReusedSQLTestCase.setUpClass()
# Synchronize default timezone between Python and Java
cls.tz_prev = os.environ.get("TZ", None) # save current tz if set
tz = "America/Los_Angeles"
os.environ["TZ"] = tz
time.tzset()
cls.sc.environment["TZ"] = tz
cls.spark.conf.set("spark.sql.session.timeZone", tz)
@classmethod
def tearDownClass(cls):
del os.environ["TZ"]
if cls.tz_prev is not None:
os.environ["TZ"] = cls.tz_prev
time.tzset()
ReusedSQLTestCase.tearDownClass()
@property
def nondeterministic_vectorized_udf(self):
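        # A scalar pandas UDF returning random doubles, explicitly marked nondeterministic.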
from pyspark.sql.functions import pandas_udf
@pandas_udf('double')
def random_udf(v):
import pandas as pd
import numpy as np
return pd.Series(np.random.random(len(v)))
random_udf = random_udf.asNondeterministic()
return random_udf
def test_vectorized_udf_basic(self):
from pyspark.sql.functions import pandas_udf, col, array
df = self.spark.range(10).select(
col('id').cast('string').alias('str'),
col('id').cast('int').alias('int'),
col('id').alias('long'),
col('id').cast('float').alias('float'),
col('id').cast('double').alias('double'),
col('id').cast('decimal').alias('decimal'),
col('id').cast('boolean').alias('bool'),
array(col('id')).alias('array_long'))
f = lambda x: x
str_f = pandas_udf(f, StringType())
int_f = pandas_udf(f, IntegerType())
long_f = pandas_udf(f, LongType())
float_f = pandas_udf(f, FloatType())
double_f = pandas_udf(f, DoubleType())
decimal_f = pandas_udf(f, DecimalType())
bool_f = pandas_udf(f, BooleanType())
array_long_f = pandas_udf(f, ArrayType(LongType()))
res = df.select(str_f(col('str')), int_f(col('int')),
long_f(col('long')), float_f(col('float')),
double_f(col('double')), decimal_f('decimal'),
bool_f(col('bool')), array_long_f('array_long'))
self.assertEquals(df.collect(), res.collect())
def test_register_nondeterministic_vectorized_udf_basic(self):
from pyspark.sql.functions import pandas_udf
from pyspark.rdd import PythonEvalType
import random
random_pandas_udf = pandas_udf(
lambda x: random.randint(6, 6) + x, IntegerType()).asNondeterministic()
self.assertEqual(random_pandas_udf.deterministic, False)
self.assertEqual(random_pandas_udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
nondeterministic_pandas_udf = self.spark.catalog.registerFunction(
"randomPandasUDF", random_pandas_udf)
self.assertEqual(nondeterministic_pandas_udf.deterministic, False)
self.assertEqual(nondeterministic_pandas_udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
[row] = self.spark.sql("SELECT randomPandasUDF(1)").collect()
self.assertEqual(row[0], 7)
def test_vectorized_udf_null_boolean(self):
from pyspark.sql.functions import pandas_udf, col
data = [(True,), (True,), (None,), (False,)]
schema = StructType().add("bool", BooleanType())
df = self.spark.createDataFrame(data, schema)
bool_f = pandas_udf(lambda x: x, BooleanType())
res = df.select(bool_f(col('bool')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_byte(self):
from pyspark.sql.functions import pandas_udf, col
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("byte", ByteType())
df = self.spark.createDataFrame(data, schema)
byte_f = pandas_udf(lambda x: x, ByteType())
res = df.select(byte_f(col('byte')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_short(self):
from pyspark.sql.functions import pandas_udf, col
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("short", ShortType())
df = self.spark.createDataFrame(data, schema)
short_f = pandas_udf(lambda x: x, ShortType())
res = df.select(short_f(col('short')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_int(self):
from pyspark.sql.functions import pandas_udf, col
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("int", IntegerType())
df = self.spark.createDataFrame(data, schema)
int_f = pandas_udf(lambda x: x, IntegerType())
res = df.select(int_f(col('int')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_long(self):
from pyspark.sql.functions import pandas_udf, col
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("long", LongType())
df = self.spark.createDataFrame(data, schema)
long_f = pandas_udf(lambda x: x, LongType())
res = df.select(long_f(col('long')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_float(self):
from pyspark.sql.functions import pandas_udf, col
data = [(3.0,), (5.0,), (-1.0,), (None,)]
schema = StructType().add("float", FloatType())
df = self.spark.createDataFrame(data, schema)
float_f = pandas_udf(lambda x: x, FloatType())
res = df.select(float_f(col('float')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_double(self):
from pyspark.sql.functions import pandas_udf, col
data = [(3.0,), (5.0,), (-1.0,), (None,)]
schema = StructType().add("double", DoubleType())
df = self.spark.createDataFrame(data, schema)
double_f = pandas_udf(lambda x: x, DoubleType())
res = df.select(double_f(col('double')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_decimal(self):
from decimal import Decimal
from pyspark.sql.functions import pandas_udf, col
data = [(Decimal(3.0),), (Decimal(5.0),), (Decimal(-1.0),), (None,)]
schema = StructType().add("decimal", DecimalType(38, 18))
df = self.spark.createDataFrame(data, schema)
decimal_f = pandas_udf(lambda x: x, DecimalType(38, 18))
res = df.select(decimal_f(col('decimal')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_string(self):
from pyspark.sql.functions import pandas_udf, col
data = [("foo",), (None,), ("bar",), ("bar",)]
schema = StructType().add("str", StringType())
df = self.spark.createDataFrame(data, schema)
str_f = pandas_udf(lambda x: x, StringType())
res = df.select(str_f(col('str')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_string_in_udf(self):
from pyspark.sql.functions import pandas_udf, col
import pandas as pd
df = self.spark.range(10)
str_f = pandas_udf(lambda x: pd.Series(map(str, x)), StringType())
actual = df.select(str_f(col('id')))
expected = df.select(col('id').cast('string'))
self.assertEquals(expected.collect(), actual.collect())
def test_vectorized_udf_datatype_string(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10).select(
col('id').cast('string').alias('str'),
col('id').cast('int').alias('int'),
col('id').alias('long'),
col('id').cast('float').alias('float'),
col('id').cast('double').alias('double'),
col('id').cast('decimal').alias('decimal'),
col('id').cast('boolean').alias('bool'))
f = lambda x: x
str_f = pandas_udf(f, 'string')
int_f = pandas_udf(f, 'integer')
long_f = pandas_udf(f, 'long')
float_f = pandas_udf(f, 'float')
double_f = pandas_udf(f, 'double')
decimal_f = pandas_udf(f, 'decimal(38, 18)')
bool_f = pandas_udf(f, 'boolean')
res = df.select(str_f(col('str')), int_f(col('int')),
long_f(col('long')), float_f(col('float')),
double_f(col('double')), decimal_f('decimal'),
bool_f(col('bool')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_binary(self):
from distutils.version import LooseVersion
import pyarrow as pa
from pyspark.sql.functions import pandas_udf, col
if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*BinaryType'):
pandas_udf(lambda x: x, BinaryType())
else:
data = [(bytearray(b"a"),), (None,), (bytearray(b"bb"),), (bytearray(b"ccc"),)]
schema = StructType().add("binary", BinaryType())
df = self.spark.createDataFrame(data, schema)
str_f = pandas_udf(lambda x: x, BinaryType())
res = df.select(str_f(col('binary')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_array_type(self):
from pyspark.sql.functions import pandas_udf, col
data = [([1, 2],), ([3, 4],)]
array_schema = StructType([StructField("array", ArrayType(IntegerType()))])
df = self.spark.createDataFrame(data, schema=array_schema)
array_f = pandas_udf(lambda x: x, ArrayType(IntegerType()))
result = df.select(array_f(col('array')))
self.assertEquals(df.collect(), result.collect())
def test_vectorized_udf_null_array(self):
from pyspark.sql.functions import pandas_udf, col
data = [([1, 2],), (None,), (None,), ([3, 4],), (None,)]
array_schema = StructType([StructField("array", ArrayType(IntegerType()))])
df = self.spark.createDataFrame(data, schema=array_schema)
array_f = pandas_udf(lambda x: x, ArrayType(IntegerType()))
result = df.select(array_f(col('array')))
self.assertEquals(df.collect(), result.collect())
def test_vectorized_udf_complex(self):
from pyspark.sql.functions import pandas_udf, col, expr
df = self.spark.range(10).select(
col('id').cast('int').alias('a'),
col('id').cast('int').alias('b'),
col('id').cast('double').alias('c'))
add = pandas_udf(lambda x, y: x + y, IntegerType())
power2 = pandas_udf(lambda x: 2 ** x, IntegerType())
mul = pandas_udf(lambda x, y: x * y, DoubleType())
res = df.select(add(col('a'), col('b')), power2(col('a')), mul(col('b'), col('c')))
expected = df.select(expr('a + b'), expr('power(2, a)'), expr('b * c'))
self.assertEquals(expected.collect(), res.collect())
def test_vectorized_udf_exception(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
raise_exception = pandas_udf(lambda x: x * (1 / 0), LongType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'division( or modulo)? by zero'):
df.select(raise_exception(col('id'))).collect()
def test_vectorized_udf_invalid_length(self):
from pyspark.sql.functions import pandas_udf, col
import pandas as pd
df = self.spark.range(10)
raise_exception = pandas_udf(lambda _: pd.Series(1), LongType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(
Exception,
'Result vector from pandas_udf was not the required length'):
df.select(raise_exception(col('id'))).collect()
def test_vectorized_udf_chained(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
f = pandas_udf(lambda x: x + 1, LongType())
g = pandas_udf(lambda x: x - 1, LongType())
res = df.select(g(f(col('id'))))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_wrong_return_type(self):
from pyspark.sql.functions import pandas_udf, col
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*MapType'):
pandas_udf(lambda x: x * 1.0, MapType(LongType(), LongType()))
def test_vectorized_udf_return_scalar(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
f = pandas_udf(lambda x: 1.0, DoubleType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'Return.*type.*Series'):
df.select(f(col('id'))).collect()
def test_vectorized_udf_decorator(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
@pandas_udf(returnType=LongType())
def identity(x):
return x
res = df.select(identity(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_empty_partition(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2))
f = pandas_udf(lambda x: x, LongType())
res = df.select(f(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_varargs(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2))
f = pandas_udf(lambda *v: v[0], LongType())
res = df.select(f(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_unsupported_types(self):
from pyspark.sql.functions import pandas_udf
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*MapType'):
pandas_udf(lambda x: x, MapType(StringType(), IntegerType()))
def test_vectorized_udf_dates(self):
from pyspark.sql.functions import pandas_udf, col
from datetime import date
schema = StructType().add("idx", LongType()).add("date", DateType())
data = [(0, date(1969, 1, 1),),
(1, date(2012, 2, 2),),
(2, None,),
(3, date(2100, 4, 4),)]
df = self.spark.createDataFrame(data, schema=schema)
date_copy = pandas_udf(lambda t: t, returnType=DateType())
df = df.withColumn("date_copy", date_copy(col("date")))
@pandas_udf(returnType=StringType())
def check_data(idx, date, date_copy):
import pandas as pd
msgs = []
is_equal = date.isnull()
for i in range(len(idx)):
if (is_equal[i] and data[idx[i]][1] is None) or \
date[i] == data[idx[i]][1]:
msgs.append(None)
else:
msgs.append(
"date values are not equal (date='%s': data[%d][1]='%s')"
% (date[i], idx[i], data[idx[i]][1]))
return pd.Series(msgs)
result = df.withColumn("check_data",
check_data(col("idx"), col("date"), col("date_copy"))).collect()
self.assertEquals(len(data), len(result))
for i in range(len(result)):
self.assertEquals(data[i][1], result[i][1]) # "date" col
self.assertEquals(data[i][1], result[i][2]) # "date_copy" col
self.assertIsNone(result[i][3]) # "check_data" col
def test_vectorized_udf_timestamps(self):
from pyspark.sql.functions import pandas_udf, col
from datetime import datetime
schema = StructType([
StructField("idx", LongType(), True),
StructField("timestamp", TimestampType(), True)])
data = [(0, datetime(1969, 1, 1, 1, 1, 1)),
(1, datetime(2012, 2, 2, 2, 2, 2)),
(2, None),
(3, datetime(2100, 3, 3, 3, 3, 3))]
df = self.spark.createDataFrame(data, schema=schema)
# Check that a timestamp passed through a pandas_udf will not be altered by timezone calc
f_timestamp_copy = pandas_udf(lambda t: t, returnType=TimestampType())
df = df.withColumn("timestamp_copy", f_timestamp_copy(col("timestamp")))
@pandas_udf(returnType=StringType())
def check_data(idx, timestamp, timestamp_copy):
import pandas as pd
msgs = []
is_equal = timestamp.isnull() # use this array to check values are equal
for i in range(len(idx)):
# Check that timestamps are as expected in the UDF
if (is_equal[i] and data[idx[i]][1] is None) or \
timestamp[i].to_pydatetime() == data[idx[i]][1]:
msgs.append(None)
else:
msgs.append(
"timestamp values are not equal (timestamp='%s': data[%d][1]='%s')"
% (timestamp[i], idx[i], data[idx[i]][1]))
return pd.Series(msgs)
result = df.withColumn("check_data", check_data(col("idx"), col("timestamp"),
col("timestamp_copy"))).collect()
# Check that collection values are correct
self.assertEquals(len(data), len(result))
for i in range(len(result)):
self.assertEquals(data[i][1], result[i][1]) # "timestamp" col
self.assertEquals(data[i][1], result[i][2]) # "timestamp_copy" col
self.assertIsNone(result[i][3]) # "check_data" col
def test_vectorized_udf_return_timestamp_tz(self):
from pyspark.sql.functions import pandas_udf, col
import pandas as pd
df = self.spark.range(10)
@pandas_udf(returnType=TimestampType())
def gen_timestamps(id):
ts = [pd.Timestamp(i, unit='D', tz='America/Los_Angeles') for i in id]
return pd.Series(ts)
result = df.withColumn("ts", gen_timestamps(col("id"))).collect()
spark_ts_t = TimestampType()
for r in result:
i, ts = r
ts_tz = pd.Timestamp(i, unit='D', tz='America/Los_Angeles').to_pydatetime()
expected = spark_ts_t.fromInternal(spark_ts_t.toInternal(ts_tz))
self.assertEquals(expected, ts)
def test_vectorized_udf_check_config(self):
from pyspark.sql.functions import pandas_udf, col
import pandas as pd
with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": 3}):
df = self.spark.range(10, numPartitions=1)
@pandas_udf(returnType=LongType())
def check_records_per_batch(x):
return pd.Series(x.size).repeat(x.size)
result = df.select(check_records_per_batch(col("id"))).collect()
for (r,) in result:
self.assertTrue(r <= 3)
def test_vectorized_udf_timestamps_respect_session_timezone(self):
from pyspark.sql.functions import pandas_udf, col
from datetime import datetime
import pandas as pd
schema = StructType([
StructField("idx", LongType(), True),
StructField("timestamp", TimestampType(), True)])
data = [(1, datetime(1969, 1, 1, 1, 1, 1)),
(2, datetime(2012, 2, 2, 2, 2, 2)),
(3, None),
(4, datetime(2100, 3, 3, 3, 3, 3))]
df = self.spark.createDataFrame(data, schema=schema)
f_timestamp_copy = pandas_udf(lambda ts: ts, TimestampType())
internal_value = pandas_udf(
lambda ts: ts.apply(lambda ts: ts.value if ts is not pd.NaT else None), LongType())
timezone = "America/New_York"
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": False,
"spark.sql.session.timeZone": timezone}):
df_la = df.withColumn("tscopy", f_timestamp_copy(col("timestamp"))) \
.withColumn("internal_value", internal_value(col("timestamp")))
result_la = df_la.select(col("idx"), col("internal_value")).collect()
            # Correct result_la by adjusting the 3 hour difference between Los Angeles
            # and New York, expressed in nanoseconds (the unit of pandas Timestamp.value)
            diff = 3 * 60 * 60 * 1000 * 1000 * 1000
result_la_corrected = \
df_la.select(col("idx"), col("tscopy"), col("internal_value") + diff).collect()
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": True,
"spark.sql.session.timeZone": timezone}):
df_ny = df.withColumn("tscopy", f_timestamp_copy(col("timestamp"))) \
.withColumn("internal_value", internal_value(col("timestamp")))
result_ny = df_ny.select(col("idx"), col("tscopy"), col("internal_value")).collect()
self.assertNotEqual(result_ny, result_la)
self.assertEqual(result_ny, result_la_corrected)
def test_nondeterministic_vectorized_udf(self):
# Test that nondeterministic UDFs are evaluated only once in chained UDF evaluations
from pyspark.sql.functions import udf, pandas_udf, col
@pandas_udf('double')
def plus_ten(v):
return v + 10
random_udf = self.nondeterministic_vectorized_udf
df = self.spark.range(10).withColumn('rand', random_udf(col('id')))
result1 = df.withColumn('plus_ten(rand)', plus_ten(df['rand'])).toPandas()
self.assertEqual(random_udf.deterministic, False)
self.assertTrue(result1['plus_ten(rand)'].equals(result1['rand'] + 10))
def test_nondeterministic_vectorized_udf_in_aggregate(self):
from pyspark.sql.functions import pandas_udf, sum
df = self.spark.range(10)
random_udf = self.nondeterministic_vectorized_udf
with QuietTest(self.sc):
with self.assertRaisesRegexp(AnalysisException, 'nondeterministic'):
df.groupby(df.id).agg(sum(random_udf(df.id))).collect()
with self.assertRaisesRegexp(AnalysisException, 'nondeterministic'):
df.agg(sum(random_udf(df.id))).collect()
def test_register_vectorized_udf_basic(self):
from pyspark.rdd import PythonEvalType
from pyspark.sql.functions import pandas_udf, col, expr
df = self.spark.range(10).select(
col('id').cast('int').alias('a'),
col('id').cast('int').alias('b'))
original_add = pandas_udf(lambda x, y: x + y, IntegerType())
self.assertEqual(original_add.deterministic, True)
self.assertEqual(original_add.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
new_add = self.spark.catalog.registerFunction("add1", original_add)
res1 = df.select(new_add(col('a'), col('b')))
res2 = self.spark.sql(
"SELECT add1(t.a, t.b) FROM (SELECT id as a, id as b FROM range(10)) t")
expected = df.select(expr('a + b'))
self.assertEquals(expected.collect(), res1.collect())
self.assertEquals(expected.collect(), res2.collect())
# Regression test for SPARK-23314
def test_timestamp_dst(self):
from pyspark.sql.functions import pandas_udf
# Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am
dt = [datetime.datetime(2015, 11, 1, 0, 30),
datetime.datetime(2015, 11, 1, 1, 30),
datetime.datetime(2015, 11, 1, 2, 30)]
df = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
foo_udf = pandas_udf(lambda x: x, 'timestamp')
result = df.withColumn('time', foo_udf(df.time))
self.assertEquals(df.collect(), result.collect())
@unittest.skipIf(sys.version_info[:2] < (3, 5), "Type hints are supported from Python 3.5.")
def test_type_annotation(self):
from pyspark.sql.functions import pandas_udf
# Regression test to check if type hints can be used. See SPARK-23569.
# Note that it throws an error during compilation in lower Python versions if 'exec'
# is not used. Also, note that we explicitly use another dictionary to avoid modifications
# in the current 'locals()'.
#
# Hyukjin: I think it's an ugly way to test issues about syntax specific in
# higher versions of Python, which we shouldn't encourage. This was the last resort
# I could come up with at that time.
_locals = {}
exec(
"import pandas as pd\ndef noop(col: pd.Series) -> pd.Series: return col",
_locals)
df = self.spark.range(1).select(pandas_udf(f=_locals['noop'], returnType='bigint')('id'))
self.assertEqual(df.first()[0], 0)
def test_mixed_udf(self):
import pandas as pd
from pyspark.sql.functions import col, udf, pandas_udf
df = self.spark.range(0, 1).toDF('v')
# Test mixture of multiple UDFs and Pandas UDFs.
@udf('int')
def f1(x):
assert type(x) == int
return x + 1
@pandas_udf('int')
def f2(x):
assert type(x) == pd.Series
return x + 10
@udf('int')
def f3(x):
assert type(x) == int
return x + 100
@pandas_udf('int')
def f4(x):
assert type(x) == pd.Series
return x + 1000
# Test single expression with chained UDFs
df_chained_1 = df.withColumn('f2_f1', f2(f1(df['v'])))
df_chained_2 = df.withColumn('f3_f2_f1', f3(f2(f1(df['v']))))
df_chained_3 = df.withColumn('f4_f3_f2_f1', f4(f3(f2(f1(df['v'])))))
df_chained_4 = df.withColumn('f4_f2_f1', f4(f2(f1(df['v']))))
df_chained_5 = df.withColumn('f4_f3_f1', f4(f3(f1(df['v']))))
expected_chained_1 = df.withColumn('f2_f1', df['v'] + 11)
expected_chained_2 = df.withColumn('f3_f2_f1', df['v'] + 111)
expected_chained_3 = df.withColumn('f4_f3_f2_f1', df['v'] + 1111)
expected_chained_4 = df.withColumn('f4_f2_f1', df['v'] + 1011)
expected_chained_5 = df.withColumn('f4_f3_f1', df['v'] + 1101)
self.assertEquals(expected_chained_1.collect(), df_chained_1.collect())
self.assertEquals(expected_chained_2.collect(), df_chained_2.collect())
self.assertEquals(expected_chained_3.collect(), df_chained_3.collect())
self.assertEquals(expected_chained_4.collect(), df_chained_4.collect())
self.assertEquals(expected_chained_5.collect(), df_chained_5.collect())
# Test multiple mixed UDF expressions in a single projection
df_multi_1 = df \
.withColumn('f1', f1(col('v'))) \
.withColumn('f2', f2(col('v'))) \
.withColumn('f3', f3(col('v'))) \
.withColumn('f4', f4(col('v'))) \
.withColumn('f2_f1', f2(col('f1'))) \
.withColumn('f3_f1', f3(col('f1'))) \
.withColumn('f4_f1', f4(col('f1'))) \
.withColumn('f3_f2', f3(col('f2'))) \
.withColumn('f4_f2', f4(col('f2'))) \
.withColumn('f4_f3', f4(col('f3'))) \
.withColumn('f3_f2_f1', f3(col('f2_f1'))) \
.withColumn('f4_f2_f1', f4(col('f2_f1'))) \
.withColumn('f4_f3_f1', f4(col('f3_f1'))) \
.withColumn('f4_f3_f2', f4(col('f3_f2'))) \
.withColumn('f4_f3_f2_f1', f4(col('f3_f2_f1')))
# Test mixed udfs in a single expression
df_multi_2 = df \
.withColumn('f1', f1(col('v'))) \
.withColumn('f2', f2(col('v'))) \
.withColumn('f3', f3(col('v'))) \
.withColumn('f4', f4(col('v'))) \
.withColumn('f2_f1', f2(f1(col('v')))) \
.withColumn('f3_f1', f3(f1(col('v')))) \
.withColumn('f4_f1', f4(f1(col('v')))) \
.withColumn('f3_f2', f3(f2(col('v')))) \
.withColumn('f4_f2', f4(f2(col('v')))) \
.withColumn('f4_f3', f4(f3(col('v')))) \
.withColumn('f3_f2_f1', f3(f2(f1(col('v'))))) \
.withColumn('f4_f2_f1', f4(f2(f1(col('v'))))) \
.withColumn('f4_f3_f1', f4(f3(f1(col('v'))))) \
.withColumn('f4_f3_f2', f4(f3(f2(col('v'))))) \
.withColumn('f4_f3_f2_f1', f4(f3(f2(f1(col('v'))))))
expected = df \
.withColumn('f1', df['v'] + 1) \
.withColumn('f2', df['v'] + 10) \
.withColumn('f3', df['v'] + 100) \
.withColumn('f4', df['v'] + 1000) \
.withColumn('f2_f1', df['v'] + 11) \
.withColumn('f3_f1', df['v'] + 101) \
.withColumn('f4_f1', df['v'] + 1001) \
.withColumn('f3_f2', df['v'] + 110) \
.withColumn('f4_f2', df['v'] + 1010) \
.withColumn('f4_f3', df['v'] + 1100) \
.withColumn('f3_f2_f1', df['v'] + 111) \
.withColumn('f4_f2_f1', df['v'] + 1011) \
.withColumn('f4_f3_f1', df['v'] + 1101) \
.withColumn('f4_f3_f2', df['v'] + 1110) \
.withColumn('f4_f3_f2_f1', df['v'] + 1111)
self.assertEquals(expected.collect(), df_multi_1.collect())
self.assertEquals(expected.collect(), df_multi_2.collect())
def test_mixed_udf_and_sql(self):
import pandas as pd
from pyspark.sql import Column
from pyspark.sql.functions import udf, pandas_udf
df = self.spark.range(0, 1).toDF('v')
# Test mixture of UDFs, Pandas UDFs and SQL expression.
@udf('int')
def f1(x):
assert type(x) == int
return x + 1
def f2(x):
assert type(x) == Column
return x + 10
@pandas_udf('int')
def f3(x):
assert type(x) == pd.Series
return x + 100
df1 = df.withColumn('f1', f1(df['v'])) \
.withColumn('f2', f2(df['v'])) \
.withColumn('f3', f3(df['v'])) \
.withColumn('f1_f2', f1(f2(df['v']))) \
.withColumn('f1_f3', f1(f3(df['v']))) \
.withColumn('f2_f1', f2(f1(df['v']))) \
.withColumn('f2_f3', f2(f3(df['v']))) \
.withColumn('f3_f1', f3(f1(df['v']))) \
.withColumn('f3_f2', f3(f2(df['v']))) \
.withColumn('f1_f2_f3', f1(f2(f3(df['v'])))) \
.withColumn('f1_f3_f2', f1(f3(f2(df['v'])))) \
.withColumn('f2_f1_f3', f2(f1(f3(df['v'])))) \
.withColumn('f2_f3_f1', f2(f3(f1(df['v'])))) \
.withColumn('f3_f1_f2', f3(f1(f2(df['v'])))) \
.withColumn('f3_f2_f1', f3(f2(f1(df['v']))))
expected = df.withColumn('f1', df['v'] + 1) \
.withColumn('f2', df['v'] + 10) \
.withColumn('f3', df['v'] + 100) \
.withColumn('f1_f2', df['v'] + 11) \
.withColumn('f1_f3', df['v'] + 101) \
.withColumn('f2_f1', df['v'] + 11) \
.withColumn('f2_f3', df['v'] + 110) \
.withColumn('f3_f1', df['v'] + 101) \
.withColumn('f3_f2', df['v'] + 110) \
.withColumn('f1_f2_f3', df['v'] + 111) \
.withColumn('f1_f3_f2', df['v'] + 111) \
.withColumn('f2_f1_f3', df['v'] + 111) \
.withColumn('f2_f3_f1', df['v'] + 111) \
.withColumn('f3_f1_f2', df['v'] + 111) \
.withColumn('f3_f2_f1', df['v'] + 111)
self.assertEquals(expected.collect(), df1.collect())
# SPARK-24721
@unittest.skipIf(not _test_compiled, _test_not_compiled_message)
def test_datasource_with_udf(self):
# Same as SQLTests.test_datasource_with_udf, but with Pandas UDF
        # This needs to be a separate test because the Arrow dependency is optional
import pandas as pd
import numpy as np
from pyspark.sql.functions import pandas_udf, lit, col
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
self.spark.range(1).write.mode("overwrite").format('csv').save(path)
filesource_df = self.spark.read.option('inferSchema', True).csv(path).toDF('i')
datasource_df = self.spark.read \
.format("org.apache.spark.sql.sources.SimpleScanSource") \
.option('from', 0).option('to', 1).load().toDF('i')
datasource_v2_df = self.spark.read \
.format("org.apache.spark.sql.sources.v2.SimpleDataSourceV2") \
.load().toDF('i', 'j')
c1 = pandas_udf(lambda x: x + 1, 'int')(lit(1))
c2 = pandas_udf(lambda x: x + 1, 'int')(col('i'))
f1 = pandas_udf(lambda x: pd.Series(np.repeat(False, len(x))), 'boolean')(lit(1))
f2 = pandas_udf(lambda x: pd.Series(np.repeat(False, len(x))), 'boolean')(col('i'))
for df in [filesource_df, datasource_df, datasource_v2_df]:
result = df.withColumn('c', c1)
expected = df.withColumn('c', lit(2))
self.assertEquals(expected.collect(), result.collect())
for df in [filesource_df, datasource_df, datasource_v2_df]:
result = df.withColumn('c', c2)
expected = df.withColumn('c', col('i') + 1)
self.assertEquals(expected.collect(), result.collect())
for df in [filesource_df, datasource_df, datasource_v2_df]:
for f in [f1, f2]:
result = df.filter(f)
self.assertEquals(0, result.count())
finally:
shutil.rmtree(path)
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class GroupedMapPandasUDFTests(ReusedSQLTestCase):
@property
def data(self):
from pyspark.sql.functions import array, explode, col, lit
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i) for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))).drop('vs')
def test_supported_types(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType, array, col
df = self.data.withColumn("arr", array(col("id")))
# Different forms of group map pandas UDF, results of these are the same
output_schema = StructType(
[StructField('id', LongType()),
StructField('v', IntegerType()),
StructField('arr', ArrayType(LongType())),
StructField('v1', DoubleType()),
StructField('v2', LongType())])
udf1 = pandas_udf(
lambda pdf: pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),
output_schema,
PandasUDFType.GROUPED_MAP
)
udf2 = pandas_udf(
lambda _, pdf: pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),
output_schema,
PandasUDFType.GROUPED_MAP
)
udf3 = pandas_udf(
lambda key, pdf: pdf.assign(id=key[0], v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),
output_schema,
PandasUDFType.GROUPED_MAP
)
result1 = df.groupby('id').apply(udf1).sort('id').toPandas()
expected1 = df.toPandas().groupby('id').apply(udf1.func).reset_index(drop=True)
result2 = df.groupby('id').apply(udf2).sort('id').toPandas()
expected2 = expected1
result3 = df.groupby('id').apply(udf3).sort('id').toPandas()
expected3 = expected1
self.assertPandasEqual(expected1, result1)
self.assertPandasEqual(expected2, result2)
self.assertPandasEqual(expected3, result3)
def test_array_type_correct(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType, array, col
df = self.data.withColumn("arr", array(col("id"))).repartition(1, "id")
output_schema = StructType(
[StructField('id', LongType()),
StructField('v', IntegerType()),
StructField('arr', ArrayType(LongType()))])
udf = pandas_udf(
lambda pdf: pdf,
output_schema,
PandasUDFType.GROUPED_MAP
)
result = df.groupby('id').apply(udf).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(udf.func).reset_index(drop=True)
self.assertPandasEqual(expected, result)
def test_register_grouped_map_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
foo_udf = pandas_udf(lambda x: x, "id long", PandasUDFType.GROUPED_MAP)
with QuietTest(self.sc):
with self.assertRaisesRegexp(ValueError, 'f must be either SQL_BATCHED_UDF or '
'SQL_SCALAR_PANDAS_UDF'):
self.spark.catalog.registerFunction("foo_udf", foo_udf)
def test_decorator(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = self.data
@pandas_udf(
'id long, v int, v1 double, v2 long',
PandasUDFType.GROUPED_MAP
)
def foo(pdf):
return pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id)
result = df.groupby('id').apply(foo).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(foo.func).reset_index(drop=True)
self.assertPandasEqual(expected, result)
def test_coerce(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = self.data
foo = pandas_udf(
lambda pdf: pdf,
'id long, v double',
PandasUDFType.GROUPED_MAP
)
result = df.groupby('id').apply(foo).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(foo.func).reset_index(drop=True)
expected = expected.assign(v=expected.v.astype('float64'))
self.assertPandasEqual(expected, result)
def test_complex_groupby(self):
from pyspark.sql.functions import pandas_udf, col, PandasUDFType
df = self.data
@pandas_udf(
'id long, v int, norm double',
PandasUDFType.GROUPED_MAP
)
def normalize(pdf):
v = pdf.v
return pdf.assign(norm=(v - v.mean()) / v.std())
result = df.groupby(col('id') % 2 == 0).apply(normalize).sort('id', 'v').toPandas()
pdf = df.toPandas()
expected = pdf.groupby(pdf['id'] % 2 == 0).apply(normalize.func)
expected = expected.sort_values(['id', 'v']).reset_index(drop=True)
expected = expected.assign(norm=expected.norm.astype('float64'))
self.assertPandasEqual(expected, result)
def test_empty_groupby(self):
from pyspark.sql.functions import pandas_udf, col, PandasUDFType
df = self.data
@pandas_udf(
'id long, v int, norm double',
PandasUDFType.GROUPED_MAP
)
def normalize(pdf):
v = pdf.v
return pdf.assign(norm=(v - v.mean()) / v.std())
result = df.groupby().apply(normalize).sort('id', 'v').toPandas()
pdf = df.toPandas()
expected = normalize.func(pdf)
expected = expected.sort_values(['id', 'v']).reset_index(drop=True)
expected = expected.assign(norm=expected.norm.astype('float64'))
self.assertPandasEqual(expected, result)
def test_datatype_string(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = self.data
foo_udf = pandas_udf(
lambda pdf: pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),
'id long, v int, v1 double, v2 long',
PandasUDFType.GROUPED_MAP
)
result = df.groupby('id').apply(foo_udf).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(foo_udf.func).reset_index(drop=True)
self.assertPandasEqual(expected, result)
def test_wrong_return_type(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*grouped map Pandas UDF.*MapType'):
pandas_udf(
lambda pdf: pdf,
'id long, v map<int, int>',
PandasUDFType.GROUPED_MAP)
def test_wrong_args(self):
from pyspark.sql.functions import udf, pandas_udf, sum, PandasUDFType
df = self.data
with QuietTest(self.sc):
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(lambda x: x)
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(udf(lambda x: x, DoubleType()))
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(sum(df.v))
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(df.v + 1)
with self.assertRaisesRegexp(ValueError, 'Invalid function'):
df.groupby('id').apply(
pandas_udf(lambda: 1, StructType([StructField("d", DoubleType())])))
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(pandas_udf(lambda x, y: x, DoubleType()))
with self.assertRaisesRegexp(ValueError, 'Invalid udf.*GROUPED_MAP'):
df.groupby('id').apply(
pandas_udf(lambda x, y: x, DoubleType(), PandasUDFType.SCALAR))
def test_unsupported_types(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
schema = StructType(
[StructField("id", LongType(), True),
StructField("map", MapType(StringType(), IntegerType()), True)])
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*grouped map Pandas UDF.*MapType'):
pandas_udf(lambda x: x, schema, PandasUDFType.GROUPED_MAP)
schema = StructType(
[StructField("id", LongType(), True),
StructField("arr_ts", ArrayType(TimestampType()), True)])
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*grouped map Pandas UDF.*ArrayType.*TimestampType'):
pandas_udf(lambda x: x, schema, PandasUDFType.GROUPED_MAP)
# Regression test for SPARK-23314
def test_timestamp_dst(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
# Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am
dt = [datetime.datetime(2015, 11, 1, 0, 30),
datetime.datetime(2015, 11, 1, 1, 30),
datetime.datetime(2015, 11, 1, 2, 30)]
df = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
foo_udf = pandas_udf(lambda pdf: pdf, 'time timestamp', PandasUDFType.GROUPED_MAP)
result = df.groupby('time').apply(foo_udf).sort('time')
self.assertPandasEqual(df.toPandas(), result.toPandas())
def test_udf_with_key(self):
from pyspark.sql.functions import pandas_udf, col, PandasUDFType
df = self.data
pdf = df.toPandas()
def foo1(key, pdf):
import numpy as np
assert type(key) == tuple
assert type(key[0]) == np.int64
return pdf.assign(v1=key[0],
v2=pdf.v * key[0],
v3=pdf.v * pdf.id,
v4=pdf.v * pdf.id.mean())
def foo2(key, pdf):
import numpy as np
assert type(key) == tuple
assert type(key[0]) == np.int64
assert type(key[1]) == np.int32
return pdf.assign(v1=key[0],
v2=key[1],
v3=pdf.v * key[0],
v4=pdf.v + key[1])
def foo3(key, pdf):
assert type(key) == tuple
assert len(key) == 0
return pdf.assign(v1=pdf.v * pdf.id)
# v2 is int because numpy.int64 * pd.Series<int32> results in pd.Series<int32>
# v3 is long because pd.Series<int64> * pd.Series<int32> results in pd.Series<int64>
udf1 = pandas_udf(
foo1,
'id long, v int, v1 long, v2 int, v3 long, v4 double',
PandasUDFType.GROUPED_MAP)
udf2 = pandas_udf(
foo2,
'id long, v int, v1 long, v2 int, v3 int, v4 int',
PandasUDFType.GROUPED_MAP)
udf3 = pandas_udf(
foo3,
'id long, v int, v1 long',
PandasUDFType.GROUPED_MAP)
# Test groupby column
result1 = df.groupby('id').apply(udf1).sort('id', 'v').toPandas()
expected1 = pdf.groupby('id')\
.apply(lambda x: udf1.func((x.id.iloc[0],), x))\
.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected1, result1)
# Test groupby expression
result2 = df.groupby(df.id % 2).apply(udf1).sort('id', 'v').toPandas()
expected2 = pdf.groupby(pdf.id % 2)\
.apply(lambda x: udf1.func((x.id.iloc[0] % 2,), x))\
.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected2, result2)
# Test complex groupby
result3 = df.groupby(df.id, df.v % 2).apply(udf2).sort('id', 'v').toPandas()
expected3 = pdf.groupby([pdf.id, pdf.v % 2])\
.apply(lambda x: udf2.func((x.id.iloc[0], (x.v % 2).iloc[0],), x))\
.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected3, result3)
# Test empty groupby
result4 = df.groupby().apply(udf3).sort('id', 'v').toPandas()
expected4 = udf3.func((), pdf)
self.assertPandasEqual(expected4, result4)
def test_column_order(self):
from collections import OrderedDict
import pandas as pd
from pyspark.sql.functions import pandas_udf, PandasUDFType
# Helper function to set column names from a list
def rename_pdf(pdf, names):
pdf.rename(columns={old: new for old, new in
zip(pd_result.columns, names)}, inplace=True)
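        # Note: rename_pdf reads pd_result from the enclosing scope, so it is only
        # meaningful once pd_result has been assigned below (true at every call site).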
df = self.data
grouped_df = df.groupby('id')
grouped_pdf = df.toPandas().groupby('id')
# Function returns a pdf with required column names, but order could be arbitrary using dict
def change_col_order(pdf):
# Constructing a DataFrame from a dict should result in the same order,
# but use from_items to ensure the pdf column order is different than schema
return pd.DataFrame.from_items([
('id', pdf.id),
('u', pdf.v * 2),
('v', pdf.v)])
ordered_udf = pandas_udf(
change_col_order,
'id long, v int, u int',
PandasUDFType.GROUPED_MAP
)
# The UDF result should assign columns by name from the pdf
result = grouped_df.apply(ordered_udf).sort('id', 'v')\
.select('id', 'u', 'v').toPandas()
pd_result = grouped_pdf.apply(change_col_order)
expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected, result)
# Function returns a pdf with positional columns, indexed by range
def range_col_order(pdf):
# Create a DataFrame with positional columns, fix types to long
return pd.DataFrame(list(zip(pdf.id, pdf.v * 3, pdf.v)), dtype='int64')
range_udf = pandas_udf(
range_col_order,
'id long, u long, v long',
PandasUDFType.GROUPED_MAP
)
# The UDF result uses positional columns from the pdf
result = grouped_df.apply(range_udf).sort('id', 'v') \
.select('id', 'u', 'v').toPandas()
pd_result = grouped_pdf.apply(range_col_order)
rename_pdf(pd_result, ['id', 'u', 'v'])
expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected, result)
# Function returns a pdf with columns indexed with integers
def int_index(pdf):
return pd.DataFrame(OrderedDict([(0, pdf.id), (1, pdf.v * 4), (2, pdf.v)]))
int_index_udf = pandas_udf(
int_index,
'id long, u int, v int',
PandasUDFType.GROUPED_MAP
)
# The UDF result should assign columns by position of integer index
result = grouped_df.apply(int_index_udf).sort('id', 'v') \
.select('id', 'u', 'v').toPandas()
pd_result = grouped_pdf.apply(int_index)
rename_pdf(pd_result, ['id', 'u', 'v'])
expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected, result)
@pandas_udf('id long, v int', PandasUDFType.GROUPED_MAP)
def column_name_typo(pdf):
return pd.DataFrame({'iid': pdf.id, 'v': pdf.v})
@pandas_udf('id long, v int', PandasUDFType.GROUPED_MAP)
def invalid_positional_types(pdf):
return pd.DataFrame([(u'a', 1.2)])
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, "KeyError: 'id'"):
grouped_df.apply(column_name_typo).collect()
with self.assertRaisesRegexp(Exception, "No cast implemented"):
grouped_df.apply(invalid_positional_types).collect()
def test_positional_assignment_conf(self):
import pandas as pd
from pyspark.sql.functions import pandas_udf, PandasUDFType
with self.sql_conf({"spark.sql.execution.pandas.groupedMap.assignColumnsByPosition": True}):
@pandas_udf("a string, b float", PandasUDFType.GROUPED_MAP)
def foo(_):
return pd.DataFrame([('hi', 1)], columns=['x', 'y'])
df = self.data
result = df.groupBy('id').apply(foo).select('a', 'b').collect()
for r in result:
self.assertEqual(r.a, 'hi')
self.assertEqual(r.b, 1)
def test_self_join_with_pandas(self):
import pyspark.sql.functions as F
@F.pandas_udf('key long, col string', F.PandasUDFType.GROUPED_MAP)
def dummy_pandas_udf(df):
return df[['key', 'col']]
df = self.spark.createDataFrame([Row(key=1, col='A'), Row(key=1, col='B'),
Row(key=2, col='C')])
df_with_pandas = df.groupBy('key').apply(dummy_pandas_udf)
# this was throwing an AnalysisException before SPARK-24208
res = df_with_pandas.alias('temp0').join(df_with_pandas.alias('temp1'),
F.col('temp0.key') == F.col('temp1.key'))
self.assertEquals(res.count(), 5)
    def test_mixed_scalar_udfs_followed_by_groupby_apply(self):
import pandas as pd
from pyspark.sql.functions import udf, pandas_udf, PandasUDFType
df = self.spark.range(0, 10).toDF('v1')
df = df.withColumn('v2', udf(lambda x: x + 1, 'int')(df['v1'])) \
.withColumn('v3', pandas_udf(lambda x: x + 2, 'int')(df['v1']))
result = df.groupby() \
.apply(pandas_udf(lambda x: pd.DataFrame([x.sum().sum()]),
'sum int',
PandasUDFType.GROUPED_MAP))
self.assertEquals(result.collect()[0]['sum'], 165)
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class GroupedAggPandasUDFTests(ReusedSQLTestCase):
@property
def data(self):
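        # The test data: ids 0..9, each exploded into ten rows with
        # v = id + 20.0 .. id + 29.0 and a constant weight column w = 1.0.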
from pyspark.sql.functions import array, explode, col, lit
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i * 1.0) + col('id') for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))) \
.drop('vs') \
.withColumn('w', lit(1.0))
@property
def python_plus_one(self):
from pyspark.sql.functions import udf
@udf('double')
def plus_one(v):
assert isinstance(v, (int, float))
return v + 1
return plus_one
@property
def pandas_scalar_plus_two(self):
import pandas as pd
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.SCALAR)
def plus_two(v):
assert isinstance(v, pd.Series)
return v + 2
return plus_two
@property
def pandas_agg_mean_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def avg(v):
return v.mean()
return avg
@property
def pandas_agg_sum_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def sum(v):
return v.sum()
return sum
@property
def pandas_agg_weighted_mean_udf(self):
import numpy as np
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def weighted_mean(v, w):
return np.average(v, weights=w)
return weighted_mean
def test_manual(self):
from pyspark.sql.functions import pandas_udf, array
df = self.data
sum_udf = self.pandas_agg_sum_udf
mean_udf = self.pandas_agg_mean_udf
mean_arr_udf = pandas_udf(
self.pandas_agg_mean_udf.func,
ArrayType(self.pandas_agg_mean_udf.returnType),
self.pandas_agg_mean_udf.evalType)
result1 = df.groupby('id').agg(
sum_udf(df.v),
mean_udf(df.v),
mean_arr_udf(array(df.v))).sort('id')
expected1 = self.spark.createDataFrame(
[[0, 245.0, 24.5, [24.5]],
[1, 255.0, 25.5, [25.5]],
[2, 265.0, 26.5, [26.5]],
[3, 275.0, 27.5, [27.5]],
[4, 285.0, 28.5, [28.5]],
[5, 295.0, 29.5, [29.5]],
[6, 305.0, 30.5, [30.5]],
[7, 315.0, 31.5, [31.5]],
[8, 325.0, 32.5, [32.5]],
[9, 335.0, 33.5, [33.5]]],
['id', 'sum(v)', 'avg(v)', 'avg(array(v))'])
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_basic(self):
from pyspark.sql.functions import col, lit, sum, mean
df = self.data
weighted_mean_udf = self.pandas_agg_weighted_mean_udf
# Groupby one column and aggregate one UDF with literal
result1 = df.groupby('id').agg(weighted_mean_udf(df.v, lit(1.0))).sort('id')
expected1 = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort('id')
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
# Groupby one expression and aggregate one UDF with literal
result2 = df.groupby((col('id') + 1)).agg(weighted_mean_udf(df.v, lit(1.0)))\
.sort(df.id + 1)
expected2 = df.groupby((col('id') + 1))\
.agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort(df.id + 1)
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
# Groupby one column and aggregate one UDF without literal
result3 = df.groupby('id').agg(weighted_mean_udf(df.v, df.w)).sort('id')
expected3 = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, w)')).sort('id')
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
# Groupby one expression and aggregate one UDF without literal
result4 = df.groupby((col('id') + 1).alias('id'))\
.agg(weighted_mean_udf(df.v, df.w))\
.sort('id')
expected4 = df.groupby((col('id') + 1).alias('id'))\
.agg(mean(df.v).alias('weighted_mean(v, w)'))\
.sort('id')
self.assertPandasEqual(expected4.toPandas(), result4.toPandas())
def test_unsupported_types(self):
from pyspark.sql.types import DoubleType, MapType
from pyspark.sql.functions import pandas_udf, PandasUDFType
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, 'not supported'):
pandas_udf(
lambda x: x,
ArrayType(ArrayType(TimestampType())),
PandasUDFType.GROUPED_AGG)
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, 'not supported'):
@pandas_udf('mean double, std double', PandasUDFType.GROUPED_AGG)
def mean_and_std_udf(v):
return v.mean(), v.std()
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, 'not supported'):
@pandas_udf(MapType(DoubleType(), DoubleType()), PandasUDFType.GROUPED_AGG)
def mean_and_std_udf(v):
return {v.mean(): v.std()}
def test_alias(self):
from pyspark.sql.functions import mean
df = self.data
mean_udf = self.pandas_agg_mean_udf
result1 = df.groupby('id').agg(mean_udf(df.v).alias('mean_alias'))
expected1 = df.groupby('id').agg(mean(df.v).alias('mean_alias'))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_mixed_sql(self):
"""
Test mixing group aggregate pandas UDF with sql expression.
"""
from pyspark.sql.functions import sum, mean
df = self.data
sum_udf = self.pandas_agg_sum_udf
# Mix group aggregate pandas UDF with sql expression
result1 = (df.groupby('id')
.agg(sum_udf(df.v) + 1)
.sort('id'))
expected1 = (df.groupby('id')
.agg(sum(df.v) + 1)
.sort('id'))
# Mix group aggregate pandas UDF with sql expression (order swapped)
result2 = (df.groupby('id')
.agg(sum_udf(df.v + 1))
.sort('id'))
expected2 = (df.groupby('id')
.agg(sum(df.v + 1))
.sort('id'))
# Wrap group aggregate pandas UDF with two sql expressions
result3 = (df.groupby('id')
.agg(sum_udf(df.v + 1) + 2)
.sort('id'))
expected3 = (df.groupby('id')
.agg(sum(df.v + 1) + 2)
.sort('id'))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
def test_mixed_udfs(self):
"""
Test mixing group aggregate pandas UDF with python UDF and scalar pandas UDF.
"""
from pyspark.sql.functions import sum, mean
df = self.data
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
sum_udf = self.pandas_agg_sum_udf
# Mix group aggregate pandas UDF and python UDF
result1 = (df.groupby('id')
.agg(plus_one(sum_udf(df.v)))
.sort('id'))
expected1 = (df.groupby('id')
.agg(plus_one(sum(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and python UDF (order swapped)
result2 = (df.groupby('id')
.agg(sum_udf(plus_one(df.v)))
.sort('id'))
expected2 = (df.groupby('id')
.agg(sum(plus_one(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and scalar pandas UDF
result3 = (df.groupby('id')
.agg(sum_udf(plus_two(df.v)))
.sort('id'))
expected3 = (df.groupby('id')
.agg(sum(plus_two(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and scalar pandas UDF (order swapped)
result4 = (df.groupby('id')
.agg(plus_two(sum_udf(df.v)))
.sort('id'))
expected4 = (df.groupby('id')
.agg(plus_two(sum(df.v)))
.sort('id'))
# Wrap group aggregate pandas UDF with two python UDFs and use python UDF in groupby
result5 = (df.groupby(plus_one(df.id))
.agg(plus_one(sum_udf(plus_one(df.v))))
.sort('plus_one(id)'))
expected5 = (df.groupby(plus_one(df.id))
.agg(plus_one(sum(plus_one(df.v))))
.sort('plus_one(id)'))
        # Wrap group aggregate pandas UDF with two scalar pandas UDFs and use a scalar
        # pandas UDF in the groupby
result6 = (df.groupby(plus_two(df.id))
.agg(plus_two(sum_udf(plus_two(df.v))))
.sort('plus_two(id)'))
expected6 = (df.groupby(plus_two(df.id))
.agg(plus_two(sum(plus_two(df.v))))
.sort('plus_two(id)'))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
self.assertPandasEqual(expected4.toPandas(), result4.toPandas())
self.assertPandasEqual(expected5.toPandas(), result5.toPandas())
self.assertPandasEqual(expected6.toPandas(), result6.toPandas())
def test_multiple_udfs(self):
"""
Test multiple group aggregate pandas UDFs in one agg function.
"""
from pyspark.sql.functions import col, lit, sum, mean
df = self.data
mean_udf = self.pandas_agg_mean_udf
sum_udf = self.pandas_agg_sum_udf
weighted_mean_udf = self.pandas_agg_weighted_mean_udf
result1 = (df.groupBy('id')
.agg(mean_udf(df.v),
sum_udf(df.v),
weighted_mean_udf(df.v, df.w))
.sort('id')
.toPandas())
expected1 = (df.groupBy('id')
.agg(mean(df.v),
sum(df.v),
mean(df.v).alias('weighted_mean(v, w)'))
.sort('id')
.toPandas())
self.assertPandasEqual(expected1, result1)
def test_complex_groupby(self):
from pyspark.sql.functions import lit, sum
df = self.data
sum_udf = self.pandas_agg_sum_udf
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
# groupby one expression
result1 = df.groupby(df.v % 2).agg(sum_udf(df.v))
expected1 = df.groupby(df.v % 2).agg(sum(df.v))
# empty groupby
result2 = df.groupby().agg(sum_udf(df.v))
expected2 = df.groupby().agg(sum(df.v))
# groupby one column and one sql expression
result3 = df.groupby(df.id, df.v % 2).agg(sum_udf(df.v)).orderBy(df.id, df.v % 2)
expected3 = df.groupby(df.id, df.v % 2).agg(sum(df.v)).orderBy(df.id, df.v % 2)
# groupby one python UDF
result4 = df.groupby(plus_one(df.id)).agg(sum_udf(df.v))
expected4 = df.groupby(plus_one(df.id)).agg(sum(df.v))
# groupby one scalar pandas UDF
result5 = df.groupby(plus_two(df.id)).agg(sum_udf(df.v))
expected5 = df.groupby(plus_two(df.id)).agg(sum(df.v))
# groupby one expression and one python UDF
result6 = df.groupby(df.v % 2, plus_one(df.id)).agg(sum_udf(df.v))
expected6 = df.groupby(df.v % 2, plus_one(df.id)).agg(sum(df.v))
# groupby one expression and one scalar pandas UDF
result7 = df.groupby(df.v % 2, plus_two(df.id)).agg(sum_udf(df.v)).sort('sum(v)')
expected7 = df.groupby(df.v % 2, plus_two(df.id)).agg(sum(df.v)).sort('sum(v)')
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
self.assertPandasEqual(expected4.toPandas(), result4.toPandas())
self.assertPandasEqual(expected5.toPandas(), result5.toPandas())
self.assertPandasEqual(expected6.toPandas(), result6.toPandas())
self.assertPandasEqual(expected7.toPandas(), result7.toPandas())
def test_complex_expressions(self):
from pyspark.sql.functions import col, sum
df = self.data
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
sum_udf = self.pandas_agg_sum_udf
# Test complex expressions with sql expression, python UDF and
# group aggregate pandas UDF
result1 = (df.withColumn('v1', plus_one(df.v))
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum_udf(col('v')),
sum_udf(col('v1') + 3),
sum_udf(col('v2')) + 5,
plus_one(sum_udf(col('v1'))),
sum_udf(plus_one(col('v2'))))
.sort('id')
.toPandas())
expected1 = (df.withColumn('v1', df.v + 1)
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum(col('v')),
sum(col('v1') + 3),
sum(col('v2')) + 5,
plus_one(sum(col('v1'))),
sum(plus_one(col('v2'))))
.sort('id')
.toPandas())
        # Test complex expressions with sql expression, scalar pandas UDF and
        # group aggregate pandas UDF
result2 = (df.withColumn('v1', plus_one(df.v))
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum_udf(col('v')),
sum_udf(col('v1') + 3),
sum_udf(col('v2')) + 5,
plus_two(sum_udf(col('v1'))),
sum_udf(plus_two(col('v2'))))
.sort('id')
.toPandas())
expected2 = (df.withColumn('v1', df.v + 1)
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum(col('v')),
sum(col('v1') + 3),
sum(col('v2')) + 5,
plus_two(sum(col('v1'))),
sum(plus_two(col('v2'))))
.sort('id')
.toPandas())
# Test sequential groupby aggregate
result3 = (df.groupby('id')
.agg(sum_udf(df.v).alias('v'))
.groupby('id')
.agg(sum_udf(col('v')))
.sort('id')
.toPandas())
expected3 = (df.groupby('id')
.agg(sum(df.v).alias('v'))
.groupby('id')
.agg(sum(col('v')))
.sort('id')
.toPandas())
self.assertPandasEqual(expected1, result1)
self.assertPandasEqual(expected2, result2)
self.assertPandasEqual(expected3, result3)
def test_retain_group_columns(self):
from pyspark.sql.functions import sum, lit, col
with self.sql_conf({"spark.sql.retainGroupColumns": False}):
df = self.data
sum_udf = self.pandas_agg_sum_udf
result1 = df.groupby(df.id).agg(sum_udf(df.v))
expected1 = df.groupby(df.id).agg(sum(df.v))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_array_type(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = self.data
array_udf = pandas_udf(lambda x: [1.0, 2.0], 'array<double>', PandasUDFType.GROUPED_AGG)
result1 = df.groupby('id').agg(array_udf(df['v']).alias('v2'))
self.assertEquals(result1.first()['v2'], [1.0, 2.0])
def test_invalid_args(self):
from pyspark.sql.functions import mean
df = self.data
plus_one = self.python_plus_one
mean_udf = self.pandas_agg_mean_udf
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'nor.*aggregate function'):
df.groupby(df.id).agg(plus_one(df.v)).collect()
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'aggregate function.*argument.*aggregate function'):
df.groupby(df.id).agg(mean_udf(mean_udf(df.v))).collect()
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'mixture.*aggregate function.*group aggregate pandas UDF'):
df.groupby(df.id).agg(mean_udf(df.v), mean(df.v)).collect()
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class WindowPandasUDFTests(ReusedSQLTestCase):
@property
def data(self):
from pyspark.sql.functions import array, explode, col, lit
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i * 1.0) + col('id') for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))) \
.drop('vs') \
.withColumn('w', lit(1.0))
@property
def python_plus_one(self):
from pyspark.sql.functions import udf
return udf(lambda v: v + 1, 'double')
@property
def pandas_scalar_time_two(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
return pandas_udf(lambda v: v * 2, 'double')
@property
def pandas_agg_mean_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def avg(v):
return v.mean()
return avg
@property
def pandas_agg_max_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def max(v):
return v.max()
return max
@property
def pandas_agg_min_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def min(v):
return v.min()
return min
@property
def unbounded_window(self):
return Window.partitionBy('id') \
.rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
@property
def ordered_window(self):
return Window.partitionBy('id').orderBy('v')
@property
def unpartitioned_window(self):
return Window.partitionBy()
def test_simple(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType, percent_rank, mean, max
df = self.data
w = self.unbounded_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('mean_v', mean_udf(df['v']).over(w))
expected1 = df.withColumn('mean_v', mean(df['v']).over(w))
result2 = df.select(mean_udf(df['v']).over(w))
expected2 = df.select(mean(df['v']).over(w))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
def test_multiple_udfs(self):
from pyspark.sql.functions import max, min, mean
df = self.data
w = self.unbounded_window
result1 = df.withColumn('mean_v', self.pandas_agg_mean_udf(df['v']).over(w)) \
.withColumn('max_v', self.pandas_agg_max_udf(df['v']).over(w)) \
.withColumn('min_w', self.pandas_agg_min_udf(df['w']).over(w))
expected1 = df.withColumn('mean_v', mean(df['v']).over(w)) \
.withColumn('max_v', max(df['v']).over(w)) \
.withColumn('min_w', min(df['w']).over(w))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_replace_existing(self):
from pyspark.sql.functions import mean
df = self.data
w = self.unbounded_window
result1 = df.withColumn('v', self.pandas_agg_mean_udf(df['v']).over(w))
expected1 = df.withColumn('v', mean(df['v']).over(w))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_mixed_sql(self):
from pyspark.sql.functions import mean
df = self.data
w = self.unbounded_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('v', mean_udf(df['v'] * 2).over(w) + 1)
expected1 = df.withColumn('v', mean(df['v'] * 2).over(w) + 1)
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_mixed_udf(self):
from pyspark.sql.functions import mean
df = self.data
w = self.unbounded_window
plus_one = self.python_plus_one
time_two = self.pandas_scalar_time_two
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn(
'v2',
plus_one(mean_udf(plus_one(df['v'])).over(w)))
expected1 = df.withColumn(
'v2',
plus_one(mean(plus_one(df['v'])).over(w)))
result2 = df.withColumn(
'v2',
time_two(mean_udf(time_two(df['v'])).over(w)))
expected2 = df.withColumn(
'v2',
time_two(mean(time_two(df['v'])).over(w)))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
def test_without_partitionBy(self):
from pyspark.sql.functions import mean
df = self.data
w = self.unpartitioned_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('v2', mean_udf(df['v']).over(w))
expected1 = df.withColumn('v2', mean(df['v']).over(w))
result2 = df.select(mean_udf(df['v']).over(w))
expected2 = df.select(mean(df['v']).over(w))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
def test_mixed_sql_and_udf(self):
from pyspark.sql.functions import max, min, rank, col
df = self.data
w = self.unbounded_window
ow = self.ordered_window
max_udf = self.pandas_agg_max_udf
min_udf = self.pandas_agg_min_udf
result1 = df.withColumn('v_diff', max_udf(df['v']).over(w) - min_udf(df['v']).over(w))
expected1 = df.withColumn('v_diff', max(df['v']).over(w) - min(df['v']).over(w))
# Test mixing sql window function and window udf in the same expression
result2 = df.withColumn('v_diff', max_udf(df['v']).over(w) - min(df['v']).over(w))
expected2 = expected1
# Test chaining sql aggregate function and udf
result3 = df.withColumn('max_v', max_udf(df['v']).over(w)) \
.withColumn('min_v', min(df['v']).over(w)) \
.withColumn('v_diff', col('max_v') - col('min_v')) \
.drop('max_v', 'min_v')
expected3 = expected1
# Test mixing sql window function and udf
result4 = df.withColumn('max_v', max_udf(df['v']).over(w)) \
.withColumn('rank', rank().over(ow))
expected4 = df.withColumn('max_v', max(df['v']).over(w)) \
.withColumn('rank', rank().over(ow))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
self.assertPandasEqual(expected4.toPandas(), result4.toPandas())
def test_array_type(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = self.data
w = self.unbounded_window
array_udf = pandas_udf(lambda x: [1.0, 2.0], 'array<double>', PandasUDFType.GROUPED_AGG)
result1 = df.withColumn('v2', array_udf(df['v']).over(w))
self.assertEquals(result1.first()['v2'], [1.0, 2.0])
def test_invalid_args(self):
from pyspark.sql.functions import mean, pandas_udf, PandasUDFType
df = self.data
w = self.unbounded_window
ow = self.ordered_window
mean_udf = self.pandas_agg_mean_udf
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'.*not supported within a window function'):
foo_udf = pandas_udf(lambda x: x, 'v double', PandasUDFType.GROUPED_MAP)
df.withColumn('v2', foo_udf(df['v']).over(w))
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'.*Only unbounded window frame is supported.*'):
df.withColumn('mean_v', mean_udf(df['v']).over(ow))
if __name__ == "__main__":
from pyspark.sql.tests import *
if xmlrunner:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'), verbosity=2)
else:
unittest.main(verbosity=2)
| sahilTakiar/spark | python/pyspark/sql/tests.py | Python | apache-2.0 | 276,859 |
from django.contrib import admin
from .models import CodilityTask
admin.site.register(CodilityTask)
| JacekKarnasiewicz/HomePage | apps/codility/admin.py | Python | mit | 102 |
#!/usr/bin/env python3
import sys
import math
#class Shape:
# def __init__(self, name):
class Triangle:
"""equilateral triangle class"""
def __init__(self, name):
"""creating triangle"""
self.name= name
self.shape_type ='Triangle'
#self.edge_length=2
self.triangle_perimeter=0
self.edge_length=2
self.sides=3
self.allies= ['Circle']
self.enemies= ['Square']
self.role= ['aggressive']
    def area(self):
        """return the area of the equilateral triangle"""
        # Area of an equilateral triangle: (sqrt(3)/4) * edge_length**2.
        # Returned directly so the method is not overwritten by its result.
        return ((3**0.5)/4) * self.edge_length**2
def perimeter(self):
"""creating method for perimeter"""
self.triangle_perimeter += self.sides * self.edge_length
#return self.set_perimeter
#self.triangle_perimeter += self.set_perimeter
return self.triangle_perimeter
def update_edge_length(self, change):
"""updating edge length"""
self.edge_length += change
def add_ally (self, shape_object):
"""adding ally shape"""
if shape_object not in self.allies:
self.allies.append(shape_object)
def add_enemy (self, shape_object):
"""adding enemy"""
if shape_object not in self.enemies:
self.enemies.append(shape_object)
if __name__== "__main__":
a= Triangle('Tom')
a.add_ally('Circle')
a.add_enemy('Square')
print (a.triangle_perimeter)
print (a.shape_type)
print (a.allies)
print (str(a.name) + " is a " + str(a.shape_type)+". Its enemies are "+ str(','.join(a.enemies))+" and its allies are " + str(','.join(a.allies))+".")
class Square:
"""Square class"""
shape_type ='Square'
edge_length=2
sides=4
allies= []
enemies= []
role= ['complacent']
    def __init__(self, name):
        """creating square"""
        self.name = name
    def area(self):
        """return the area of the square"""
        # Returned directly so the method is not overwritten by its result.
        return self.__class__.edge_length**2
    def perimeter(self):
        """return the perimeter of the square"""
        # Returned directly so the method is not overwritten by its result.
        return self.sides * self.__class__.edge_length
def update_edge_length(self, change):
"""updating edge length"""
self.__class__.edge_length += change
def add_ally (self, shape_object):
"""adding ally shape"""
if shape_object not in self.__class__.allies:
self.__class__.allies.append(shape_object)
def add_enemy (self, shape_object):
"""adding enemy"""
if shape_object not in self.__class__.enemies:
self.__class__.enemies.append(shape_object)
class Circle:
"""Circle class"""
shape_type ='Circle'
edge_length=2
sides=1
allies= []
enemies= []
name= []
role= ['wise']
    def __init__(self, name):
        """creating circle"""
        self.name = name
    def area(self):
        """return the area of the circle (edge_length is used as the radius)"""
        # Returned directly so the method is not overwritten by its result.
        return math.pi * self.__class__.edge_length**2
    def perimeter(self):
        """return the circumference of the circle"""
        # Returned directly so the method is not overwritten by its result.
        return 2 * math.pi * self.__class__.edge_length
def update_edge_length(self, change):
"""updating edge length"""
self.__class__.edge_length += change
def add_ally (self, shape_object):
"""adding ally shape"""
if shape_object not in self.__class__.allies:
self.__class__.allies.append(shape_object)
def add_enemy (self, shape_object):
"""adding enemy"""
if shape_object not in self.__class__.enemies:
self.__class__.enemies.append(shape_object)
| artopping/nyu-python | course2/npd_c2_a3/redo_classes.py | Python | mit | 3,742 |
"""
Get your own public IP address or that of any host.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.dnsip/
"""
import asyncio
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.const import STATE_UNKNOWN
from homeassistant.components.sensor import PLATFORM_SCHEMA
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['aiodns==1.1.1']
_LOGGER = logging.getLogger(__name__)
CONF_HOSTNAME = 'hostname'
CONF_RESOLVER = 'resolver'
CONF_RESOLVER_IPV6 = 'resolver_ipv6'
CONF_IPV6 = 'ipv6'
DEFAULT_HOSTNAME = 'myip.opendns.com'
DEFAULT_RESOLVER = '208.67.222.222'
DEFAULT_RESOLVER_IPV6 = '2620:0:ccc::2'
DEFAULT_IPV6 = False
SCAN_INTERVAL = timedelta(seconds=120)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_HOSTNAME, default=DEFAULT_HOSTNAME): cv.string,
vol.Optional(CONF_RESOLVER, default=DEFAULT_RESOLVER): cv.string,
vol.Optional(CONF_RESOLVER_IPV6, default=DEFAULT_RESOLVER_IPV6): cv.string,
vol.Optional(CONF_IPV6, default=DEFAULT_IPV6): cv.boolean,
})
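# Illustrative configuration.yaml entry (not from the original source; the keys
# mirror PLATFORM_SCHEMA above and every one of them is optional):
#
# sensor:
#   - platform: dnsip
#     hostname: myip.opendns.com
#     resolver: 208.67.222.222
#     ipv6: false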
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Set up the DNS IP sensor."""
hostname = config.get(CONF_HOSTNAME)
ipv6 = config.get(CONF_IPV6)
if ipv6:
resolver = config.get(CONF_RESOLVER_IPV6)
else:
resolver = config.get(CONF_RESOLVER)
async_add_devices([WanIpSensor(
hass, hostname, resolver, ipv6)], True)
class WanIpSensor(Entity):
"""Implementation of a DNS IP sensor."""
def __init__(self, hass, hostname, resolver, ipv6):
"""Initialize the sensor."""
import aiodns
self.hass = hass
self._name = hostname
self.resolver = aiodns.DNSResolver(loop=self.hass.loop)
self.resolver.nameservers = [resolver]
self.querytype = 'AAAA' if ipv6 else 'A'
self._state = STATE_UNKNOWN
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the current DNS IP address for hostname."""
return self._state
@asyncio.coroutine
def async_update(self):
"""Get the current DNS IP address for hostname."""
response = yield from self.resolver.query(self._name, self.querytype)
if response:
self._state = response[0].host
else:
self._state = STATE_UNKNOWN
| MungoRae/home-assistant | homeassistant/components/sensor/dnsip.py | Python | apache-2.0 | 2,565 |
from keras.datasets import cifar100
from keras.utils import np_utils
from keras import backend
from keras.models import Sequential
from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten
from keras.utils import plot_model
from keras.callbacks import EarlyStopping
from keras.optimizers import SGD, RMSprop, Adagrad, Adadelta, Adam, Adamax, Nadam
import matplotlib.pyplot as plt
N_CLASSES = 100
SAMPLE_WIDTH = 32
SAMPLE_HEIGHT = 32
# Parameters
BATCH_SIZE = 100
N_EPOCHS = 10000 # We stop training when the validation loss converges; the training can take all the epochs it needs
VALIDATION_SPLIT = 0.2
VALIDATION_PATIENCE = 15
ACTIVATION = 'elu'
def plotOptions(results, title, ylabel, keys):
plt.gca().set_color_cycle(None)
plt.plot([], '--', color='black')
plt.plot([], color='black')
for i in results:
plt.plot(results[i]['h'].history[keys[0]])
plt.legend(['Training', 'Validation'] + results.keys(), loc='upper right')
plt.gca().set_color_cycle(None)
for i in results:
plt.plot(results[i]['h'].history[keys[1]], '--')
plt.title(title)
plt.ylabel(ylabel)
plt.xlabel('Epoch')
plt.show()
(x_train, y_train), (x_test, y_test) = cifar100.load_data(label_mode='fine')
# Normalizing the input.
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train = x_train / 255
x_test = x_test / 255
# One-hot encoding the labels.
y_train = np_utils.to_categorical(y_train, N_CLASSES)
y_test = np_utils.to_categorical(y_test, N_CLASSES)
# Reshaping the samples depending on which format the backend uses.
if backend.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 3, SAMPLE_WIDTH, SAMPLE_HEIGHT)
x_test = x_test.reshape(x_test.shape[0], 3, SAMPLE_WIDTH, SAMPLE_HEIGHT)
input_shape = (3, SAMPLE_WIDTH, SAMPLE_HEIGHT)
else:
x_train = x_train.reshape(x_train.shape[0], SAMPLE_WIDTH, SAMPLE_HEIGHT, 3)
x_test = x_test.reshape(x_test.shape[0], SAMPLE_WIDTH, SAMPLE_HEIGHT, 3)
input_shape = (SAMPLE_WIDTH, SAMPLE_HEIGHT, 3)
optimizers = {
'SGD': SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False),
'RMSProp': RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0),
'Adagrad': Adagrad(lr=0.01, epsilon=1e-08, decay=0.0),
'Adadelta': Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0),
'Adam': Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0),
'Adamax': Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0),
'Nadam': Nadam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004)
}
results = {}
for i in optimizers:
print '### Optimizer ' + i + ' ###'
# Defining the model.
model = Sequential()
model.add(Conv2D(27, (3, 3), activation=ACTIVATION, input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(81, (3, 3), activation=ACTIVATION))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(135, (3, 3), activation=ACTIVATION))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation=ACTIVATION))
model.add(Dense(128, activation=ACTIVATION))
model.add(Dense(N_CLASSES, activation='softmax'))
model.compile(optimizer=optimizers[i], loss='categorical_crossentropy', metrics=['accuracy'])
# Training the model.
stopper = EarlyStopping(monitor='val_loss', patience=VALIDATION_PATIENCE)
h = model.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=N_EPOCHS, callbacks=[stopper], validation_split=VALIDATION_SPLIT)
# Evaluating the model.
score = model.evaluate(x_test, y_test, verbose=0)
results[i] = {
'h': h,
'test_loss': score[0],
'test_acc': score[1]
}
print '### FINISH! ###'
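# Each printed result line below lists, for one optimizer, the final
# train_loss, train_acc, val_loss, val_acc, test_loss, test_acc (in that order).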
for i in results:
h = results[i]['h']
print i + ':'
result = [str(round(i, 4)) for i in [h.history['loss'][-1], h.history['acc'][-1], h.history['val_loss'][-1], h.history['val_acc'][-1], results[i]['test_loss'], results[i]['test_acc']]]
print ','.join(result)
# Plotting
plotOptions(results, 'Model loss', 'Loss', ['val_loss', 'loss'])
plotOptions(results, 'Model accuracy', 'Accuracy', ['val_acc', 'acc'])
| juanlao7/CIFAR100-CNN | optimizer.py | Python | mit | 4,299 |
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
"""A PoC that implements like 2% of the job of converting Spyne objects to
standard C++ classes."""
import sys
INDENT = ' '
class Object(object):
def __init__(self):
self.parent = None
self.comment_before = None
self.comment_after = None
def _comment_before_to_stream(self, ostr, indent):
if self.comment_before is None:
return
ostr.write("\n")
ostr.write(INDENT * indent)
ostr.write("/**\n")
ostr.write(INDENT * indent)
ostr.write(" *")
for line in self.comment_before.split('\n'):
ostr.write(" ")
ostr.write(line)
ostr.write('\n')
ostr.write(INDENT * indent)
ostr.write(" */")
ostr.write("\n")
def _comment_after_to_stream(self, ostr, indent):
if self.comment_after is None:
return
lines = self.comment_after.split('\n')
if len(lines) < 2:
ostr.write(" // ")
ostr.write(self.comment_after)
else:
ostr.write(INDENT * indent)
ostr.write("/**\n")
ostr.write(INDENT * indent)
ostr.write(" *")
for line in lines:
ostr.write(" ")
ostr.write(line)
ostr.write('\n')
ostr.write(INDENT * indent)
ostr.write(" */")
ostr.write("\n")
class Entry(Object):
def __init__(self, modifier=None):
super(Entry, self).__init__()
self.modifier = modifier
    def to_decl_stream(self, ostr, indent):
        raise NotImplementedError()
    def to_defn_stream(self, ostr, indent):
        raise NotImplementedError()
class Literal(Object):
def __init__(self, value):
super(Literal, self).__init__()
self.value = value
class StringLiteral(Literal):
def to_stream(self, ostr, indent):
self._comment_before_to_stream(ostr, indent)
ostr.write('"')
ostr.write(self.value) # TODO: escaping
ostr.write('"')
self._comment_after_to_stream(ostr, indent)
class DataMember(Entry):
def __init__(self, modifier, type, name, initializer=None):
super(DataMember, self).__init__(modifier)
self.type = type
self.name = name
self.initializer = initializer
def to_decl_stream(self, ostr, indent):
ostr.write(INDENT * indent)
if self.modifier is not None:
ostr.write(self.modifier)
ostr.write(" ")
ostr.write(self.type)
ostr.write(" ")
ostr.write(self.name)
if self.modifier != 'static' and self.initializer is not None:
ostr.write(" = ")
self.initializer.to_stream(ostr, indent)
ostr.write(";")
ostr.write("\n")
def to_defn_stream(self, ostr, indent):
if self.modifier != 'static':
return
self._comment_before_to_stream(ostr, indent)
ostr.write(INDENT * indent)
ostr.write(self.type)
ostr.write(" ")
parents = []
parent = self.parent
while parent is not None:
parents.insert(0, parent)
parent = parent.parent
for parent in parents:
ostr.write(parent.name)
ostr.write("::")
ostr.write(self.name)
if self.initializer is not None:
ostr.write(" = ")
self.initializer.to_stream(ostr, indent)
ostr.write(";")
ostr.write("\n")
self._comment_after_to_stream(ostr, indent)
class Class(Entry):
def __init__(self):
super(Class, self).__init__()
self.name = None
self.namespace = None
self.type = 'class'
self.public_entries = []
self.protected_entries = []
self.private_entries = []
def to_decl_stream(self, ostr, indent=0):
if self.namespace is not None:
ostr.write("namespace ")
ostr.write(self.namespace)
ostr.write(" {\n")
ostr.write(INDENT * indent)
ostr.write("%s %s {\n" % (self.type, self.name,))
if len(self.public_entries) > 0:
ostr.write(INDENT * indent)
ostr.write("public:\n")
for e in self.public_entries:
e.to_decl_stream(ostr, indent + 1)
ostr.write("\n")
if len(self.protected_entries) > 0:
ostr.write(INDENT * indent)
ostr.write("protected:\n")
for e in self.protected_entries:
e.to_decl_stream(ostr, indent + 1)
ostr.write("\n")
if len(self.private_entries) > 0:
ostr.write(INDENT * indent)
ostr.write("private:\n")
for e in self.private_entries:
e.to_decl_stream(ostr, indent + 1)
ostr.write("\n")
ostr.write(INDENT * indent)
ostr.write("};\n")
if self.namespace is not None:
ostr.write("}\n")
def to_defn_stream(self, ostr, indent=0):
if self.namespace is not None:
ostr.write("namespace ")
ostr.write(self.namespace)
ostr.write(" {\n")
if len(self.public_entries) > 0:
for e in self.public_entries:
e.to_defn_stream(ostr, indent)
if len(self.protected_entries) > 0:
for e in self.protected_entries:
e.to_defn_stream(ostr, indent)
if len(self.private_entries) > 0:
for e in self.private_entries:
e.to_defn_stream(ostr, indent)
if self.namespace is not None:
ostr.write("}\n")
def gen_cpp_class(cls, namespace=None, type_map=None):
if type_map is None:
type_map = dict()
ocls = Class()
ocls.name = cls.get_type_name()
ocls.namespace = namespace
keys = Class()
keys.name = "Key"
keys.parent = ocls
keys.type = "struct"
ocls.public_entries.append(keys)
for k, v in cls.get_flat_type_info(cls).items():
member = DataMember(
"static", "const std::string",
k, StringLiteral(v.Attributes.sub_name or k)
)
member.comment_before = v.Annotations.doc
member.parent = keys
keys.public_entries.append(member)
ocls.to_decl_stream(sys.stdout)
sys.stdout.write("\n\n\n\n")
ocls.to_defn_stream(sys.stdout)
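# Illustrative usage sketch (not part of the original module). It assumes a
# Spyne ComplexModel subclass; the class and field names below are made up.
#
#     from spyne.model.complex import ComplexModel
#     from spyne.model.primitive import Unicode
#
#     class Permission(ComplexModel):
#         application = Unicode
#         operation = Unicode
#
#     gen_cpp_class(Permission, namespace="acme")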
| plq/spyne | spyne/util/gencpp.py | Python | lgpl-2.1 | 7,178 |
# Copyright 2020 KMEE INFORMATICA LTDA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import fields, models
class ResCompany(models.Model):
_inherit = "res.company"
provedor_nfse = fields.Selection(
selection_add=[
("paulistana", "Paulistana"),
]
)
| OCA/l10n-brazil | l10n_br_nfse_paulistana/models/res_company.py | Python | agpl-3.0 | 321 |
from pylab import *
import numpy
import ardustat_library_simple as ard
import time
import subprocess
import os
import glob
import sys
#connecto to ardustat and setup resistance table
a = ard.ardustat()
a.trial_connect(7777)
a.debug = False
a.load_resistance_table(16)
#create arrays + a function for logging data
times = []
potential = []
current = []
time_start = time.time()
def blink():
a.rawwrite(" ")
time.sleep(.1)
aaaaa = 3
def appender(reading):
print reading['cell_ADC'],read['current']
potential.append(reading['cell_ADC'])
current.append(reading['current'])
times.append(time.time()-time_start)
#Step through values
output = 0
blink()
print a.ocv()
for i in range(0,10):
time.sleep(.1)
read = a.parsedread()
appender(read)
while output < 2:
output = output + .1
blink()
print a.potentiostat(output)
for i in range(0,3):
time.sleep(.1)
read = a.parsedread()
appender(read)
blink()
print a.ocv()
for i in range(0,10):
time.sleep(.1)
read = a.parsedread()
appender(read)
output = 0
while output < .001:
output = output + .0001
blink()
a.galvanostat(output)
for i in range(0,3):
time.sleep(.1)
read = a.parsedread()
appender(read)
blink()
print a.ocv()
for i in range(0,10):
time.sleep(.1)
read = a.parsedread()
appender(read)
#Close socket and kill socket server
a.s.close()
a.p.kill()
#Make sure everything plots out realistically
subplot(3,1,1)
plot(times,potential,'.')
title("Potential vs. Time")
ylabel("Potential (V)")
subplot(3,1,2)
plot(times,current,'.')
title("Current vs. Time")
ylabel("Current (A)")
subplot(3,1,3)
plot(times,numpy.array(potential)/numpy.array(current))
title("Resistance vs. Time")
ylabel("Resistance (Ohms)")
xlabel("Time (s)")
show()
| kjiang8/Ardustat | Deprecated_Unsupported/Python_Client/sanitycheck_with_connect.py | Python | bsd-2-clause | 1,727 |
#
# Copyright 2004 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
class NotDAG (Exception):
"""Not a directed acyclic graph"""
pass
class CantHappen (Exception):
"""Can't happen"""
pass
| atzengin/OCC | ocvc-runtime/python/ocvc/oc/exceptions.py | Python | gpl-3.0 | 918 |
import logging
import StringIO
import csv
from django.utils.unittest.case import skipIf,skip
from datetime import datetime
from django.test import TestCase
from django.contrib.auth.models import User
from django.core import mail
from transit_subsidy.models import TransitSubsidy,Mode,TransitSubsidyModes,OfficeLocation
import transit_subsidy
from django.contrib.auth.models import User
logger = logging.getLogger('transit')
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('[%(name)s] %(asctime)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
class TransportationSubsidyViewTest(TestCase):
fixtures = ['offices.json','transit_modes.json', 'users.json']
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def setUp(self):
"""
Assumes valid user
"""
self.user = User.objects.get(username='ted')
is_logged_in = self.client.login(username='ted',password='ted')
self.assertTrue(is_logged_in, 'Client not able to login?! Check fixture data or User creation method in setUp.')
self.office = OfficeLocation.objects.order_by('city')[0]
def tearDown(self):
pass
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def test_transit_subsidy_withdrawl_date_should_be_None_on_re_enrollments(self):
pd = self.get_good_post_data()
pd.update( self.get_good_segment_data() )
response = self.client.post('/transit/', pd)
response = self.client.post('/transit/cancel')
response = self.client.post('/transit/', pd)
self.assertTrue( response.context['transit'].date_withdrawn == None, "Withdrawl date should be reset when re-enrolling")
def test_transit_subsidy_withdrawl(self):
pd = self.get_good_post_data()
pd.update( self.get_good_segment_data() )
response = self.client.post('/transit/', pd)
response = self.client.post('/transit/cancel')
self.assertTrue( response.context['transit'].date_withdrawn != None)
self.assertTemplateUsed(response,'transit_subsidy/cancel.html')
self.assertTrue( response.context['transit'].date_withdrawn != None)
def test_enrollment_notification_email(self):
transit = self._set_transit_subsidy()
transit_subsidy.views.send_enrollment_notification(self.user,transit)
self.assertEquals(1, len(mail.outbox))
message = mail.outbox[0].message().as_string()
self.assertTrue( message.find( '<p>Dear Ted,</p>') > -1 ,"[Dear Ted] not found in message body." )
self.assertTrue( message.find( 'Thank you' ) > -1 , "[Thank you] not found in message body.")
def test_send_enrollment_notification_email_from_http_request(self):
#The below was causing failure ONLY when executed in Jenkins:
response = self.client.post('/transit/', self.get_good_post_data() )
self.assertEquals(1, len(mail.outbox))
message = mail.outbox[0].message().as_string()
self.assertTrue( message.find( '<p>Dear Ted,</p>') > -1 ,"[Dear Ted] not found in message body." )
self.assertTrue( message.find( 'Thank you' ) > -1 , "[Thank you] not found in message body.")
def test_withdrawl_notification_email(self):
transit = self._set_transit_subsidy()
transit_subsidy.views.send_withdrawl_notification(self.user)
self.assertEquals(1, len(mail.outbox))
message = mail.outbox[0].message().as_string()
# print message
self.assertTrue( message.find( '<p>Dear Ted,</p>') > -1 ,"[Dear Ted] not found in message body." )
self.assertTrue( message.find( 'You have been withdrawn' ) > -1 , "[You have been withdrawn] not found in message body.")
def test_send_withdrawl_notification_email_from_http_request(self):
transit = self._set_transit_subsidy()
response = self.client.post('/transit/cancel')
self.assertEquals(1, len(mail.outbox))
message = mail.outbox[0].message().as_string()
# print message
self.assertTrue( message.find( '<p>Dear Ted,</p>') > -1 ,"[Dear Ted] not found in message body." )
self.assertTrue( message.find( 'You have been withdrawn' ) > -1 , "[You have been withdrawn] not found in message body.")
def test_returning_users_should_see_submitted_data(self):
self._set_transit_subsidy()
response = self.client.get('/transit/')
self.assertEquals(200,response.status_code)
self.assertTrue( response.context['returning_user'], 'Hmmmm. Code thinks there is no returning user')
self.assertEquals( {} , response.context['form'].errors)
#Major work to be done with CSV
# @skip('Changing requirements. Lots to do after initial release')
def test_csv_dump_should_work(self):
self._set_transit_subsidy()
response = self.client.get('/transit/csv')
normalized = StringIO.StringIO(response.content)
data = csv.DictReader(normalized)
actual = None
for item in data:
actual = item #too tired. end of day. should fix this
# print item
self.assertEquals( self.user.email, item['Email'] )
self.assertEquals( 125, int(item['Total Claim Amount']) )
    # Make sure any new required fields are added to self.get_good_post_data()
    def test_that_successful_transit_request_redirects_to_thankyou(self):
        #The below was causing failure ONLY when executed in Jenkins:
        response = self.client.post('/transit/', self.get_good_post_data() )
        self.assertTemplateUsed(response,'transit_subsidy/thank_you.html')
def test_that_zipcode_error_is_present(self):
bad_post_data = {'city':'LA','state':'CA','zip':''}
response = self.client.post('/transit/', bad_post_data)
print response.status_code
self.assertFormError(response, 'form', 'origin_zip', u'This field is required.')
def test_that_bad_form_data_redirects_back_to_form(self):
bad_post_data = {}
response = self.client.post('/transit/', bad_post_data)
# print response.context
self.assertTemplateUsed(response, 'transit_subsidy/index.html', 'Should be thankyou.html if OK.')
def test_that_login_is_required(self):
response = self.client.post('/account/login/?next=/transit/', {'username': 'test_user', 'password': 'password'})
response = self.client.get('/transit/')
self.assertEquals(200, response.status_code, "Should be a 200.")
# self.assertTrue( self.client.login(username='test_user',password='password'), "Login failed for test_user." )
def test_that_redirect_works_with_new_user(self):
#make sure user is logged out!
self.client.logout()
response = self.client.get('/transit/')
self.assertRedirects(response, '/login/?next=/transit/')
def test_that_ts_template_is_fetched(self):
response = self.client.get('/transit/')
self.assertEqual(200, response.status_code, "Did't get to template assertion. Check login logic or db.")
self.assertTemplateUsed(response,'transit_subsidy/index.html')
def test_set_modes(self):
self.set_modes()
trans = TransitSubsidy.objects.all()[0]
modes = TransitSubsidyModes.objects.filter(transit_subsidy=trans)
self.assertEquals(50, modes[1].cost)
def test_post_segment_data_should_be_OK(self):
pd = self.get_good_post_data()
pd.update( self.get_good_segment_data() )
response = self.client.post('/transit/', pd)
#print response
self.assertTemplateUsed(response,'transit_subsidy/thank_you.html')
def test_transit_subsidy_withdrawl_GET_Request_is_rejected(self):
with self.assertRaises(Exception):
response = self.client.get('/transit/cancel')
#Util method
def get_good_post_data(self):
return { 'date_enrolled' : datetime.now(),
'timestamp' : datetime.now(),
'last_four_ssn': '1111',
'origin_street': '123 Main St.',
'origin_city':'Anytown',
'origin_state':'OO',
'origin_zip':'12345',
'destination': self.office.id,
'number_of_workdays': 20,
'daily_roundtrip_cost' : 8,
'daily_parking_cost': 4,
'amount': 16,
'total_commute_cost': 164,
'signature' : 'Ted Nugent'
}
def get_good_segment_data(self):
return { #Metro
'segment-type_1' : '1',
'segment-amount_1' : '12',
'segment-other_1' : '',
#Dash
'segment-type_2' : '3',
'segment-amount_2' : '12',
'segment-other_2' : '',
#Other
'segment-type_3' : '9',
'segment-amount_3' : '5',
'segment-other_3' : 'The Bus',
}
def set_modes(self):
#Metro
self.modes = Mode.objects.all()
trans = self._set_transit_subsidy()
_modes = TransitSubsidyModes(transit_subsidy=trans, mode=self.modes[0], cost=100)
_modes.save()
#Dash
_modes = TransitSubsidyModes(transit_subsidy=trans, mode=self.modes[1], cost=50)
_modes.save()
#Other
_modes = TransitSubsidyModes(transit_subsidy=trans, mode=self.modes[4], cost=5, other_mode='ScooterBus')
_modes.save()
def _set_transit_subsidy(self):
transit = TransitSubsidy()
office = OfficeLocation.objects.order_by('city')[0]
transit.user = self.user
transit.last_four_ssn = 3333
transit.destination = office
transit.date_enrolled = '2011-06-23'
transit.origin_street = '123 Main Street'
transit.origin_city = 'Anytown'
transit.origin_state = 'VA'
transit.origin_zip = '22222'
transit.number_of_workdays = 20
transit.daily_roundtrip_cost = 8
transit.daily_parking_cost = 4
transit.amount = 125
transit.total_commute_cost = 160
transit.dc_wmta_smartrip_id = '123-123-123'
transit.save()
return transit
| cfpb/transit_subsidy | tests/transit_subsidy/view_tests.py | Python | cc0-1.0 | 10,909 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""An experimental library for reading and converting SVG.
This is an experimental converter from SVG to RLG (ReportLab Graphics)
drawings. It converts mainly basic shapes, paths and simple text.
The current intended usage is either as module within other projects:
from svglib.svglib import svg2rlg
drawing = svg2rlg("foo.svg")
or from the command-line where right now it is usable as an SVG to PDF
converting tool named sv2pdf (which should also handle SVG files com-
pressed with gzip and extension .svgz).
"""
import sys
import os
import glob
import types
import re
import operator
import gzip
import xml.dom.minidom
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.graphics.shapes import *
from reportlab.graphics import renderPDF
from reportlab.lib import colors
from reportlab.lib.units import cm, inch, mm, pica, toLength
__version__ = "0.6.3"
__license__ = "LGPL 3"
__author__ = "Dinu Gherman"
__date__ = "2010-03-01"
pt = 1
LOGMESSAGES = 0
### helpers ###
def convertToFloats(aList):
"Convert number strings in list to floats (leave rest untouched)."
for i in xrange(len(aList)):
try:
aList[i] = float(aList[i])
except ValueError:
try:
aList[i] = aList[i].encode("ASCII")
except:
pass
return aList
def convertQuadraticToCubicPath(Q0, Q1, Q2):
"Convert a quadratic Bezier curve through Q0, Q1, Q2 to a cubic one."
C0 = Q0
C1 = (Q0[0]+2./3*(Q1[0]-Q0[0]), Q0[1]+2./3*(Q1[1]-Q0[1]))
C2 = (C1[0]+1./3*(Q2[0]-Q0[0]), C1[1]+1./3*(Q2[1]-Q0[1]))
C3 = Q2
return C0, C1, C2, C3
def fixSvgPath(aList):
"""Normalise certain "abnormalities" in SVG paths.
Basically, this reduces adjacent number values for h and v
operators to the sum of these numbers and those for H and V
operators to the last number only.
Returns a slightly more compact list if such reductions
were applied or a copy of the same list, otherwise.
"""
# this could also modify the path to contain an op code
# for each coord. tuple of a tuple sequence...
hPos, vPos, HPos, VPos, numPos = [], [], [], [], []
for i in xrange(len(aList)):
hPos.append(aList[i]=='h')
vPos.append(aList[i]=='v')
HPos.append(aList[i]=='H')
VPos.append(aList[i]=='V')
numPos.append(type(aList[i])==type(1.0))
fixedList = []
i = 0
while i < len(aList):
if hPos[i] + vPos[i] + HPos[i] + VPos[i] == 0:
fixedList.append(aList[i])
elif hPos[i] == 1 or vPos[i] == 1:
fixedList.append(aList[i])
sum = 0
j = i+1
while j < len(aList) and numPos[j] == 1:
sum = sum + aList[j]
j = j+1
fixedList.append(sum)
i = j-1
elif HPos[i] == 1 or VPos[i] == 1:
fixedList.append(aList[i])
last = 0
j = i+1
while j < len(aList) and numPos[j] == 1:
last = aList[j]
j = j+1
fixedList.append(last)
i = j-1
i = i+1
return fixedList
def normaliseSvgPath(attr):
"""Normalise SVG path.
This basically introduces operator codes for multi-argument
parameters. Also, it fixes sequences of consecutive M or m
operators to MLLL... and mlll... operators. It adds an empty
list as argument for Z and z only in order to make the resul-
ting list easier to iterate over.
E.g. "M 10 20, M 20 20, L 30 40, 40 40, Z"
-> ['M', [10, 20], 'L', [20, 20], 'L', [30, 40], 'L', [40, 40], 'Z', []]
"""
# operator codes mapped to the minimum number of expected arguments
ops = {'A':7, 'a':7,
'Q':4, 'q':4, 'T':2, 't':2, 'S':4, 's':4,
'M':2, 'L':2, 'm':2, 'l':2, 'H':1, 'V':1,
'h':1, 'v':1, 'C':6, 'c':6, 'Z':0, 'z':0}
# do some preprocessing
opKeys = ops.keys()
a = attr
a = a.replace(',', ' ')
    # str.replace is equivalent to the deprecated string.replace and needs no import
    a = a.replace('e-', 'ee')
    a = a.replace('-', ' -')
    a = a.replace('ee', 'e-')
for op in opKeys:
a = a.replace(op, " %s " % op)
a = a.strip()
a = a.split()
a = convertToFloats(a)
a = fixSvgPath(a)
# insert op codes for each argument of an op with multiple arguments
res = []
i = 0
while i < len(a):
el = a[i]
if el in opKeys:
if el in ('z', 'Z'):
res.append(el)
res.append([])
else:
while i < len(a)-1:
if a[i+1] not in opKeys:
res.append(el)
res.append(a[i+1:i+1+ops[el]])
i = i + ops[el]
else:
break
i = i + 1
# fix sequences of M to one M plus a sequence of L operators,
# same for m and l.
for i in xrange(0, len(res), 2):
op, nums = res[i:i+2]
if i >= 2:
if op == 'M' == res[i-2]:
res[i] = 'L'
elif op == 'm' == res[i-2]:
res[i] = 'l'
return res
### attribute converters (from SVG to RLG)
class AttributeConverter:
"An abstract class to locate and convert attributes in a DOM instance."
def parseMultiAttributes(self, line):
"""Try parsing compound attribute string.
Return a dictionary with single attributes in 'line'.
"""
try:
line = line.encode("ASCII")
except:
pass
attrs = line.split(';')
attrs = [a.strip() for a in attrs]
attrs = filter(lambda a:len(a)>0, attrs)
newAttrs = {}
for a in attrs:
k, v = a.split(':')
k, v = [s.strip() for s in (k, v)]
newAttrs[k] = v
return newAttrs
def findAttr(self, svgNode, name):
"""Search an attribute with some name in some node or above.
First the node is searched, then its style attribute, then
the search continues in the node's parent node. If no such
attribute is found, '' is returned.
"""
# This needs also to lookup values like "url(#SomeName)"...
try:
attrValue = svgNode.getAttribute(name)
except:
return ''
if attrValue and attrValue != "inherit":
return attrValue
elif svgNode.getAttribute("style"):
dict = self.parseMultiAttributes(svgNode.getAttribute("style"))
if dict.has_key(name):
return dict[name]
else:
if svgNode.parentNode:
return self.findAttr(svgNode.parentNode, name)
return ''
def getAllAttributes(self, svgNode):
"Return a dictionary of all attributes of svgNode or those inherited by it."
dict = {}
        if svgNode.parentNode and svgNode.parentNode.nodeName == 'g':
dict.update(self.getAllAttributes(svgNode.parentNode))
if svgNode.nodeType == svgNode.ELEMENT_NODE:
style = svgNode.getAttribute("style")
if style:
d = self.parseMultiAttributes(style)
dict.update(d)
attrs = svgNode.attributes
for i in xrange(attrs.length):
a = attrs.item(i)
if a.name != "style":
dict[a.name.encode("ASCII")] = a.value
return dict
def id(self, svgAttr):
"Return attribute as is."
return svgAttr
def convertTransform(self, svgAttr):
"""Parse transform attribute string.
E.g. "scale(2) translate(10,20)"
-> [("scale", 2), ("translate", (10,20))]
"""
line = svgAttr
try:
line = line.encode("ASCII")
except:
pass
line = line.strip()
ops = line[:]
brackets = []
indices = []
for i in range(len(line)):
if line[i] in "()": brackets.append(i)
for i in range(0, len(brackets), 2):
bi, bj = brackets[i], brackets[i+1]
subline = line[bi+1:bj]
subline = subline.strip()
subline = subline.replace(',', ' ')
subline = re.sub("[ ]+", ',', subline)
indices.append(eval(subline))
ops = ops[:bi] + ' '*(bj-bi+1) + ops[bj+1:]
ops = ops.split()
assert len(ops) == len(indices)
result = []
for i in range(len(ops)):
result.append((ops[i], indices[i]))
return result
class Svg2RlgAttributeConverter(AttributeConverter):
"A concrete SVG to RLG attribute converter."
def convertLength(self, svgAttr, percentOf=100):
"Convert length to points."
text = svgAttr
if not text:
return 0.0
if text[-1] == '%':
if LOGMESSAGES:
print("Fiddling length unit: %")
return float(text[:-1]) / 100 * percentOf
elif text[-2:] == "pc":
return float(text[:-2]) * pica
newSize = text[:]
for u in "em ex px".split():
if newSize.find(u) >= 0:
if LOGMESSAGES:
print("Ignoring unit: {0}".format(u))
newSize = newSize.replace(u, '')
newSize = newSize.strip()
length = toLength(newSize)
return length
def convertLengthList(self, svgAttr):
"Convert a list of lengths."
t = svgAttr.replace(',', ' ')
t = t.strip()
t = re.sub("[ ]+", ' ', t)
a = t.split(' ')
a = list(map(self.convertLength, a))
return a
def convertColor(self, svgAttr):
"Convert string to a RL color object."
# fix it: most likely all "web colors" are allowed
predefined = "aqua black blue fuchsia gray green lime maroon navy "
predefined = predefined + "olive orange purple red silver teal white yellow "
predefined = predefined + "lawngreen indianred aquamarine lightgreen brown"
# This needs also to lookup values like "url(#SomeName)"...
text = svgAttr
if not text or text == "none":
return None
try:
text = text.encode("ASCII")
except:
pass
if text in predefined.split():
return getattr(colors, text)
elif text == "currentColor":
return "currentColor"
elif len(text) == 7 and text[0] == '#':
return colors.HexColor(text)
elif len(text) == 4 and text[0] == '#':
return colors.HexColor('#' + 2*text[1] + 2*text[2] + 2*text[3])
elif text[:3] == "rgb" and text.find('%') < 0:
t = text[:][3:]
t = t.replace('%', '')
tup = eval(t)
tup = list(map(lambda h:h[2:], list(map(hex, tup))))
tup = list(map(lambda h:(2-len(h))*'0'+h, tup))
col = "#%s%s%s" % tuple(tup)
return colors.HexColor(col)
elif text[:3] == 'rgb' and text.find('%') >= 0:
t = text[:][3:]
t = t.replace('%', '')
tup = eval(t)
tup = list(map(lambda c:c/100.0, tup))
col = apply(colors.Color, tup)
return col
if LOGMESSAGES:
print("Can't handle color:", text)
return None
def convertLineJoin(self, svgAttr):
return {"miter":0, "round":1, "bevel":2}[svgAttr]
def convertLineCap(self, svgAttr):
return {"butt":0, "round":1, "square":2}[svgAttr]
def convertDashArray(self, svgAttr):
strokeDashArray = self.convertLengthList(svgAttr)
return strokeDashArray
def convertDashOffset(self, svgAttr):
strokeDashOffset = self.convertLength(svgAttr)
return strokeDashOffset
def convertFontFamily(self, svgAttr):
# very hackish
fontMapping = {"sans-serif":"Helvetica",
"serif":"Times-Roman",
"monospace":"Courier"}
fontName = svgAttr
if not fontName:
return ''
try:
fontName = fontMapping[fontName]
except KeyError:
pass
if fontName not in ("Helvetica", "Times-Roman", "Courier"):
fontName = "Helvetica"
return fontName
class NodeTracker:
"""An object wrapper keeping track of arguments to certain method calls.
Instances wrap an object and store all arguments to one special
method, getAttribute(name), in a list of unique elements, usedAttrs.
"""
def __init__(self, anObject):
self.object = anObject
self.usedAttrs = []
def getAttribute(self, name):
# add argument to the history, if not already present
if name not in self.usedAttrs:
self.usedAttrs.append(name)
# forward call to wrapped object
return self.object.getAttribute(name)
# also getAttributeNS(uri, name)?
def __getattr__(self, name):
# forward attribute access to wrapped object
return getattr(self.object, name)
### the main meat ###
class SvgRenderer:
"""Renderer that renders an SVG file on a ReportLab Drawing instance.
This is the base class for walking over an SVG DOM document and
transforming it into a ReportLab Drawing instance.
"""
def __init__(self, path=None):
self.attrConverter = Svg2RlgAttributeConverter()
self.shapeConverter = Svg2RlgShapeConverter()
self.shapeConverter.svgSourceFile = path
self.handledShapes = self.shapeConverter.getHandledShapes()
self.drawing = None
self.mainGroup = Group()
self.definitions = {}
self.doesProcessDefinitions = 0
self.verbose = 0
self.level = 0
self.path = path
self.logFile = None
#if self.path:
# logPath = os.path.splitext(self.path)[0] + ".log"
# self.logFile = open(logPath, 'w')
def render(self, node, parent=None):
if parent == None:
parent = self.mainGroup
name = node.nodeName
if self.verbose:
format = "%s%s"
args = (' '*self.level, name)
#if not self.logFile:
# print format % args
#else:
# self.logFile.write((format+"\n") % args)
if name == "svg":
self.level = self.level + 1
n = NodeTracker(node)
drawing = self.renderSvg(n)
children = n.childNodes
for child in children:
if child.nodeType != 1:
continue
self.render(child, self.mainGroup)
self.level = self.level - 1
self.printUnusedAttributes(node, n)
elif name == "defs":
self.doesProcessDefinitions = 1
n = NodeTracker(node)
self.level = self.level + 1
parent.add(self.renderG(n))
self.level = self.level - 1
self.doesProcessDefinitions = 0
self.printUnusedAttributes(node, n)
elif name == 'a':
self.level = self.level + 1
n = NodeTracker(node)
item = self.renderA(n)
parent.add(item)
self.level = self.level - 1
self.printUnusedAttributes(node, n)
elif name == 'g':
self.level = self.level + 1
n = NodeTracker(node)
display = n.getAttribute("display")
if display != "none":
item = self.renderG(n)
parent.add(item)
if self.doesProcessDefinitions:
id = n.getAttribute("id")
self.definitions[id] = item
self.level = self.level - 1
self.printUnusedAttributes(node, n)
elif name == "symbol":
self.level = self.level + 1
n = NodeTracker(node)
item = self.renderSymbol(n)
# parent.add(item)
id = n.getAttribute("id")
if id:
self.definitions[id] = item
self.level = self.level - 1
self.printUnusedAttributes(node, n)
elif name in self.handledShapes:
methodName = "convert"+name[0].upper()+name[1:]
n = NodeTracker(node)
shape = getattr(self.shapeConverter, methodName)(n)
if shape:
self.shapeConverter.applyStyleOnShape(shape, n)
transform = n.getAttribute("transform")
display = n.getAttribute("display")
if transform and display != "none":
gr = Group()
self.shapeConverter.applyTransformOnGroup(transform, gr)
gr.add(shape)
parent.add(gr)
elif display != "none":
parent.add(shape)
self.printUnusedAttributes(node, n)
else:
if LOGMESSAGES:
print("Ignoring node: {0}".format(name))
def printUnusedAttributes(self, node, n):
allAttrs = self.attrConverter.getAllAttributes(node).keys()
unusedAttrs = []
for a in allAttrs:
if a not in n.usedAttrs:
unusedAttrs.append(a)
if self.verbose and unusedAttrs:
format = "%s-Unused: %s"
            args = (" "*(self.level+1), ", ".join(unusedAttrs))
#if not self.logFile:
# print format % args
#else:
# self.logFile.write((format+"\n") % args)
if LOGMESSAGES and unusedAttrs:
#print "Used attrs:", n.nodeName, n.usedAttrs
#print "All attrs:", n.nodeName, allAttrs
print("Unused attrs:", n.nodeName, unusedAttrs)
def renderTitle_(self, node):
# Main SVG title attr. could be used in the PDF document info field.
pass
def renderDesc_(self, node):
# Main SVG desc. attr. could be used in the PDF document info field.
pass
def renderSvg(self, node):
getAttr = node.getAttribute
width, height = list(map(getAttr, ("width", "height")))
width, height = list(map(self.attrConverter.convertLength, (width, height)))
viewBox = getAttr("viewBox")
print(viewBox)
if viewBox:
viewBox = self.attrConverter.convertLengthList(viewBox)
width, height = viewBox[2:4]
self.drawing = Drawing(width, height)
return self.drawing
def renderG(self, node, display=1):
getAttr = node.getAttribute
id, style, transform = list(map(getAttr, ("id", "style", "transform")))
#sw = map(getAttr, ("stroke-width",))
self.attrs = self.attrConverter.parseMultiAttributes(style)
gr = Group()
children = node.childNodes
for child in children:
if child.nodeType != 1:
continue
item = self.render(child, parent=gr)
if item and display:
gr.add(item)
if transform:
self.shapeConverter.applyTransformOnGroup(transform, gr)
return gr
def renderSymbol(self, node):
return self.renderG(node, display=0)
def renderA(self, node):
# currently nothing but a group...
# there is no linking info stored in shapes, maybe a group should?
return self.renderG(node)
def renderUse(self, node):
xlink_href = node.getAttributeNS("http://www.w3.org/1999/xlink", "href")
grp = Group()
try:
item = self.definitions[xlink_href[1:]]
grp.add(item)
transform = node.getAttribute("transform")
if transform:
self.shapeConverter.applyTransformOnGroup(transform, grp)
except KeyError:
if self.verbose and LOGMESSAGES:
print("Ignoring unavailable object width ID '{0}'.".format(xlink_href))
return grp
def finish(self):
height = self.drawing.height
self.mainGroup.scale(1, -1)
self.mainGroup.translate(0, -height)
self.drawing.add(self.mainGroup)
return self.drawing
class SvgShapeConverter:
"""An abstract SVG shape converter.
Implement subclasses with methods named 'renderX(node)', where
'X' should be the capitalised name of an SVG node element for
shapes, like 'Rect', 'Circle', 'Line', etc.
Each of these methods should return a shape object appropriate
for the target format.
"""
def __init__(self):
self.attrConverter = AttributeConverter()
self.svgSourceFile = ''
def getHandledShapes(self):
"Determine a list of handled shape elements."
items = dir(self)
items = self.__class__.__dict__.keys()
keys = []
for i in items:
keys.append(getattr(self, i))
keys = filter(lambda k:type(k) == types.MethodType, keys)
keys = list(map(lambda k:k.__name__, keys))
keys = filter(lambda k:k[:7] == "convert", keys)
keys = filter(lambda k:k != "convert", keys)
keys = list(map(lambda k:k[7:], keys))
shapeNames = [k.lower() for k in keys]
return shapeNames
class Svg2RlgShapeConverter(SvgShapeConverter):
"Converterer from SVG shapes to RLG (ReportLab Graphics) shapes."
def __init__(self):
self.attrConverter = Svg2RlgAttributeConverter()
self.svgSourceFile = ''
def convertLine(self, node):
getAttr = node.getAttribute
x1, y1, x2, y2 = list(map(getAttr, ("x1", "y1", "x2", "y2")))
x1, y1, x2, y2 = list(map(self.attrConverter.convertLength, (x1, y1, x2, y2)))
shape = Line(x1, y1, x2, y2)
return shape
def convertRect(self, node):
getAttr = node.getAttribute
x, y, width, height = list(map(getAttr, ('x', 'y', "width", "height")))
x, y, width, height = list(map(self.attrConverter.convertLength, (x, y, width, height)))
rx, ry = list(map(getAttr, ("rx", "ry")))
rx, ry = list(map(self.attrConverter.convertLength, (rx, ry)))
shape = Rect(x, y, width, height, rx=rx, ry=ry)
return shape
def convertCircle(self, node):
# not rendered if r == 0, error if r < 0.
getAttr = node.getAttribute
cx, cy, r = list(map(getAttr, ("cx", "cy", 'r')))
cx, cy, r = list(map(self.attrConverter.convertLength, (cx, cy, r)))
shape = Circle(cx, cy, r)
return shape
def convertEllipse(self, node):
getAttr = node.getAttribute
cx, cy, rx, ry = list(map(getAttr, ("cx", "cy", "rx", "ry")))
cx, cy, rx, ry = list(map(self.attrConverter.convertLength, (cx, cy, rx, ry)))
width, height = rx, ry
shape = Ellipse(cx, cy, width, height)
return shape
def convertPolyline(self, node):
getAttr = node.getAttribute
points = getAttr("points")
points = points.replace(',', ' ')
points = points.split()
points = list(map(self.attrConverter.convertLength, points))
# Need to use two shapes, because standard RLG polylines
# do not support filling...
gr = Group()
shape = Polygon(points)
self.applyStyleOnShape(shape, node)
shape.strokeColor = None
gr.add(shape)
shape = PolyLine(points)
self.applyStyleOnShape(shape, node)
gr.add(shape)
return gr
def convertPolygon(self, node):
getAttr = node.getAttribute
points = getAttr("points")
points = points.replace(',', ' ')
points = points.split()
points = list(map(self.attrConverter.convertLength, points))
shape = Polygon(points)
return shape
def convertText0(self, node):
getAttr = node.getAttribute
x, y = list(map(getAttr, ('x', 'y')))
if not x: x = '0'
if not y: y = '0'
text = ''
if node.firstChild.nodeValue:
try:
text = node.firstChild.nodeValue.encode("ASCII")
except:
text = "Unicode"
x, y = list(map(self.attrConv.convertLength, (x, y)))
shape = String(x, y, text)
self.applyStyleOnShape(shape, node)
gr = Group()
gr.add(shape)
gr.scale(1, -1)
gr.translate(0, -2*y)
return gr
def convertText(self, node):
attrConv = self.attrConverter
getAttr = node.getAttribute
x, y = list(map(getAttr, ('x', 'y')))
x, y = list(map(attrConv.convertLength, (x, y)))
gr = Group()
text = ''
chNum = len(node.childNodes)
frags = []
fragLengths = []
dx0, dy0 = 0, 0
x1, y1 = 0, 0
ff = attrConv.findAttr(node, "font-family") or "Helvetica"
ff = ff.encode("ASCII")
ff = attrConv.convertFontFamily(ff)
fs = attrConv.findAttr(node, "font-size") or "12"
fs = fs.encode("ASCII")
fs = attrConv.convertLength(fs)
for c in node.childNodes:
dx, dy = 0, 0
baseLineShift = 0
if c.nodeType == c.TEXT_NODE:
frags.append(c.nodeValue)
try:
tx = ''.join([chr(ord(f)) for f in frags[-1]])
except ValueError:
tx = "Unicode"
elif c.nodeType == c.ELEMENT_NODE and c.nodeName == "tspan":
frags.append(c.firstChild.nodeValue)
tx = ''.join([chr(ord(f)) for f in frags[-1]])
getAttr = c.getAttribute
y1 = getAttr('y')
y1 = attrConv.convertLength(y1)
dx, dy = list(map(getAttr, ("dx", "dy")))
dx, dy = list(map(attrConv.convertLength, (dx, dy)))
dx0 = dx0 + dx
dy0 = dy0 + dy
baseLineShift = getAttr("baseline-shift") or '0'
if baseLineShift in ("sub", "super", "baseline"):
baseLineShift = {"sub":-fs/2, "super":fs/2, "baseline":0}[baseLineShift]
else:
baseLineShift = attrConv.convertLength(baseLineShift, fs)
elif c.nodeType == c.ELEMENT_NODE and c.nodeName != "tspan":
continue
fragLengths.append(stringWidth(tx, ff, fs))
rl = reduce(operator.__add__, fragLengths[:-1], 0)
try:
text = ''.join([chr(ord(f)) for f in frags[-1]])
except ValueError:
text = "Unicode"
shape = String(x+rl, y-y1-dy0+baseLineShift, text)
self.applyStyleOnShape(shape, node)
if c.nodeType == c.ELEMENT_NODE and c.nodeName == "tspan":
self.applyStyleOnShape(shape, c)
gr.add(shape)
gr.scale(1, -1)
gr.translate(0, -2*y)
return gr
def convertPath(self, node):
d = node.getAttribute('d')
normPath = normaliseSvgPath(d)
pts, ops = [], []
lastMoveToOp = None
for i in xrange(0, len(normPath), 2):
op, nums = normPath[i:i+2]
# moveto, lineto absolute
if op in ('M', 'L'):
xn, yn = nums
pts = pts + [xn, yn]
if op == 'M':
ops.append(0)
lastMoveToOp = (op, xn, yn)
elif op == 'L':
ops.append(1)
# moveto, lineto relative
elif op == 'm':
xn, yn = nums
if len(pts) >= 2:
pts = pts + [pts[-2]+xn] + [pts[-1]+yn]
else:
pts = pts + [xn, yn]
if normPath[-2] in ('z', 'Z') and lastMoveToOp:
pts[-2] = xn + lastMoveToOp[-2]
pts[-1] = yn + lastMoveToOp[-1]
lastMoveToOp = (op, pts[-2], pts[-1])
if not lastMoveToOp:
lastMoveToOp = (op, xn, yn)
ops.append(0)
elif op == 'l':
xn, yn = nums
pts = pts + [pts[-2]+xn] + [pts[-1]+yn]
ops.append(1)
# horizontal/vertical line absolute
elif op in ('H', 'V'):
k = nums[0]
if op == 'H':
pts = pts + [k] + [pts[-1]]
elif op == 'V':
pts = pts + [pts[-2]] + [k]
ops.append(1)
# horizontal/vertical line relative
elif op in ('h', 'v'):
k = nums[0]
if op == 'h':
pts = pts + [pts[-2]+k] + [pts[-1]]
elif op == 'v':
pts = pts + [pts[-2]] + [pts[-1]+k]
ops.append(1)
# cubic bezier, absolute
elif op == 'C':
x1, y1, x2, y2, xn, yn = nums
pts = pts + [x1, y1, x2, y2, xn, yn]
ops.append(2)
elif op == 'S':
x2, y2, xn, yn = nums
xp, yp, x0, y0 = pts[-4:]
xi, yi = x0+(x0-xp), y0+(y0-yp)
# pts = pts + [xcp2, ycp2, x2, y2, xn, yn]
pts = pts + [xi, yi, x2, y2, xn, yn]
ops.append(2)
# cubic bezier, relative
elif op == 'c':
xp, yp = pts[-2:]
x1, y1, x2, y2, xn, yn = nums
pts = pts + [xp+x1, yp+y1, xp+x2, yp+y2, xp+xn, yp+yn]
ops.append(2)
elif op == 's':
xp, yp, x0, y0 = pts[-4:]
xi, yi = x0+(x0-xp), y0+(y0-yp)
x2, y2, xn, yn = nums
pts = pts + [xi, yi, x0+x2, y0+y2, x0+xn, y0+yn]
ops.append(2)
# quadratic bezier, absolute
elif op == 'Q':
x0, y0 = pts[-2:]
x1, y1, xn, yn = nums
xcp, ycp = x1, y1
(x0,y0), (x1,y1), (x2,y2), (xn,yn) = \
convertQuadraticToCubicPath((x0,y0), (x1,y1), (xn,yn))
pts = pts + [x1,y1, x2,y2, xn,yn]
ops.append(2)
elif op == 'T':
xp, yp, x0, y0 = pts[-4:]
xi, yi = x0+(x0-xcp), y0+(y0-ycp)
xcp, ycp = xi, yi
xn, yn = nums
(x0,y0), (x1,y1), (x2,y2), (xn,yn) = \
convertQuadraticToCubicPath((x0,y0), (xi,yi), (xn,yn))
pts = pts + [x1,y1, x2,y2, xn,yn]
ops.append(2)
# quadratic bezier, relative
elif op == 'q':
x0, y0 = pts[-2:]
x1, y1, xn, yn = nums
x1, y1, xn, yn = x0+x1, y0+y1, x0+xn, y0+yn
xcp, ycp = x1, y1
(x0,y0), (x1,y1), (x2,y2), (xn,yn) = \
convertQuadraticToCubicPath((x0,y0), (x1,y1), (xn,yn))
pts = pts + [x1,y1, x2,y2, xn,yn]
ops.append(2)
elif op == 't':
x0, y0 = pts[-2:]
xn, yn = nums
xn, yn = x0+xn, y0+yn
xi, yi = x0+(x0-xcp), y0+(y0-ycp)
xcp, ycp = xi, yi
(x0,y0), (x1,y1), (x2,y2), (xn,yn) = \
convertQuadraticToCubicPath((x0,y0), (xi,yi), (xn,yn))
pts = pts + [x1,y1, x2,y2, xn,yn]
ops.append(2)
# close path
elif op in ('Z', 'z'):
ops.append(3)
# arcs
else: #if op in unhandledOps.keys():
if LOGMESSAGES:
print("Suspicious path operator:", op)
if op in ('A', 'a'):
pts = pts + nums[-2:]
ops.append(1)
if LOGMESSAGES:
print("(Replaced with straight line)")
# hack because RLG has no "semi-closed" paths...
gr = Group()
if ops[-1] == 3:
shape1 = Path(pts, ops)
self.applyStyleOnShape(shape1, node)
fc = self.attrConverter.findAttr(node, "fill")
if not fc:
shape1.fillColor = colors.black
sc = self.attrConverter.findAttr(node, "stroke")
if not sc:
shape1.strokeColor = None
gr.add(shape1)
else:
shape1 = Path(pts, ops+[3])
self.applyStyleOnShape(shape1, node)
shape1.strokeColor = None
fc = self.attrConverter.findAttr(node, "fill")
if not fc:
shape1.fillColor = colors.black
gr.add(shape1)
shape2 = Path(pts, ops)
self.applyStyleOnShape(shape2, node)
shape2.fillColor = None
sc = self.attrConverter.findAttr(node, "stroke")
if not sc:
shape2.strokeColor = None
gr.add(shape2)
return gr
def convertImage(self, node):
if LOGMESSAGES:
print("Adding box instead image.")
getAttr = node.getAttribute
x, y, width, height = list(map(getAttr, ('x', 'y', "width", "height")))
x, y, width, height = list(map(self.attrConverter.convertLength, (x, y, width, height)))
xlink_href = node.getAttributeNS("http://www.w3.org/1999/xlink", "href")
try:
xlink_href = xlink_href.encode("ASCII")
except:
pass
xlink_href = os.path.join(os.path.dirname(self.svgSourceFile), xlink_href)
# print "***", x, y, width, height, xlink_href[:30]
magic = "data:image/jpeg;base64"
if xlink_href[:len(magic)] == magic:
pat = "data:image/(\w+?);base64"
ext = re.match(pat, magic).groups()[0]
import base64, md5
jpegData = base64.decodestring(xlink_href[len(magic):])
hashVal = md5.new(jpegData).hexdigest()
name = "images/img%s.%s" % (hashVal, ext)
            path = os.path.join(os.path.dirname(self.svgSourceFile), name)
open(path, "wb").write(jpegData)
img = Image(x, y+height, width, -height, path)
# this needs to be removed later, not here...
# if exists(path): os.remove(path)
else:
xlink_href = os.path.join(os.path.dirname(self.svgSourceFile), xlink_href)
img = Image(x, y+height, width, -height, xlink_href)
return img
def applyTransformOnGroup(self, transform, group):
"""Apply an SVG transformation to a RL Group shape.
The transformation is the value of an SVG transform attribute
like transform="scale(1, -1) translate(10, 30)".
rotate(<angle> [<cx> <cy>]) is equivalent to:
translate(<cx> <cy>) rotate(<angle>) translate(-<cx> -<cy>)
"""
tr = self.attrConverter.convertTransform(transform)
for op, values in tr:
if op == "scale":
if type(values) != types.TupleType:
values = (values, values)
apply(group.scale, values)
elif op == "translate":
try: # HOTFIX
values = values[0], values[1]
except TypeError:
return
apply(group.translate, values)
elif op == "rotate":
if type(values) != types.TupleType or len(values) == 1:
group.rotate(values)
elif len(values) == 3:
angle, cx, cy = values
group.translate(cx, cy)
group.rotate(angle)
group.translate(-cx, -cy)
elif op == "skewX":
group.skew(values, 0)
elif op == "skewY":
group.skew(0, values)
elif op == "matrix":
group.transform = values
else:
if LOGMESSAGES:
print("Ignoring transform:", op, values)
def applyStyleOnShape(self, shape, *nodes):
"Apply styles from SVG elements to an RLG shape."
# RLG-specific: all RLG shapes
"Apply style attributes of a sequence of nodes to an RL shape."
# tuple format: (svgAttr, rlgAttr, converter, default)
mappingN = (
("fill", "fillColor", "convertColor", "none"),
("stroke", "strokeColor", "convertColor", "none"),
("stroke-width", "strokeWidth", "convertLength", "0"),
("stroke-linejoin", "strokeLineJoin", "convertLineJoin", "0"),
("stroke-linecap", "strokeLineCap", "convertLineCap", "0"),
("stroke-dasharray", "strokeDashArray", "convertDashArray", "none"),
)
mappingF = (
("font-family", "fontName", "convertFontFamily", "Helvetica"),
("font-size", "fontSize", "convertLength", "12"),
("text-anchor", "textAnchor", "id", "start"),
)
ac = self.attrConverter
for node in nodes:
for mapping in (mappingN, mappingF):
if shape.__class__ != String and mapping == mappingF:
continue
for (svgAttrName, rlgAttr, func, default) in mapping:
try:
svgAttrValue = ac.findAttr(node, svgAttrName) or default
if svgAttrValue == "currentColor":
svgAttrValue = ac.findAttr(node.parentNode, "color") or default
meth = getattr(ac, func)
setattr(shape, rlgAttr, meth(svgAttrValue))
except:
pass
if shape.__class__ == String:
svgAttr = ac.findAttr(node, "fill") or "black"
setattr(shape, "fillColor", ac.convertColor(svgAttr))
def svg2rlg(path):
"Convert an SVG file to an RLG Drawing object."
# unzip .svgz file into .svg
unzipped = False
if os.path.splitext(path)[1].lower() == ".svgz":
data = gzip.GzipFile(path, "rb").read()
open(path[:-1], 'w').write(data)
path = path[:-1]
unzipped = True
# load SVG file
try:
doc = xml.dom.minidom.parse(path)
svg = doc.documentElement
except:
print("Failed to load input file!")
return
# convert to a RLG drawing
svgRenderer = SvgRenderer(path)
svgRenderer.render(svg)
drawing = svgRenderer.finish()
# remove unzipped .svgz file (.svg)
if unzipped:
os.remove(path)
return drawing
| simonv3/svglib | src/svglib/svglib.py | Python | lgpl-3.0 | 38,674 |
#!/usr/bin/env pvbatch
from paraview.simple import *
import pviz
import sys
oviz = pviz.viz(sys.argv) # instantiate viz object (and load data)
part = pviz.makeSlice( y = 0. ) # make slice
oviz.view.CameraViewUp = [0,0,1]
oviz.view.CameraPosition = [0,1,0]
ResetCamera() # auto-adapt camera to part extent
for var in part.PointData: # loop over node-centered data
varName = var.GetName() # get variable name
oviz.colorPartByVarName(part,varName,barPosition = 'right')
oviz.writeImage(varName) # save image
| ruizanthony/pviz | examples/simple/cut_y0.py | Python | lgpl-3.0 | 573 |
from prices import Price
from saleor.cart.models import Cart
from saleor.dashboard.order.forms import ChangeQuantityForm
from saleor.order import models
from saleor.order.utils import add_items_to_delivery_group
def test_total_property():
order = models.Order(total_net=20, total_tax=5)
assert order.total.gross == 25
assert order.total.tax == 5
assert order.total.net == 20
def test_total_property_empty_value():
order = models.Order(total_net=None, total_tax=None)
assert order.total is None
def test_total_setter():
price = Price(net=10, gross=20, currency='USD')
order = models.Order()
order.total = price
assert order.total_net.net == 10
assert order.total_tax.net == 10
def test_stock_allocation(billing_address, product_in_stock):
variant = product_in_stock.variants.get()
cart = Cart()
cart.save()
cart.add(variant, quantity=2)
order = models.Order.objects.create(billing_address=billing_address)
delivery_group = models.DeliveryGroup.objects.create(order=order)
add_items_to_delivery_group(delivery_group, cart.lines.all())
order_line = delivery_group.items.get()
stock = order_line.stock
assert stock.quantity_allocated == 2
def test_dashboard_change_quantity_form(request_cart_with_item, order):
cart = request_cart_with_item
group = models.DeliveryGroup.objects.create(order=order)
add_items_to_delivery_group(group, cart.lines.all())
order_line = group.items.get()
# Check available quantity validation
form = ChangeQuantityForm({'quantity': 9999},
instance=order_line)
assert not form.is_valid()
assert group.items.get().stock.quantity_allocated == 1
# Save same quantity
form = ChangeQuantityForm({'quantity': 1},
instance=order_line)
assert form.is_valid()
form.save()
assert group.items.get().stock.quantity_allocated == 1
# Increase quantity
form = ChangeQuantityForm({'quantity': 2},
instance=order_line)
assert form.is_valid()
form.save()
assert group.items.get().stock.quantity_allocated == 2
# Decrease quantity
form = ChangeQuantityForm({'quantity': 1},
instance=order_line)
assert form.is_valid()
form.save()
assert group.items.get().stock.quantity_allocated == 1
def test_order_discount(sale, order, request_cart_with_item):
cart = request_cart_with_item
group = models.DeliveryGroup.objects.create(order=order)
add_items_to_delivery_group(
group, cart.lines.all(), discounts=cart.discounts)
item = group.items.first()
assert item.get_price_per_item() == Price(currency="USD", net=5)
| HyperManTT/ECommerceSaleor | tests/test_order.py | Python | bsd-3-clause | 2,747 |
# mako/parsetree.py
# Copyright (C) 2006-2015 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""defines the parse tree components for Mako templates."""
from mako import exceptions, ast, util, filters, compat
import re
class Node(object):
"""base class for a Node in the parse tree."""
def __init__(self, source, lineno, pos, filename):
self.source = source
self.lineno = lineno
self.pos = pos
self.filename = filename
@property
def exception_kwargs(self):
return {'source': self.source, 'lineno': self.lineno,
'pos': self.pos, 'filename': self.filename}
def get_children(self):
return []
def accept_visitor(self, visitor):
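        """call visitor.visit<ClassName>(self) if the visitor defines it,
        otherwise traverse into this node's children."""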
def traverse(node):
for n in node.get_children():
n.accept_visitor(visitor)
method = getattr(visitor, "visit" + self.__class__.__name__, traverse)
method(self)
class TemplateNode(Node):
"""a 'container' node that stores the overall collection of nodes."""
def __init__(self, filename):
super(TemplateNode, self).__init__('', 0, 0, filename)
self.nodes = []
self.page_attributes = {}
def get_children(self):
return self.nodes
def __repr__(self):
return "TemplateNode(%s, %r)" % (
util.sorted_dict_repr(self.page_attributes),
self.nodes)
class ControlLine(Node):
"""defines a control line, a line-oriented python line or end tag.
e.g.::
% if foo:
(markup)
% endif
"""
has_loop_context = False
def __init__(self, keyword, isend, text, **kwargs):
super(ControlLine, self).__init__(**kwargs)
self.text = text
self.keyword = keyword
self.isend = isend
self.is_primary = keyword in ['for', 'if', 'while', 'try', 'with']
self.nodes = []
if self.isend:
self._declared_identifiers = []
self._undeclared_identifiers = []
else:
code = ast.PythonFragment(text, **self.exception_kwargs)
self._declared_identifiers = code.declared_identifiers
self._undeclared_identifiers = code.undeclared_identifiers
def get_children(self):
return self.nodes
def declared_identifiers(self):
return self._declared_identifiers
def undeclared_identifiers(self):
return self._undeclared_identifiers
def is_ternary(self, keyword):
"""return true if the given keyword is a ternary keyword
for this ControlLine"""
return keyword in {
'if':set(['else', 'elif']),
'try':set(['except', 'finally']),
'for':set(['else'])
}.get(self.keyword, [])
def __repr__(self):
return "ControlLine(%r, %r, %r, %r)" % (
self.keyword,
self.text,
self.isend,
(self.lineno, self.pos)
)
class Text(Node):
"""defines plain text in the template."""
def __init__(self, content, **kwargs):
super(Text, self).__init__(**kwargs)
self.content = content
def __repr__(self):
return "Text(%r, %r)" % (self.content, (self.lineno, self.pos))
class Code(Node):
"""defines a Python code block, either inline or module level.
e.g.::
inline:
<%
x = 12
%>
module level:
<%!
import logger
%>
"""
def __init__(self, text, ismodule, **kwargs):
super(Code, self).__init__(**kwargs)
self.text = text
self.ismodule = ismodule
self.code = ast.PythonCode(text, **self.exception_kwargs)
def declared_identifiers(self):
return self.code.declared_identifiers
def undeclared_identifiers(self):
return self.code.undeclared_identifiers
def __repr__(self):
return "Code(%r, %r, %r)" % (
self.text,
self.ismodule,
(self.lineno, self.pos)
)
class Comment(Node):
"""defines a comment line.
# this is a comment
"""
def __init__(self, text, **kwargs):
super(Comment, self).__init__(**kwargs)
self.text = text
def __repr__(self):
return "Comment(%r, %r)" % (self.text, (self.lineno, self.pos))
class Expression(Node):
"""defines an inline expression.
${x+y}
"""
def __init__(self, text, escapes, **kwargs):
super(Expression, self).__init__(**kwargs)
self.text = text
self.escapes = escapes
self.escapes_code = ast.ArgumentList(escapes, **self.exception_kwargs)
self.code = ast.PythonCode(text, **self.exception_kwargs)
def declared_identifiers(self):
return []
def undeclared_identifiers(self):
# TODO: make the "filter" shortcut list configurable at parse/gen time
return self.code.undeclared_identifiers.union(
self.escapes_code.undeclared_identifiers.difference(
set(filters.DEFAULT_ESCAPES.keys())
)
).difference(self.code.declared_identifiers)
def __repr__(self):
return "Expression(%r, %r, %r)" % (
self.text,
self.escapes_code.args,
(self.lineno, self.pos)
)
class _TagMeta(type):
"""metaclass to allow Tag to produce a subclass according to
its keyword"""
_classmap = {}
def __init__(cls, clsname, bases, dict):
if getattr(cls, '__keyword__', None) is not None:
cls._classmap[cls.__keyword__] = cls
super(_TagMeta, cls).__init__(clsname, bases, dict)
def __call__(cls, keyword, attributes, **kwargs):
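        """produce the Tag subclass registered for the given keyword; a
        'ns:defname' keyword is dispatched to CallNamespaceTag."""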
if ":" in keyword:
ns, defname = keyword.split(':')
return type.__call__(CallNamespaceTag, ns, defname,
attributes, **kwargs)
try:
cls = _TagMeta._classmap[keyword]
except KeyError:
raise exceptions.CompileException(
"No such tag: '%s'" % keyword,
source=kwargs['source'],
lineno=kwargs['lineno'],
pos=kwargs['pos'],
filename=kwargs['filename']
)
return type.__call__(cls, keyword, attributes, **kwargs)
class Tag(compat.with_metaclass(_TagMeta, Node)):
"""abstract base class for tags.
<%sometag/>
<%someothertag>
stuff
</%someothertag>
"""
__keyword__ = None
def __init__(self, keyword, attributes, expressions,
nonexpressions, required, **kwargs):
"""construct a new Tag instance.
        This constructor is not called directly; it is only called
by subclasses.
:param keyword: the tag keyword
:param attributes: raw dictionary of attribute key/value pairs
:param expressions: a set of identifiers that are legal attributes,
which can also contain embedded expressions
:param nonexpressions: a set of identifiers that are legal
attributes, which cannot contain embedded expressions
:param \**kwargs:
other arguments passed to the Node superclass (lineno, pos)
"""
super(Tag, self).__init__(**kwargs)
self.keyword = keyword
self.attributes = attributes
self._parse_attributes(expressions, nonexpressions)
missing = [r for r in required if r not in self.parsed_attributes]
if len(missing):
raise exceptions.CompileException(
"Missing attribute(s): %s" %
",".join([repr(m) for m in missing]),
**self.exception_kwargs)
self.parent = None
self.nodes = []
def is_root(self):
return self.parent is None
def get_children(self):
return self.nodes
def _parse_attributes(self, expressions, nonexpressions):
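        # split each expression-capable attribute on ${...} spans: expression
        # pieces are compiled with ast.PythonCode to track undeclared
        # identifiers, literal pieces are repr()'d, and the fragments are
        # re-joined with " + " for evaluation at render time.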
undeclared_identifiers = set()
self.parsed_attributes = {}
for key in self.attributes:
if key in expressions:
expr = []
for x in re.compile(r'(\${.+?})',
re.S).split(self.attributes[key]):
m = re.compile(r'^\${(.+?)}$', re.S).match(x)
if m:
code = ast.PythonCode(m.group(1).rstrip(),
**self.exception_kwargs)
# we aren't discarding "declared_identifiers" here,
# which we do so that list comprehension-declared
# variables aren't counted. As yet can't find a
# condition that requires it here.
undeclared_identifiers = \
undeclared_identifiers.union(
code.undeclared_identifiers)
expr.append('(%s)' % m.group(1))
else:
if x:
expr.append(repr(x))
self.parsed_attributes[key] = " + ".join(expr) or repr('')
elif key in nonexpressions:
if re.search(r'\${.+?}', self.attributes[key]):
raise exceptions.CompileException(
"Attibute '%s' in tag '%s' does not allow embedded "
"expressions" % (key, self.keyword),
**self.exception_kwargs)
self.parsed_attributes[key] = repr(self.attributes[key])
else:
raise exceptions.CompileException(
"Invalid attribute for tag '%s': '%s'" %
(self.keyword, key),
**self.exception_kwargs)
self.expression_undeclared_identifiers = undeclared_identifiers
def declared_identifiers(self):
return []
def undeclared_identifiers(self):
return self.expression_undeclared_identifiers
def __repr__(self):
return "%s(%r, %s, %r, %r)" % (self.__class__.__name__,
self.keyword,
util.sorted_dict_repr(self.attributes),
(self.lineno, self.pos),
self.nodes
)
class IncludeTag(Tag):
__keyword__ = 'include'
def __init__(self, keyword, attributes, **kwargs):
super(IncludeTag, self).__init__(
keyword,
attributes,
('file', 'import', 'args'),
(), ('file',), **kwargs)
self.page_args = ast.PythonCode(
"__DUMMY(%s)" % attributes.get('args', ''),
**self.exception_kwargs)
def declared_identifiers(self):
return []
def undeclared_identifiers(self):
identifiers = self.page_args.undeclared_identifiers.\
difference(set(["__DUMMY"])).\
difference(self.page_args.declared_identifiers)
return identifiers.union(super(IncludeTag, self).
undeclared_identifiers())
class NamespaceTag(Tag):
__keyword__ = 'namespace'
def __init__(self, keyword, attributes, **kwargs):
super(NamespaceTag, self).__init__(
keyword, attributes,
('file',),
('name','inheritable',
'import','module'),
(), **kwargs)
self.name = attributes.get('name', '__anon_%s' % hex(abs(id(self))))
if not 'name' in attributes and not 'import' in attributes:
raise exceptions.CompileException(
"'name' and/or 'import' attributes are required "
"for <%namespace>",
**self.exception_kwargs)
if 'file' in attributes and 'module' in attributes:
raise exceptions.CompileException(
"<%namespace> may only have one of 'file' or 'module'",
**self.exception_kwargs
)
def declared_identifiers(self):
return []
class TextTag(Tag):
__keyword__ = 'text'
def __init__(self, keyword, attributes, **kwargs):
super(TextTag, self).__init__(
keyword,
attributes, (),
('filter'), (), **kwargs)
self.filter_args = ast.ArgumentList(
attributes.get('filter', ''),
**self.exception_kwargs)
def undeclared_identifiers(self):
return self.filter_args.\
undeclared_identifiers.\
difference(filters.DEFAULT_ESCAPES.keys()).union(
self.expression_undeclared_identifiers
)
class DefTag(Tag):
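    """defines a <%def> tag, e.g. <%def name="foo(x, y)">...</%def>"""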
__keyword__ = 'def'
def __init__(self, keyword, attributes, **kwargs):
expressions = ['buffered', 'cached'] + [
c for c in attributes if c.startswith('cache_')]
super(DefTag, self).__init__(
keyword,
attributes,
expressions,
('name', 'filter', 'decorator'),
('name',),
**kwargs)
name = attributes['name']
if re.match(r'^[\w_]+$', name):
raise exceptions.CompileException(
"Missing parenthesis in %def",
**self.exception_kwargs)
self.function_decl = ast.FunctionDecl("def " + name + ":pass",
**self.exception_kwargs)
self.name = self.function_decl.funcname
self.decorator = attributes.get('decorator', '')
self.filter_args = ast.ArgumentList(
attributes.get('filter', ''),
**self.exception_kwargs)
is_anonymous = False
is_block = False
@property
def funcname(self):
return self.function_decl.funcname
def get_argument_expressions(self, **kw):
return self.function_decl.get_argument_expressions(**kw)
def declared_identifiers(self):
return self.function_decl.allargnames
def undeclared_identifiers(self):
res = []
for c in self.function_decl.defaults:
res += list(ast.PythonCode(c, **self.exception_kwargs).
undeclared_identifiers)
return set(res).union(
self.filter_args.\
undeclared_identifiers.\
difference(filters.DEFAULT_ESCAPES.keys())
).union(
self.expression_undeclared_identifiers
).difference(
self.function_decl.allargnames
)
class BlockTag(Tag):
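    """defines a <%block> tag, e.g. <%block name="header">...</%block>;
    the name is optional (an anonymous block)."""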
__keyword__ = 'block'
def __init__(self, keyword, attributes, **kwargs):
expressions = ['buffered', 'cached', 'args'] + [
c for c in attributes if c.startswith('cache_')]
super(BlockTag, self).__init__(
keyword,
attributes,
expressions,
('name','filter', 'decorator'),
(),
**kwargs)
name = attributes.get('name')
if name and not re.match(r'^[\w_]+$',name):
raise exceptions.CompileException(
"%block may not specify an argument signature",
**self.exception_kwargs)
if not name and attributes.get('args', None):
raise exceptions.CompileException(
"Only named %blocks may specify args",
**self.exception_kwargs
)
self.body_decl = ast.FunctionArgs(attributes.get('args', ''),
**self.exception_kwargs)
self.name = name
self.decorator = attributes.get('decorator', '')
self.filter_args = ast.ArgumentList(
attributes.get('filter', ''),
**self.exception_kwargs)
is_block = True
@property
def is_anonymous(self):
return self.name is None
@property
def funcname(self):
return self.name or "__M_anon_%d" % (self.lineno, )
def get_argument_expressions(self, **kw):
return self.body_decl.get_argument_expressions(**kw)
def declared_identifiers(self):
return self.body_decl.allargnames
def undeclared_identifiers(self):
return (self.filter_args.\
undeclared_identifiers.\
difference(filters.DEFAULT_ESCAPES.keys())
).union(self.expression_undeclared_identifiers)
class CallTag(Tag):
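    """defines a <%call> tag, e.g. <%call expr="foo()">...</%call>;
    the 'expr' attribute is required."""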
__keyword__ = 'call'
def __init__(self, keyword, attributes, **kwargs):
super(CallTag, self).__init__(keyword, attributes,
('args'), ('expr',), ('expr',), **kwargs)
self.expression = attributes['expr']
self.code = ast.PythonCode(self.expression, **self.exception_kwargs)
self.body_decl = ast.FunctionArgs(attributes.get('args', ''),
**self.exception_kwargs)
def declared_identifiers(self):
return self.code.declared_identifiers.union(self.body_decl.allargnames)
def undeclared_identifiers(self):
return self.code.undeclared_identifiers.\
difference(self.code.declared_identifiers)
class CallNamespaceTag(Tag):
def __init__(self, namespace, defname, attributes, **kwargs):
super(CallNamespaceTag, self).__init__(
namespace + ":" + defname,
attributes,
tuple(attributes.keys()) + ('args', ),
(),
(),
**kwargs)
self.expression = "%s.%s(%s)" % (
namespace,
defname,
",".join(["%s=%s" % (k, v) for k, v in
self.parsed_attributes.items()
if k != 'args'])
)
self.code = ast.PythonCode(self.expression, **self.exception_kwargs)
self.body_decl = ast.FunctionArgs(
attributes.get('args', ''),
**self.exception_kwargs)
def declared_identifiers(self):
return self.code.declared_identifiers.union(self.body_decl.allargnames)
def undeclared_identifiers(self):
return self.code.undeclared_identifiers.\
difference(self.code.declared_identifiers)
class InheritTag(Tag):
__keyword__ = 'inherit'
def __init__(self, keyword, attributes, **kwargs):
super(InheritTag, self).__init__(
keyword, attributes,
('file',), (), ('file',), **kwargs)
class PageTag(Tag):
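    """defines the <%page> tag, e.g. <%page args="x, y=5" cached="True"/>"""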
__keyword__ = 'page'
def __init__(self, keyword, attributes, **kwargs):
expressions = ['cached', 'args', 'expression_filter', 'enable_loop'] + [
c for c in attributes if c.startswith('cache_')]
super(PageTag, self).__init__(
keyword,
attributes,
expressions,
(),
(),
**kwargs)
self.body_decl = ast.FunctionArgs(attributes.get('args', ''),
**self.exception_kwargs)
self.filter_args = ast.ArgumentList(
attributes.get('expression_filter', ''),
**self.exception_kwargs)
def declared_identifiers(self):
return self.body_decl.allargnames
| sunze/py_flask | venv/lib/python3.4/site-packages/mako/parsetree.py | Python | mit | 20,434 |
# Grayscale Binary Filter Example
#
# This script shows off the binary image filter. You may pass binary any
# number of thresholds to segment the image by.
import sensor, image, time
sensor.reset()
sensor.set_framesize(sensor.QVGA)
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.skip_frames(time = 2000)
clock = time.clock()
low_threshold = (0, 50)
high_threshold = (205, 255)
while(True):
# Test low threshold
for i in range(100):
clock.tick()
img = sensor.snapshot()
img.binary([low_threshold])
print(clock.fps())
# Test high threshold
for i in range(100):
clock.tick()
img = sensor.snapshot()
img.binary([high_threshold])
print(clock.fps())
# Test not low threshold
for i in range(100):
clock.tick()
img = sensor.snapshot()
img.binary([low_threshold], invert = 1)
print(clock.fps())
# Test not high threshold
for i in range(100):
clock.tick()
img = sensor.snapshot()
img.binary([high_threshold], invert = 1)
print(clock.fps())
| kwagyeman/openmv | scripts/examples/OpenMV/04-Image-Filters/grayscale_binary_filter.py | Python | mit | 1,098 |
# test if eval raises SyntaxError
try:
print(eval("[1, *a]"))
except SyntaxError:
print("SyntaxError")
| rubencabrera/micropython | tests/basics/builtin_eval_error.py | Python | mit | 112 |
# -*- python -*-
# Copyright (C) 2009-2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/cbuild/slaves/oorts/crosstool-ng/builds/arm-none-eabi-win32/install/share/gcc-4.8.3/python'
libdir = '/cbuild/slaves/oorts/crosstool-ng/builds/arm-none-eabi-win32/install/arm-none-eabi/lib/thumb/v7ve/simdvfpv4/softfp'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Load the pretty-printers.
from libstdcxx.v6.printers import register_libstdcxx_printers
register_libstdcxx_printers (gdb.current_objfile ())
| darth-vader-lg/glcncrpi | tools/arm-bcm2708/gcc-linaro-arm-none-eabi-4.8-2014.04/arm-none-eabi/lib/thumb/v7ve/simdvfpv4/softfp/libstdc++.a-gdb.py | Python | gpl-3.0 | 2,462 |
#!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@license:
'''
SPELL=u'tàixī'
CN=u'太溪'
NAME=u'taixi41'
CHANNEL='kidney'
CHANNEL_FULLNAME='KidneyChannelofFoot-Shaoyin'
SEQ='KI3'
if __name__ == '__main__':
pass
| sinotradition/meridian | meridian/acupoints/taixi41.py | Python | apache-2.0 | 228 |
# Copyright (C) 2010-2017 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from optparse import make_option
from django.core.management.base import CommandError
from snf_django.management.commands import SynnefoCommand
from synnefo.management import common
from synnefo.logic import ips
from snf_django.lib.api import Credentials
class Command(SynnefoCommand):
help = "Allocate a new floating IP"
option_list = SynnefoCommand.option_list + (
make_option(
'--network',
dest='network_id',
help="The ID of the network to allocate the address from"),
make_option(
'--address',
dest='address',
help="The address to be allocated"),
make_option(
'--user',
dest='user',
default=None,
help='The owner of the floating IP'),
make_option("--project", dest="project",
help="Unique identifier of the project of the floating IP"),
)
@common.convert_api_faults
def handle(self, *args, **options):
if args:
raise CommandError("Command doesn't accept any arguments")
network_id = options['network_id']
address = options['address']
user = options['user']
project = options['project']
if not user:
raise CommandError("'user' is required for floating IP creation")
if network_id is not None:
network = common.get_resource("network", network_id)
if network.deleted:
raise CommandError("Network '%s' is deleted" % network.id)
if not network.floating_ip_pool:
raise CommandError("Network '%s' is not a floating IP pool."
% network)
credentials = Credentials(user)
floating_ip = ips.create_floating_ip(credentials,
project=project,
network_id=network_id,
address=address)
self.stdout.write("Created floating IP '%s'.\n" % floating_ip)
| grnet/synnefo | snf-cyclades-app/synnefo/logic/management/commands/floating-ip-create.py | Python | gpl-3.0 | 2,746 |
from fuzzconfig import FuzzConfig
import nonrouting
import fuzzloops
import re
cfg = FuzzConfig(job="REGCFG", device="LIFCL-40", sv="../shared/empty_40.v", tiles=["R2C2:PLC"])
def main():
cfg.setup()
empty = cfg.build_design(cfg.sv, {})
cfg.sv = "ff.v"
def per_slice(slicen):
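        # each slice provides two registers (REG0/REG1); fuzz the
        # register-related settings for both of them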
for r in range(2):
def get_substs(regset="SET", sel="DL", lsrmode="LSR", srmode="LSR_OVER_CE", gsr="DISABLED", mux="", used="", arc=""):
return dict(z=slicen, k=str(r), mux=mux, regset=regset,
sel=sel, lsrmode=lsrmode, srmode=srmode, gsr=gsr, used=used, arc=arc)
def get_used_substs(used):
u = ""
arc = ""
if used == "YES":
u = ", .Q{}(q) ".format(r)
arc = "R2C2_JQ{}.R2C2_JQ{}_SLICE{}".format(str(("ABCD".index(slicen)*2)+r), r, slicen)
return get_substs(used=u, arc=arc)
def get_ddr_substs(ddr):
return get_substs(mux="REGDDR:{}".format(ddr))
def get_clkmux_substs(mux):
if mux == "CLK":
cm = "CLK:::CLK=#SIG"
elif mux == "INV":
cm = "CLK:::CLK=#INV"
elif mux == "1":
cm = "CONST:::CONST=1"
elif mux == "0":
cm = "#OFF"
elif mux == "DDR":
return get_substs(mux="REGDDR:ENABLED")
return get_substs(mux="CLKMUX:{}".format(cm))
def get_cemux_substs(mux):
if mux == "CE":
cm = "CE:::CE=#SIG"
elif mux == "INV":
cm = "CE:::CE=#INV"
return get_substs(mux="CEMUX:{}".format(cm))
def get_lsrmux_substs(mux):
if mux == "LSR":
cm = "LSR:::CE=#SIG"
elif mux == "INV":
cm = "LSR:::LSR=#INV"
elif mux == "0":
cm = "CONST:::CONST=0"
return get_substs(mux="LSRMUX:{}".format(cm))
nonrouting.fuzz_enum_setting(cfg, empty, "SLICE{}.REG{}.USED".format(slicen, r), ["YES", "NO"],
lambda x: get_used_substs(x), False,
desc="`YES` if SLICE {} register {} (Q{}) is used".format(slicen, r, r))
nonrouting.fuzz_enum_setting(cfg, empty, "SLICE{}.REG{}.REGSET".format(slicen, r), ["RESET", "SET"],
lambda x: get_substs(regset=x), True,
desc="SLICE {} register {} set/reset and init value".format(slicen, r))
nonrouting.fuzz_enum_setting(cfg, empty, "SLICE{}.REG{}.SEL".format(slicen, r), ["DL", "DF"],
lambda x: get_substs(sel=x), True,
desc="SLICE {} register {} data selection. `DL`=LUT output, `DF`=bypass (M{})".format(slicen, r, r))
nonrouting.fuzz_enum_setting(cfg, empty, "SLICE{}.REG{}.LSRMODE".format(slicen, r), ["LSR", "PRLD"],
lambda x: get_substs(lsrmode=x), True)
h = "A/B" if slicen in ("A", "B") else "C/D"
nonrouting.fuzz_enum_setting(cfg, empty, "SLICE{}.GSR".format(slicen, r), ["ENABLED", "DISABLED"],
lambda x: get_substs(gsr=x), False,
desc="if `ENABLED`, then FFs in SLICE {} are set/reset by user GSR signal".format(h))
nonrouting.fuzz_enum_setting(cfg, empty, "SLICE{}.SRMODE".format(slicen, r), ["ASYNC", "LSR_OVER_CE"],
lambda x: get_substs(srmode=x), False,
desc="selects asynchronous set/reset, or sync set/reset which overrides CE for FFs in SLICE {}".format(h))
nonrouting.fuzz_enum_setting(cfg, empty, "SLICE{}.REGDDR".format(slicen, r), ["ENABLED", "DISABLED"],
lambda x: get_ddr_substs(x), False,
desc="if ENABLED then FFs in SLICE {} are clocked by both edges of the clock".format(h))
nonrouting.fuzz_enum_setting(cfg, empty, "SLICE{}.CLKMUX".format(slicen, r), ["CLK", "INV", "0", "DDR"],
lambda x: get_clkmux_substs(x), False,
desc="selects clock polarity")
nonrouting.fuzz_enum_setting(cfg, empty, "SLICE{}.CEMUX".format(slicen, r), ["CE", "INV"],
lambda x: get_cemux_substs(x), False,
desc="selects clock enable polarity")
nonrouting.fuzz_enum_setting(cfg, empty, "SLICE{}.LSRMUX".format(slicen, r), ["LSR", "INV", "0"],
lambda x: get_lsrmux_substs(x), False,
desc="selects set/reset gating and inversion")
fuzzloops.parallel_foreach(["A", "B", "C", "D"], per_slice)
if __name__ == "__main__":
main()
| gatecat/prjoxide | fuzzers/LIFCL/011-reg-config/fuzzer.py | Python | isc | 4,624 |
"""Provides helper classes for testing option handling in pip
"""
import os
from pip._internal.cli import cmdoptions
from pip._internal.cli.base_command import Command
from pip._internal.commands import commands_dict
class FakeCommand(Command):
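    """Minimal command used by the tests to exercise pip's index option parsing."""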
name = 'fake'
summary = name
def main(self, args):
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group,
self.parser,
)
self.parser.add_option_group(index_opts)
return self.parse_args(args)
class AddFakeCommandMixin(object):
def setup(self):
self.environ_before = os.environ.copy()
commands_dict[FakeCommand.name] = FakeCommand
def teardown(self):
os.environ = self.environ_before
commands_dict.pop(FakeCommand.name)
| techtonik/pip | tests/lib/options_helpers.py | Python | mit | 792 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# sale_contractmanagement
# (C) 2014 big-consulting GmbH
# (C) 2014 OpenGlobe
# Author: Thorsten Vocks (openBIG.org)
# Author: Mikołaj Dziurzyński, Grzegorz Grzelak (OpenGlobe)
#
# All Rights reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Contracts for Software and Media Sales",
"version": "0.05 (8.0)",
"author": "openbig.org",
"website": "http://www.openbig.org",
"category": "CRM, Project, Sale",
"description": """
Sell products and manage the contract
=====================================
This module adds some functionality to clean up the
usability of contract maintenance. The purpose is to
forward sales order lines of licensed or subscribed
products to projects to create, apply or extend a
contract directly from a task.
Create tasks from order lines
-----------------------------
On confirmation of a sale order line Odoo creates automatically
a task in the defined project. This basic functionality from
odoo core was extended to forward all required informations
to create in a very short time a proper contract.
Furthermore it allows a smooth followup process to bill,
to distribute license and digital media or to apply SLA
terms and conditions.
Contract Creation
-----------------
Contract managers may open generated tasks to create directly
from this view a new contract or to assign an existing contract.
The automatically assigned contract templates apply product or
sale order related data like customer or contract
contact / contract owner, start and end dates, recurrent
billing and reminder options.
Contracts in sales menu
------------------------
After successful contract creation they are visible
to the salespeople under the sales application menu
"Contracts", which is also a Odoo core functionality.
From this menu it is also possible to create or to work
on existing contracts. This may be the case, if
prolongations have to be sold or if salespeople needs
informations from the contract, like f.e. license keys
files or SLA terms and conditions.
Extensions
==========
By other modules it is possible to extend the modules
functionality on demand, f.e. by the module product_digital_media.
It is also possible to apply SLA terms and conditions by the
module project_sla from Daniel Reis under maintenance of
the Odoo Community Association.
Version History
===============
# version 0.02:
* when creating a contract, the customer is added to followers of it -> usage with email tab on partner form\n
# version 0.03:
* New Contract button is now visible from task form (aside from sale order line form)
* Only customer related contracts under Contracts menu in sale_service
* Button allowing to go from project to its analytic account is now only visible to administrator (Settings right)
# version 0.04:
* Basic license management functionality
# version 0.05:
* Sale Contracts button on partner form
Contributors
============
* Thorsten Vocks (OpenBIG.org)
* Mikołaj Dziurzyński (OpenBIG.org)
""",
"depends": [
"account_analytic_analysis",
"project_sla",
"sale_service",
"product_digital_media",
],
"demo_xml": [],
"data": [
'data/email_template.xml',
'res_config_view.xml',
'wizard/res_license_update_wizard.xml',
'res_license.xml',
'res_partner_view.xml',
"account_invoice_view.xml",
"sale_contractmanagement_view.xml",
"product_view.xml",
"sale_view.xml",
'security/ir.model.access.csv',
],
"active": False,
"license": "AGPL-3",
"installable": True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| openbig/odoo-contract | sale_contractmanagement/__openerp__.py | Python | agpl-3.0 | 4,630 |
import sys
if sys.version_info[:2] == (2, 6): # pragma: no cover
import unittest2 as unittest
else: # pragma: no cover
import unittest
class Test_asbool(unittest.TestCase):
def _callFUT(self, s):
from waitress.adjustments import asbool
return asbool(s)
def test_s_is_None(self):
result = self._callFUT(None)
self.assertEqual(result, False)
def test_s_is_True(self):
result = self._callFUT(True)
self.assertEqual(result, True)
def test_s_is_False(self):
result = self._callFUT(False)
self.assertEqual(result, False)
def test_s_is_true(self):
result = self._callFUT('True')
self.assertEqual(result, True)
def test_s_is_false(self):
result = self._callFUT('False')
self.assertEqual(result, False)
def test_s_is_yes(self):
result = self._callFUT('yes')
self.assertEqual(result, True)
def test_s_is_on(self):
result = self._callFUT('on')
self.assertEqual(result, True)
def test_s_is_1(self):
result = self._callFUT(1)
self.assertEqual(result, True)
class TestAdjustments(unittest.TestCase):
def _makeOne(self, **kw):
from waitress.adjustments import Adjustments
return Adjustments(**kw)
def test_goodvars(self):
inst = self._makeOne(
host='host',
port='8080',
threads='5',
trusted_proxy='192.168.1.1',
url_scheme='https',
backlog='20',
recv_bytes='200',
send_bytes='300',
outbuf_overflow='400',
inbuf_overflow='500',
connection_limit='1000',
cleanup_interval='1100',
channel_timeout='1200',
log_socket_errors='true',
max_request_header_size='1300',
max_request_body_size='1400',
expose_tracebacks='true',
ident='abc',
asyncore_loop_timeout='5',
asyncore_use_poll=True,
unix_socket='/tmp/waitress.sock',
unix_socket_perms='777',
url_prefix='///foo/',
)
self.assertEqual(inst.host, 'host')
self.assertEqual(inst.port, 8080)
self.assertEqual(inst.threads, 5)
self.assertEqual(inst.trusted_proxy, '192.168.1.1')
self.assertEqual(inst.url_scheme, 'https')
self.assertEqual(inst.backlog, 20)
self.assertEqual(inst.recv_bytes, 200)
self.assertEqual(inst.send_bytes, 300)
self.assertEqual(inst.outbuf_overflow, 400)
self.assertEqual(inst.inbuf_overflow, 500)
self.assertEqual(inst.connection_limit, 1000)
self.assertEqual(inst.cleanup_interval, 1100)
self.assertEqual(inst.channel_timeout, 1200)
self.assertEqual(inst.log_socket_errors, True)
self.assertEqual(inst.max_request_header_size, 1300)
self.assertEqual(inst.max_request_body_size, 1400)
self.assertEqual(inst.expose_tracebacks, True)
self.assertEqual(inst.asyncore_loop_timeout, 5)
self.assertEqual(inst.asyncore_use_poll, True)
self.assertEqual(inst.ident, 'abc')
self.assertEqual(inst.unix_socket, '/tmp/waitress.sock')
self.assertEqual(inst.unix_socket_perms, 0o777)
self.assertEqual(inst.url_prefix, '/foo')
def test_badvar(self):
self.assertRaises(ValueError, self._makeOne, nope=True)
class TestCLI(unittest.TestCase):
def parse(self, argv):
from waitress.adjustments import Adjustments
return Adjustments.parse_args(argv)
def test_noargs(self):
opts, args = self.parse([])
self.assertDictEqual(opts, {'call': False, 'help': False})
self.assertSequenceEqual(args, [])
def test_help(self):
opts, args = self.parse(['--help'])
self.assertDictEqual(opts, {'call': False, 'help': True})
self.assertSequenceEqual(args, [])
def test_call(self):
opts, args = self.parse(['--call'])
self.assertDictEqual(opts, {'call': True, 'help': False})
self.assertSequenceEqual(args, [])
def test_both(self):
opts, args = self.parse(['--call', '--help'])
self.assertDictEqual(opts, {'call': True, 'help': True})
self.assertSequenceEqual(args, [])
def test_positive_boolean(self):
opts, args = self.parse(['--expose-tracebacks'])
self.assertDictContainsSubset({'expose_tracebacks': 'true'}, opts)
self.assertSequenceEqual(args, [])
def test_negative_boolean(self):
opts, args = self.parse(['--no-expose-tracebacks'])
self.assertDictContainsSubset({'expose_tracebacks': 'false'}, opts)
self.assertSequenceEqual(args, [])
def test_cast_params(self):
opts, args = self.parse([
'--host=localhost',
'--port=80',
'--unix-socket-perms=777'
])
self.assertDictContainsSubset({
'host': 'localhost',
'port': '80',
'unix_socket_perms':'777',
}, opts)
self.assertSequenceEqual(args, [])
def test_bad_param(self):
import getopt
self.assertRaises(getopt.GetoptError, self.parse, ['--no-host'])
| grepme/CMPUT410Lab01 | virt_env/virt1/lib/python2.7/site-packages/waitress-0.8.9-py2.7.egg/waitress/tests/test_adjustments.py | Python | apache-2.0 | 5,260 |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Zhijiang Yao, Jie Dong and Dongsheng Cao
# All rights reserved.
# This file is part of the PyBioMed.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the PyBioMed source tree.
"""
Used to process original data.
Created on Wed May 18 14:06:37 2016
@author: yzj
"""
# Core Library modules
import sys
ALPHABET = "ACGT"
class Seq:
def __init__(self, name, seq, no):
self.name = name
self.seq = seq.upper()
self.no = no
self.length = len(seq)
def __str__(self):
"""Output seq when 'print' method is called."""
return "%s\tNo:%s\tlength:%s\n%s" % (
self.name,
str(self.no),
str(self.length),
self.seq,
)
def IsUnderAlphabet(s, alphabet):
"""
#################################################################
    Judge whether the string is within the scope of the alphabet or not.
:param s: The string.
:param alphabet: alphabet.
Return True or the error character.
#################################################################
"""
for e in s:
if e not in alphabet:
return e
return True
def IsFasta(seq):
"""
#################################################################
    Judge whether the Seq object is in FASTA format.
    Three situations are checked:
1. No seq name.
2. Seq name is illegal.
3. No sequence.
:param seq: Seq object.
#################################################################
"""
if not seq.name:
error_info = "Error, sequence " + str(seq.no) + " has no sequence name."
print(seq)
sys.stderr.write(error_info)
return False
if -1 != seq.name.find(">"):
error_info = "Error, sequence " + str(seq.no) + " name has > character."
sys.stderr.write(error_info)
return False
if 0 == seq.length:
error_info = "Error, sequence " + str(seq.no) + " is null."
sys.stderr.write(error_info)
return False
return True
def ReadFasta(f):
"""
#################################################################
Read a fasta file.
:param f: HANDLE to input. e.g. sys.stdin, or open(<file>)
Return Seq obj list.
#################################################################
"""
name, seq = "", ""
count = 0
seq_list = []
lines = f.readlines()
for line in lines:
if not line:
break
if ">" == line[0]:
if 0 != count or (0 == count and seq != ""):
if IsFasta(Seq(name, seq, count)):
seq_list.append(Seq(name, seq, count))
else:
sys.exit(0)
seq = ""
name = line[1:].strip()
count += 1
else:
seq += line.strip()
count += 1
if IsFasta(Seq(name, seq, count)):
seq_list.append(Seq(name, seq, count))
else:
sys.exit(0)
return seq_list
def ReadFastaYield(f):
"""
#################################################################
Yields a Seq object.
:param f: HANDLE to input. e.g. sys.stdin, or open(<file>)
#################################################################
"""
name, seq = "", ""
count = 0
while True:
line = f.readline()
if not line:
break
if ">" == line[0]:
if 0 != count or (0 == count and seq != ""):
if IsFasta(Seq(name, seq, count)):
yield Seq(name, seq, count)
else:
sys.exit(0)
seq = ""
name = line[1:].strip()
count += 1
else:
seq += line.strip()
if IsFasta(Seq(name, seq, count)):
yield Seq(name, seq, count)
else:
sys.exit(0)
def ReadFastaCheckDna(f):
"""
#################################################################
Read the fasta file, and check its legality.
:param f: HANDLE to input. e.g. sys.stdin, or open(<file>)
Return the seq list.
#################################################################
"""
seq_list = []
for e in ReadFastaYield(f):
# print e
res = IsUnderAlphabet(e.seq, ALPHABET)
if res:
seq_list.append(e)
else:
error_info = (
"Sorry, sequence "
+ str(e.no)
+ " has character "
+ str(res)
+ ".(The character must be A or C or G or T)"
)
            sys.stderr.write(error_info)
sys.exit(0)
return seq_list
def GetSequenceCheckDna(f):
"""
#################################################################
Read the fasta file.
Input: f: HANDLE to input. e.g. sys.stdin, or open(<file>)
Return the sequence list.
#################################################################
"""
sequence_list = []
for e in ReadFastaYield(f):
# print e
res = IsUnderAlphabet(e.seq, ALPHABET)
if res is not True:
error_info = (
"Sorry, sequence "
+ str(e.no)
+ " has character "
+ str(res)
+ ".(The character must be A, C, G or T)"
)
sys.stderr.write(error_info)
sys.exit(0)
else:
sequence_list.append(e.seq)
return sequence_list
def IsSequenceList(sequence_list):
"""
#################################################################
    Judge whether the sequence list is within the scope of the alphabet and
    convert lowercase to uppercase.
#################################################################
"""
count = 0
new_sequence_list = []
for e in sequence_list:
e = e.upper()
count += 1
res = IsUnderAlphabet(e, ALPHABET)
if res is not True:
error_info = (
"Sorry, sequence "
+ str(count)
+ " has illegal character "
+ str(res)
+ ".(The character must be A, C, G or T)"
)
sys.stderr.write(error_info)
return False
else:
new_sequence_list.append(e)
return new_sequence_list
def GetData(input_data, desc=False):
"""
#################################################################
Get sequence data from file or list with check.
:param input_data: type file or list
:param desc: with this option, the return value will be a Seq object list(it only works in file object).
:return: sequence data or shutdown.
#################################################################
"""
if hasattr(input_data, "read"):
if desc is False:
return GetSequenceCheckDna(input_data)
else:
return ReadFastaCheckDna(input_data)
elif isinstance(input_data, list):
input_data = IsSequenceList(input_data)
if input_data is not False:
return input_data
else:
sys.exit(0)
else:
error_info = (
"Sorry, the parameter in get_data method must be list or file type."
)
sys.stderr.write(error_info)
sys.exit(0)
# Some basic functions for generating feature vectors
def Frequency(tol_str, tar_str):
"""
#################################################################
Generate the frequency of tar_str in tol_str.
:param tol_str: mother string.
:param tar_str: substring.
#################################################################
"""
i, j, tar_count = 0, 0, 0
len_tol_str = len(tol_str)
len_tar_str = len(tar_str)
while i < len_tol_str and j < len_tar_str:
if tol_str[i] == tar_str[j]:
i += 1
j += 1
if j >= len_tar_str:
tar_count += 1
i = i - j + 1
j = 0
else:
i = i - j + 1
j = 0
return tar_count
def WriteLibsvm(vector_list, label_list, write_file):
"""
#################################################################
Write the vector into disk in libSVM format.
#################################################################
"""
len_vector_list = len(vector_list)
len_label_list = len(label_list)
if len_vector_list == 0:
sys.stderr.write("The vector is none.")
sys.exit(1)
if len_label_list == 0:
sys.stderr.write("The label is none.")
sys.exit(1)
if len_vector_list != len_label_list:
sys.stderr.write("The length of vector and label is different.")
sys.exit(1)
with open(write_file, "w") as f:
len_vector = len(vector_list[0])
for i in range(len_vector_list):
temp_write = str(label_list[i])
for j in range(0, len_vector):
temp_write += " " + str(j + 1) + ":" + str(vector_list[i][j])
f.write(temp_write)
f.write("\n")
def GeneratePhycheValue(
k, phyche_index=None, all_property=False, extra_phyche_index=None
):
"""
#################################################################
Combine the user selected phyche_list, is_all_property and
extra_phyche_index to a new standard phyche_value.
#################################################################
"""
if phyche_index is None:
phyche_index = []
if extra_phyche_index is None:
extra_phyche_index = {}
diphyche_list = [
"Base stacking",
"Protein induced deformability",
"B-DNA twist",
"Dinucleotide GC Content",
"A-philicity",
"Propeller twist",
"Duplex stability:(freeenergy)",
"Duplex tability(disruptenergy)",
"DNA denaturation",
"Bending stiffness",
"Protein DNA twist",
"Stabilising energy of Z-DNA",
"Aida_BA_transition",
"Breslauer_dG",
"Breslauer_dH",
"Breslauer_dS",
"Electron_interaction",
"Hartman_trans_free_energy",
"Helix-Coil_transition",
"Ivanov_BA_transition",
"Lisser_BZ_transition",
"Polar_interaction",
"SantaLucia_dG",
"SantaLucia_dH",
"SantaLucia_dS",
"Sarai_flexibility",
"Stability",
"Stacking_energy",
"Sugimoto_dG",
"Sugimoto_dH",
"Sugimoto_dS",
"Watson-Crick_interaction",
"Twist",
"Tilt",
"Roll",
"Shift",
"Slide",
"Rise",
]
triphyche_list = [
"Dnase I",
"Bendability (DNAse)",
"Bendability (consensus)",
"Trinucleotide GC Content",
"Nucleosome positioning",
"Consensus_roll",
"Consensus-Rigid",
"Dnase I-Rigid",
"MW-Daltons",
"MW-kg",
"Nucleosome",
"Nucleosome-Rigid",
]
# Set and check physicochemical properties.
if 2 == k:
if all_property is True:
phyche_index = diphyche_list
else:
for e in phyche_index:
if e not in diphyche_list:
error_info = (
"Sorry, the physicochemical properties " + e + " is not exit."
)
import sys
sys.stderr.write(error_info)
sys.exit(0)
elif 3 == k:
if all_property is True:
phyche_index = triphyche_list
else:
for e in phyche_index:
if e not in triphyche_list:
error_info = (
"Sorry, the physicochemical properties " + e + " is not exit."
)
import sys
sys.stderr.write(error_info)
sys.exit(0)
# Generate phyche_value.
from PyBioMed.PyDNA.PyDNApsenacutil import GetPhycheIndex, ExtendPhycheIndex
return ExtendPhycheIndex(GetPhycheIndex(k, phyche_index), extra_phyche_index)
def ConvertPhycheIndexToDict(phyche_index):
"""
#################################################################
Convert phyche index from list to dict.
#################################################################
"""
# for e in phyche_index:
# print e
len_index_value = len(phyche_index[0])
k = 0
for i in range(1, 10):
if len_index_value < 4 ** i:
error_infor = "Sorry, the number of each index value is must be 4^k."
sys.stdout.write(error_infor)
sys.exit(0)
if len_index_value == 4 ** i:
k = i
break
from PyBioMed.PyDNA.PyDNAnacutil import MakeKmerList
kmer_list = MakeKmerList(k, ALPHABET)
# print kmer_list
len_kmer = len(kmer_list)
phyche_index_dict = {}
for kmer in kmer_list:
phyche_index_dict[kmer] = []
# print phyche_index_dict
phyche_index = list(zip(*phyche_index))
for i in range(len_kmer):
phyche_index_dict[kmer_list[i]] = list(phyche_index[i])
return phyche_index_dict
def StandardDeviation(value_list):
"""
#################################################################
Return standard deviation.
#################################################################
"""
from math import sqrt
from math import pow
n = len(value_list)
average_value = sum(value_list) * 1.0 / n
return sqrt(sum([pow(e - average_value, 2) for e in value_list]) * 1.0 / (n - 1))
def NormalizeIndex(phyche_index, is_convert_dict=False):
"""
#################################################################
Normalize the physicochemical index.
#################################################################
"""
normalize_phyche_value = []
for phyche_value in phyche_index:
average_phyche_value = sum(phyche_value) * 1.0 / len(phyche_value)
sd_phyche = StandardDeviation(phyche_value)
normalize_phyche_value.append(
[round((e - average_phyche_value) / sd_phyche, 2) for e in phyche_value]
)
if is_convert_dict is True:
return ConvertPhycheIndexToDict(normalize_phyche_value)
return normalize_phyche_value
def DNAChecks(s):
for e in s:
if e not in ALPHABET:
return e
return True
if __name__ == "__main__":
re = ["GACTGAACTGCACTTTGGTTTCATATTATTTGCTC"]
phyche_index = [
[
1.019,
-0.918,
0.488,
0.567,
0.567,
-0.070,
-0.579,
0.488,
-0.654,
-2.455,
-0.070,
-0.918,
1.603,
-0.654,
0.567,
1.019,
]
]
print(NormalizeIndex(phyche_index, is_convert_dict=False)[0])
| gadsbyfly/PyBioMed | PyBioMed/PyPretreat/PyPretreatDNA.py | Python | bsd-3-clause | 15,091 |
import os
import pytest
from stingray.simulator import transfer
class TestSimulator(object):
@classmethod
def setup_class(self):
arr = [[1 for j in range(5)] for i in range(10)]
self.transfer = transfer.TransferFunction(arr)
def test_incorrect_rows(self):
"""Test if exception is raised in case there is 1
or no row.
"""
arr = [[0 for j in range(5)] for i in range(1)]
with pytest.raises(ValueError):
transfer.TransferFunction(arr)
def test_incorrect_columns(self):
"""Test if exception is raised in case there is 1
or no column.
"""
arr = [[0 for j in range(1)] for i in range(10)]
with pytest.raises(ValueError):
transfer.TransferFunction(arr)
def test_time_response(self):
"""Test obtaining a time-resolved response."""
self.transfer.time_response()
def test_time_response_with_energy_ranges(self):
self.transfer.time_response(e0=3.3, e1=4.7)
def test_time_response_with_incorrect_ranges(self):
"""Test that incorrect energy ranges raises a
Value Error.
"""
with pytest.raises(ValueError):
self.transfer.time_response(e0=-1, e1=2)
with pytest.raises(ValueError):
self.transfer.time_response(e0=3, e1=12)
with pytest.raises(ValueError):
self.transfer.time_response(e0=3.1, e1=3.2)
def test_energy_response(self):
"""Test obtaining an energy-resolved response."""
self.transfer.energy_response()
def test_plot_with_incorrect_type(self):
with pytest.raises(ValueError):
self.transfer.plot('unsupported')
def test_plot_time(self):
self.transfer.plot(response='time')
def test_plot_energy(self):
self.transfer.plot(response='energy')
def test_plot_2d(self):
self.transfer.plot(response='2d')
def test_plot_with_save(self):
self.transfer.plot(save=True)
os.remove('out.png')
def test_plot_with_filename(self):
self.transfer.plot(save=True, filename='response.png')
os.remove('response.png')
def test_io_with_pickle(self):
self.transfer.write('transfer.pickle', format_='pickle')
tr = self.transfer.read('transfer.pickle', format_='pickle')
assert (tr.data == self.transfer.data).all()
os.remove('transfer.pickle')
def test_io_with_unsupported_type(self):
with pytest.raises(KeyError):
self.transfer.write('transfer', format_='unsupported')
self.transfer.write('transfer', format_='pickle')
with pytest.raises(KeyError):
self.transfer.read('transfer', format_='unsupported')
os.remove('transfer')
def test_simple_ir(self):
"""Test constructing a simple impulse response."""
t0, w = 100, 500
assert len(transfer.simple_ir(1, t0, w)), (t0+w)
def test_relativistic_ir(self):
"""
Test constructing a relativistic impulse response."""
t1, t3 = 3, 10
assert len(transfer.relativistic_ir(1, t1=t1, t3=t3)), (t1+t3)
| abigailStev/stingray | stingray/simulator/tests/test_transfer.py | Python | mit | 3,165 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from mock import Mock, patch
from sentry.models import Group
from sentry.testutils import TestCase
from sentry.tasks.post_process import (
post_process_group, record_affected_user, record_affected_code,
record_additional_tags
)
class PostProcessGroupTest(TestCase):
@patch('sentry.tasks.post_process.record_affected_code')
@patch('sentry.rules.processor.RuleProcessor.apply', Mock(return_value=[]))
def test_record_affected_code(self, mock_record_affected_code):
group = self.create_group(project=self.project)
event = self.create_event(group=group)
with self.settings(SENTRY_ENABLE_EXPLORE_CODE=False):
post_process_group(
event=event,
is_new=True,
is_regression=False,
is_sample=False,
)
assert not mock_record_affected_code.delay.called
with self.settings(SENTRY_ENABLE_EXPLORE_CODE=True):
post_process_group(
event=event,
is_new=True,
is_regression=False,
is_sample=False,
)
mock_record_affected_code.delay.assert_called_once_with(
event=event,
)
@patch('sentry.tasks.post_process.record_affected_user')
@patch('sentry.tasks.post_process.record_affected_code', Mock())
@patch('sentry.rules.processor.RuleProcessor.apply', Mock(return_value=[]))
def test_record_affected_user(self, mock_record_affected_user):
group = self.create_group(project=self.project)
event = self.create_event(group=group)
with self.settings(SENTRY_ENABLE_EXPLORE_USERS=False):
post_process_group(
event=event,
is_new=True,
is_regression=False,
is_sample=False,
)
assert not mock_record_affected_user.delay.called
with self.settings(SENTRY_ENABLE_EXPLORE_USERS=True):
post_process_group(
event=event,
is_new=True,
is_regression=False,
is_sample=False,
)
mock_record_affected_user.delay.assert_called_once_with(
event=event,
)
@patch('sentry.tasks.post_process.record_affected_user', Mock())
@patch('sentry.tasks.post_process.record_affected_code', Mock())
@patch('sentry.rules.processor.RuleProcessor')
def test_rule_processor(self, mock_processor):
group = self.create_group(project=self.project)
event = self.create_event(group=group)
mock_callback = Mock()
mock_futures = [Mock()]
mock_processor.return_value.apply.return_value = [
(mock_callback, mock_futures),
]
post_process_group(
event=event,
is_new=True,
is_regression=False,
is_sample=False,
)
mock_processor.assert_called_once_with(event, True, False, False)
mock_processor.return_value.apply.assert_called_once_with()
mock_callback.assert_called_once_with(event, mock_futures)
class RecordAffectedUserTest(TestCase):
def test_simple(self):
event = Group.objects.from_kwargs(1, message='foo', **{
'sentry.interfaces.User': {
'email': '[email protected]',
},
})
with patch.object(Group.objects, 'add_tags') as add_tags:
record_affected_user(event=event)
add_tags.assert_called_once_with(event.group, [
('sentry:user', 'email:[email protected]', {
'email': '[email protected]',
})
])
class RecordAdditionalTagsTest(TestCase):
def test_simple(self):
# TODO(dcramer): this test ideally would actually test that tags get
# added
event = Group.objects.from_kwargs(1, message='foo', **{
'sentry.interfaces.User': {
'email': '[email protected]',
},
})
with patch.object(Group.objects, 'add_tags') as add_tags:
record_additional_tags(event=event)
assert not add_tags.called
class RecordAffectedCodeTest(TestCase):
def test_simple(self):
event = Group.objects.from_kwargs(1, message='foo', **{
'sentry.interfaces.Exception': {
'values': [{
'type': 'TypeError',
'value': 'test',
'stacktrace': {
'frames': [{
'function': 'bar',
'filename': 'foo.py',
'in_app': True,
}],
},
}],
},
})
with patch.object(Group.objects, 'add_tags') as add_tags:
record_affected_code(event=event)
add_tags.assert_called_once_with(event.group, [
('sentry:filename', '1effb24729ae4c43efa36b460511136a', {
'filename': 'foo.py',
}),
('sentry:function', '7823c20ad591da0bbb78d083c118609c', {
'filename': 'foo.py',
'function': 'bar',
})
])
| llonchj/sentry | tests/sentry/tasks/post_process/tests.py | Python | bsd-3-clause | 5,333 |
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output
@click.command('show_repository')
@click.argument("toolShed_id", type=str)
@pass_context
@custom_exception
@json_output
def cli(ctx, toolShed_id):
"""Get details of a given Tool Shed repository as it is installed on this Galaxy instance.
Output:
Information about the tool
For example::
{'changeset_revision': 'b17455fb6222',
'ctx_rev': '8',
'owner': 'aaron',
'status': 'Installed',
'url': '/api/tool_shed_repositories/82de4a4c7135b20a'}
.. versionchanged:: 0.4.1
Changed method name from ``show_tool`` to ``show_repository`` to
better align with the Tool Shed concepts
"""
return ctx.gi.toolshed.show_repository(toolShed_id)
| galaxy-iuc/parsec | parsec/commands/toolshed/show_repository.py | Python | apache-2.0 | 875 |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.web.http_api.metadata.json import JSONSerializer
class JSONPSerializer(JSONSerializer):
"""Add prefix."""
_mime = 'application/javascript'
def _execute(self, results):
return "// fetched from Indico\n%s(%s);" % \
(self._query_params.get('jsonp', 'read'),
super()._execute(results))
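    # For example, with ?jsonp=handleData in the query string the serialized
    # payload is wrapped (roughly) as:
    #   // fetched from Indico
    #   handleData({...JSON produced by JSONSerializer...});
    # Without a jsonp parameter the default callback name 'read' is used.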
| pferreir/indico | indico/web/http_api/metadata/jsonp.py | Python | mit | 563 |
"""This script finds all '.properties-MERGED' files and writes relative path, key, and value to a CSV file.
This script requires the python libraries: gitpython, jproperties, pyexcel-xlsx, xlsxwriter and pyexcel. As a
consequence, it also requires git >= 1.7.0 and python >= 3.4. This script relies on fetching 'HEAD' from the current
branch, so make sure the repo is on the correct branch (i.e. develop).
"""
import sys
from envutil import get_proj_dir
from excelutil import write_results_to_xlsx
from gitutil import get_property_file_entries, get_commit_id, get_git_root
from csvutil import write_results_to_csv
import argparse
from outputtype import OutputType
from propentry import convert_to_output
def main():
# noinspection PyTypeChecker
parser = argparse.ArgumentParser(description='Gathers all key-value pairs within .properties-MERGED files into '
'one file.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(dest='output_path', type=str, help='The path to the output file. The output path should be'
' specified as a relative path with the dot slash notation '
'(i.e. \'./outputpath.xlsx\') or an absolute path.')
parser.add_argument('-r', '--repo', dest='repo_path', type=str, required=False,
help='The path to the repo. If not specified, path of script is used.')
parser.add_argument('-o', '--output-type', dest='output_type', type=OutputType, choices=list(OutputType),
required=False, help="The output type. Currently supports 'csv' or 'xlsx'.", default='xlsx')
parser.add_argument('-nc', '--no-commit', dest='no_commit', action='store_true', default=False,
required=False, help="Suppresses adding commits to the generated header.")
parser.add_argument('-nt', '--no-translated-col', dest='no_translated_col', action='store_true', default=False,
required=False, help="Don't include a column for translation.")
args = parser.parse_args()
repo_path = args.repo_path if args.repo_path is not None else get_git_root(get_proj_dir())
output_path = args.output_path
show_commit = not args.no_commit
output_type = args.output_type
translated_col = not args.no_translated_col
commit_id = get_commit_id(repo_path, 'HEAD') if show_commit else None
processing_result = convert_to_output(get_property_file_entries(repo_path), commit_id, translated_col)
# based on https://stackoverflow.com/questions/60208/replacements-for-switch-statement-in-python
{
OutputType.csv: write_results_to_csv,
OutputType.xlsx: write_results_to_xlsx
}[output_type](processing_result, output_path)
sys.exit(0)
if __name__ == "__main__":
main()
| eugene7646/autopsy | release_scripts/localization_scripts/allbundlesscript.py | Python | apache-2.0 | 2,950 |
# coding=utf-8
"""Test cases for Zinnia's feeds"""
try:
from urllib.parse import urljoin
except ImportError: # Python 2
from urlparse import urljoin
from django.test import TestCase
from django.utils import timezone
from django.utils.encoding import smart_text
from django.contrib.sites.models import Site
from django.utils.translation import activate
from django.utils.translation import deactivate
from django.test.utils import override_settings
from django.core.files.base import ContentFile
from django.utils.feedgenerator import Atom1Feed
from django.utils.feedgenerator import DefaultFeed
from django.core.exceptions import ObjectDoesNotExist
from django.core.files.storage import default_storage
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.tests.utils import skipIfCustomUser
import django_comments as comments
from tagging.models import Tag
from zinnia.managers import HIDDEN
from zinnia.managers import PUBLISHED
from zinnia.models.entry import Entry
from zinnia.models.author import Author
from zinnia.tests.utils import datetime
from zinnia.tests.utils import urlEqual
from zinnia.models.category import Category
from zinnia.flags import PINGBACK, TRACKBACK
from zinnia import feeds
from zinnia.feeds import EntryFeed
from zinnia.feeds import ZinniaFeed
from zinnia.feeds import LatestEntries
from zinnia.feeds import CategoryEntries
from zinnia.feeds import AuthorEntries
from zinnia.feeds import TagEntries
from zinnia.feeds import SearchEntries
from zinnia.feeds import EntryDiscussions
from zinnia.feeds import EntryComments
from zinnia.feeds import EntryPingbacks
from zinnia.feeds import EntryTrackbacks
from zinnia.feeds import LatestDiscussions
from zinnia.signals import disconnect_entry_signals
from zinnia.signals import disconnect_discussion_signals
@skipIfCustomUser
@override_settings(
ROOT_URLCONF='zinnia.tests.implementations.urls.default'
)
class FeedsTestCase(TestCase):
"""Test cases for the Feed classes provided"""
def setUp(self):
disconnect_entry_signals()
disconnect_discussion_signals()
activate('en')
self.site = Site.objects.get_current()
self.author = Author.objects.create(username='admin',
first_name='Root',
last_name='Bloody',
email='[email protected]')
self.category = Category.objects.create(title='Tests', slug='tests')
self.entry_ct_id = ContentType.objects.get_for_model(Entry).pk
def tearDown(self):
deactivate()
def create_published_entry(self):
params = {'title': 'My test entry',
'content': 'My test content with image '
'<img src="/image.jpg" />',
'slug': 'my-test-entry',
'tags': 'tests',
'creation_date': datetime(2010, 1, 1, 12),
'status': PUBLISHED}
entry = Entry.objects.create(**params)
entry.sites.add(self.site)
entry.categories.add(self.category)
entry.authors.add(self.author)
return entry
def create_discussions(self, entry):
comment = comments.get_model().objects.create(
comment='My Comment',
user=self.author,
user_name='admin',
content_object=entry,
site=self.site,
submit_date=timezone.now())
pingback = comments.get_model().objects.create(
comment='My Pingback',
user=self.author,
content_object=entry,
site=self.site,
submit_date=timezone.now())
pingback.flags.create(user=self.author, flag=PINGBACK)
trackback = comments.get_model().objects.create(
comment='My Trackback',
user=self.author,
content_object=entry,
site=self.site,
submit_date=timezone.now())
trackback.flags.create(user=self.author, flag=TRACKBACK)
return [comment, pingback, trackback]
def test_entry_feed(self):
entry = self.create_published_entry()
feed = EntryFeed()
self.assertEqual(feed.item_pubdate(entry), entry.creation_date)
self.assertEqual(feed.item_updateddate(entry), entry.last_update)
self.assertEqual(feed.item_categories(entry), [self.category.title])
self.assertEqual(feed.item_author_name(entry),
self.author.__str__())
self.assertEqual(feed.item_author_email(entry), self.author.email)
self.assertEqual(
feed.item_author_link(entry),
'http://example.com/authors/%s/' % self.author.username)
# Test a NoReverseMatch for item_author_link
self.author.username = '[]'
self.author.save()
feed.item_author_name(entry)
self.assertEqual(feed.item_author_link(entry), 'http://example.com')
def test_entry_feed_enclosure(self):
entry = self.create_published_entry()
feed = EntryFeed()
self.assertEqual(
feed.item_enclosure_url(entry), 'http://example.com/image.jpg')
self.assertEqual(feed.item_enclosure_length(entry), '100000')
self.assertEqual(feed.item_enclosure_mime_type(entry), 'image/jpeg')
entry.content = 'My test content with image <img src="image.jpg" />'
entry.save()
self.assertEqual(
feed.item_enclosure_url(entry), 'http://example.com/image.jpg')
self.assertEqual(feed.item_enclosure_length(entry), '100000')
self.assertEqual(feed.item_enclosure_mime_type(entry), 'image/jpeg')
entry.content = 'My test content with image ' \
'<img src="http://test.com/image.jpg" />'
entry.save()
self.assertEqual(
feed.item_enclosure_url(entry), 'http://test.com/image.jpg')
self.assertEqual(feed.item_enclosure_length(entry), '100000')
self.assertEqual(feed.item_enclosure_mime_type(entry), 'image/jpeg')
path = default_storage.save('enclosure.png', ContentFile('Content'))
entry.image = path
entry.save()
self.assertEqual(feed.item_enclosure_url(entry),
urljoin('http://example.com', entry.image.url))
self.assertEqual(feed.item_enclosure_length(entry), '7')
self.assertEqual(feed.item_enclosure_mime_type(entry), 'image/png')
default_storage.delete(path)
entry.image = 'invalid_image_without_extension'
entry.save()
self.assertEqual(feed.item_enclosure_url(entry),
urljoin('http://example.com', entry.image.url))
self.assertEqual(feed.item_enclosure_length(entry), '100000')
self.assertEqual(feed.item_enclosure_mime_type(entry), 'image/jpeg')
def test_entry_feed_enclosure_issue_134(self):
entry = self.create_published_entry()
feed = EntryFeed()
entry.content = 'My test content with image <img xsrc="image.jpg" />'
entry.save()
self.assertEqual(
feed.item_enclosure_url(entry), None)
def test_latest_entries(self):
self.create_published_entry()
feed = LatestEntries()
self.assertEqual(feed.link(), '/')
self.assertEqual(len(feed.items()), 1)
self.assertEqual(feed.get_title(None), 'Latest entries')
self.assertEqual(
feed.description(),
'The latest entries on the site example.com')
def test_category_entries(self):
self.create_published_entry()
feed = CategoryEntries()
self.assertEqual(feed.get_object('request', '/tests/'), self.category)
self.assertEqual(len(feed.items(self.category)), 1)
self.assertEqual(feed.link(self.category), '/categories/tests/')
self.assertEqual(
feed.get_title(self.category),
'Entries for the category %s' % self.category.title)
self.assertEqual(
feed.description(self.category),
'The latest entries categorized under %s' % self.category.title)
self.category.description = 'Category description'
self.assertEqual(feed.description(self.category),
'Category description')
def test_category_title_non_ascii(self):
self.create_published_entry()
self.category.title = smart_text('Catégorie')
self.category.save()
feed = CategoryEntries()
self.assertEqual(feed.get_title(self.category),
'Entries for the category %s' % self.category.title)
self.assertEqual(
feed.description(self.category),
'The latest entries categorized under %s' % self.category.title)
def test_author_entries(self):
self.create_published_entry()
feed = AuthorEntries()
self.assertEqual(feed.get_object('request', 'admin'), self.author)
self.assertEqual(len(feed.items(self.author)), 1)
self.assertEqual(feed.link(self.author), '/authors/admin/')
self.assertEqual(feed.get_title(self.author),
'Entries for the author %s' %
self.author.__str__())
self.assertEqual(feed.description(self.author),
'The latest entries by %s' %
self.author.__str__())
def test_author_title_non_ascii(self):
self.author.first_name = smart_text('Léon')
self.author.last_name = 'Bloom'
self.author.save()
self.create_published_entry()
feed = AuthorEntries()
self.assertEqual(feed.get_title(self.author),
smart_text('Entries for the author %s' %
self.author.__str__()))
self.assertEqual(feed.description(self.author),
smart_text('The latest entries by %s' %
self.author.__str__()))
def test_tag_entries(self):
self.create_published_entry()
feed = TagEntries()
tag = Tag(name='tests')
self.assertEqual(feed.get_object('request', 'tests').name, 'tests')
self.assertEqual(len(feed.items('tests')), 1)
self.assertEqual(feed.link(tag), '/tags/tests/')
self.assertEqual(feed.get_title(tag),
'Entries for the tag %s' % tag.name)
self.assertEqual(feed.description(tag),
'The latest entries tagged with %s' % tag.name)
def test_tag_title_non_ascii(self):
entry = self.create_published_entry()
tag_unicode = smart_text('accentué')
entry.tags = tag_unicode
entry.save()
feed = TagEntries()
tag = Tag(name=tag_unicode)
self.assertEqual(feed.get_title(tag),
'Entries for the tag %s' % tag_unicode)
self.assertEqual(feed.description(tag),
'The latest entries tagged with %s' % tag_unicode)
def test_search_entries(self):
class FakeRequest:
def __init__(self, val):
self.GET = {'pattern': val}
self.create_published_entry()
feed = SearchEntries()
self.assertRaises(ObjectDoesNotExist,
feed.get_object, FakeRequest('te'))
self.assertEqual(feed.get_object(FakeRequest('test')), 'test')
self.assertEqual(len(feed.items('test')), 1)
self.assertEqual(feed.link('test'), '/search/?pattern=test')
self.assertEqual(feed.get_title('test'),
"Search results for '%s'" % 'test')
self.assertEqual(
feed.description('test'),
"The entries containing the pattern '%s'" % 'test')
def test_latest_discussions(self):
entry = self.create_published_entry()
self.create_discussions(entry)
feed = LatestDiscussions()
self.assertEqual(feed.link(), '/')
self.assertEqual(len(feed.items()), 3)
self.assertEqual(feed.get_title(None), 'Latest discussions')
self.assertEqual(
feed.description(),
'The latest discussions on the site example.com')
def test_entry_discussions(self):
entry = self.create_published_entry()
comments = self.create_discussions(entry)
feed = EntryDiscussions()
self.assertEqual(feed.get_object(
'request', 2010, 1, 1, entry.slug), entry)
self.assertEqual(feed.link(entry), '/2010/01/01/my-test-entry/')
self.assertEqual(len(feed.items(entry)), 3)
self.assertEqual(feed.item_pubdate(comments[0]),
comments[0].submit_date)
self.assertEqual(feed.item_link(comments[0]),
'/comments/cr/%i/%i/#c%i' %
(self.entry_ct_id, entry.pk, comments[0].pk))
self.assertEqual(feed.item_author_name(comments[0]),
self.author.__str__())
self.assertEqual(feed.item_author_email(comments[0]),
'[email protected]')
self.assertEqual(feed.item_author_link(comments[0]), '')
self.assertEqual(feed.get_title(entry),
'Discussions on %s' % entry.title)
self.assertEqual(
feed.description(entry),
'The latest discussions on the entry %s' % entry.title)
def test_feed_for_hidden_entry_issue_277(self):
entry = self.create_published_entry()
entry.status = HIDDEN
entry.save()
feed = EntryDiscussions()
self.assertEqual(feed.get_object(
'request', 2010, 1, 1, entry.slug), entry)
@override_settings(USE_TZ=False)
def test_feed_discussions_no_timezone_issue_277(self):
entry = self.create_published_entry()
entry.creation_date = datetime(2014, 1, 1, 23)
entry.save()
feed = EntryDiscussions()
self.assertEqual(feed.get_object(
'request', 2014, 1, 1, entry.slug), entry)
@override_settings(USE_TZ=True, TIME_ZONE='Europe/Paris')
def test_feed_discussions_with_timezone_issue_277(self):
entry = self.create_published_entry()
entry.creation_date = datetime(2014, 1, 1, 23)
entry.save()
feed = EntryDiscussions()
self.assertEqual(feed.get_object(
'request', 2014, 1, 2, entry.slug), entry)
def test_entry_comments(self):
entry = self.create_published_entry()
comments = self.create_discussions(entry)
feed = EntryComments()
self.assertEqual(list(feed.items(entry)), [comments[0]])
self.assertEqual(feed.item_link(comments[0]),
'/comments/cr/%i/%i/#comment-%i-by-admin' %
(self.entry_ct_id, entry.pk, comments[0].pk))
self.assertEqual(feed.get_title(entry),
'Comments on %s' % entry.title)
self.assertEqual(
feed.description(entry),
'The latest comments on the entry %s' % entry.title)
self.assertTrue(urlEqual(
feed.item_enclosure_url(comments[0]),
'http://www.gravatar.com/avatar/e64c7d89f26b'
'd1972efa854d13d7dd61?s=80&r=g'))
self.assertEqual(feed.item_enclosure_length(entry), '100000')
self.assertEqual(feed.item_enclosure_mime_type(entry), 'image/jpeg')
def test_entry_pingbacks(self):
entry = self.create_published_entry()
comments = self.create_discussions(entry)
feed = EntryPingbacks()
self.assertEqual(list(feed.items(entry)), [comments[1]])
self.assertEqual(feed.item_link(comments[1]),
'/comments/cr/%i/%i/#pingback-%i' %
(self.entry_ct_id, entry.pk, comments[1].pk))
self.assertEqual(feed.get_title(entry),
'Pingbacks on %s' % entry.title)
self.assertEqual(
feed.description(entry),
'The latest pingbacks on the entry %s' % entry.title)
def test_entry_trackbacks(self):
entry = self.create_published_entry()
comments = self.create_discussions(entry)
feed = EntryTrackbacks()
self.assertEqual(list(feed.items(entry)), [comments[2]])
self.assertEqual(feed.item_link(comments[2]),
'/comments/cr/%i/%i/#trackback-%i' %
(self.entry_ct_id, entry.pk, comments[2].pk))
self.assertEqual(feed.get_title(entry),
'Trackbacks on %s' % entry.title)
self.assertEqual(
feed.description(entry),
'The latest trackbacks on the entry %s' % entry.title)
def test_entry_feed_no_authors(self):
entry = self.create_published_entry()
entry.authors.clear()
feed = EntryFeed()
self.assertEqual(feed.item_author_name(entry), None)
def test_entry_feed_rss_or_atom(self):
original_feeds_format = feeds.FEEDS_FORMAT
feeds.FEEDS_FORMAT = ''
feed = LatestEntries()
self.assertEqual(feed.feed_type, DefaultFeed)
feeds.FEEDS_FORMAT = 'atom'
feed = LatestEntries()
self.assertEqual(feed.feed_type, Atom1Feed)
self.assertEqual(feed.subtitle, feed.description)
feeds.FEEDS_FORMAT = original_feeds_format
def test_title_with_sitename_implementation(self):
feed = ZinniaFeed()
self.assertRaises(NotImplementedError, feed.title)
feed = LatestEntries()
self.assertEqual(feed.title(), 'example.com - Latest entries')
def test_discussion_feed_with_same_slugs(self):
"""
https://github.com/Fantomas42/django-blog-zinnia/issues/104
        Here we reproduce the original case: requesting a discussion-type
        feed for an entry whose slug is shared with another entry.
        Fixing this case requires some changes to the get_object method.
"""
entry = self.create_published_entry()
feed = EntryDiscussions()
self.assertEqual(feed.get_object(
'request', 2010, 1, 1, entry.slug), entry)
params = {'title': 'My test entry, part II',
'content': 'My content ',
'slug': 'my-test-entry',
'tags': 'tests',
'creation_date': datetime(2010, 2, 1, 12),
'status': PUBLISHED}
entry_same_slug = Entry.objects.create(**params)
entry_same_slug.sites.add(self.site)
entry_same_slug.authors.add(self.author)
self.assertEqual(feed.get_object(
'request', 2010, 2, 1, entry_same_slug.slug), entry_same_slug)
| Maplecroft/django-blog-zinnia | zinnia/tests/test_feeds.py | Python | bsd-3-clause | 18,700 |
# TODO: Setup
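# A minimal sketch of what this setup script might eventually contain; the
# package name, version, and dependency list are illustrative assumptions,
# not taken from the ReleaseRadar repository.
from setuptools import setup, find_packages
setup(
    name='ReleaseRadar',      # assumed distribution name
    version='0.1.0',          # placeholder version
    packages=find_packages(),
    install_requires=[],      # fill in real dependencies here
)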
| mjkmoynihan/ReleaseRadar | setup.py | Python | apache-2.0 | 15 |
import abc
import string
import numpy as np
class BaseConstituent(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def speed(self, astro):
pass
@abc.abstractmethod
def V(self, astro):
pass
@abc.abstractmethod
def u(self, astro):
pass
@abc.abstractmethod
def f(self, astro):
pass
def __init__(self, name):
self.name = name
def astro_xdo(self, a):
return [a['T+h-s'], a['s'], a['h'], a['p'], a['N'], a['pp'], a['90']]
def astro_speeds(self, a):
return np.array([each.speed for each in self.astro_xdo(a)])
def astro_values(self, a):
return np.array([each.value for each in self.astro_xdo(a)])
# Consider two out of phase constituents which travel at the same speed to
# be identical
def __eq__(self, c):
return np.all(self.coefficients[:-1] == c.coefficients[:-1])
def __hash__(self):
return hash(tuple(self.coefficients[:-1]))
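# Editorial note (not from pytides itself): concrete subclasses are expected to
# provide a `coefficients` array -- relied on by __eq__/__hash__ above -- and to
# implement speed(), V(), u() and f(), typically in terms of the astronomical
# speeds and values exposed by the astro_* helpers.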
| sealevelresearch/pytides | pytides/constituent/base_constituent.py | Python | mit | 993 |
# ebscopy __init__
import os
import logging
from ebscopy import *
log_levels = {
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL,
}
if os.environ.get('EDS_LOG_LEVEL') in log_levels.keys():
log_level = log_levels[os.environ.get('EDS_LOG_LEVEL')]
else:
log_level = logging.WARNING
logging.basicConfig(
filename='/tmp/ebscopy-%s.log' % (os.getpid()),
level=log_level,
format='%(asctime)s %(levelname)s %(module)s.%(funcName)s: %(message)s'
)
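# Example (assumed usage): export EDS_LOG_LEVEL=DEBUG before importing ebscopy
# to route debug-level messages to /tmp/ebscopy-<pid>.log.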
#EOF
| jessejensen/ebscopy | ebscopy/__init__.py | Python | gpl-3.0 | 629 |
from nose.tools import *
from git_orm.testcases import GitTestCase
from git_orm import transaction, GitError
class TestException(Exception): pass
class TestTransaction(GitTestCase):
def test_commit(self):
transaction.begin()
trans = transaction.current()
trans.set_blob(['foo'], 'bar'.encode('utf-8'))
trans.add_message('foobar')
self.assert_commit_count(0)
transaction.commit()
self.assert_commit_count(1)
self.assert_file_exists('foo')
def test_commit_nochanges(self):
transaction.begin()
self.assert_commit_count(0)
assert_raises(GitError, transaction.commit)
self.assert_commit_count(0)
transaction.rollback()
    def test_commit_nomessage(self):
transaction.begin()
trans = transaction.current()
trans.set_blob(['foo'], 'bar'.encode('utf-8'))
self.assert_commit_count(0)
assert_raises(GitError, transaction.commit)
self.assert_commit_count(0)
transaction.rollback()
def test_rollback(self):
def _foo():
with transaction.wrap() as trans:
trans.set_blob(['foo'], 'bar'.encode('utf-8'))
raise TestException()
assert_raises(TestException, _foo)
self.assert_commit_count(0)
def test_current(self):
assert_raises(GitError, transaction.current)
with transaction.wrap():
transaction.current()
assert_raises(GitError, transaction.current)
class TestTransactionWrap(GitTestCase):
def test_wrap(self):
with transaction.wrap() as trans:
trans.set_blob(['foo'], 'bar'.encode('utf-8'))
trans.add_message('foobar')
self.assert_commit_count(0)
self.assert_commit_count(1)
self.assert_file_exists('foo')
def test_nochanges(self):
with transaction.wrap():
pass
self.assert_commit_count(0)
def test_exception(self):
def _foo():
with transaction.wrap() as trans:
trans.set_blob(['foo'], 'bar'.encode('utf-8'))
self.assert_commit_count(0)
raise TestException()
assert_raises(TestException, _foo)
self.assert_commit_count(0)
class TestTransactionBlob(GitTestCase):
def test_set_get(self):
with transaction.wrap() as trans:
trans.add_message('dummy')
trans.set_blob(['foo'], 'bar'.encode('utf-8'))
eq_(trans.get_blob(['foo']).decode('utf-8'), 'bar')
self.assert_file_exists('foo')
def test_set_get_separate_transactions(self):
with transaction.wrap() as trans:
trans.add_message('dummy')
trans.set_blob(['foo'], 'bar'.encode('utf-8'))
self.assert_file_exists('foo')
with transaction.wrap() as trans:
eq_(trans.get_blob(['foo']).decode('utf-8'), 'bar')
self.assert_file_exists('foo')
def test_exists(self):
with transaction.wrap() as trans:
trans.add_message('dummy')
trans.set_blob(['foo'], 'bar'.encode('utf-8'))
ok_(trans.exists(['foo']))
def test_exists_separate_transaction(self):
with transaction.wrap() as trans:
trans.set_blob(['foo'], 'bar'.encode('utf-8'))
trans.add_message('dummy')
with transaction.wrap() as trans:
ok_(trans.exists(['foo']))
def test_list_blobs(self):
with transaction.wrap() as trans:
trans.add_message('dummy')
trans.set_blob(['foo'], 'bar'.encode('utf-8'))
eq_(trans.list_blobs([]), set(['foo']))
def test_list_blobs_separate_transaction(self):
with transaction.wrap() as trans:
trans.set_blob(['foo'], 'bar'.encode('utf-8'))
trans.add_message('dummy')
with transaction.wrap() as trans:
eq_(trans.list_blobs([]), set(['foo']))
| natano/python-git-orm | git_orm/tests/test_transaction.py | Python | isc | 3,962 |
import random
# bit order used to shuffle ids -- here simply the reversed range 0..33 (a
# random permutation, which is what the random import is for, works just as well)
# you'll need to save this mapping permanently, perhaps just hardcode it
# map however many bits you need to represent your integer space
mapping = range(34)
mapping.reverse()
# alphabet for changing from base 10
chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# shuffle the bits
def encode(n):
result = 0
for i, b in enumerate(mapping):
b1 = 1 << i
b2 = 1 << b
if n & b1:
result |= b2
return enbase(result)
# unshuffle the bits
def decode(n):
result = 0
for i, b in enumerate(mapping):
b1 = 1 << i
b2 = 1 << b
if n & b2:
result |= b1
return debase(result)
# change the base
def enbase(x):
n = len(chars)
if x < n:
return chars[x]
return enbase(x/n) + chars[x%n]
# go back to base 10
def debase(x):
n = len(chars)
result = 0
for i, c in enumerate(reversed(x)):
result += chars.index(c) * (n**i)
return result
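# Illustrative round trip (not part of the original module): encode() turns an
# integer id into a short obfuscated base-36 token and decode() reverses it:
#     token = encode(12345)
#     assert decode(token) == 12345
# Note this file assumes Python 2 semantics: range() returning a list (for
# mapping.reverse()) and integer division in enbase(); under Python 3 use
# list(range(34)) and x // n instead.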
| klanestro/vortaro | words/tools.py | Python | gpl-3.0 | 1,001 |
import ConfigParser
import requests
import datetime
import pytz
import pika
import sys
import json
from pymongo import MongoClient
integration_name = 'venmo'
access_token = ''
venmo_id = ''
g_user_id = ''
connection = None
channel = None
def setup_pika():
global connection
global channel
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
def wait_for_response():
print "Waiting for response from slack " + integration_name
channel.basic_consume(callback, queue=integration_name, no_ack=True, consumer_tag=integration_name)
channel.start_consuming()
def send_message(user, message):
payload = {}
payload['user'] = user
payload['message'] = message
channel.basic_publish(exchange='',routing_key='input',body=json.dumps(payload))
print "Sending message to service"
print "\nMessage: " + json.dumps(payload)
def send_message_and_await_response(user, message):
send_message(user, message)
wait_for_response()
def send_message_and_exit(user, message):
send_message(user, message)
connection.close()
def cleanup():
print 'running cleanup'
try:
print 'trying to delete queue'
channel.queue_delete(queue=integration_name)
except:
print 'failed delete'
try:
print 'trying to close connection from failed delete queue'
connection.close()
except:
print 'failed failed close'
print 'returning cleanup'
return
        print 'succeeded close'
print 'returning cleanup'
return
try:
print 'trying to close connection'
connection.close()
except:
print 'failed close'
return
print 'exiting cleanup'
# pika callback: invoked when a response message arrives on this integration's queue
def callback(ch, method, properties, body):
channel.stop_consuming()
body_dict = json.loads(body)
parse_message(body_dict['message'])
# Connects to Mongo using ../credentials.ini and returns a handle to the configured database
def connect_to_mongo():
credentials = ConfigParser.ConfigParser()
credentials.read("../credentials.ini")
host = credentials.get("Mongo", "connection")
user = credentials.get("Mongo", "user")
password = credentials.get("Mongo", "password")
db = credentials.get("Mongo", "database")
connection_url = "mongodb://" + user + ":" + password + "@" + host + "/" + db + "?authSource=admin"
client = MongoClient(connection_url)
return client[db]
def update_database(user_id, db, access_token, expires_date, refresh_token):
return db.users.update_one({'_id': user_id},
{'$set': {
'venmo': {
'access_token': access_token,
'expires_in': expires_date,
'refresh_token': refresh_token
}
},
'$currentDate': {'lastModified': True}
})
def get_access_token(user_id):
config = ConfigParser.ConfigParser()
config.read('../credentials.ini')
db = connect_to_mongo()
venmo_auth = db.users.find_one({'_id': user_id}, {'venmo': 1})
if (venmo_auth == None or venmo_auth['venmo']['access_token'] == ''):
user_doc = db.users.find_one({'_id': user_id})
if (user_doc == None):
create_user_doc = db.users.insert_one({'_id': user_id})
create_venmo_auth = update_database(user_id, db, '', '', '')
auth_url = 'https://api.venmo.com/v1/oauth/authorize?client_id=' + config.get('Venmo', 'clientId') + '&scope=make_payments%20access_payment_history%20access_feed%20access_profile%20access_email%20access_phone%20access_balance%20access_friends&response_type=code'
url_message = ('Authenticate to Venmo with the following URL: ' + auth_url + ' then send back the auth code in this format\n'
'venmo code CODE')
send_message_and_await_response(user_id, url_message)
print 'returning from get_access'
return None
else:
expires_date = venmo_auth['venmo']['expires_in'].replace(tzinfo = pytz.utc)
if (expires_date < datetime.datetime.utcnow().replace(tzinfo = pytz.utc)):
post_data = {
'client_id': config.get('Venmo', 'clientId'),
'client_secret': config.get('Venmo', 'clientSecret'),
'refresh_token': venmo_auth['venmo']['refresh_token']
}
response = requests.post('https://api.venmo.com/v1/oauth/access_token', post_data)
response_dict = response.json()
access_token = response_dict['access_token']
expires_in = response_dict['expires_in']
expires_date = (datetime.datetime.utcnow().replace(tzinfo = pytz.utc) + datetime.timedelta(seconds=expires_in))
update_database(user_id, db, access_token, expires_date, response_dict['refresh_token'])
return access_token
return venmo_auth['venmo']['access_token']
def complete_auth(code):
config = ConfigParser.ConfigParser()
config.read('../credentials.ini')
db = connect_to_mongo()
post_data = {
'client_id': config.get('Venmo', 'clientId'),
'client_secret': config.get('Venmo', 'clientSecret'),
'code': code
}
response = requests.post('https://api.venmo.com/v1/oauth/access_token', post_data)
response_dict = response.json()
access_token = response_dict['access_token']
expires_in = response_dict['expires_in']
expires_date = (datetime.datetime.utcnow().replace(tzinfo = pytz.utc) + datetime.timedelta(seconds=expires_in))
refresh_token = response_dict['refresh_token']
global g_user_id
update_access_token = update_database(g_user_id, db, access_token, expires_date, refresh_token)
send_message_and_exit(g_user_id, 'Authentication complete!')
print 'completed auth'
def _get_venmo_id():
global access_token
    response = requests.get('https://api.venmo.com/v1/me?access_token=' + access_token)  # use HTTPS, matching the other API calls
response_dict = response.json()
if ('error' in response_dict):
venmo_error(response_dict['error'])
global venmo_id
venmo_id = response_dict['data']['user']['id']
def _get_pagination(initial, access_token):
final_list = []
while True:
final_list += initial['data']
if (not initial['pagination'] or initial['pagination']['next'] == None):
break
else:
response = requests.get(initial['pagination']['next'] + '&access_token=' + access_token)
response_dict = response.json()
if ('error' in response_dict):
venmo_error(response_dict['error'])
initial = response_dict
return final_list
def _find_friend(list, username):
for friend in list:
if (friend['username'].lower() == username.lower()):
return friend['id']
return None
def get_venmo_balance():
global access_token
response = requests.get('https://api.venmo.com/v1/me?access_token=' + access_token)
response_dict = response.json()
if ('error' in response_dict):
venmo_error(response_dict['error'])
global g_user_id
send_message_and_exit(g_user_id, response_dict['data']['balance'])
def venmo_payment(audience, which, amount, note, recipients):
global access_token
global venmo_id
url = 'https://api.venmo.com/v1/payments'
amount_str = str(amount)
if (which == 'charge'):
amount_str = '-' + amount_str
friends_response = requests.get('https://api.venmo.com/v1/users/' + venmo_id + '/friends?access_token=' + access_token)
friends_response_dict = friends_response.json()
if ('error' in friends_response_dict):
venmo_error(friends_response_dict['error'])
full = _get_pagination(friends_response_dict, access_token)
final_message = ''
for r in recipients:
post_data = {
'access_token': access_token
}
if r.startswith('phone:'):
id = r[6:]
post_data['phone'] = id
elif r.startswith('email:'):
id = r[6:]
post_data['email'] = id
else:
id = _find_friend(full, r)
if (id == None):
parse_error('You are not friends with ' + r)
return
post_data['user_id'] = id
post_data['note'] = note
post_data['amount'] = amount_str
post_data['audience'] = audience
response = requests.post(url, post_data)
response_dict = response.json()
if ('error' in response_dict):
final_message += response_dict['error']['message'] + '\n'
else:
name = ''
target = response_dict['data']['payment']['target']
if (target['type'] == 'user'):
name = target['user']['display_name']
elif (target['type'] == 'phone'):
name = target['phone']
elif (target['type'] == 'email'):
name = target['email']
if (amount_str.startswith('-')):
final_message += 'Successfully charged ' + name + ' $' '{:0,.2f}'.format(response_dict['data']['payment']['amount']) + ' for ' + response_dict['data']['payment']['note'] + '. Audience is ' + audience + '.\n'
else:
final_message += 'Successfully paid ' + name + ' $' '{:0,.2f}'.format(response_dict['data']['payment']['amount']) + ' for ' + response_dict['data']['payment']['note'] + '. Audience is ' + audience + '.\n'
global g_user_id
send_message_and_exit(g_user_id, final_message)
def venmo_pending(which):
global access_token
global venmo_id
message = ''
url = 'https://api.venmo.com/v1/payments?access_token=' + access_token + '&status=pending'
response = requests.get(url)
response_dict = response.json()
if ('error' in response_dict):
venmo_error(response_dict['error'])
full = _get_pagination(response_dict, access_token)
for pending in response_dict['data']:
if (which == 'to'):
if (pending['actor']['id'] != venmo_id):
message += pending['actor']['display_name'] + ' requests $' + '{:0,.2f}'.format(pending['amount']) + ' for ' + pending['note'] + ' | ID: ' + pending['id'] + '\n'
elif (which == 'from'):
if (pending['actor']['id'] == venmo_id):
if (pending['target']['type'] == 'user'):
message += pending['target']['user']['display_name'] + ' owes you $' + '{:0,.2f}'.format(pending['amount']) + ' ' + pending['note'] + ' | ID: ' + pending['id'] + '\n'
global g_user_id
if (message != ''):
send_message_and_exit(g_user_id, message[0:-1])
else:
send_message_and_exit(g_user_id, 'No pending Venmos')
def venmo_complete(which, number):
global access_token
url = 'https://api.venmo.com/v1/payments/' + str(number)
action = ''
if (which == 'accept'):
action = 'approve'
elif (which == 'reject'):
action = 'deny'
put_data = {
'access_token': access_token,
'action': action
}
response = requests.put(url, put_data)
response_dict = response.json()
if ('error' in response_dict):
venmo_error(response_dict['error'])
def help():
message = ('Venmo help\n'
'Commands:\n'
'venmo balance\n'
' returns your Venmo balance\n'
'venmo (audience) pay/charge amount for note to recipients\n'
' example: venmo public charge $10.00 for lunch to testuser phone:5555555555 email:[email protected]\n'
' audience (optional) = public OR friends OR private\n'
' defaults to friends if omitted\n'
' pay/charge = pay OR charge\n'
' amount = Venmo amount\n'
' note = Venmo message\n'
' recipients = list of recipients, can specify Venmo username, phone number prefixed with phone: or email prefixed with email:\n'
'venmo pending (to OR from)\n'
               '  returns pending Venmo charges, defaulting to "to"\n'
' also returns ID for payment completion\n'
'venmo complete accept/reject number\n'
' accept OR reject a payment with the given ID\n'
'venmo code code\n'
               '  code = Venmo authentication code\n'
'venmo help\n'
' this help message')
global g_user_id
send_message_and_exit(g_user_id, message)
def venmo_error(dict):
global g_user_id
send_message_and_exit(g_user_id, dict['message'])
exit()
def parse_error(error_message):
global g_user_id
send_message_and_exit(g_user_id, error_message)
def _find_last_str_in_list(list, str):
index = -1
for i in range(len(list)):
if (list[i].lower() == str.lower()):
index = i
return index
def parse_message(message):
split_message = message.split()
if (len(split_message) == 1):
help()
elif (split_message[1].lower() == 'help'):
help()
elif (split_message[1].lower() == 'code'):
complete_auth(split_message[2])
elif (split_message[1].lower() == 'balance'):
get_venmo_balance()
elif (split_message[1].lower() == 'pending'):
if (len(split_message) == 2):
venmo_pending('to')
elif (len(split_message) == 3):
which = split_message[2].lower()
if (which == 'to' or which == 'from'):
venmo_pending(which)
else:
parse_error('Valid pending commands\npending\npending to\npending from')
else:
parse_error('Valid pending commands\npending\npending to\npending from')
elif (split_message[1].lower() == 'complete'):
if (len(split_message) == 4):
which = split_message[2].lower()
if (which == 'accept' or which == 'reject'):
number = -1
try:
number = int(split_message[3])
except:
parse_error('Payment completion number must be a number')
return
venmo_complete(which, number)
else:
parse_error('Valid complete commands\nvenmo complete accept #\nvenmo complete reject #')
else:
parse_error('Valid complete commands\nvenmo complete accept #\nvenmo complete reject #')
elif (len(split_message) <= 2):
parse_error('Invalid payment string')
elif (split_message[1].lower() == 'charge' or split_message[2].lower() == 'charge' or
split_message[1].lower() == 'pay' or split_message[2].lower() == 'pay'):
audience = 'friends'
if (split_message[2].lower() == 'charge' or split_message[2].lower() == 'pay'):
audience = split_message[1].lower()
if (audience != 'public' and audience != 'friends' and audience != 'private'):
parse_error('Valid payment sharing commands\npublic\nfriend\nprivate')
return
del split_message[1]
which = split_message[1]
if (len(split_message) <= 6):
parse_error('Invalid payment string')
return
amount_str = split_message[2]
amount = 0
if (amount_str.startswith('$')):
amount_str = amount_str[1:]
try:
amount = float(amount_str)
except:
parse_error('Invalid amount')
return
if (split_message[3].lower() != 'for'):
parse_error('Invalid payment string')
return
to_index = _find_last_str_in_list(split_message, 'to')
if (to_index < 5):
parse_error('Could not find recipients')
return
note = ' '.join(split_message[4:to_index])
recipients = split_message[to_index + 1:]
venmo_payment(audience, which, amount, note, recipients)
def main(args):
setup_pika()
start_message = json.loads(args)
user_id = start_message['user']
global g_user_id
g_user_id = user_id
global access_token
access_token = get_access_token(user_id)
if (access_token != None):
_get_venmo_id()
parse_message(start_message['message'])
else:
print 'access_token was none'
cleanup()
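# Example of the JSON payload main() expects on the command line (values are
# illustrative): {"user": "U123456", "message": "venmo balance"}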
if __name__ == '__main__':
    main(sys.argv[1])  # assumes the JSON payload arrives as the first CLI argument; json.loads() cannot take the argv list itself
| golf1052/yhackslackpack | integrations/venmo/venmo.py | Python | mit | 16,352 |
# tests/products/test_ninja.py ----------------------------------*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
# ----------------------------------------------------------------------------
import argparse
import os
import platform
import shutil
import sys
import tempfile
import unittest
try:
# py2
from StringIO import StringIO
except ImportError:
# py3
from io import StringIO
from build_swift.build_swift.wrappers import xcrun
from swift_build_support import shell
from swift_build_support.products import Ninja
from swift_build_support.targets import StdlibDeploymentTarget
from swift_build_support.toolchain import host_toolchain
from swift_build_support.workspace import Workspace
class NinjaTestCase(unittest.TestCase):
def setUp(self):
# Setup workspace
tmpdir1 = os.path.realpath(tempfile.mkdtemp())
tmpdir2 = os.path.realpath(tempfile.mkdtemp())
os.makedirs(os.path.join(tmpdir1, 'ninja'))
self.workspace = Workspace(source_root=tmpdir1,
build_root=tmpdir2)
self.host = StdlibDeploymentTarget.host_target()
# Setup toolchain
self.toolchain = host_toolchain()
self.toolchain.cc = '/path/to/cc'
self.toolchain.cxx = '/path/to/cxx'
# Setup args
self.args = argparse.Namespace(
build_ninja=True,
darwin_deployment_version_osx="10.9")
# Setup shell
shell.dry_run = True
self._orig_stdout = sys.stdout
self._orig_stderr = sys.stderr
self.stdout = StringIO()
self.stderr = StringIO()
sys.stdout = self.stdout
sys.stderr = self.stderr
def tearDown(self):
shutil.rmtree(self.workspace.build_root)
shutil.rmtree(self.workspace.source_root)
sys.stdout = self._orig_stdout
sys.stderr = self._orig_stderr
shell.dry_run = False
self.workspace = None
self.toolchain = None
self.args = None
def test_ninja_bin_path(self):
ninja_build = Ninja.new_builder(
args=self.args,
toolchain=self.toolchain,
workspace=self.workspace,
host=self.host)
self.assertEqual(ninja_build.ninja_bin_path,
os.path.join(
self.workspace.build_dir('build', 'ninja'),
'ninja'))
def test_build(self):
ninja_build = Ninja.new_builder(
args=self.args,
toolchain=self.toolchain,
workspace=self.workspace,
host=self.host)
ninja_build.build()
expect_env = ""
if platform.system() == "Darwin":
expect_env = (
"env "
"'CFLAGS=-isysroot {sysroot}' "
"CXX={cxx} "
"'LDFLAGS=-isysroot {sysroot}' "
).format(
cxx=self.toolchain.cxx,
sysroot=xcrun.sdk_path('macosx')
)
elif self.toolchain.cxx:
expect_env = (
"env "
"CXX={cxx} "
).format(
cxx=self.toolchain.cxx,
)
self.assertEqual(self.stdout.getvalue(), """\
+ rm -rf {build_dir}
+ cp -r {source_dir} {build_dir}
+ pushd {build_dir}
+ {expect_env}{python} configure.py --bootstrap
+ popd
""".format(source_dir=self._platform_quote(
self.workspace.source_dir('ninja')),
build_dir=self._platform_quote(
self.workspace.build_dir('build', 'ninja')),
expect_env=expect_env,
python=self._platform_quote(sys.executable)))
def _platform_quote(self, path):
if platform.system() == 'Windows':
return "'{}'".format(path)
else:
return path
| rudkx/swift | utils/swift_build_support/tests/products/test_ninja.py | Python | apache-2.0 | 4,144 |